Blog

UT Austin Class Schedule

| Semester  | Course Name                      | Category    |
| --------- | -------------------------------- | ----------- |
| 2025 Fall | Case Studies in Machine Learning | Elective    |
| 2025 Fall | Deep Learning                    | Application |
| 2026 Fall | Parallel Systems                 | Systems     |

Using `serde_json` or `serde` Data in `datafusion`

Getting data into `datafusion` is not well documented, especially when your data starts out as `serde_json` or `serde` values.

This example shows how to convert a `serde_json::Value::Array` into a `datafusion` `DataFrame`, manipulate it with the DataFrame API, and then convert the result back to `serde_json`.

# Cargo.toml
[dependencies]
datafusion = "47.0.0"
serde_arrow = { version = "0.13.3", features = ["arrow-55"] }
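The snippets below also use `serde`, `serde_json`, `anyhow`, `async-trait`, and `tokio`, so a fuller dependency block might look like this (the version numbers here are assumptions; pin whatever is current):

serde = { version = "1", features = ["derive"] }
serde_json = "1"
anyhow = "1"
async-trait = "0.1"
tokio = { version = "1", features = ["macros", "rt-multi-thread"] }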
// `serde_json::Value`
let json = serde_json::json!([{
    "date": "2025-06-05",
    "test": "test",
    "price": 1.01,
}]);

let ctx = SessionContext::new();

let serde_json::Value::Array(json_array) = &json else {
    return Err(anyhow::anyhow!("Expected JSON array, got different type"));
};

if json_array.is_empty() {
    return Ok(Vec::new());
}

// Configure `TracingOptions` to allow null fields and coerce numbers
let tracing_options = TracingOptions::default()
    .allow_null_fields(true)
    .coerce_numbers(true);

// Infer the schema from the sample data, using `TracingOptions`
let fields = Vec::<FieldRef>::from_samples(json_array, tracing_options)?;

// Convert `serde_json::Value::Array` to `RecordBatch` using `serde_arrow`
let record_batch = serde_arrow::to_record_batch(&fields, &json_array)?;

// Create a DataFrame from the `RecordBatch`
let mut df = ctx.read_batch(record_batch)?;

// Add a new column `new_col` using DataFrame API
df = df.with_column("new_col", lit("test".to_string()))?;

// Execute the DataFrame query
let result_batches = df.collect().await?;

// Convert back to `serde_json` using `serde_arrow`
let all_json_values = result_batches
    .into_iter()
    .flat_map(|batch| {
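        // NOTE: batches that fail to deserialize are silently dropped here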
        serde_arrow::from_record_batch(&batch).unwrap_or_else(|_| Vec::new())
    })
    .collect::<Vec<serde_json::Value>>();

#[derive(Default, Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct TestData {
    date: String,
    test: String,
    price: f64,
    new_col: String,
}

// Convert the `serde_json::Value` to Vec<TestData>
let test_data: Vec<TestData> =
    serde_json::from_value(serde_json::Value::Array(all_json_values))?;

assert_eq!(
    test_data,
    vec![
        TestData {
            date: "2025-06-05".to_string(),
            test: "test".to_string(),
            price: 1.01,
            new_col: "test".to_string(),
        },
    ]
);
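Because the snippet uses `?` and `.await`, it has to live inside an async function that returns a `Result`. A minimal wrapper sketch, assuming a `tokio` runtime (the function name `json_roundtrip` is hypothetical), could look like:

use datafusion::{arrow::datatypes::FieldRef, prelude::*};
use serde::{Deserialize, Serialize};
use serde_arrow::schema::{SchemaLike, TracingOptions};

// Hypothetical wrapper; the early `return Ok(Vec::new())` above assumes
// the function returns a Vec on success.
async fn json_roundtrip() -> anyhow::Result<Vec<TestData>> {
    // ... body of the example above goes here ...
    Ok(test_data)
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let rows = json_roundtrip().await?;
    println!("{rows:?}");
    Ok(())
}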

Or you can use this `datafusion_ext` helper:

// src/utils/datafusion_ext.rs
use anyhow::Error;
use datafusion::{arrow::datatypes::FieldRef, dataframe::DataFrame, prelude::*};
use serde_arrow::schema::{SchemaLike, TracingOptions};

pub trait JsonValueExt {
    /// Converts a `serde_json::Value::Array` into a `datafusion::dataframe`
    fn to_df(&self) -> Result<DataFrame, Error>;
}

impl JsonValueExt for serde_json::Value {
    fn to_df(&self) -> Result<DataFrame, Error> {
        let ctx = SessionContext::new();

        let Self::Array(json_array) = self else {
            return Err(anyhow::anyhow!(
                "Expected `serde_json::Value::Array`, got different type"
            ));
        };

        if json_array.is_empty() {
            return Err(anyhow::anyhow!("Empty `serde_json::Value::Array` provided"));
        }

        let tracing_options = TracingOptions::default()
            .allow_null_fields(true)
            .coerce_numbers(true);

        let fields = Vec::<FieldRef>::from_samples(json_array, tracing_options)?;
        let record_batch = serde_arrow::to_record_batch(&fields, &json_array)?;

        let df = ctx.read_batch(record_batch)?;

        Ok(df)
    }
}

#[async_trait::async_trait]
pub trait DataFrameExt {
    /// Collects a `datafusion::dataframe` and deserializes it to a Vec of the
    /// specified type
    async fn to_vec<T>(&self) -> Result<Vec<T>, Error>
    where
        T: serde::de::DeserializeOwned;
}

#[async_trait::async_trait]
impl DataFrameExt for DataFrame {
    async fn to_vec<T>(&self) -> Result<Vec<T>, Error>
    where
        T: serde::de::DeserializeOwned,
    {
        let result_batches = self.clone().collect().await?;

        let all_json_values = result_batches
            .into_iter()
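            // NOTE: batches that fail to deserialize are silently dropped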
            .flat_map(|batch| serde_arrow::from_record_batch(&batch).unwrap_or_else(|_| Vec::new()))
            .collect::<Vec<serde_json::Value>>();

        let typed_result: Vec<T> =
            serde_json::from_value(serde_json::Value::Array(all_json_values))?;

        Ok(typed_result)
    }
}
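With those traits in scope, the round trip collapses to a few lines: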
use crate::utils::datafusion_ext::{DataFrameExt, JsonValueExt};

let json = serde_json::json!([{
    "date": "2025-06-05",
    "test": "test",
    "price": 1.01,
}]);

let mut df = json.to_df()?;

df = df.with_column("new_col", lit("test".to_string()))?;

#[derive(Default, Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct TestData {
    date: String,
    test: String,
    price: f64,
    new_col: String,
}

let test_data = df.to_vec::<TestData>().await?;

assert_eq!(
    test_data,
    vec![
        TestData {
            date: "2025-06-05".to_string(),
            test: "test".to_string(),
            price: 1.01,
            new_col: "test".to_string(),
        },
    ]
);

Deploying `Attic` Nix Binary Cache With Docker Compose, for Local Use and CI

Server Install

Install Docker and Docker Compose.
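On a Debian or Ubuntu host, Docker's convenience script is the quickest route (adjust for your distro; recent versions include the compose plugin):

curl -fsSL https://get.docker.com | sh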

Example docker-compose.yaml

services:
  attic:
    container_name: attic
    image: ghcr.io/zhaofengli/attic:latest
    command: ["-f", "/attic/server.toml"]
    restart: unless-stopped
    ports:
      - 8080:8080
    networks:
      attic:
      db:
    volumes:
      - ./server.toml:/attic/server.toml
      - attic-data:/attic/storage
    env_file:
      - prod.env
    depends_on:
      db:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "wget --no-verbose --tries=1 --spider http://attic:8080 || exit 1",
        ]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 60s

  db:
    container_name: db
    image: postgres:17.2-alpine
    restart: unless-stopped
    ports:
      - 5432:5432
    networks:
      db:
    volumes:
      - postgres-data:/var/lib/postgresql/data
    env_file:
      - prod.env
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"]
      interval: 10s
      timeout: 5s
      retries: 5

volumes:
  attic-data:
  postgres-data:

networks:
  attic:
  db:

Example server.toml

listen = "[::]:8080"

[database]
url = "postgres://attic:attic@db:5432/attic_prod"

[storage]
type = "local"
path = "/attic/storage"

[chunking]
nar-size-threshold = 65536
min-size = 16384
avg-size = 65536
max-size = 262144

[compression]
type = "zstd"

[garbage-collection]
interval = "12 hours"

Example prod.env

POSTGRES_DB=attic_prod
POSTGRES_USER=attic
POSTGRES_PASSWORD=attic
DATABASE_URL=postgres://attic:attic@localhost:5432/attic_prod
ATTIC_SERVER_TOKEN_HS256_SECRET_BASE64="<openssl rand 64 | base64 -w0>"
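Generate the HS256 secret referenced above with:

openssl rand 64 | base64 -w0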

Example Traefik Labels

Setting `readTimeout=0s` on the entrypoint disables Traefik's request read timeout, so long-running `attic push` uploads are not cut off.

traefik:
  # ...
  command:
    # ...
    - "--entrypoints.websecure.transport.respondingTimeouts.readTimeout=0s"

attic:
  # ...
  labels:
    - "traefik.enable=true"
    - "traefik.http.routers.attic.rule=Host(`nix.example.com`)"
    - "traefik.http.routers.attic.entrypoints=websecure"
    - "traefik.http.routers.attic.tls.certresolver=myhttpchallenge"
    - "traefik.http.services.attic.loadbalancer.server.port=8080"
    - "traefik.http.routers.attic-http.middlewares=redirect-to-https"
    - "traefik.docker.network=<network name>"

Cloudflare

If you are using Cloudflare, set the subdomain to DNS only (disable proxying); the proxy's upload limits and timeouts will otherwise break pushing large store paths.

Create the Token

docker compose up

docker exec -it attic sh -c 'atticadm make-token --sub "<your username here>" --validity "10y" --pull "*" --push "*" --create-cache "*" --configure-cache "*" --configure-cache-retention "*" --destroy-cache "*" --delete "*" -f "/attic/server.toml"'

Check if it works

If it is working, nix.example.com should respond with `attic push`.
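For example (assuming DNS has propagated and certificates have been issued):

curl https://nix.example.com
# attic push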

Client Install

Install `pkgs.attic-client`.

Make sure your user is trusted:

nix.settings = {
  trusted-users = [
    "root"
    "<your username here>"
  ];
};
# then login to attic
attic login <pick a name for server> https://nix.example.com <token from the make-token step above>

# create a cache to push to
attic cache create <cache name>

# use the cache
attic use <cache name>

# pushing to the cache
attic push <cache name> /nix/store/*/

GitHub Actions Install

Add the token from the make-token step above as a repository secret named ATTIC_TOKEN: https://github.com/<username>/<repo>/settings/secrets/actions
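If you use the GitHub CLI, the same secret can be set from the shell (assumes `gh` is authenticated for the repository):

gh secret set ATTIC_TOKEN --repo <username>/<repo>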

steps:
  - uses: actions/checkout@v3
  - uses: nixbuild/nix-quick-install-action@v32
    with:
      nix_conf: |
        keep-env-derivations = true
        keep-outputs = true

  # For caching the attic package in GitHub Actions storage
  - name: Restore Nix store cache
    id: cache-nix-restore
    uses: nix-community/cache-nix-action/restore@v6
    with:
      primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
      restore-prefixes-first-match: nix-${{ runner.os }}-

  - run: nix run -I nixpkgs=channel:nixos-unstable nixpkgs#attic-client login <pick a name for server> https://nix.example.com ${{ secrets.ATTIC_TOKEN }} || true
  - run: nix run -I nixpkgs=channel:nixos-unstable nixpkgs#attic-client cache create <cache name> || true
  - run: nix run -I nixpkgs=channel:nixos-unstable nixpkgs#attic-client use <cache name> || true

  # For caching the attic package in GitHub Actions storage
  - run: nix build -I nixpkgs=channel:nixos-unstable nixpkgs#nix-fast-build
  - name: Save Nix store cache
    id: cache-nix-save
    uses: nix-community/cache-nix-action/save@v6
    with:
      primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
      gc-max-store-size-linux: 2G
      purge: true
      purge-prefixes: nix-${{ runner.os }}-
      purge-created: 0
      purge-last-accessed: 0
      purge-primary-key: never

  # `nix-fast-build` is faster than `nix flake check` in my testing
  # - run: nix flake check --all-systems
  # `--attic-cache` will fail if the cache is down
  # - run: nix run -I nixpkgs=channel:nixos-unstable nixpkgs#nix-fast-build -- --attic-cache <cache name> --no-nom --skip-cached
  - run: nix run -I nixpkgs=channel:nixos-unstable nixpkgs#nix-fast-build -- --no-nom --skip-cached

  # Retry the push up to 10 times in case the cache is briefly unreachable
  - run: |
      for i in {1..10}; do
        nix run -I nixpkgs=channel:nixos-unstable nixpkgs#attic-client push <cache name> /nix/store/*/ && break || [ $i -eq 10 ] || sleep 5
      done