This page provides an overview of the available data import methods in DuckDB.
DuckDB provides multiple convenient methods for importing data from various file formats. Data import is designed to be simple and intuitive, often requiring just a single SQL statement.
-- Insert from another table
INSERT INTO test2
SELECT * FROM test WHERE a > 100;

-- Insert from CSV file
INSERT INTO users
SELECT * FROM 'users.csv';

-- Insert with transformation
INSERT INTO users
SELECT
    id,
    UPPER(name) AS name,
    email
FROM 'users.csv'
WHERE email IS NOT NULL;
-- Single file
SELECT * FROM read_parquet('data.parquet');

-- Multiple files
SELECT * FROM read_parquet(['file1.parquet', 'file2.parquet']);

-- Glob pattern
SELECT * FROM read_parquet('data/**/*.parquet');
-- Ignore rows with errors
SELECT * FROM read_csv('data.csv', ignore_errors = TRUE);

-- Pad missing columns with NULL
SELECT * FROM read_csv('data.csv', null_padding = TRUE);
-- Target table with explicit schema; types are pushed down to the CSV reader
CREATE TABLE users_age (
    id INTEGER NOT NULL,
    name VARCHAR(10) NOT NULL,
    email VARCHAR,
    age INTEGER
);

-- Load every CSV under data/, matching columns by name and
-- skipping rows that fail to parse
INSERT INTO users_age
SELECT *
FROM read_csv(
    'data/*.csv',
    union_by_name = TRUE,
    ignore_errors = TRUE
);
DuckDB automatically detects types from files, but you can also specify them explicitly:
-- Automatic type detection
SELECT * FROM 'data.csv';

-- Explicit type specification
SELECT *
FROM read_csv('data.csv', columns = {
    'id': 'INTEGER',
    'created_at': 'TIMESTAMP',
    'amount': 'DECIMAL(10,2)'
});

-- Type pushdown from target table
CREATE TABLE timestamps(ts TIMESTAMP, dt DATE);
INSERT INTO timestamps
SELECT ts, ts FROM read_csv('timestamp.csv');
When inserting into a table, DuckDB can push down type information to the CSV reader, improving performance and error handling.