PostgreSQL Protocol Examples
Practical SQL examples and usage patterns for HeliosDB’s PostgreSQL compatibility.
Connection Examples
Basic Connection
# psql connection
psql -h localhost -p 5432 -U admin -d mydb

# Connection string
psql "postgresql://admin:password@localhost:5432/mydb"

# With SSL
psql "postgresql://admin@localhost/mydb?sslmode=require"

Python (psycopg2)
import psycopg2
conn = psycopg2.connect(
    host="localhost",
    port=5432,
    database="mydb",
    user="admin",
    password="password"
)

cursor = conn.cursor()
cursor.execute("SELECT * FROM users LIMIT 10")
rows = cursor.fetchall()
conn.close()

Node.js (node-postgres)
const { Pool } = require('pg');
const pool = new Pool({
    host: 'localhost',
    port: 5432,
    database: 'mydb',
    user: 'admin',
    password: 'password'
});

const result = await pool.query('SELECT * FROM users LIMIT 10');
console.log(result.rows);

Go (lib/pq)
import (
    "database/sql"
    _ "github.com/lib/pq"
)

db, err := sql.Open("postgres",
    "host=localhost port=5432 user=admin password=password dbname=mydb sslmode=disable")
if err != nil {
    log.Fatal(err)
}
defer db.Close()

rows, err := db.Query("SELECT * FROM users LIMIT 10")

DDL Examples
Create Tables
-- Basic table
CREATE TABLE users (
    id SERIAL PRIMARY KEY,
    email VARCHAR(255) UNIQUE NOT NULL,
    name VARCHAR(100),
    created_at TIMESTAMP DEFAULT NOW()
);

-- With constraints
CREATE TABLE orders (
    id BIGSERIAL PRIMARY KEY,
    user_id INTEGER NOT NULL REFERENCES users(id),
    total DECIMAL(10,2) NOT NULL CHECK (total >= 0),
    status VARCHAR(20) DEFAULT 'pending',
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);

-- Partitioned table
CREATE TABLE events (
    id BIGSERIAL,
    event_time TIMESTAMP NOT NULL,
    event_type VARCHAR(50),
    data JSONB
) PARTITION BY RANGE (event_time);

CREATE TABLE events_2025_01 PARTITION OF events
    FOR VALUES FROM ('2025-01-01') TO ('2025-02-01');

Create Indexes
-- B-tree index
CREATE INDEX idx_users_email ON users(email);

-- Unique index
CREATE UNIQUE INDEX idx_users_email_unique ON users(email);

-- Partial index
CREATE INDEX idx_active_users ON users(email) WHERE status = 'active';

-- Expression index
CREATE INDEX idx_users_lower_email ON users(LOWER(email));

-- GIN index for JSONB
CREATE INDEX idx_events_data ON events USING GIN(data);

-- Covering index
CREATE INDEX idx_orders_user_total ON orders(user_id) INCLUDE (total, status);

DML Examples
INSERT Operations
-- Simple insert
INSERT INTO users (email, name)
VALUES ('user@example.com', 'John Doe');

-- Multiple rows
INSERT INTO users (email, name) VALUES
    ('user1@example.com', 'User 1'),
    ('user2@example.com', 'User 2'),
    ('user3@example.com', 'User 3');

-- Insert with RETURNING
INSERT INTO users (email, name)
VALUES ('new@example.com', 'New User')
RETURNING id, created_at;

-- Upsert (INSERT ON CONFLICT)
INSERT INTO users (email, name)
VALUES ('user@example.com', 'Updated Name')
ON CONFLICT (email)
DO UPDATE SET name = EXCLUDED.name, updated_at = NOW();

UPDATE Operations
-- Simple update
UPDATE users SET name = 'Jane Doe' WHERE id = 1;

-- Update with FROM
UPDATE orders o
SET status = 'shipped'
FROM users u
WHERE o.user_id = u.id
  AND u.email = 'vip@example.com';

-- Update with RETURNING
UPDATE orders
SET status = 'completed', completed_at = NOW()
WHERE id = 100
RETURNING id, status, completed_at;

DELETE Operations
-- Simple delete
DELETE FROM users WHERE id = 1;

-- Delete with USING
DELETE FROM orders o
USING users u
WHERE o.user_id = u.id
  AND u.status = 'inactive';

-- Delete with RETURNING
DELETE FROM orders
WHERE created_at < NOW() - INTERVAL '1 year'
RETURNING id, total;

PostgreSQL 17 Features
MERGE with RETURNING
-- MERGE with $action pseudo-column
MERGE INTO inventory i
USING updates u ON i.product_id = u.product_id
WHEN MATCHED AND u.quantity = 0 THEN
    DELETE
WHEN MATCHED THEN
    UPDATE SET quantity = u.quantity, updated_at = NOW()
WHEN NOT MATCHED THEN
    INSERT (product_id, quantity, created_at)
    VALUES (u.product_id, u.quantity, NOW())
RETURNING i.product_id, i.quantity, $action AS operation;

-- Result:
--  product_id | quantity | operation
-- ------------+----------+-----------
--           1 |      100 | UPDATE
--           2 |     NULL | DELETE
--           3 |       50 | INSERT

COPY with ON_ERROR
-- Skip bad rows, continue loading
COPY users (id, email, name)
FROM '/data/users.csv'
WITH (
    FORMAT csv,
    HEADER true,
    ON_ERROR ignore
);

-- Result: Loaded 9,500 of 10,000 rows (500 errors skipped)

JSON_TABLE
-- Convert JSON array to rows
SELECT *
FROM JSON_TABLE(
    '[{"name":"Alice","age":30},{"name":"Bob","age":25}]',
    '$[*]' COLUMNS (
        row_num FOR ORDINALITY,
        name TEXT PATH '$.name',
        age INTEGER PATH '$.age'
    )
) AS jt;

-- Result:
--  row_num | name  | age
-- ---------+-------+-----
--        1 | Alice |  30
--        2 | Bob   |  25

Query Examples
Window Functions
-- Running total
SELECT date, amount,
       SUM(amount) OVER (ORDER BY date) AS running_total
FROM transactions
ORDER BY date;

-- Ranking
SELECT name, score,
       RANK() OVER (ORDER BY score DESC) AS rank,
       DENSE_RANK() OVER (ORDER BY score DESC) AS dense_rank
FROM players;

-- Partitioned window
SELECT department, employee, salary,
       AVG(salary) OVER (PARTITION BY department) AS dept_avg,
       salary - AVG(salary) OVER (PARTITION BY department) AS vs_dept_avg
FROM employees;

Common Table Expressions (CTEs)
-- Simple CTE
WITH active_users AS (
    SELECT * FROM users WHERE status = 'active'
)
SELECT * FROM active_users
WHERE created_at > NOW() - INTERVAL '30 days';

-- Recursive CTE (hierarchy)
WITH RECURSIVE org_chart AS (
    -- Base case: top-level managers
    SELECT id, name, manager_id, 1 AS level
    FROM employees
    WHERE manager_id IS NULL

    UNION ALL

    -- Recursive case: subordinates
    SELECT e.id, e.name, e.manager_id, oc.level + 1
    FROM employees e
    JOIN org_chart oc ON e.manager_id = oc.id
)
SELECT * FROM org_chart ORDER BY level, name;

JSONB Operations
-- Query JSONB
SELECT * FROM events
WHERE data->>'type' = 'purchase'
  AND (data->'amount')::numeric > 100;

-- JSONB path queries
SELECT * FROM events
WHERE data @? '$.items[*] ? (@.price > 50)';

-- JSONB aggregation
SELECT data->>'category' AS category,
       COUNT(*),
       SUM((data->>'amount')::numeric) AS total
FROM events
GROUP BY data->>'category';

-- Update JSONB
UPDATE events
SET data = jsonb_set(data, '{processed}', 'true')
WHERE id = 100;

Aggregations
-- GROUPING SETS
SELECT COALESCE(region, 'ALL') AS region,
       COALESCE(product, 'ALL') AS product,
       SUM(sales) AS total_sales
FROM sales_data
GROUP BY GROUPING SETS (
    (region, product),
    (region),
    (product),
    ()
);

-- FILTER clause
SELECT COUNT(*) AS total,
       COUNT(*) FILTER (WHERE status = 'active') AS active,
       COUNT(*) FILTER (WHERE status = 'inactive') AS inactive
FROM users;

Transaction Examples
-- Basic transaction
BEGIN;
UPDATE accounts SET balance = balance - 100 WHERE id = 1;
UPDATE accounts SET balance = balance + 100 WHERE id = 2;
COMMIT;

-- With savepoints
BEGIN;
INSERT INTO orders (user_id, total) VALUES (1, 100);
SAVEPOINT sp1;
INSERT INTO order_items (order_id, product_id) VALUES (1, 1);
-- Oops, rollback just the item
ROLLBACK TO sp1;
INSERT INTO order_items (order_id, product_id) VALUES (1, 2);
COMMIT;

-- Read-only transaction
BEGIN READ ONLY;
SELECT * FROM sensitive_data;
COMMIT;

Bulk Operations
COPY Import
-- Import from CSV
COPY users (email, name, created_at)
FROM '/data/users.csv'
WITH (FORMAT csv, HEADER true);

-- Import with custom delimiter
COPY orders FROM '/data/orders.tsv'
WITH (FORMAT csv, DELIMITER E'\t', HEADER true);

-- Import binary
COPY large_table FROM '/data/large.bin' WITH (FORMAT binary);

COPY Export

-- Export to CSV
COPY (SELECT * FROM users WHERE status = 'active')
TO '/tmp/active_users.csv'
WITH (FORMAT csv, HEADER true);

-- Export specific columns
COPY users (id, email)
TO '/tmp/user_emails.csv'
WITH (FORMAT csv);

Migration from PostgreSQL
pg_dump Import
# Dump from PostgreSQL
pg_dump -h pg-host -U pguser -d pgdb > backup.sql

# Import to HeliosDB
psql -h helios-host -U admin -d heliosdb < backup.sql

Schema Migration

# Export schema only (shell command, not SQL)
pg_dump -h pg-host -s -d pgdb > schema.sql

# Import schema
psql -h helios-host -d heliosdb < schema.sql

-- Verify tables (psql meta-command)
\dt

Related: README.md | CONFIGURATION.md | COMPATIBILITY.md
Last Updated: December 2025