# Cassandra CQL Examples
Practical CQL examples and usage patterns for HeliosDB’s Cassandra compatibility.
Connection Examples
Python (cassandra-driver)
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider

# Basic connection
cluster = Cluster(['localhost'])
session = cluster.connect()

# With authentication
auth = PlainTextAuthProvider(username='admin', password='password')
cluster = Cluster(['localhost'], auth_provider=auth)
session = cluster.connect('my_keyspace')

Java (DataStax Driver)
import com.datastax.oss.driver.api.core.CqlSession;
CqlSession session = CqlSession.builder()
    .addContactPoint(new InetSocketAddress("localhost", 9042))
    .withLocalDatacenter("datacenter1")
    .withKeyspace("my_keyspace")
    .build();

Node.js
const cassandra = require('cassandra-driver');
const client = new cassandra.Client({
  contactPoints: ['localhost'],
  localDataCenter: 'datacenter1',
  keyspace: 'my_keyspace'
});

await client.connect();

cqlsh
# Connect
cqlsh localhost 9042

# Connect with credentials
cqlsh localhost 9042 -u admin -p password

# Execute file
cqlsh -f schema.cql

Keyspace Management
Create Keyspace
-- Simple replication
CREATE KEYSPACE my_keyspace WITH REPLICATION = {
  'class': 'SimpleStrategy',
  'replication_factor': 3
};

-- Network topology
CREATE KEYSPACE production WITH REPLICATION = {
  'class': 'NetworkTopologyStrategy',
  'dc1': 3,
  'dc2': 2
};

-- With durable writes
CREATE KEYSPACE my_keyspace WITH REPLICATION = {
  'class': 'SimpleStrategy',
  'replication_factor': 1
} AND DURABLE_WRITES = true;

Modify Keyspace

-- Alter replication
ALTER KEYSPACE my_keyspace WITH REPLICATION = {
  'class': 'SimpleStrategy',
  'replication_factor': 5
};

-- Drop keyspace
DROP KEYSPACE IF EXISTS my_keyspace;

Table Management
Create Tables
-- Basic table
CREATE TABLE users (
  user_id UUID PRIMARY KEY,
  username TEXT,
  email TEXT,
  created_at TIMESTAMP
);

-- With clustering columns
CREATE TABLE user_events (
  user_id UUID,
  event_time TIMESTAMP,
  event_type TEXT,
  event_data MAP<TEXT, TEXT>,
  PRIMARY KEY ((user_id), event_time)
) WITH CLUSTERING ORDER BY (event_time DESC);

-- Compound partition key
CREATE TABLE sensor_data (
  sensor_id UUID,
  day DATE,
  reading_time TIMESTAMP,
  value DOUBLE,
  PRIMARY KEY ((sensor_id, day), reading_time)
);

-- With TTL
CREATE TABLE session_data (
  session_id UUID PRIMARY KEY,
  user_id UUID,
  data BLOB
) WITH default_time_to_live = 3600; -- 1 hour

-- With collections
CREATE TABLE user_profiles (
  user_id UUID PRIMARY KEY,
  name TEXT,
  tags SET<TEXT>,
  preferences MAP<TEXT, TEXT>,
  addresses LIST<FROZEN<address_type>>
);

User-Defined Types

-- Create type
CREATE TYPE address_type (
  street TEXT,
  city TEXT,
  zip TEXT,
  country TEXT
);

-- Use in table
CREATE TABLE contacts (
  id UUID PRIMARY KEY,
  name TEXT,
  home_address FROZEN<address_type>,
  work_address FROZEN<address_type>
);

Indexes

-- Secondary index
CREATE INDEX ON users (email);

-- Custom index name
CREATE INDEX users_by_username ON users (username);

-- Drop index
DROP INDEX users_by_username;

CRUD Operations
INSERT
-- Basic insert
INSERT INTO users (user_id, username, email, created_at)
VALUES (uuid(), 'alice', 'alice@example.com', toTimestamp(now()));

-- With TTL
INSERT INTO users (user_id, username, email)
VALUES (uuid(), 'bob', 'bob@example.com')
USING TTL 86400;

-- With timestamp
INSERT INTO users (user_id, username, email)
VALUES (uuid(), 'charlie', 'charlie@example.com')
USING TIMESTAMP 1640000000000000;

-- If not exists (LWT)
INSERT INTO users (user_id, username, email)
VALUES (123e4567-e89b-12d3-a456-426614174000, 'unique', 'unique@example.com')
IF NOT EXISTS;

-- Insert JSON
INSERT INTO users JSON '{"user_id": "550e8400-e29b-41d4-a716-446655440000", "username": "json_user", "email": "json@example.com"}';

SELECT
-- Basic select
SELECT * FROM users WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- Select specific columns
SELECT username, email FROM users WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- With ordering (clustering)
SELECT * FROM user_events
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000
ORDER BY event_time DESC
LIMIT 10;

-- Time range
SELECT * FROM user_events
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000
AND event_time >= '2024-01-01'
AND event_time < '2024-02-01';

-- IN clause
SELECT * FROM users
WHERE user_id IN (
  550e8400-e29b-41d4-a716-446655440000,
  550e8400-e29b-41d4-a716-446655440001
);

-- Token range scan
SELECT * FROM users
WHERE token(user_id) > token(550e8400-e29b-41d4-a716-446655440000)
LIMIT 100;

-- Allow filtering
SELECT * FROM users WHERE email = 'alice@example.com' ALLOW FILTERING;

-- As JSON
SELECT JSON * FROM users WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- Functions
SELECT user_id, TTL(username), WRITETIME(username) FROM users;

UPDATE
-- Basic update
UPDATE users SET email = 'new@example.com'
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- Multiple columns
UPDATE users
SET email = 'new@example.com', username = 'new_name'
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- With TTL
UPDATE users USING TTL 86400
SET email = 'temp@example.com'
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- Conditional update (LWT)
UPDATE users
SET email = 'verified@example.com'
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000
IF email = 'unverified@example.com';

-- Update collections
UPDATE user_profiles
SET tags = tags + {'premium'}
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

UPDATE user_profiles
SET preferences['theme'] = 'dark'
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

DELETE
-- Delete row
DELETE FROM users WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- Delete column
DELETE email FROM users
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- Conditional delete (LWT)
DELETE FROM users
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000
IF EXISTS;

-- Delete with condition
DELETE FROM users
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000
IF email = 'delete@example.com';

-- Delete from collection
UPDATE user_profiles
SET tags = tags - {'old_tag'}
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

Batch Operations
-- Logged batch (atomic)
BEGIN BATCH
  INSERT INTO users (user_id, username, email)
  VALUES (uuid(), 'user1', 'user1@example.com');
  INSERT INTO user_events (user_id, event_time, event_type)
  VALUES (550e8400-e29b-41d4-a716-446655440000, toTimestamp(now()), 'signup');
APPLY BATCH;

-- Unlogged batch
BEGIN UNLOGGED BATCH
  UPDATE users SET email = 'new1@example.com'
  WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;
  UPDATE users SET email = 'new2@example.com'
  WHERE user_id = 550e8400-e29b-41d4-a716-446655440001;
APPLY BATCH;

-- Counter batch
BEGIN COUNTER BATCH
  UPDATE page_views SET count = count + 1 WHERE page_id = 'home';
  UPDATE page_views SET count = count + 1 WHERE page_id = 'about';
APPLY BATCH;

Prepared Statements
Python
# Prepare statement
prepared = session.prepare(
    "INSERT INTO users (user_id, username, email) VALUES (?, ?, ?)"
)

# Execute with parameters
session.execute(prepared, [uuid.uuid4(), 'alice', 'alice@example.com'])

# Batch with prepared
from cassandra.query import BatchStatement, BatchType

batch = BatchStatement(batch_type=BatchType.LOGGED)
for user in users:
    batch.add(prepared, [user.id, user.name, user.email])
session.execute(batch)

Java

PreparedStatement prepared = session.prepare(
    "INSERT INTO users (user_id, username, email) VALUES (?, ?, ?)");

BoundStatement bound = prepared.bind(
    UUID.randomUUID(), "alice", "alice@example.com");
session.execute(bound);

Aggregations
-- Count
SELECT COUNT(*) FROM users;

-- Sum and average
SELECT SUM(amount), AVG(amount) FROM transactions
WHERE account_id = 550e8400-e29b-41d4-a716-446655440000;

-- Min and max
SELECT MIN(created_at), MAX(created_at) FROM events
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

Time Series Examples
Create Time Series Table
CREATE TABLE sensor_readings (
  sensor_id UUID,
  bucket_date DATE,
  reading_time TIMESTAMP,
  temperature DOUBLE,
  humidity DOUBLE,
  PRIMARY KEY ((sensor_id, bucket_date), reading_time)
) WITH CLUSTERING ORDER BY (reading_time DESC);

Insert Readings

INSERT INTO sensor_readings (sensor_id, bucket_date, reading_time, temperature, humidity)
VALUES (
  550e8400-e29b-41d4-a716-446655440000,
  toDate(now()),
  toTimestamp(now()),
  23.5,
  65.2
);

Query Patterns

-- Latest readings
SELECT * FROM sensor_readings
WHERE sensor_id = 550e8400-e29b-41d4-a716-446655440000
AND bucket_date = '2024-12-01'
LIMIT 100;

-- Time range
SELECT * FROM sensor_readings
WHERE sensor_id = 550e8400-e29b-41d4-a716-446655440000
AND bucket_date = '2024-12-01'
AND reading_time >= '2024-12-01 00:00:00'
AND reading_time < '2024-12-01 12:00:00';

-- Aggregations
SELECT AVG(temperature), MAX(temperature), MIN(temperature)
FROM sensor_readings
WHERE sensor_id = 550e8400-e29b-41d4-a716-446655440000
AND bucket_date = '2024-12-01';

Materialized Views
-- Create view
CREATE MATERIALIZED VIEW users_by_email AS
  SELECT * FROM users
  WHERE email IS NOT NULL AND user_id IS NOT NULL
  PRIMARY KEY (email, user_id);

-- Query view
SELECT * FROM users_by_email WHERE email = 'alice@example.com';

Migration from Cassandra
Schema Export
# Export schema from Cassandra
cqlsh -e "DESCRIBE KEYSPACE my_keyspace" > schema.cql

# Import to HeliosDB
cqlsh heliosdb-host -f schema.cql

Data Migration

from cassandra.cluster import Cluster

# Source (Cassandra)
source = Cluster(['cassandra-host']).connect('my_keyspace')

# Target (HeliosDB)
target = Cluster(['heliosdb-host']).connect('my_keyspace')

# Migrate data
rows = source.execute("SELECT * FROM users")
prepared = target.prepare(
    "INSERT INTO users (user_id, username, email, created_at) VALUES (?, ?, ?, ?)"
)

for row in rows:
    target.execute(prepared, [row.user_id, row.username, row.email, row.created_at])

Related: README.md | CONFIGURATION.md | COMPATIBILITY.md
Last Updated: December 2025