Last updated
Twitter Snowflake ID Java Implementation
Complete Java implementation for working with Twitter Snowflake IDs. Includes production-ready code for decoding existing IDs and generating new ones with thread-safety guarantees.
Key Features
Thread-Safe
Synchronized methods prevent race conditions
Twitter Snowflake ID in Java — Examples
This page provides Java code for decoding Twitter Snowflake IDs. All examples use modern Java (8+) with the java.time API and are production-ready.
Basic Decoder
// Java — basic Twitter Snowflake ID decoder
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
public class TwitterSnowflake {
    // First millisecond counted by Twitter's Snowflake scheme:
    // 2010-11-04T01:42:54.657Z, expressed as a Unix epoch offset.
    private static final long TWITTER_EPOCH_MS = 1288834974657L;

    /**
     * Returns the UTC creation time encoded in the top 41 bits of {@code id}.
     * Uses {@code >>>} (unsigned shift) so the sign bit can never leak in.
     */
    public static ZonedDateTime decode(long id) {
        long epochMilli = TWITTER_EPOCH_MS + (id >>> 22);
        return Instant.ofEpochMilli(epochMilli).atZone(ZoneOffset.UTC);
    }

    /** Bits 17-21: datacenter number (0-31). */
    public static int getDatacenterId(long id) {
        return (int) ((id >>> 17) & 0x1F);
    }

    /** Bits 12-16: worker number (0-31). */
    public static int getWorkerId(long id) {
        return (int) ((id >>> 12) & 0x1F);
    }

    /** Bits 0-11: per-millisecond sequence counter (0-4095). */
    public static int getSequence(long id) {
        return (int) (id & 0xFFF);
    }

    /** Demo: decode a real tweet ID and print each component. */
    public static void main(String[] args) {
        long tweetId = 1529877576591609861L;
        ZonedDateTime created = decode(tweetId);
        System.out.println("Date (UTC): " + created.format(DateTimeFormatter.ISO_ZONED_DATE_TIME));
        System.out.println("Unix (s): " + created.toEpochSecond());
        System.out.println("Datacenter: " + getDatacenterId(tweetId));
        System.out.println("Worker: " + getWorkerId(tweetId));
        System.out.println("Sequence: " + getSequence(tweetId));
    }
}
Full Decoder Class with Record
// Java 16+ — using records for structured output
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
/**
 * Immutable view of one decoded Snowflake ID.
 *
 * @param id           the original 64-bit Snowflake ID
 * @param createdAt    creation time in UTC, derived from the timestamp bits
 * @param timestampMs  creation time as Unix epoch milliseconds
 * @param datacenterId 5-bit datacenter number (0-31)
 * @param workerId     5-bit worker number (0-31)
 * @param sequence     12-bit per-millisecond sequence counter (0-4095)
 */
public record SnowflakeComponents(
        long id, ZonedDateTime createdAt, long timestampMs,
        int datacenterId, int workerId, int sequence) {
}
public class TwitterSnowflake {
    // Twitter's Snowflake epoch: 2010-11-04T01:42:54.657Z as Unix millis.
    private static final long TWITTER_EPOCH_MS = 1288834974657L;

    /**
     * Splits a Snowflake ID into all of its component fields.
     * Uses {@code >>>} so the timestamp extraction is sign-safe.
     */
    public static SnowflakeComponents decode(long id) {
        long millis = TWITTER_EPOCH_MS + (id >>> 22);
        int datacenter = (int) ((id >>> 17) & 0x1F);
        int worker = (int) ((id >>> 12) & 0x1F);
        int seq = (int) (id & 0xFFF);
        ZonedDateTime when = Instant.ofEpochMilli(millis).atZone(ZoneOffset.UTC);
        return new SnowflakeComponents(id, when, millis, datacenter, worker, seq);
    }

    /**
     * Reverse mapping: the smallest possible Snowflake ID for {@code date}
     * (datacenter, worker, and sequence bits all zero). Useful for building
     * date-range queries over ID-ordered data.
     *
     * @throws IllegalArgumentException if {@code date} precedes the epoch,
     *         which would produce a negative (invalid) ID
     */
    public static long dateToMinId(ZonedDateTime date) {
        long offsetMs = date.toInstant().toEpochMilli() - TWITTER_EPOCH_MS;
        if (offsetMs < 0) {
            throw new IllegalArgumentException("Date is before Twitter's Snowflake epoch");
        }
        return offsetMs << 22;
    }
}
Important: Use >>> Not >>
// Java has two right-shift operators:
// >> (signed) — fills with sign bit (1 for negative numbers)
// >>> (unsigned) — always fills with 0
// Twitter IDs are positive longs, but using >> is still risky
// Always use >>> for Snowflake decoding
long id = 1529877576591609861L;
// CORRECT — unsigned right shift
long timestampMs = (id >>> 22) + 1288834974657L;
// POTENTIALLY WRONG — signed right shift
// (works for positive IDs, but >>> is the safe choice)
long timestampMsWrong = (id >> 22) + 1288834974657L;
Batch Processing with Streams
// Java — batch decode using streams
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.time.ZonedDateTime;
public class BatchDecoder {
    // Twitter's Snowflake epoch: 2010-11-04T01:42:54.657Z as Unix millis.
    private static final long TWITTER_EPOCH_MS = 1288834974657L;

    /**
     * Decodes each Snowflake ID in {@code ids} to its UTC creation time.
     *
     * <p>Fix: the original used {@code Collectors.toMap} without a merge
     * function, which throws {@code IllegalStateException} if the input list
     * contains the same ID twice. Duplicate IDs decode to the same instant,
     * so keeping the first mapping is always correct.
     *
     * @param ids tweet/Snowflake IDs; duplicates are tolerated
     * @return map from each distinct ID to its creation time in UTC
     */
    public static Map<Long, ZonedDateTime> batchDecode(List<Long> ids) {
        return ids.stream()
            .collect(Collectors.toMap(
                id -> id,
                id -> {
                    long tsMs = (id >>> 22) + TWITTER_EPOCH_MS;
                    return ZonedDateTime.ofInstant(
                        java.time.Instant.ofEpochMilli(tsMs),
                        java.time.ZoneOffset.UTC
                    );
                },
                (first, second) -> first // duplicates decode identically
            ));
    }

    /** Demo: decode a small batch and print each ID with its date. */
    public static void main(String[] args) {
        var ids = List.of(
            1529877576591609861L,
            1700000000000000000L,
            1800000000000000000L
        );
        var decoded = batchDecode(ids);
        decoded.forEach((id, date) ->
            System.out.printf("%d → %s%n", id, date)
        );
    }
}
Filtering by Date Range
// Java — filter a list of tweet IDs by date range
import java.time.ZonedDateTime;
import java.time.ZoneOffset;
import java.util.List;
import java.util.stream.Collectors;
public class DateRangeFilter {
    // Twitter's Snowflake epoch: 2010-11-04T01:42:54.657Z as Unix millis.
    private static final long TWITTER_EPOCH_MS = 1288834974657L;

    /** Returns the UTC creation time embedded in a Snowflake ID. */
    public static ZonedDateTime decodeDate(long id) {
        long epochMilli = TWITTER_EPOCH_MS + (id >>> 22);
        return ZonedDateTime.ofInstant(
            java.time.Instant.ofEpochMilli(epochMilli),
            ZoneOffset.UTC
        );
    }

    /**
     * Keeps only the IDs whose embedded timestamp falls inside
     * [{@code start}, {@code end}] (both endpoints inclusive), returned in
     * ascending ID order — which is also chronological order for Snowflakes.
     */
    public static List<Long> filterByDateRange(
            List<Long> ids,
            ZonedDateTime start,
            ZonedDateTime end
    ) {
        return ids.stream()
            .filter(id -> isWithin(decodeDate(id), start, end))
            .sorted()
            .collect(Collectors.toList());
    }

    // Inclusive on both endpoints: not-before start AND not-after end.
    private static boolean isWithin(ZonedDateTime date, ZonedDateTime start, ZonedDateTime end) {
        return !date.isBefore(start) && !date.isAfter(end);
    }
}
JUnit 5 Tests
// Java — JUnit 5 tests for the Snowflake decoder
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
import java.time.ZonedDateTime;
import java.time.ZoneOffset;
class TwitterSnowflakeTest {
    // Fix: the original mixed two incompatible APIs — one test assigned
    // TwitterSnowflake.decode(id) to a ZonedDateTime while the others needed
    // a SnowflakeComponents, and Java cannot overload on return type alone,
    // so the class could not compile against either decoder. All tests now
    // target the record-returning decoder consistently.

    @Test
    void decode_knownId_returnsCorrectYear() {
        long id = 1529877576591609861L;
        SnowflakeComponents c = TwitterSnowflake.decode(id);
        assertEquals(2022, c.createdAt().getYear());
    }

    @Test
    void decode_extractsValidComponents() {
        long id = 1529877576591609861L;
        SnowflakeComponents c = TwitterSnowflake.decode(id);
        // 5-bit fields → 0..31; 12-bit sequence → 0..4095.
        assertTrue(c.datacenterId() >= 0 && c.datacenterId() <= 31);
        assertTrue(c.workerId() >= 0 && c.workerId() <= 31);
        assertTrue(c.sequence() >= 0 && c.sequence() <= 4095);
    }

    @Test
    void dateToMinId_roundTrip_isConsistent() {
        ZonedDateTime date = ZonedDateTime.of(2023, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
        long minId = TwitterSnowflake.dateToMinId(date);
        SnowflakeComponents decoded = TwitterSnowflake.decode(minId);
        // dateToMinId zeroes the low 22 bits, so the millisecond round trip is exact.
        long diffMs = Math.abs(decoded.timestampMs() - date.toInstant().toEpochMilli());
        assertTrue(diffMs < 1);
    }
}
Parallel Processing for Large Datasets
// Java — parallel batch decode using parallel streams
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.time.ZonedDateTime;
/**
 * Decodes a large batch of Snowflake IDs in parallel.
 *
 * <p>Fix: the original built the result by side-effecting {@code forEach}
 * into a shared {@code ConcurrentHashMap} — the classic "forEach to build
 * results" anti-pattern. A collector expresses the same computation without
 * shared mutable state in the lambda, and the explicit merge function gives
 * duplicate IDs a defined outcome (they decode identically, keep the first).
 *
 * @param ids tweet/Snowflake IDs; duplicates are tolerated
 * @return concurrent map from each distinct ID to its UTC creation time
 */
public static Map<Long, ZonedDateTime> parallelBatchDecode(List<Long> ids) {
    return ids.parallelStream().collect(
        java.util.stream.Collectors.toConcurrentMap(
            id -> id,
            id -> ZonedDateTime.ofInstant(
                java.time.Instant.ofEpochMilli((id >>> 22) + 1288834974657L),
                java.time.ZoneOffset.UTC
            ),
            (first, second) -> first
        ));
}
These Java examples cover all common use cases for Twitter Snowflake ID decoding. The key points: use long for IDs (Java handles 64-bit integers natively), use >>> (unsigned right shift), and use the java.time API for date handling.
Production Ready
Error handling and validation included
High Performance
Efficient bit operations for speed
Easy to Use
Simple API with clear examples
Maven Dependencies
Best Practices
- Use the long primitive type for Snowflake IDs (not int)
- Always use synchronized methods for ID generation
- Handle clock backwards scenarios appropriately
- Use unique worker and process IDs in distributed systems
- Consider using dependency injection for generator instances
- Add logging for debugging in production environments
Common Use Cases
- Decoding Twitter API responses
- Building Twitter integration services
- Implementing custom Snowflake ID systems
- Analyzing Twitter data chronologically
- Creating distributed ID generators
Frequently Asked Questions
Use bit shifting to extract components: timestamp = (id >>> 22) + TWITTER_EPOCH, datacenterId = (id >>> 17) & 0x1F, workerId = (id >>> 12) & 0x1F, sequence = id & 0xFFF. Twitter's epoch is 1288834974657L milliseconds. Use the unsigned shift (>>>), and convert the timestamp to a date with Instant.ofEpochMilli(timestamp) from the java.time API.
Use Java's long primitive type for Twitter Snowflake IDs. Snowflake IDs are 64-bit integers that fit perfectly in Java's long (64-bit signed integer). Avoid using int (32-bit) as it's too small.
Generate Snowflake IDs by combining: (timestamp - TWITTER_EPOCH) shifted left 22 bits, datacenterId shifted left 17 bits, workerId shifted left 12 bits, then OR with sequence. Ensure thread-safety with synchronized methods and increment sequence for IDs generated in the same millisecond.
You must implement thread-safety manually. Use synchronized methods or java.util.concurrent locks to prevent race conditions when generating IDs. Track the last timestamp and sequence number to ensure uniqueness across threads.