Last updated
Twitter Snowflake to Timestamp — Examples
Extract Unix timestamps and human-readable dates from Twitter Snowflake IDs. Every Twitter ID encodes the exact creation time in its structure. Here are complete examples.
Core Conversion
// JavaScript — Snowflake to Unix timestamp
const TWITTER_EPOCH = 1288834974657n;

/**
 * Decode the creation time embedded in a Twitter Snowflake ID.
 * The top 41 bits (after a 22-bit right shift) hold milliseconds
 * since Twitter's custom epoch.
 * @param {string} idStr - Tweet ID as a decimal string.
 * @returns {{unixMs: number, unixSeconds: number, iso8601: string, utc: string}}
 */
function snowflakeToTimestamp(idStr) {
  const milliseconds = Number((BigInt(idStr) >> 22n) + TWITTER_EPOCH);
  const when = new Date(milliseconds);
  return {
    unixMs: milliseconds,
    unixSeconds: Math.floor(milliseconds / 1000),
    iso8601: when.toISOString(),
    utc: when.toUTCString()
  };
}
console.log(snowflakeToTimestamp("1529877576591609861"));
# Python — Snowflake to Unix timestamp
from datetime import datetime, timezone

TWITTER_EPOCH_MS = 1288834974657

def snowflake_to_timestamp(id_str):
    """Decode the creation time embedded in a Twitter Snowflake ID."""
    millis = (int(id_str) >> 22) + TWITTER_EPOCH_MS
    moment = datetime.fromtimestamp(millis / 1000, tz=timezone.utc)
    return {
        "unix_ms": millis,
        "unix_s": millis // 1000,
        "iso8601": moment.isoformat(),
        "utc": moment.strftime("%Y-%m-%d %H:%M:%S UTC")
    }

print(snowflake_to_timestamp("1529877576591609861"))
Millisecond Precision
// Snowflake timestamps are precise to the millisecond
// More precise than Twitter's API created_at (second precision)

/**
 * Expose the sub-second component encoded in a Snowflake ID.
 * @param {string} idStr - Tweet ID as a decimal string.
 * @returns {{fullTimestamp: string, millisecond: number, note: string}}
 */
function getMillisecondDetail(idStr) {
  const TWITTER_EPOCH = 1288834974657n;
  const when = new Date(Number((BigInt(idStr) >> 22n) + TWITTER_EPOCH));
  const ms = when.getMilliseconds();
  return {
    fullTimestamp: when.toISOString(),
    millisecond: ms,
    note: `Precise to ${ms}ms within the second`
  };
}
console.log(getMillisecondDetail("1529877576591609861"));
Validating Dataset Consistency
# Python — verify that tweet IDs are in chronological order
# IDs should increase monotonically with time
TWITTER_EPOCH_MS = 1288834974657

def validate_chronological_order(tweet_ids):
    """
    Check that tweet IDs are in chronological order.
    If not, there may be a data quality issue.
    """
    decoded = [(tid, (int(tid) >> 22) + TWITTER_EPOCH_MS) for tid in tweet_ids]
    problems = []
    # Pair each entry with its predecessor; position matches list index.
    for pos, (current, previous) in enumerate(zip(decoded[1:], decoded), start=1):
        if current[1] < previous[1]:
            problems.append({
                "position": pos,
                "id": current[0],
                "previous_id": previous[0],
                "note": "ID is older than the previous ID"
            })
    return {
        "total": len(tweet_ids),
        "issues": len(problems),
        "details": problems
    }

ids = ["1529877576591609861", "1700000000000000000", "1800000000000000000"]
result = validate_chronological_order(ids)
print(f"Issues found: {result['issues']}")
Polling Optimization with IDs
# Python — use last-seen ID for efficient polling
# More reliable than timestamp-based polling
import requests
from datetime import datetime, timezone
class TwitterPoller:
    """Polls the recent-search endpoint using the last-seen tweet ID as a cursor."""

    def __init__(self, bearer_token):
        self.bearer_token = bearer_token  # OAuth2 app-only bearer token
        self.last_seen_id = None          # highest tweet ID returned so far

    def poll(self, query):
        """Fetch tweets for `query` that are newer than the stored cursor."""
        params = {
            "query": query,
            "max_results": 100,
            "tweet.fields": "created_at,author_id"
        }
        # Use last_seen_id instead of a timestamp: this avoids issues
        # with duplicate timestamps and clock skew.
        if self.last_seen_id:
            params["since_id"] = self.last_seen_id
        response = requests.get(
            "https://api.twitter.com/2/tweets/search/recent",
            headers={"Authorization": f"Bearer {self.bearer_token}"},
            params=params
        )
        tweets = response.json().get("data", [])
        if tweets:
            # Advance the cursor to the numerically largest ID seen.
            self.last_seen_id = max(tweets, key=lambda t: int(t["id"]))["id"]
        return tweets
Viral Content Analysis
// Analyze response timing for viral content
// Millisecond precision enables fine-grained analysis

/**
 * Compute per-reply delays (ms and seconds) relative to an original
 * tweet, sorted fastest-first.
 * @param {string} originalId - Snowflake ID of the original tweet.
 * @param {string[]} responseIds - Snowflake IDs of the replies.
 */
function analyzeResponseTiming(originalId, responseIds) {
  const TWITTER_EPOCH = 1288834974657n;
  const toMs = (snowflake) => Number((BigInt(snowflake) >> 22n) + TWITTER_EPOCH);
  const baseline = toMs(originalId);
  const responses = responseIds
    .map((id) => {
      const gap = toMs(id) - baseline;
      return { id, delayMs: gap, delaySeconds: gap / 1000 };
    })
    .sort((a, b) => a.delayMs - b.delayMs);
  return {
    originalId,
    originalDate: new Date(baseline).toISOString(),
    firstResponseDelayMs: responses[0]?.delayMs,
    responses
  };
}
const original = "1529877576591609861";
const replies = ["1529877576591609900", "1529877576591609950"];
console.log(analyzeResponseTiming(original, replies));
Machine ID Analysis
# Python — analyze machine ID distribution in a dataset
# Reveals Twitter's load balancing patterns

def analyze_machine_ids(tweet_ids):
    """
    Analyze the distribution of datacenter and worker IDs.
    High-traffic events show shifts in machine ID distribution.
    """
    from collections import Counter
    # Snowflake layout: bits 21-17 hold the datacenter ID,
    # bits 16-12 hold the worker ID (5 bits each).
    dc_tally = Counter((int(tid) >> 17) & 0x1F for tid in tweet_ids)
    worker_tally = Counter((int(tid) >> 12) & 0x1F for tid in tweet_ids)
    return {
        "total_ids": len(tweet_ids),
        "unique_datacenters": len(dc_tally),
        "unique_workers": len(worker_tally),
        "top_datacenters": dc_tally.most_common(5),
        "top_workers": worker_tally.most_common(5)
    }

ids = ["1529877576591609861", "1700000000000000000", "1800000000000000000"]
print(analyze_machine_ids(ids))
The Snowflake to Timestamp conversion is the foundation for all time-based analysis of Twitter data. Whether you need Unix timestamps for database storage, ISO 8601 strings for APIs, or human-readable dates for display, the same simple algorithm extracts the precise millisecond timestamp from any Twitter Snowflake ID.
Code Examples
JavaScript
// Decode a Snowflake ID: upper bits (>> 22) are ms since Twitter's epoch.
const tweetId = 1382350606417817604n;
const twitterEpoch = 1288834974657n;
const timestamp = (tweetId >> 22n) + twitterEpoch;
console.log(Number(timestamp)); // 1618413042059
Python
# Decode a Snowflake ID: upper bits (>> 22) are ms since Twitter's epoch.
tweet_id = 1382350606417817604
twitter_epoch = 1288834974657
timestamp = (tweet_id >> 22) + twitter_epoch
print(timestamp)  # 1618413042059
Common Use Cases
- Tweet Scraping: Extract timestamps without hitting API rate limits
- Engagement Analysis: Correlate posting times with engagement metrics
- Bot Detection: Identify suspicious posting patterns from ID sequences
- Archive Building: Create chronological tweet archives from ID lists
- Trend Analysis: Map tweet volumes over time using ID timestamps
Common Errors & Solutions
Error: "Invalid snowflake ID"
Cause: ID contains non-numeric characters or is too short
Solution: Remove any spaces, letters, or special characters. Valid IDs are 15-19 digits.
Example: abc123 → Invalid | 1382350606417817604 → Valid
Error: "Date seems incorrect"
Cause: Wrong platform epoch being used
Solution: Verify you're using the correct converter for your platform (Twitter).
Tip: Twitter uses epoch: 1288834974657 ms
Error: "Date is in the future"
Cause: ID is too large, corrupted, or not a snowflake ID
Solution: Verify the ID is correct and from Twitter.
Performance & Best Practices
Batch Processing
Processing multiple IDs? Use array mapping for efficiency:
const ids = ['1382350606417817604', '1383000000000000000'];
// Decode every ID in a single pass over the array.
const timestamps = ids.map((id) => Number((BigInt(id) >> 22n) + 1288834974657n));
Input Validation
Always validate before converting to prevent errors:
/**
 * Throw unless `id` is a plausible snowflake: 15-19 decimal digits only.
 * @param {string} id - Candidate tweet ID.
 * @returns {true} Always true when validation passes.
 * @throws {Error} When `id` is not 15-19 digits.
 */
function validateSnowflake(id) {
  const looksValid = /^\d{15,19}$/.test(id);
  if (!looksValid) {
    throw new Error('Invalid snowflake ID');
  }
  return true;
}
Frequently Asked Questions
Can I get the exact millisecond a tweet was posted?
Yes! Twitter snowflake IDs encode millisecond-precision timestamps. Our converter extracts the exact millisecond the tweet was created on Twitter's servers.
Do X (formerly Twitter) IDs use the same format?
Yes, X continues to use the same snowflake ID format with the same epoch (November 4, 2010). All tweet IDs from both Twitter and X can be converted using this tool.
Why is Twitter's epoch November 4, 2010?
Twitter launched snowflake IDs on November 4, 2010, replacing sequential IDs. This date became the epoch for all future tweet IDs.
Can I convert timestamps back to tweet IDs?
Not exactly. While you can create a snowflake ID from a timestamp, you can't recreate the original tweet ID because it also contains worker ID and sequence number.