Performance tuning: Don't fetch guest token cache with elongator

dangered wolf 2023-08-22 01:45:28 -04:00
parent 2cd3523ef7
commit 610fb60d49
GPG key ID: 41E4D37680ED8B58
2 changed files with 28 additions and 12 deletions


@@ -8,7 +8,13 @@ let wasElongatorDisabled = false;
 declare const globalThis: {
   fetchCompletedTime: number;
-}
+};
+
+const generateSnowflake = () => {
+  const epoch = 1288834974657n; /* Twitter snowflake epoch */
+  const timestamp = BigInt(Date.now()) - epoch;
+  return String((timestamp << 22n) | 696969n);
+};
 
 globalThis.fetchCompletedTime = 0;
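For context, generateSnowflake() follows Twitter's snowflake ID layout: the bits above bit 22 carry milliseconds since the Twitter epoch (1288834974657, i.e. 2010-11-04), while the low 22 bits are normally machine/sequence fields and are simply padded with 696969 here. A minimal sketch (not part of the commit; names are illustrative) showing that such an ID still decodes back to its creation time:

const TWITTER_EPOCH = 1288834974657n; /* same epoch constant as above */

/* Recover the embedded timestamp by shifting off the low 22 machine/sequence bits */
const snowflakeToDate = (snowflake: string): Date =>
  new Date(Number((BigInt(snowflake) >> 22n) + TWITTER_EPOCH));

/* Same math as generateSnowflake(): pack "now" above bit 22, pad the low bits */
const fakeId = String(((BigInt(Date.now()) - TWITTER_EPOCH) << 22n) | 696969n);
console.log(snowflakeToDate(fakeId)); /* prints approximately the current time */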
@@ -76,8 +82,12 @@ export const twitterFetch = async (
   let activate: Response | null = null;
-  if (!newTokenGenerated) {
+  if (!newTokenGenerated && !useElongator) {
+    const timeBefore = performance.now();
     const cachedResponse = await cache.match(guestTokenRequestCacheDummy.clone());
+    const timeAfter = performance.now();
+
+    console.log(`Searched cache for token, took ${timeAfter - timeBefore}ms`);
 
     if (cachedResponse) {
       console.log('Token cache hit');
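The guestTokenRequestCacheDummy lookup above uses the Cloudflare Workers Cache API, which keys entries by a Request object, so a synthetic request to a fixed URL acts as the cache key. A rough sketch of the read path under that assumption (the URL below is a placeholder, not the one the repo uses):

const cache = caches.default;
/* Placeholder cache key; the repo constructs its own dummy Request */
const guestTokenRequestCacheDummy = new Request('https://example.invalid/guest-token');

const readCachedActivation = async (): Promise<Response | null> => {
  const timeBefore = performance.now();
  const cachedResponse = await cache.match(guestTokenRequestCacheDummy.clone());
  console.log(`Searched cache for token, took ${performance.now() - timeBefore}ms`);
  return cachedResponse ?? null;
};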
@@ -88,7 +98,7 @@ export const twitterFetch = async (
     }
   }
 
-  if (newTokenGenerated || activate === null) {
+  if (newTokenGenerated || (activate === null && !useElongator)) {
     /* We have a guest token that we can use to call API
        AFAIK there is no limit to how many guest tokens you can request.
@@ -110,10 +120,10 @@ export const twitterFetch = async (
       continue;
     }
 
-    const guestToken = activateJson.guest_token;
+    /* Elongator doesn't need guestToken, so we just make up a snowflake */
+    const guestToken = activateJson?.guest_token || generateSnowflake();
 
     console.log(newTokenGenerated ? 'Activated guest:' : 'Using guest:', activateJson);
-    console.log('Guest token:', guestToken);
 
     /* Just some cookies to mimick what the Twitter Web App would send */
     headers['Cookie'] = [
@@ -139,7 +149,9 @@ export const twitterFetch = async (
         headers: headers
       });
       const performanceEnd = performance.now();
-      console.log(`Elongator request successful after ${performanceEnd - performanceStart}ms`);
+      console.log(
+        `Elongator request successful after ${performanceEnd - performanceStart}ms`
+      );
     } else {
       const performanceStart = performance.now();
       apiRequest = await fetch(url, {
@@ -147,7 +159,9 @@ export const twitterFetch = async (
         headers: headers
       });
       const performanceEnd = performance.now();
-      console.log(`Guest API request successful after ${performanceEnd - performanceStart}ms`);
+      console.log(
+        `Guest API request successful after ${performanceEnd - performanceStart}ms`
+      );
     }
 
     response = await apiRequest?.json();
@@ -208,7 +222,7 @@ export const twitterFetch = async (
       continue;
     }
     /* If we've generated a new token, we'll cache it */
-    if (event && newTokenGenerated) {
+    if (event && newTokenGenerated && activate) {
      const cachingResponse = new Response(await activate.clone().text(), {
        headers: {
          ...tokenHeaders,
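The added "&& activate" check matters because the elongator path never assigns an activation Response, so the write side of the cache has to be skipped. A hedged sketch of that write path (the function name, content-type, and max-age are stand-ins, not values from the repo):

const cacheActivation = async (
  event: FetchEvent | null,
  cache: Cache,
  cacheKey: Request,
  activate: Response | null,
  newTokenGenerated: boolean
): Promise<void> => {
  /* Skip caching when elongator was used and no activation response exists */
  if (!event || !newTokenGenerated || !activate) {
    return;
  }
  const cachingResponse = new Response(await activate.clone().text(), {
    headers: { 'content-type': 'application/json', 'cache-control': 'max-age=300' }
  });
  /* waitUntil lets the cache.put finish after the Worker has already responded */
  event.waitUntil(cache.put(cacheKey.clone(), cachingResponse));
};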


@@ -12,7 +12,7 @@ import { handleProfile } from './user';
 declare const globalThis: {
   fetchCompletedTime: number;
-}
+};
 
 const router = Router();
@@ -29,8 +29,8 @@ const statusRequest = async (
   /* Let's return our HTML version for wayback machine (we can add other archivers too in future) */
   if (
-    ['archive.org', 'Wayback Machine'].some(service =>
-      request.headers.get('Via')?.includes?.(service)
+    ['archive.org', 'Wayback Machine'].some(
+      service => request.headers.get('Via')?.includes?.(service)
     )
   ) {
     console.log('Request from archive.org');
@@ -450,7 +450,9 @@ export const cacheWrapper = async (
   const endTime = performance.now();
   const timeSinceFetch = endTime - (globalThis.fetchCompletedTime || 0);
   const timeSinceStart = endTime - startTime;
-  console.log(`Request took ${timeSinceStart}ms, of which ${timeSinceFetch}ms was CPU time after last fetch`);
+  console.log(
+    `Request took ${timeSinceStart}ms, of which ${timeSinceFetch}ms was CPU time after last fetch`
+  );
 
   return response;
   /* Telegram sends this from Webpage Bot, and Cloudflare sends it if we purge cache, and we respect it.