Felix || TM9657
Felix || TM9657
CDCloudflare Developers
Created by Felix || TM9657 on 11/11/2023 in #workers-help
The script will never generate a response.
I have written a worker that uses langchainjs.
ctx.waitUntil(requestModel.call(chat));
return new CorsResponse(readable).finalize(request);
ctx.waitUntil(requestModel.call(chat));
return new CorsResponse(readable).finalize(request);
The first execution per worker works; after that I am getting the error "The script will never generate a response". Any ideas?
2 replies
CDCloudflare Developers
Created by Felix || TM9657 on 5/21/2023 in #workers-help
Your Worker failed validation because it exceeded startup limits. Global Scope.
I love developing with workers, however I continue to get errors about the startup limits (CPU time). They seem quite random and hard to debug. Pushing multiple times sometimes works. Coming from AWS Lambda, the global scope is not a bad place to cache certain data. How should we do that in Cloudflare Workers? E.g. reusing database connections etc. Let's take this code: it won't deploy, however I am not doing anything in the global scope at coldstart:
import {
AICache,
authorize,
CorsResponse,
OpenAIStreamHandler,
} from "@tm9657/backend-worker";
import GPT3Tokenizer from "gpt3-tokenizer";
import { Configuration, OpenAIApi } from "openai";

// Worker bindings/secrets (configured via wrangler) injected into fetch().
export interface Env {
// Connection string handed to cache.init() — presumably a database URL
// consumed by AICache; verify against @tm9657/backend-worker.
DATABASE_URL: string;
// OpenAI API key passed into the OpenAI SDK Configuration.
OPENAI_KEY: string;
}

// Shape of the JSON request body. Only `prompt` is required; the request
// is rejected with 400 when it is missing.
type AIRequestBody = {
prompt: string;
// Optional system message; defaults to "" when absent.
system?: string;
// Optional sampling parameters forwarded to OpenAI.
top_p?: number;
frequency_penalty?: number;
// NOTE(review): declared but never read in the handler below — max_tokens
// is always computed from the tokenizer; confirm this field is intentional.
max_tokens?: number;
};

// Module-scope cache: lazily constructed on first request and reused across
// requests served by the same isolate (avoids re-initializing per request).
let cache: AICache | null = null;

export default {
  /**
   * Streams a chat completion from OpenAI back to the caller.
   *
   * Flow: authorize → parse/validate body → enforce usage limit →
   * start a streamed completion → pipe chunks through a TransformStream
   * that backs the Response body.
   *
   * @param request Incoming HTTP request; body must be AIRequestBody JSON.
   * @param env     Worker bindings (DATABASE_URL, OPENAI_KEY).
   * @param ctx     Execution context; used to keep streaming alive.
   * @returns CORS-finalized streaming Response (or an error status).
   */
  async fetch(
    request: Request,
    env: Env,
    ctx: ExecutionContext
  ): Promise<Response> {
    const auth = await authorize(request);
    if (!auth) return new CorsResponse("Unauthorized", 401).finalize(request);

    const body: AIRequestBody = await request.json();
    if (!body || !body?.prompt)
      return new CorsResponse("Bad Request", 400).finalize(request);

    // Lazily build the module-scope cache so the work happens on the first
    // request, not in the global scope at coldstart (startup CPU limits).
    if (!cache) cache = new AICache();
    // NOTE(review): if init() returns a Promise, this should be awaited —
    // confirm against @tm9657/backend-worker.
    cache.init(env.DATABASE_URL);

    // `new` never yields a falsy value, so the old `if (!openai)` 500-check
    // was dead code and has been removed.
    const openai = new OpenAIApi(
      new Configuration({
        apiKey: env.OPENAI_KEY,
      })
    );

    const usage = await cache.getUsage(auth.sub);
    if (!usage.getHardLimit())
      return new CorsResponse(
        "Monthly API limit hit, please upgrade your subscription!",
        429
      ).finalize(request);

    const tokenizer = new GPT3Tokenizer({ type: "gpt3" });
    const { readable, writable } = new TransformStream();

    const openAIRequest = await openai.createChatCompletion(
      {
        model: "gpt-3.5-turbo",
        messages: [
          { role: "system", content: body.system || "" },
          { role: "user", content: `${body.prompt}` },
        ],
        // `??` (not `||`) so an explicit 0 from the caller is respected.
        top_p: body.top_p ?? 0.05,
        max_tokens:
          4096 - tokenizer.encode(`${body.prompt} ${body.system}`).bpe.length,
        user: auth.sub,
        frequency_penalty: body.frequency_penalty ?? 1.0,
        stream: true,
      },
      { responseType: "stream" }
    );

    const writableStream = writable.getWriter();

    let total = "";
    const handler = new OpenAIStreamHandler(
      openAIRequest,
      async (message) => {
        total += message;
        // NOTE(review): a plain TransformStream used as a Response body
        // normally carries bytes/strings, not objects — confirm that
        // CorsResponse/the consumer serializes `{ total, message }`.
        await writableStream.write({ total, message });
      },
      async () => {
        await writableStream.close();
        await cache?.updateUsage(
          auth.sub,
          "chatGPT",
          tokenizer.encode(`${body.prompt} ${body.system} ${total}`).bpe.length
        );
      }
    ).promise();

    // BUG FIX: register the streaming promise with waitUntil. Without this,
    // the runtime may cancel the pending stream once the Response has been
    // returned, which surfaces as "The script will never generate a
    // response" on subsequent requests to the same isolate.
    ctx.waitUntil(handler);

    return new CorsResponse(readable).finalize(request);
  },
};
import {
AICache,
authorize,
CorsResponse,
OpenAIStreamHandler,
} from "@tm9657/backend-worker";
import GPT3Tokenizer from "gpt3-tokenizer";
import { Configuration, OpenAIApi } from "openai";

// Worker bindings/secrets (configured via wrangler) injected into fetch().
export interface Env {
// Connection string handed to cache.init() — presumably a database URL
// consumed by AICache; verify against @tm9657/backend-worker.
DATABASE_URL: string;
// OpenAI API key passed into the OpenAI SDK Configuration.
OPENAI_KEY: string;
}

// Shape of the JSON request body. Only `prompt` is required; the request
// is rejected with 400 when it is missing.
type AIRequestBody = {
prompt: string;
// Optional system message; defaults to "" when absent.
system?: string;
// Optional sampling parameters forwarded to OpenAI.
top_p?: number;
frequency_penalty?: number;
// NOTE(review): declared but never read in the handler below — max_tokens
// is always computed from the tokenizer; confirm this field is intentional.
max_tokens?: number;
};

// Module-scope cache: lazily constructed on first request and reused across
// requests served by the same isolate (avoids re-initializing per request).
let cache: AICache | null = null;

export default {
  /**
   * Streams a chat completion from OpenAI back to the caller.
   *
   * Flow: authorize → parse/validate body → enforce usage limit →
   * start a streamed completion → pipe chunks through a TransformStream
   * that backs the Response body.
   *
   * @param request Incoming HTTP request; body must be AIRequestBody JSON.
   * @param env     Worker bindings (DATABASE_URL, OPENAI_KEY).
   * @param ctx     Execution context; used to keep streaming alive.
   * @returns CORS-finalized streaming Response (or an error status).
   */
  async fetch(
    request: Request,
    env: Env,
    ctx: ExecutionContext
  ): Promise<Response> {
    const auth = await authorize(request);
    if (!auth) return new CorsResponse("Unauthorized", 401).finalize(request);

    const body: AIRequestBody = await request.json();
    if (!body || !body?.prompt)
      return new CorsResponse("Bad Request", 400).finalize(request);

    // Lazily build the module-scope cache so the work happens on the first
    // request, not in the global scope at coldstart (startup CPU limits).
    if (!cache) cache = new AICache();
    // NOTE(review): if init() returns a Promise, this should be awaited —
    // confirm against @tm9657/backend-worker.
    cache.init(env.DATABASE_URL);

    // `new` never yields a falsy value, so the old `if (!openai)` 500-check
    // was dead code and has been removed.
    const openai = new OpenAIApi(
      new Configuration({
        apiKey: env.OPENAI_KEY,
      })
    );

    const usage = await cache.getUsage(auth.sub);
    if (!usage.getHardLimit())
      return new CorsResponse(
        "Monthly API limit hit, please upgrade your subscription!",
        429
      ).finalize(request);

    const tokenizer = new GPT3Tokenizer({ type: "gpt3" });
    const { readable, writable } = new TransformStream();

    const openAIRequest = await openai.createChatCompletion(
      {
        model: "gpt-3.5-turbo",
        messages: [
          { role: "system", content: body.system || "" },
          { role: "user", content: `${body.prompt}` },
        ],
        // `??` (not `||`) so an explicit 0 from the caller is respected.
        top_p: body.top_p ?? 0.05,
        max_tokens:
          4096 - tokenizer.encode(`${body.prompt} ${body.system}`).bpe.length,
        user: auth.sub,
        frequency_penalty: body.frequency_penalty ?? 1.0,
        stream: true,
      },
      { responseType: "stream" }
    );

    const writableStream = writable.getWriter();

    let total = "";
    const handler = new OpenAIStreamHandler(
      openAIRequest,
      async (message) => {
        total += message;
        // NOTE(review): a plain TransformStream used as a Response body
        // normally carries bytes/strings, not objects — confirm that
        // CorsResponse/the consumer serializes `{ total, message }`.
        await writableStream.write({ total, message });
      },
      async () => {
        await writableStream.close();
        await cache?.updateUsage(
          auth.sub,
          "chatGPT",
          tokenizer.encode(`${body.prompt} ${body.system} ${total}`).bpe.length
        );
      }
    ).promise();

    // BUG FIX: register the streaming promise with waitUntil. Without this,
    // the runtime may cancel the pending stream once the Response has been
    // returned, which surfaces as "The script will never generate a
    // response" on subsequent requests to the same isolate.
    ctx.waitUntil(handler);

    return new CorsResponse(readable).finalize(request);
  },
};
10 replies