"internal error" calling .fetch on bound Durable Object

I'm getting this thrown when trying to call DO fetch. I think my worker is configured incorrectly. My worker code is very simple:
async function handleAgentRequest(request, env) {
  const guid = env.GUID;
  const id = env.AGENT.idFromName(guid);
  const stub = env.AGENT.get(id);
  return await stub.fetch(request);
}

export default {
  async fetch(request, env, ctx) {
    try {
      return await handleAgentRequest(request, env);
    } catch (err) {
      console.warn(err.stack);
    }
  },
}
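For context, here is a stripped-down sketch of the shape of the Durable Object class the binding points at (my real class has more logic; this is just the minimal constructor/fetch pair the runtime expects):

export class DurableObject {
  constructor(state, env) {
    // state holds this object's storage and id; env carries the same bindings as the worker
    this.state = state;
    this.env = env;
  }

  async fetch(request) {
    // stub.fetch(request) from the worker is routed here
    return new Response('hello from the Durable Object');
  }
}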
What are some things to check in my worker settings to debug this?
11 Replies
avaer (OP) • 7mo ago
I am manually deploying this worker via the API, not wrangler. Here are my worker settings as reported from the API:
{
  "result": {
    "placement": {},
    "compatibility_date": "",
    "compatibility_flags": [],
    "usage_model": "standard",
    "tags": [],
    "tail_consumers": [
      {
        "service": "agent-worker-afb1bb7b-233a-4d2d-9999-35914f25f673"
      }
    ],
    "logpush": false,
    "bindings": [
      {
        "class_name": "DurableObject",
        "name": "AGENT",
        "namespace_id": "dd92e03be01d4c01940207dd6c0d81b9",
        "type": "durable_object_namespace"
      },
      {
        "name": "AGENT_JSON",
        "type": "secret_text"
      },
      {
        "name": "AGENT_TOKEN",
        "type": "secret_text"
      },
      {
        "bucket_name": "data",
        "name": "DATA",
        "type": "r2_bucket"
      },
      {
        "name": "GUID",
        "type": "secret_text"
      },
      {
        "name": "SUPABASE_PUBLIC_API_KEY",
        "type": "secret_text"
      },
      {
        "name": "SUPABASE_URL",
        "type": "secret_text"
      },
      {
        "name": "WALLET_MNEMONIC",
        "type": "secret_text"
      }
    ]
  },
  "success": true,
  "errors": [],
  "messages": []
}
This was fetched with curl https://api.cloudflare.com/client/v4/accounts/8d093faf5772cff838a12d1c9bc87afd/workers/scripts/agent-worker-afb1bb7b-233a-4d2d-9999-35914f25f673/settings. Does this have something to do with the namespace_id on the DO binding? That's the only difference I see between this and a working worker with a DO binding. The error I get is:
{
  "message": [
    "Error: internal error\n at async handleAgentRequest (worker.mjs:56038:10)\n at async Object.fetch (worker.mjs:56043:14)"
  ],
  "level": "warn",
  "timestamp": 1717579573799
}
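To narrow down which step is failing, I can wrap the stub call with some extra checks (a quick sketch; the binding sanity check and the logging are additions, the rest mirrors the handler above):

async function handleAgentRequest(request, env) {
  // sanity-check that the binding is actually a Durable Object namespace
  if (!env.AGENT || typeof env.AGENT.idFromName !== 'function') {
    return new Response('AGENT binding missing or not a Durable Object namespace', { status: 500 });
  }
  const guid = env.GUID;
  const id = env.AGENT.idFromName(guid);
  console.log('resolved DO id:', id.toString()); // confirms the namespace resolves an id
  const stub = env.AGENT.get(id);
  try {
    return await stub.fetch(request);
  } catch (err) {
    // a throw here points at the DO/runtime side rather than the binding lookup
    return new Response('stub.fetch failed: ' + err.message, { status: 500 });
  }
}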
avaer (OP) • 7mo ago
Here is the built worker module script.
avaer (OP) • 7mo ago
@Walshy @Hello, I'm Allie! Any ideas here? Sorry to ping you, but you seem knowledgeable about the stack internals.
Walshy • 7mo ago
what's the account id?
avaer (OP) • 7mo ago
The account ID is 8d093faf5772cff838a12d1c9bc87afd and the zone ID is a1f842607bf1d6725b1054e4185a14ff. I have a few workers deployed there, but all of the recent ones suffer from this configuration problem.
Walshy • 7mo ago
Interestingly, it seems to not be a Durable Object. I've never seen that error before. I'm going to escalate this so we can track it internally.
avaer (OP) • 7mo ago
Thanks, please keep me updated; it's a blocker to shipping 🙏
Walshy • 7mo ago
of course!
mrod • 6mo ago
Hi there - could you provide any more details about the metadata in the script upload? Specifically, how you configured the binding and any migrations in the upload. Any details on how you configured the worker, whether through PUT uploads or PATCH updates, would also be helpful.
avaer (OP) • 6mo ago
This is what I have in my list of migrations in wrangler.toml:
[[migrations]]
tag = "v1"
new_classes = [ "DurableObject" ]
I tried both via the API and wrangler. The support team says the problem is that this is not configured as a Durable Object, but my impression is that the above is the correct way to configure one. There are no errors other than the "internal error". The class is exported:
export {
  DurableObject,
  main_default as default
};
Note that this all works in wrangler dev. Also, I know my migrations are taking effect, because if I put the wrong class name in there it causes an error:
Cannot apply new-class migration to class DurableObject3 that is not exported by script [code: 10070]
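For the API path, this is roughly the metadata shape I'd expect a direct multipart upload to need for the DO binding plus the migration (a sketch only: the binding fields mirror the settings output above, the migrations field names are my assumption based on what wrangler generates, and other required fields like the module name are omitted):

{
  "bindings": [
    {
      "type": "durable_object_namespace",
      "name": "AGENT",
      "class_name": "DurableObject"
    }
  ],
  "migrations": {
    "new_tag": "v1",
    "new_classes": ["DurableObject"]
  }
}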
mrod • 6mo ago
Gotcha, taking a harder look at this today. It looks to me like this is a bug specifically triggered when you add a tail consumer pointing at the same worker - can you confirm that this goes away if you remove the tail consumer?
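(For testing, that would mean re-uploading the worker with the tail consumer dropped from its settings - a fragment sketch, with the field shape taken from the GET /settings output above:)

{
  "tail_consumers": []
}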