7 Replies
- What's your exact discord.js
npm list discord.js
and node node -v
version?
- Not a discord.js issue? Check out #other-js-ts.
- Consider reading #how-to-get-help to improve your question!
- Explain what exactly your issue is.
- Post the full error stack trace, not just the top part!
- Show your code!
- Issue solved? Press the button!Server code:
Client code:
Dep:
Console [sever]:
Client console:
Its, admin panel for guild master in albion online. and I am developing a service that will help to avoid using TeamSpeak for communication between roles in each unique role group.
So, representatives of each role (tank, support, healer, DPS) sit in separate rooms but receive the same commands from the raid leader through a Discord bot, while the raid leader himself broadcasts through the admin panel.
seriously? I didn't even guess... could you give a simple example of such code?
const { Readable, Transform, pipeline } = require('stream');
const audioPlayer = createAudioPlayer();
let activeStream = null;
function playAudioStream(audioStream) {
const resource = createAudioResource(audioStream, { inputType: StreamType.Opus });
audioPlayer.play(resource);
console.log('Запущено воспроизведение аудио.');
}
wss.on('connection', (ws) => {
console.log('Админ зашёл на сайт голосовой панели.');
ws.on('message', (data) => {
// Преобразуйте полученные данные в формат OPUS
const audioData = new Int16Array(data);
// Создаем поток чтения из аудио данных
const readableStream = new Readable({
read() {
this.push(Buffer.from(audioData.buffer));
this.push(null);
},
});
// Создаем поток кодирования в формат OPUS
const opusEncoder = new OpusEncoder(48000, 2);
// Создаем промежуточный поток Transform, который будет принимать данные из readableStream,
// кодировать их в формат OPUS и передавать далее
const opusTransform = new Transform({
transform(chunk, encoding, callback) {
try {
const encodedFrame = opusEncoder.encode(chunk, chunk.length);
this.push(Buffer.from(encodedFrame));
callback();
} catch (err) {
callback(err);
}
},
});
// Подключаем потоки в цепочку: аудио данные -> OPUS кодирование -> Discord
pipeline(
readableStream,
opusTransform,
(err) => {
if (err) {
console.error('Ошибка во время обработки аудио:', err);
} else {
console.log('Обработка аудио завершена.');
}
}
);
// Воспроизводим аудио через Discord бота
playAudioStream(opusTransform);
activeStream = true;
});
ws.on('close', () => {
console.log('Админ вышёл с сайта голосовой панели.');
activeStream = null;
});
});
const { Readable, Transform, pipeline } = require('stream');
const audioPlayer = createAudioPlayer();
let activeStream = null;
function playAudioStream(audioStream) {
const resource = createAudioResource(audioStream, { inputType: StreamType.Opus });
audioPlayer.play(resource);
console.log('Запущено воспроизведение аудио.');
}
wss.on('connection', (ws) => {
console.log('Админ зашёл на сайт голосовой панели.');
ws.on('message', (data) => {
// Преобразуйте полученные данные в формат OPUS
const audioData = new Int16Array(data);
// Создаем поток чтения из аудио данных
const readableStream = new Readable({
read() {
this.push(Buffer.from(audioData.buffer));
this.push(null);
},
});
// Создаем поток кодирования в формат OPUS
const opusEncoder = new OpusEncoder(48000, 2);
// Создаем промежуточный поток Transform, который будет принимать данные из readableStream,
// кодировать их в формат OPUS и передавать далее
const opusTransform = new Transform({
transform(chunk, encoding, callback) {
try {
const encodedFrame = opusEncoder.encode(chunk, chunk.length);
this.push(Buffer.from(encodedFrame));
callback();
} catch (err) {
callback(err);
}
},
});
// Подключаем потоки в цепочку: аудио данные -> OPUS кодирование -> Discord
pipeline(
readableStream,
opusTransform,
(err) => {
if (err) {
console.error('Ошибка во время обработки аудио:', err);
} else {
console.log('Обработка аудио завершена.');
}
}
);
// Воспроизводим аудио через Discord бота
playAudioStream(opusTransform);
activeStream = true;
});
ws.on('close', () => {
console.log('Админ вышёл с сайта голосовой панели.');
activeStream = null;
});
});
const voiceConnectButton = document.getElementById('voiceConnect');
let mediaStream;
let isStream = false;
let socket;
async function startStream() {
try {
const stream = await navigator.mediaDevices.getUserMedia({
audio: {
sampleRate: 48000,
channels: 2,
},
});
const intervalDuration = 1500;
mediaStream = new MediaRecorder(stream);
mediaStream.ondataavailable = async (event) => {
if (isStream) {
const audioData = await event.data.arrayBuffer();
socket.send(audioData);
console.log('Отправляемые данные:', audioData);
setTimeout(() => {
isStream = false;
voiceConnectButton.disabled = false;
voiceConnectButton.classList.remove('button-voice-active');
socket.close();
console.log('Соединение закрыто');
}, 250);
}
};
mediaStream.start(intervalDuration);
isStream = true;
voiceConnectButton.disabled = true;
voiceConnectButton.classList.add('button-voice-active');
console.log('Стриминг аудио начат');
socket = new WebSocket('ws://2.236.136.128:3098');
} catch (error) {
console.error('Failed to access the microphone:', error);
voiceConnectButton.disabled = false;
voiceConnectButton.classList.remove('button-voice-active');
}
}
const voiceConnectButton = document.getElementById('voiceConnect');
let mediaStream;
let isStream = false;
let socket;
async function startStream() {
try {
const stream = await navigator.mediaDevices.getUserMedia({
audio: {
sampleRate: 48000,
channels: 2,
},
});
const intervalDuration = 1500;
mediaStream = new MediaRecorder(stream);
mediaStream.ondataavailable = async (event) => {
if (isStream) {
const audioData = await event.data.arrayBuffer();
socket.send(audioData);
console.log('Отправляемые данные:', audioData);
setTimeout(() => {
isStream = false;
voiceConnectButton.disabled = false;
voiceConnectButton.classList.remove('button-voice-active');
socket.close();
console.log('Соединение закрыто');
}, 250);
}
};
mediaStream.start(intervalDuration);
isStream = true;
voiceConnectButton.disabled = true;
voiceConnectButton.classList.add('button-voice-active');
console.log('Стриминг аудио начат');
socket = new WebSocket('ws://2.236.136.128:3098');
} catch (error) {
console.error('Failed to access the microphone:', error);
voiceConnectButton.disabled = false;
voiceConnectButton.classList.remove('button-voice-active');
}
}
{
"dependencies": {
"@discordjs/opus": "^0.9.0",
"@discordjs/voice": "^0.16.0",
"cookie-parser": "^1.4.6",
"discord.js": "^14.11.0",
"ejs": "^3.1.9",
"express": "^4.18.2",
"express-session": "^1.17.3",
"lamejs": "^1.2.1",
"node-lame": "^1.3.2",
"opusscript": "^0.0.8",
"unirest": "^0.6.0",
"ws": "^8.13.0"
}
}
{
"dependencies": {
"@discordjs/opus": "^0.9.0",
"@discordjs/voice": "^0.16.0",
"cookie-parser": "^1.4.6",
"discord.js": "^14.11.0",
"ejs": "^3.1.9",
"express": "^4.18.2",
"express-session": "^1.17.3",
"lamejs": "^1.2.1",
"node-lame": "^1.3.2",
"opusscript": "^0.0.8",
"unirest": "^0.6.0",
"ws": "^8.13.0"
}
}
[MAIN] Воспроизведение началось.
Админ вышёл с сайта голосовой панели.
Админ зашёл на сайт голосовой панели.
[MAIN] Воспроизведение началось.
Запущено воспроизведение аудио.
[Ошибка] AudioPlayerError: Cannot create a Buffer larger than 0x100000000 bytes
at Transform.transform [as _transform] (C:\Users\Марк\x\Web\Sex and Flex\discord.js:84:44)
///////
resource: AudioResource {
playStream: Transform {
_readableState: [ReadableState],
_events: [Object: null prototype],
_eventsCount: 6,
_maxListeners: undefined,
_writableState: [WritableState],
allowHalfOpen: true,
_transform: [Function: transform],
[Symbol(kCapture)]: false,
[Symbol(kCallback)]: null
},
edges: [],
metadata: null,
volume: undefined,
encoder: undefined,
audioPlayer: AudioPlayer {
_events: [Object: null prototype],
_eventsCount: 3,
_maxListeners: undefined,
_state: [Object],
subscribers: [Array],
behaviors: [Object],
debug: [Function (anonymous)],
[Symbol(kCapture)]: false
},
playbackDuration: 0,
started: false,
silencePaddingFrames: 5,
silenceRemaining: -1
}
}
Ошибка во время обработки аудио: Error: Cannot create a Buffer larger than 0x100000000 bytes
at Transform.transform [as _transform] (C:\Users\Марк\x\Web\Sex and Flex\discord.js:84:44)
at Transform._write (node:internal/streams/transform:175:8)
at writeOrBuffer (node:internal/streams/writable:392:12)
at _write (node:internal/streams/writable:333:10)
at Writable.write (node:internal/streams/writable:337:10)
at Readable.ondata (node:internal/streams/readable:766:22)
at Readable.emit (node:events:513:28)
at Readable.read (node:internal/streams/readable:539:10)
at flow (node:internal/streams/readable:1023:34)
at resume_ (node:internal/streams/readable:1004:3) {
code: 'ERR_BUFFER_TOO_LARGE'
}
Админ вышёл с сайта голосовой панели.
[MAIN] Воспроизведение началось.
Админ вышёл с сайта голосовой панели.
Админ зашёл на сайт голосовой панели.
[MAIN] Воспроизведение началось.
Запущено воспроизведение аудио.
[Ошибка] AudioPlayerError: Cannot create a Buffer larger than 0x100000000 bytes
at Transform.transform [as _transform] (C:\Users\Марк\x\Web\Sex and Flex\discord.js:84:44)
///////
resource: AudioResource {
playStream: Transform {
_readableState: [ReadableState],
_events: [Object: null prototype],
_eventsCount: 6,
_maxListeners: undefined,
_writableState: [WritableState],
allowHalfOpen: true,
_transform: [Function: transform],
[Symbol(kCapture)]: false,
[Symbol(kCallback)]: null
},
edges: [],
metadata: null,
volume: undefined,
encoder: undefined,
audioPlayer: AudioPlayer {
_events: [Object: null prototype],
_eventsCount: 3,
_maxListeners: undefined,
_state: [Object],
subscribers: [Array],
behaviors: [Object],
debug: [Function (anonymous)],
[Symbol(kCapture)]: false
},
playbackDuration: 0,
started: false,
silencePaddingFrames: 5,
silenceRemaining: -1
}
}
Ошибка во время обработки аудио: Error: Cannot create a Buffer larger than 0x100000000 bytes
at Transform.transform [as _transform] (C:\Users\Марк\x\Web\Sex and Flex\discord.js:84:44)
at Transform._write (node:internal/streams/transform:175:8)
at writeOrBuffer (node:internal/streams/writable:392:12)
at _write (node:internal/streams/writable:333:10)
at Writable.write (node:internal/streams/writable:337:10)
at Readable.ondata (node:internal/streams/readable:766:22)
at Readable.emit (node:events:513:28)
at Readable.read (node:internal/streams/readable:539:10)
at flow (node:internal/streams/readable:1023:34)
at resume_ (node:internal/streams/readable:1004:3) {
code: 'ERR_BUFFER_TOO_LARGE'
}
Админ вышёл с сайта голосовой панели.
Стриминг аудио начат
Отправляемые данные: ArrayBuffer(25253)byteLength: 25253
detached: falsemaxByteLength: 25253
resizable: false
[[Prototype]]: ArrayBuffer
[[Int8Array]]: Int8Array(25253)
[[Uint8Array]]: Uint8Array(25253)
[[ArrayBufferByteLength]]: 25253
[[ArrayBufferData]]: 28
Соединение закрыто
Стриминг аудио начат
Отправляемые данные: ArrayBuffer(25253)byteLength: 25253
detached: falsemaxByteLength: 25253
resizable: false
[[Prototype]]: ArrayBuffer
[[Int8Array]]: Int8Array(25253)
[[Uint8Array]]: Uint8Array(25253)
[[ArrayBufferByteLength]]: 25253
[[ArrayBufferData]]: 28
Соединение закрыто
However, I would like to dwell on the idea of using the site, as it requires several other tasks. I'm interested to see code that would redirect the audio stream from the site straight to the discord bot.
async function startStream() {
try {
const stream = await navigator.mediaDevices.getUserMedia({
audio: {
sampleRate: 48000,
channels: 2,
},
});
const intervalDuration = 50;
mediaStream = new MediaRecorder(stream);
mediaStream.ondataavailable = async (event) => {
const audioData = await event.data.arrayBuffer();
socket.send(audioData);
console.log('Sending:', audioData);
};
mediaStream.start(intervalDuration);
console.log('Stream started');
socket = new WebSocket('ws://2.236.136.128:3098');
socket.onclose = () => {
console.log('Connection closed');
};
} catch (error) {
console.error('Failed to access the microphone:', error);
}
}
async function startStream() {
try {
const stream = await navigator.mediaDevices.getUserMedia({
audio: {
sampleRate: 48000,
channels: 2,
},
});
const intervalDuration = 50;
mediaStream = new MediaRecorder(stream);
mediaStream.ondataavailable = async (event) => {
const audioData = await event.data.arrayBuffer();
socket.send(audioData);
console.log('Sending:', audioData);
};
mediaStream.start(intervalDuration);
console.log('Stream started');
socket = new WebSocket('ws://2.236.136.128:3098');
socket.onclose = () => {
console.log('Connection closed');
};
} catch (error) {
console.error('Failed to access the microphone:', error);
}
}
It's like a dynamic sound, the louder the sound, the longer it is. At least that's the pattern I saw.
Heh, I don't know, this is my first time working with audio data, and I've been trying to get the bot to "speak" for 9 days, so I've already gone through a lot of options.
/* Error in playAudio(combinedAudio) */
Great, I've done some work on the code, and it saves everything it received cumulatively to a webm file, and it even saved without errors. I need instructions or, more precisely, advice on how to fix and what could be causing the error:
(I've been getting this error in previous versions of the code, and I can't find any documentation on this issue on the internet)
App started on port 80
Impactium bot launched!
Second bot launched!
GG
GG
GG
GG
node:events:491
throw er; // Unhandled 'error' event
^
TypeError [ERR_INVALID_ARG_TYPE]: The "chunk" argument must be of type string or an instance of Buffer or Uint8Array. Received type number (26)
App started on port 80
Impactium bot launched!
Second bot launched!
GG
GG
GG
GG
node:events:491
throw er; // Unhandled 'error' event
^
TypeError [ERR_INVALID_ARG_TYPE]: The "chunk" argument must be of type string or an instance of Buffer or Uint8Array. Received type number (26)
And... webm file: