Compare commits
No commits in common. "master" and "watson" have entirely different histories.
@@ -2,5 +2,3 @@ node_modules
voice_tmp
.env
*.db
.DS_Store
gkey.json
@@ -1,31 +0,0 @@
const Voice = require("@discordjs/voice");

module.exports = class AudioQueue {
    constructor(connection, api) {
        this.connection = connection;
        this.api = api;
        this.queue = [];
        this.current = undefined;
        this.api.player.on(Voice.AudioPlayerStatus.Idle, this.handleStop.bind(this));
    }
    playNext() {
        if (this.queue.length == 0) {
            this.current = undefined;
            return;
        }
        this.current = this.api.play(this.queue[0]);
    }
    handleStop(current) {
        this.queue.shift();
        this.playNext();
    }
    add(element) {
        this.queue.push(element);
        if (this.queue.length == 1) this.playNext();
    }
    flush() {
        this.current.setVolume(0);
        this.queue = [];
        this.playNext();
    }
}
@@ -1,5 +0,0 @@
FROM node:18-alpine
COPY . .
RUN apk add ffmpeg
RUN npm install
ENTRYPOINT ["node", "index.js"]

adapter.js
@@ -1,62 +0,0 @@
const { Snowflake, Client, Guild, VoiceBasedChannel, Events, Status, GatewayDispatchEvents } = require("discord.js");

const adapters = new Map();
const trackedClients = new Set();
const trackedShards = new Map();


function trackClient(client) {
    if (trackedClients.has(client)) return;
    trackedClients.add(client);
    client.ws.on(GatewayDispatchEvents.VoiceServerUpdate, (payload) => {
        adapters.get(payload.guild_id)?.onVoiceServerUpdate(payload);
    });
    client.ws.on(GatewayDispatchEvents.VoiceStateUpdate, (payload) => {
        if (payload.guild_id && payload.session_id && payload.user_id === client.user?.id) {
            adapters.get(payload.guild_id)?.onVoiceStateUpdate(payload);
        }
    });
    client.on(Events.ShardDisconnect, (_, shardId) => {
        const guilds = trackedShards.get(shardId);
        if (guilds) {
            for (const guildID of guilds.values()) {
                adapters.get(guildID)?.destroy();
            }
        }
        trackedShards.delete(shardId);
    });
}

function trackGuild(guild) {
    let guilds = trackedShards.get(guild.shardId);
    if (!guilds) {
        guilds = new Set();
        trackedShards.set(guild.shardId, guilds);
    }
    guilds.add(guild.id);
}


module.exports = function (channel) {
    return (methods) => {
        adapters.set(channel.guild.id, methods);
        trackClient(channel.client);
        trackGuild(channel.guild);
        return {
            sendPayload(data) {
                console.log(channel.guild.shard.status);
                // if (channel.guild.shard.status === Status.READY) {
                console.log("Sending shard data");
                channel.guild.shard.send(data);
                return true;
                // }
                console.log("Unable to send channel payload");
                return false;
            },
            destroy() {
                console.log("Destroying adapter");
                return adapters.delete(channel.guild.id);
            },
        };
    };
}

example.env
@@ -1,13 +0,0 @@
TOKEN=DISCORD_BOT_TOKEN_HERE
GUILD=GUILD_ID_HERE
CHANNEL=VOICE_CHANNEL_ID_HERE
STRING_SET=en
VOICE_TMP_PATH=./voice_tmp/
DB_FILE=DATABASE_PATH_HERE
PREFIX=+
ANNOUNCEMENT_ENGINE=espeak
ANNOUNCEMENT_VOICE=en
watsonURL=WATSON_URL_HERE
watsonAPIKey=WATSON_API_KEY_HERE
TTS_CHANNEL=CANTTALK_TEXT_CHANNEL_ID_HERE
GOOGLE_APPLICATION_CREDENTIALS=GOOGLE_CLOUD_KEY_HERE

index.js
@@ -1,47 +1,20 @@
const Discord = require('discord.js');
const Voice = require("@discordjs/voice");
const adapterCreator = require("./adapter");
require('dotenv').config();
const fetch = require('node-fetch');
const fs = require('fs');
const sha1 = require('sha1');
const sqlite3 = require('sqlite3');
const { open } = require('sqlite')
const sqlite = require('sqlite3');

let joinedVoiceChannels = [];
let joinedVoiceChannelConnections = new Map();

let modules = [];

let commandHandlers = new Map();
const player = Voice.createAudioPlayer();

const rest = new Discord.REST({ version: '10' }).setToken(process.env["TOKEN"]);
const bot = new Discord.Client({
    intents: [
        Discord.GatewayIntentBits.GuildMembers,
        Discord.GatewayIntentBits.GuildMessageReactions,
        Discord.GatewayIntentBits.GuildMessages,
        Discord.GatewayIntentBits.GuildPresences,
        Discord.GatewayIntentBits.GuildVoiceStates,
        Discord.GatewayIntentBits.Guilds,
        Discord.GatewayIntentBits.MessageContent
    ]
});
const bot = new Discord.Client();

async function initDB() {
    console.log(__dirname);
    api.db = await open({
        filename: process.env["DB_FILE"],
        driver: sqlite3.Database
    });
}
const db = new sqlite.Database(process.env.DB_FILE);

const api = {
    player: player,
    db: undefined,
    queue: undefined,
    strings: require('./strings/' + process.env.STRING_SET + '.json'),
    db: db,
    ttsEngines: (() => {
        let engines={};
        console.log(`Registering TTS engines...`);
@@ -54,30 +27,13 @@ const api = {
        })
        return engines;
    })(),
    announcementVoice: process.env.ANNOUNCEMENT_VOICE,
    announcementEngine: undefined,

    play: (file) => {
        return player.play(Voice.createAudioResource(file));
    },
    respond: (message, text, voiceText) => {
        let toSend = message.member.displayName + ", " + (voiceText ? voiceText : text);
        if (message.member.voice.channel) {
            api.queue.add(__dirname + "/sysmsg.wav");
            api.speak(message.member.voice.channel, toSend);
        } else {
            message.reply(text);
        }
    },

    getActiveVoiceChannel: () => joinedVoiceChannels[0],

    isInVoiceChannel: (channel) => {
        return joinedVoiceChannels.includes(channel);
    },

    getConnectionForVoiceChannel: (channel) => {
        return joinedVoiceChannelConnections.get(channel);
        return bot.voice.connections.find((conn) => conn.channel === channel);
    },

    generateVoice: async (string, engine, voice, params) => {
@@ -91,30 +47,22 @@

    joinChannel: async (channel) => {
        if (!api.isInVoiceChannel(channel)) {

            const res = Voice.joinVoiceChannel({
                channelId: channel.id,
                guildId: channel.guild.id,
                adapterCreator: adapterCreator(channel)
            });
            res.subscribe(player);
            const res = await channel.join();
            joinedVoiceChannels.push(channel);
            joinedVoiceChannelConnections.set(channel, res);
        }
    },

    leaveChannel: async (channel) => {
        if (joinedVoiceChannels.includes(channel)) {
            let con = joinedVoiceChannelConnections.get(channel);
            joinedVoiceChannels = joinedVoiceChannels.filter((chan) => chan !== channel);
            con.disconnect();
            joinedVoiceChannelConnections.delete(channel);
            await channel.leave();
        }
    },

    speak: async (channel, message, engine = api.announcementEngine, voice = api.announcementVoice, params = {}) => {
    speak: async (channel, message, engine=api.ttsEngines.gtranslate, voice='en-us', params={}) => {
        const conn = api.getConnectionForVoiceChannel(channel);
        const filepath = await api.generateVoice(message, engine, voice, params);
        api.queue.add(filepath);
        if (conn) conn.play(filepath);
    },

    registerCommand: async (commandString, commandFunc) => {
@@ -127,32 +75,23 @@ function registerModules() {
    const moduleDirectories = fs.readdirSync('./modules');
    moduleDirectories.forEach((dir) => {
        if(dir.startsWith('.')) return;
        modules.push(require(`./modules/${dir}`));
        modules.push(require(`./modules/${dir}/index.js`));
        console.log(`Loading ./modules/${dir}/index.js`)
    })
    modules.forEach((mod) => mod(bot, api));
}

function handleMessage(message) {
    console.log(`I got message`);
    if (message.content.startsWith(process.env.PREFIX)) {
        const args = message.content.split(" ");
        const args = message.contents.split(" ");
        const command = args[0].substr(1, args[0].length);
        const execution = commandHandlers.get(command);
        if (command) {
            if (execution) execution(args, message);
            command(args, message);
        }
    }
}


api.announcementEngine = api.ttsEngines[process.env.ANNOUNCEMENT_ENGINE];

async function start() {
    await initDB();
    registerModules();
}
bot.login(process.env.TOKEN);
bot.on('messageCreate', handleMessage);

start();
bot.on('message', handleMessage);
Binary file not shown.
@@ -1,49 +0,0 @@
const printf=require('printf');
const fs = require('fs');
const path = require('path');

module.exports = async (bot, api) => {
    bot.on('messageCreate', async (message) => {
        if (!message.content.startsWith(process.env.PREFIX)) {
            if (message.channel.id == process.env.TTS_CHANNEL) {
                let chan=message.member.voice.channel;
                let userRow = await api.db.get('select * from TTSPreferences where user_id=?', message.author.id);
                if (!userRow) {
                    await api.db.run('insert into TTSPreferences (user_id,engine,voice) values (?,?,?)', [message.author.id, api.announcementEngine.shortName, api.announcementVoice]);
                    userRow = await api.db.get('select * from TTSPreferences where user_id=?', message.author.id);
                }
                if (api.ttsEngines[userRow.engine]) {
                    api.speak(chan,message.content, api.ttsEngines[userRow.engine], userRow.voice)
                } else {

                }
            }
        }
    });
    api.registerCommand('myvoice', async (args, message) => {
        let userEngine, userVoice;
        if (args.length > 3) {
            return api.respond(message, printf(api.strings.TOO_MANY_ARGUMENTS));
        }
        if (api.ttsEngines[args[1]]) {
            userEngine = args[1];
            if (api.ttsEngines[userEngine].validateVoice(args[2].toLowerCase())) {
                userVoice = args[2].toLowerCase();
                api.respond(message, printf(api.strings.USER_VOICE_CHANGED, userVoice, api.ttsEngines[userEngine].longName));
            } else {
                userVoice = api.ttsEngines[userEngine].getDefaultVoice();
                api.respond(message, printf(api.strings.INVALID_VOICE, userVoice, api.ttsEngines[userEngine].longName));
            }
            await api.db.run('update TTSPreferences set engine=?, voice=? where user_id=?', userEngine, userVoice, message.author.id);
        } else {
            api.respond(message, printf(api.strings.INVALID_ENGINE, args[1]));
        }
    });
    api.registerCommand('random', async (args, message) => {
        const files = fs.readdirSync(process.env["VOICE_TMP_PATH"]);
        const rnd = files[Math.floor(Math.random()*files.length)];
        console.log(rnd);
        api.queue.add(__dirname + "/../../sysmsg.wav");
        api.queue.add(process.env["VOICE_TMP_PATH"] + "/" + rnd);
    });
}
@@ -1,34 +1,25 @@
const printf = require('printf');
const AudioQueue = require('../../AudioQueue.js')

module.exports = function(bot, api) {
    bot.on('voiceStateUpdate', async (oldState, newState) => {
        if (newState.member.user.bot) return;
        if (oldState.channel && newState.channel) return;
        const channel = oldState.channel || newState.channel;
        if (!channel) return;
        if (channel.members.size < 2) {
            api.AudioQueue.flush();
            await api.leaveChannel(channel);

        if (channel.members.array().length < 2) {
            return await api.leaveChannel(channel);
        }
        await api.joinChannel(channel);
        let joined = false;
        if (!oldState.channel) {
            joined = true;
            let conn = api.getConnectionForVoiceChannel(channel);
            if (!api.queue) api.queue = new AudioQueue(conn, api);
        }

        let username = newState.member.displayName;
        let str = "";
        if (!joined) {
            str = printf(api.strings.USER_LEFT, username);
            str = username + " left the channel";
        } else {
            str = printf(api.strings.USER_JOINED, username);
            str = username + " joined the channel";
        }
        const filepath = await api.generateVoice(str, api.announcementEngine, api.announcementVoice);
        api.queue.add(__dirname + "/sysmsg.wav");
        api.queue.add(filepath);
        api.speak(channel, str);
    })
}
@@ -1,23 +0,0 @@
let ChatGPTAPI = null;

module.exports = function (bot, api) {
    import("chatgpt").then((mod) => {
        ChatGPTAPI = mod.ChatGPTAPI;
    });
    api.registerCommand('chat', async (args, message) => {
        const response = await getChatGPTResponse(message.content.slice(6).trim());
        api.respond(message, response);
    });
}

async function getChatGPTResponse(prompt) {
    const api = new ChatGPTAPI({
        apiKey: process.env.OPENAI_API_KEY,
        completionParams: {
            model: 'gpt-4o'
        }
    })

    const res = await api.sendMessage(prompt);
    return res.text;
}
@@ -1,29 +0,0 @@
const gtranslate = require('node-google-translate-skidz');

module.exports = function (bot, api) {
    api.registerCommand('mangle', async (args, message) => {
        let str = message.content.slice(8).trim();
        let langs = process.env.MANGLE_LANGS.split(',');
        let lang;
        let i = 0;
        for (let lang of langs) {
            if (i >= langs.length - 1) break;
            let translationResult = await translate(str, lang, langs[i + 1]);
            str = translationResult.translation;
            i++;
        }
        api.respond(message, str);
    });
}

async function translate(text, fromLang, toLang) {
    return new Promise((resolve, reject) => {
        gtranslate({
            text: text,
            source: fromLang,
            target: toLang
        }, (res) => {
            resolve(res);
        })
    })
}
@@ -1,17 +0,0 @@
const fetch = require('node-fetch');

module.exports = function (bot, api) {
    api.registerCommand('randomquote', async (args, message) => {
        const data = await fetch(process.env["QDB_URL"], {
            headers: {
                Authorization: 'Basic ' + Buffer.from(`${process.env["QDB_USER"]}:${process.env["QDB_PASS"]}`).toString('base64')
            }
        });
        const quotes = await data.json();
        const quote = quotes[Math.floor(Math.random()*quotes.length)];
        let chan=message.member.voice.channel;
        // api.queue.add(__dirname + "/sysmsg.wav");
        // api.speak(chan, `${quote.author}, on ${quote.medium}: ${quote.quote}`);
        api.respond(message, `Here's your quote: ${quote.author}, on ${author.medium}: ${quote.quote}`);
    })
}
@@ -1,6 +0,0 @@
module.exports = function (bot, api) {
    api.registerCommand("summon", async (args, message) => {
        await api.joinChannel(message.member.voice.channel);
        api.respond(message, `Hi!`);
    })
}
@@ -1,23 +0,0 @@
const printf=require('printf');

module.exports = function (bot, api) {
    api.registerCommand('announcevoice', (args, message) => {
        let channel = api.getActiveVoiceChannel();
        if (args.length > 3) {
            return api.respond(message, printf(api.strings.TOO_MANY_ARGUMENTS));
        }
        if (api.ttsEngines[args[1]]) {
            api.announcementEngine = api.ttsEngines[args[1]];
            if (api.announcementEngine.validateVoice(args[2])) {
                api.announcementVoice = args[2];
                api.respond(message, printf(api.strings.SYSTEM_VOICE_CHANGED, api.announcementVoice, api.announcementEngine.longName));
            } else {
                api.announcementVoice = api.announcementEngine.getDefaultVoice();
                api.respond(message, printf(api.strings.INVALID_VOICE, api.announcementVoice, api.announcementEngine.longName));
            }
        } else {
            api.respond(message, printf(api.strings.INVALID_ENGINE, args[1]));
        }
    });
    api.registerCommand('flush',()=>api.queue.flush());
}
@@ -1,14 +1,10 @@
const AudioQueue=require('../../AudioQueue.js')

module.exports = function(bot, api) {
    bot.on('ready', async () => {
        console.log("Bot initialized and listening");
        const guild = await bot.guilds.fetch(process.env.GUILD);
        const channel = await bot.channels.fetch(process.env.CHANNEL);
        await api.joinChannel(channel);
        let conn=api.getConnectionForVoiceChannel(channel);
        if (!api.queue) api.queue=new AudioQueue(conn, api);
        api.queue.add(__dirname + "/../../sysstart.wav");
        api.speak(channel, api.strings.WELCOME);

        api.speak(channel, `Hi! I'm alive. It is now ${new Date().toLocaleTimeString()} on ${new Date().toLocaleDateString()}`,api.ttsEngines.espeak, "en");
    })
}
@@ -1,47 +0,0 @@
const printf = require('printf')
const isStringInt = require('is-string-int');
const levenshtein = require('fast-levenshtein')

module.exports = function (bot, api) {
    bot.currentWBW = "";
    api.registerCommand('wbw', async (args, message) => {
        if (args.length == 1) {
            return api.respond(message, bot.currentWBW ? printf(api.strings.CURRENT_STORY, bot.currentWBW) : printf(api.strings.NO_STORY));
        }
        if (args.length > 2) {
            return api.respond(message, printf(api.strings.TOO_MANY_ARGUMENTS));
        } else {
            if (isStringInt(args[1])) {
                let story = await api.db.get('select * from WBWStories where story_id=?', parseInt(args[1]))
                if (!story) {
                    return api.respond(message, api.strings.WBW_INVALID_ID)
                } else {
                    return api.respond(message, story.story_text)
                }
            } else {
                let lastUser = await api.db.get('select value from BotState where key="last_wbw"');
                if (message.author.id == lastUser.value && bot.currentWBW != "") {
                    let lastWord = (bot.currentWBW.indexOf(" ") == bot.currentWBW.lastIndexOf(" ")) ? bot.currentWBW : bot.currentWBW.slice(bot.currentWBW.slice(0,-1).lastIndexOf(' ') + 1);
                    console.log(args[1], lastWord, levenshtein.get(args[1], lastWord))
                    if (levenshtein.get(args[1], lastWord) <= 3) {
                        bot.currentWBW = bot.currentWBW.replace(new RegExp(lastWord + "([^" + lastWord + "]*)$"), args[1] + "$1 ");
                        api.respond(message, printf(api.strings.WBW_REPLACED, lastWord, args[1]))
                    } else {
                        return api.respond(message, printf(api.strings.WBW_TOO_DIFFERENT))
                    }
                } else {
                    bot.currentWBW += args[1] + ' ';
                    api.respond(message, printf(api.strings.WBW_NEW_WORD))
                    let toSay = bot.currentWBW.indexOf(".") == -1 ? bot.currentWBW : bot.currentWBW.slice(bot.currentWBW.lastIndexOf('.') + 2);
                    api.speak(message.member.voice.channel, toSay)
                    await api.db.run('update BotState set value=? where key="last_wbw"', message.author.id);
                }
            }
        }
    })
    api.registerCommand('newwbw', async (args, message) => {
        await api.db.run('insert into WBWStories (story_text) values(?)', bot.currentWBW);
        bot.currentWBW = '';
        api.respond(message, printf(api.strings.WBW_RESET))
    })
}
File diff suppressed because it is too large
package.json
@@ -4,31 +4,18 @@
  "description": "",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1",
    "start": "node index.js"
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "dependencies": {
    "@discordjs/voice": "^0.15.0",
    "@google-cloud/text-to-speech": "^3.1.3",
    "chatgpt": "^5.1.2",
    "discord.js": "^14.8.0",
    "discord.js": "^12.5.3",
    "dotenv": "^8.2.0",
    "fast-levenshtein": "^3.0.0",
    "google-tts-api": "^2.0.2",
    "is-string-int": "^1.0.1",
    "libsodium-wrappers": "^0.7.11",
    "microsoft-cognitiveservices-speech-sdk": "^1.16.0",
    "node-fetch": "^2.6.1",
    "node-google-translate-skidz": "^1.1.2",
    "opusscript": "^0.0.8",
    "printf": "^0.6.1",
    "sam-js": "^0.1.2",
    "sha1": "^1.1.1",
    "sqlite": "^4.0.21",
    "sqlite3": "^5.0.2",
    "wavefile": "^11.0.0"
    "sqlite3": "^5.0.2"
  }
}
@@ -1,17 +0,0 @@
{
  "WELCOME": "Beep boop. I'm a bot. Hi.",
  "USER_JOINED": "%s joined the channel.",
  "USER_LEFT": "%s left the channel.",
  "SYSTEM_VOICE_CHANGED": "My new voice is %s from %s",
  "USER_VOICE_CHANGED": "Your new voice is %s from %s",
  "INVALID_ENGINE": "%s is not a valid engine name.",
  "INVALID_VOICE": "invalid voice name. Using default voice %s for %s instead.",
  "TOO_MANY_ARGUMENTS": "too many arguments for command.",
  "CURRENT_STORY": "Here's the current story: %s",
  "NO_STORY": "No story in progress at the moment.",
  "WBW_REPLACED": "Replaced %s with %s",
  "WBW_TOO_DIFFERENT": "This word is too different from the last word.",
  "WBW_NEW_WORD": "Added to story.",
  "WBW_RESET": "The story has been reset.",
  "WBW_INVALID_ID": "No story with that ID."
}
@@ -1,10 +0,0 @@
{
  "WELCOME": "Hola hola, soy un bot.",
  "USER_JOINED": "%s se ha unido al canal.",
  "USER_LEFT": "%s ha salido del canal.",
  "SYSTEM_VOICE_CHANGED": "Mi nueva voz es %s de %s",
  "USER_VOICE_CHANGED": "Tu nueva voz es %s de %s",
  "INVALID_ENGINE": "%s no es un nombre de motor válido.",
  "INVALID_VOICE": "Nombre de voz no válido. Usando voz por defecto %s para %s.",
  "TOO_MANY_ARGUMENTS": "Demasiados argumentos para el comando."
}

BIN sysmsg.wav
Binary file not shown.

BIN sysstart.wav
Binary file not shown.

t.js
@@ -1,9 +0,0 @@
const t=require('node-google-translate-skidz')

t({
    text: 'this is a test of the meow',
    'source': 'en',
    target: 'ta'
}, (res) => {
    console.log(res);
});

Binary file not shown.
@@ -1,17 +1,11 @@
const fs=require('fs');

module.exports=class {
    constructor(shortName, longName, fileExtension, supportedParameters = []) {
        this.shortName = shortName;
    constructor(longName, fileExtension, supportedParameters=[]) {
        this.longName=longName;
        this.fileExtension=fileExtension;
    }
    getInternalVoiceName(str) {
        return this.voices ? this.voices[str] : str;
    }
    getDefaultVoice() { }
    validateVoice(voice) { return this.voices ? this.voices[voice] : true; }
    async getSpeech(text, voice = this.getDefaultVoice(), params) { }
    async getSpeech(text, voice, params) {}
    async getSpeechFile(text, filepath, voice, params) {
        const data = await this.getSpeech(text, voice, params);
        const contents = await data.arrayBuffer();
@@ -1,57 +0,0 @@
const BaseEngine = require('../BaseEngine');
const sdk = require("microsoft-cognitiveservices-speech-sdk");
const fetch = require('node-fetch');

module.exports = class AzureTTS extends BaseEngine {
    constructor() {
        super("azure", "Microsoft Azure TTS", "wav");
        this.voices = {};
        this.populateVoiceList();
    }

    getDefaultVoice() {
        return "Aria";
    }

    getSpeechFile(text, filepath, voice = this.getDefaultVoice(), params = {}) {
        return new Promise((resolve, reject) => {
            const speechConfig = sdk.SpeechConfig.fromSubscription(process.env.AZURE_API_KEY, process.env.AZURE_REGION);
            speechConfig.speechSynthesisOutputFormat = sdk.SpeechSynthesisOutputFormat.Riff24Khz16BitMonoPcm;
            speechConfig.speechSynthesisVoiceName = this.voices[voice];
            const audioConfig = sdk.AudioConfig.fromAudioFileOutput(filepath);
            const synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig);
            synthesizer.speakTextAsync(text,
                result => {
                    synthesizer.close();
                    if (result) {
                        // return result as stream
                        resolve(filepath);
                    }
                },
                error => {
                    console.log(error);
                    synthesizer.close();
                    reject(error);
                });
        })
    }

    async populateVoiceList() {
        const opts = {
            headers: {
                'Ocp-Apim-Subscription-Key': process.env.AZURE_API_KEY
            }
        }
        const res = await fetch(process.env.AZURE_LIST_ENDPOINT, opts);
        const json = await res.json();
        json.forEach((voice) => {
            if (this.voices[voice.DisplayName.toLowerCase()]) {
                if (voice.Name.includes('Neural')) {
                    this.voices[voice.DisplayName.toLowerCase()] = voice.ShortName;
                }
            } else {
                this.voices[voice.DisplayName.toLowerCase()] = voice.ShortName;
            }
        });
    }
}
@@ -1,47 +0,0 @@
const BaseEngine = require('../BaseEngine');
const fetch = require('node-fetch');
const querystring = require('querystring');

module.exports = class extends BaseEngine {
    constructor() {
        super('eleven',"Eleven Labs TTS", "mp3");
        this.voices = {};
        this.populateVoiceList();
    }
    async populateVoiceList() {
        const url = "https://api.elevenlabs.io/v1/voices";
        const authorization = process.env.XI_API_KEY;
        const opts = {
            method: "get",
            headers: {
                'xi-api-key': authorization
            },
        }
        const res = await fetch(url, opts);
        const voices = await res.json();
        voices.voices.forEach((i) => {
            let voiceName = i.name.toLowerCase();
            this.voices[voiceName] = i.voice_id;
        });
    }
    getDefaultVoice() {
        return 'Guillem';
    }
    async getSpeech(text, voice = this.getSpeechVoice(), params = {}) {
        const url = "https://api.elevenlabs.io/v1/text-to-speech/" + this.getInternalVoiceName(voice);
        const authorization = process.env.XI_API_KEY;
        const opts = {
            method: "post",
            headers: {
                'Content-Type': 'application/json',
                'voice_id': this.getInternalVoiceName(voice),
                'xi-api-key': authorization
            },
            body: JSON.stringify({
                model_id: 'eleven_multilingual_v2',
                text: text
            })
        };
        return fetch(url, opts);
    }
};
@@ -3,12 +3,9 @@ const { spawn } = require('child_process')

module.exports=class extends BaseEngine {
    constructor() {
        super('espeak','ESpeak', 'wav')
        super('ESpeak','wav')
    }
    getDefaultVoice() {
        return 'en';
    }
    async getSpeechFile(text, filepath, voice = this.getDefaultVoice(), params = {}) {
    async getSpeechFile(text, filepath, voice='en', params={}) {
        let proc=await spawn('espeak', ['-v', voice, '-w',filepath, '--stdin']);
        proc.stdin.end(text);
    }
@@ -1,36 +0,0 @@
const BaseEngine = require('../BaseEngine')
const sdk = require('@google-cloud/text-to-speech');
const fs = require('fs');
const util = require('util');

module.exports = class GoogleCloudTTS extends BaseEngine {
    constructor() {
        super('google', 'Google Cloud TTS', 'wav');
        this.client = new sdk.TextToSpeechClient();
        this.client.initialize();
        this.voices = {};
        this.populateVoiceList();
    }
    async populateVoiceList() {
        const [result] = await this.client.listVoices({});
        const voiceList = result.voices;
        voiceList.forEach((voice) => {

            this.voices[voice.name.toLowerCase()] = { name: voice.name, lang: voice.languageCodes[0] };
        });
    }
    getDefaultVoice() {
        return 'en-US-Wavenet-A';
    }
    async getSpeechFile(text, filepath, voice = this.getDefaultVoice(), params = {}) {
        const request = {
            input: { text: text },
            voice: { name: this.voices[voice].name, languageCode: this.voices[voice].lang },
            audioConfig: { audioEncoding: 'LINEAR16' },
        };
        let [response] = await this.client.synthesizeSpeech(request);
        const writeFile = util.promisify(fs.writeFile);
        await writeFile(filepath, response.audioContent, 'binary');
        return filepath;
    }
}
@@ -4,12 +4,9 @@ const tts = require('google-tts-api');

module.exports= class extends BaseEngine {
    constructor() {
        super('gtranslate', "Google Translate TTS", "mp3");
        super("Google Translate TTS","mp3");
    }
    getDefaultVoice() {
        return 'en-us';
    }
    async getSpeech(text, voice = this.getDefaultVoice(), params = {}) {
    async getSpeech(text, voice='en-us', params={}) {
        const url = tts.getAudioUrl(text, {lang: voice});
        return fetch(url);
    }
@@ -1,37 +0,0 @@
const BaseEngine = require('../BaseEngine');
const fetch = require('node-fetch');
const tts = require('google-tts-api');

module.exports = class extends BaseEngine {
    constructor() {
        super('openai', "OpenAI TTS", "mp3");
        this.voices = {
            alloy: "alloy",
            echo: "echo",
            fable: "fable",
            onyx: "onyx",
            nova: "nova",
            shimmer: "shimmer"
        }
    }
    getDefaultVoice() {
        return 'Alloy';
    }
    async getSpeech(text, voice = this.getDefaultVoice(), params = {}) {
        const url = `https://api.openai.com/v1/audio/speech`;
        const opts = {
            method: "post",
            headers: {
                'Content-Type': 'application/json',
                Authorization: `Bearer ${process.env["OPENAI_API_KEY"]}`
            },
            body: JSON.stringify({
                model: "tts-1-hd",
                input: text,
                voice: voice,
            })
        }
        console.log(opts);
        return fetch(url, opts);
    }
};
@@ -1,25 +0,0 @@
const BaseEngine = require('../BaseEngine')
const Sam = require('sam-js');
const wavefile = require('wavefile');
const fs = require('fs');

module.exports = class extends BaseEngine {
    constructor() {
        super('sam', 'Software Automatic Mouth', 'wav')
    }
    getDefaultVoice() {
        return 'sam';
    }
    async getSpeechFile(text, filepath, voice = this.getDefaultVoice(), params = {}) {
        let sam = new Sam();
        let phonetic = false;
        if (text[0] == "$") {
            text = text.slice(1);
            phonetic = true;
        }
        const buf = sam.buf8(text, phonetic);
        const file = new wavefile.WaveFile();
        file.fromScratch(1, 22050, 8, buf);
        fs.writeFileSync(filepath, file.toBuffer());
    }
}
@@ -1,47 +0,0 @@
const BaseEngine = require('../BaseEngine');
const fetch = require('node-fetch');
const querystring = require('querystring');
const fs = require("fs");

module.exports = class extends BaseEngine {
    constructor() {
        super('unreal', "Unreal Speech TTS", "mp3");
        this.voices = {
            scarlett: 'Scarlett',
            liv: 'Liv',
            dan: 'Dan',
            will: 'Will',
            amy: 'Amy'
        };
    }

    getDefaultVoice() {
        return 'Liv';
    }

    async getSpeechFile(text, filepath, voice = this.getDefaultVoice(), params = {}) {
        const url = "https://api.v6.unrealspeech.com/speech";
        const authorization = process.env.UNREAL_API_KEY;
        const opts = {
            method: "post",
            headers: {
                'Content-Type': 'application/json',
                'Authorization': `Bearer ${authorization}`
            },
            body: JSON.stringify({
                Bitrate: "320k",
                Temperature: 0.1,
                VoiceId: this.getInternalVoiceName(voice),
                Text: text,
                AudioFormat: "mp3"
            })
        };
        const res = await fetch(url, opts);
        const json = await res.json();
        const data = await fetch(json.OutputUri);
        const contents = await data.arrayBuffer();
        const buf = Buffer.from(contents);
        fs.writeFileSync(filepath, buf);
        return filepath;
    }
};
@@ -4,37 +4,13 @@ const querystring = require('querystring');

module.exports= class extends BaseEngine {
    constructor() {
        super('watson',"IBM Watson TTS", "ogg");
        this.voices = {};
        this.populateVoiceList();
        super("IBM Watson TTS","ogg");
    }
    async populateVoiceList() {
        const url = process.env.watsonURL + "/v1/voices";
        const authorization = this.IBMAuthString();
        const opts = {
            method: "get",
            headers: {
                'Authorization': authorization
            },
        }
        const res = await fetch(url, opts);
        const voices = await res.json();
        voices.voices.forEach((i) => {
            let voiceName = i.description.substring(0, i.description.indexOf(':')).toLowerCase();
            this.voices[voiceName] = i.name;
        });
    }
    getDefaultVoice() {
        return 'Michael';
    }
    IBMAuthString() {
    async getSpeech(text, voice='en-us', params={}) {
        const url = process.env.watsonURL+"/v1/synthesize";
        let buff=new Buffer('apikey:'+process.env.watsonAPIKey);
        let b64auth=buff.toString('base64');
        return 'Basic ' + b64auth;
    }
    async getSpeech(text, voice = this.getSpeechVoice(), params = {}) {
        const url = process.env.watsonURL + "/v1/synthesize?voice=" + this.getInternalVoiceName(voice);
        const authorization = this.IBMAuthString();
        const authorization='Basic '+b64auth;
        const opts={
            method: "post",
            headers: {