Commit 8730736
Aleksey Khlopkov committed
Parent(s): 99bafa0
gifs, re, clear old msg
main.py
CHANGED
@@ -1,4 +1,7 @@
+import time
 from typing import Final
+import requests
+import re
 from telegram import Update
 from telegram.ext import Application, MessageHandler, filters, ContextTypes
 from typing import Optional
@@ -16,6 +19,14 @@ CHAT_ID: Final = int(os.environ.get("CHAT_ID"))
 
 CHECKPOINT_PATH: Final = "models/seq2seq/checkpoint/150_checkpoint.tar"
 
+romantiki_gif_id = "CgACAgIAAxkBAAE4zMlojLmMwqrxG5e2rnYS2f9_PZZgVwACL2oAAjbWyUqiyR5II6u6YDYE"
+bezumtsi_gif_id = "CgACAgIAAxkBAAE4zMtojLmiH_CGW5cT7G0QVXHR7D4g6wAC53UAApkBmEmM-VxqunRc6zYE"
+
+last_maxim_insult = 1.0
+last_gif_sent = 1.0
+maxim_insult_cooldown = 5.0
+gif_sent_cooldown = 5.0
+
 torch.manual_seed(0)
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
@@ -25,17 +36,39 @@ chatbot.eval_mode()
 
 
 def handle_response(text: str) -> Optional[str]:
-    response_chance =
+    response_chance = 0.02
     if random.random() < response_chance:
         return chatbot(text)
     return None
 
 
+def edit_response(text: str) -> str:
+    text = re.sub(r'\s+([,.!?;])\s+', r'\1 ', text)
+
+    return text
+
+
 async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
     if update.message.chat_id == CHAT_ID:
-
-
-        if
+        response: Optional[str] = ""
+        global last_maxim_insult, last_gif_sent
+        if update.message.from_user.username == "WhoReadThisWillDie" and \
+                time.time() - last_maxim_insult >= maxim_insult_cooldown:
+            response = "Максим, иди нахуй"
+            last_maxim_insult = time.time()
+        elif "роман" in update.message.text.lower() and \
+                time.time() - last_gif_sent >= gif_sent_cooldown:
+            await context.bot.send_animation(chat_id=update.message.chat_id, animation=romantiki_gif_id)
+            last_gif_sent = time.time()
+        elif "безу" in update.message.text.lower() and \
+                time.time() - last_gif_sent >= gif_sent_cooldown:
+            await context.bot.send_animation(chat_id=update.message.chat_id, animation=bezumtsi_gif_id)
+            last_gif_sent = time.time()
+        else:
+            text = update.message.text.replace(BOT_USERNAME, '').strip().lower()
+            response = edit_response(handle_response(text))
+
+        if response != "":
            await context.bot.sendMessage(update.message.chat_id, response, reply_to_message_id=update.message.id)
 
 
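The edit_response pass added above presumably tidies the spaced-out punctuation in the model's replies. A minimal standalone sketch of its behaviour, using a hypothetical model output as input (the pattern needs whitespace on both sides of the punctuation, so a trailing " ?" at the very end of a string is left untouched):

import re

def edit_response(text: str) -> str:
    # Reattach a space-separated punctuation token to the preceding word,
    # keeping a single space after it.
    return re.sub(r'\s+([,.!?;])\s+', r'\1 ', text)

print(edit_response("hello , how are you ? fine"))
# -> "hello, how are you? fine"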
@@ -47,6 +80,8 @@ async def error(update: Update, context: ContextTypes.DEFAULT_TYPE):
 
 def main() -> None:
     """Run the bot."""
+    requests.post(f"https://api.telegram.org/bot{TOKEN}/getUpdates?offset=-1")
+
     application = Application.builder().token(TOKEN).build()
 
     application.add_handler(MessageHandler(filters.TEXT, handle_message))
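On the "clear old msg" part of the commit: the Telegram Bot API treats a negative offset in getUpdates as counting from the end of the update queue and forgets everything before it, so the one-line requests.post in main() drops all but the newest pending update before polling starts. A minimal sketch of that step as a standalone helper; the name clear_pending_updates, the params form, and the error handling are illustrative additions, not part of the commit:

import requests

def clear_pending_updates(token: str) -> None:
    # offset=-1 fetches only the newest pending update and implicitly
    # confirms (discards) every update queued before it, so the bot does
    # not answer the backlog accumulated while it was offline.
    resp = requests.post(
        f"https://api.telegram.org/bot{token}/getUpdates",
        params={"offset": -1},
        timeout=10,
    )
    resp.raise_for_status()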