[falcon.py] big test (run_in_executor with job.submit INSIDE)
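What this commit tests: calling `falcon_client.submit(...)` and then spinning on `job.done()` blocks the calling thread, so doing it directly inside the bot's coroutine stalls the whole Discord event loop. The change moves the submit-and-poll sequence into a plain synchronous function, `falcon_test`, and runs that function on a worker thread via `loop.run_in_executor`, keeping the event loop responsive during generation. A minimal sketch of the pattern, with placeholder names rather than the bot's actual code:

```python
import asyncio
import time

def blocking_generation(prompt: str) -> str:
    # Stand-in for falcon_test: blocks the calling thread until done.
    time.sleep(2)
    return f"generated text for: {prompt}"

async def handler(prompt: str) -> str:
    loop = asyncio.get_running_loop()
    # None -> run on the loop's default ThreadPoolExecutor;
    # the coroutine suspends here without blocking the event loop.
    result = await loop.run_in_executor(None, blocking_generation, prompt)
    return result

print(asyncio.run(handler("hello")))
```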
falcon.py CHANGED
@@ -16,6 +16,39 @@ falcon_client = Client("HuggingFaceH4/falcon-chat", HF_TOKEN)
 BOT_USER_ID = 1086256910572986469 if os.getenv("TEST_ENV", False) else 1102236653545861151
 FALCON_CHANNEL_ID = 1079459939405279232 if os.getenv("TEST_ENV", False) else 1119313248056004729
 
+
+def falcon_test(prompt, instructions):
+    global falcon_userid_threadid_dictionary
+    global threadid_conversation
+
+    # setup
+    job = falcon_client.submit(fn_index=5)
+    while job.done() is False:
+        pass
+    else:
+        file_path = job.outputs()
+        with open(file_path, 'r') as file:
+            chathistory = json.load(file)
+
+    # text generation
+    job = falcon_client.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1)
+    while job.done() is False:
+        pass
+    else:
+        if os.environ.get('TEST_ENV') == 'True':
+            print("falcon text gen job done")
+        file_paths = job.outputs()
+        full_generation = file_paths[-1]
+        with open(full_generation, 'r') as file:
+            data = json.load(file)
+        output_text = data[-1][-1]
+
+        threadid_conversation[thread.id] = full_generation
+        falcon_userid_threadid_dictionary[thread.id] = interaction.user.id
+        print(output_text)
+        return output_text
+
+
 async def try_falcon(interaction, prompt):
     """Generates text based on a given prompt"""
     try:
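A side note on the waiting style: `while job.done() is False: pass` is a busy-wait that pins a CPU core until the Space responds (the `else:` after each loop always runs, since nothing ever `break`s, so it is effectively sequential code). Now that this runs on a worker thread instead of the event loop, a blocking wait is harmless, and `gradio_client`'s `Job` behaves like a `concurrent.futures.Future`, so `job.result()` should block without spinning (assuming the installed `gradio_client` version exposes it). A sketch of the setup step in that style, using the `falcon_client` defined at the top of this file; note that `job.outputs()` returns a list, which the generation branch indexes with `[-1]` but the setup branch above opens directly:

```python
import json

def load_chathistory():
    """Fetch the Space's chat history file without busy-waiting."""
    job = falcon_client.submit(fn_index=5)  # same setup call as falcon_test
    job.result()                            # block this worker thread until done
    file_path = job.outputs()[-1]           # outputs() is a list; take the latest
    with open(file_path, 'r') as file:
        return json.load(file)
```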
@@ -35,10 +68,17 @@ async def try_falcon(interaction, prompt):
         thread = await message.create_thread(name=f'{prompt}', auto_archive_duration=60) # interaction.user
         await thread.send(f"[DISCLAIMER: HuggingBot is a **highly experimental** beta feature; The Falcon " \
                           f"model and system prompt can be found here: https://huggingface.co/spaces/HuggingFaceH4/falcon-chat]")
-
-
+
+
+
         if os.environ.get('TEST_ENV') == 'True':
-            print("Running
+            print("Running falcon run_in_executor... (part 1)")
+        loop = asyncio.get_running_loop()
+        result = await loop.run_in_executor(None, falcon_test, prompt, instructions)
+
+        await thread.send(f"{output_text}")
+
+        '''
         job = falcon_client.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1) # This is not blocking, similar to run_in_executor (but better)
         while job.done() is False:
             pass
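In the hunk above, the first argument to `run_in_executor` is `None`, which selects the loop's default `ThreadPoolExecutor`; each in-flight generation then occupies one of its worker threads for the full duration. If that limit ever needs to be explicit, an executor can be passed in. A sketch with an arbitrary pool size; the `falcon_executor` name and `run_falcon` wrapper are illustrative, not from this commit:

```python
import asyncio
from concurrent.futures import ThreadPoolExecutor

falcon_executor = ThreadPoolExecutor(max_workers=4)  # cap concurrent generations

async def run_falcon(prompt: str, instructions: str) -> str:
    loop = asyncio.get_running_loop()
    # Same call shape as the hunk above, but on an explicit pool.
    return await loop.run_in_executor(falcon_executor, falcon_test, prompt, instructions)
```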
@@ -49,15 +89,9 @@ async def try_falcon(interaction, prompt):
         full_generation = file_paths[-1]
         with open(full_generation, 'r') as file:
             data = json.load(file)
-        output_text = data[-1][-1]
+        output_text = data[-1][-1]
+        '''
 
-        threadid_conversation[thread.id] = full_generation
-        falcon_userid_threadid_dictionary[thread.id] = interaction.user.id
-        print(output_text)
-        await thread.send(f"{output_text}")
-
-    except Exception as e:
-        print(f"Error: {e}")
 
 async def continue_falcon(message):
     """Continues a given conversation based on chathistory"""