Commit 845e8ac5 by Mingju

finish apis

parent 9fc844ec
import aiohttp
from chroma_memory_stream import MemoryStream
import chroma_retrieve
import run_gpt
# Path to the text file holding the OpenAI API key (read by get_key below).
api_file = 'key.txt'
def get_key(api_key_f):
    """Read the API key stored in *api_key_f* and return it stripped of whitespace."""
    with open(api_key_f, 'r') as key_file:
        return key_file.read().strip()
def run_gpt_event_poignancy_prompt(api_key_f, record):
    """Rate the poignancy of an event record on a 1-10 scale via GPT.

    Args:
        api_key_f: path to the file containing the OpenAI API key.
        record: textual description of the event to rate.

    Returns:
        int rating, or None if every attempt fails (callers substitute a
        default of 4 when None is returned).
    """
    # BUG FIX: an unconditional `raise NotImplementedError` previously made
    # everything below unreachable; removed so the implementation runs.
    key = get_key(api_key_f)
    # Retry: the model occasionally returns non-integer text, and the
    # request itself can fail transiently.
    for _ in range(10):
        try:
            return int(run_gpt.gpt_request(key, record, 'prompt/poignancy_event.txt'))
        except Exception:
            # NOTE(review): errors are swallowed so we can retry; consider logging.
            continue
    return None
def run_gpt_chat_poignancy_prompt(api_key_f, chat_history):
    """Rate the poignancy of a conversation on a 1-10 scale via GPT.

    Args:
        api_key_f: path to the file containing the OpenAI API key.
        chat_history: the conversation text to rate.

    Returns:
        int rating, or None if every attempt fails (callers substitute a
        default of 4 when None is returned).
    """
    # BUG FIX: an unconditional `raise NotImplementedError` previously made
    # everything below unreachable; removed so the implementation runs.
    key = get_key(api_key_f)
    # Retry: the model occasionally returns non-integer text, and the
    # request itself can fail transiently.
    for _ in range(10):
        try:
            return int(run_gpt.gpt_request(key, chat_history, 'prompt/poignancy_chat.txt'))
        except Exception:
            # NOTE(review): errors are swallowed so we can retry; consider logging.
            continue
    return None
def run_gpt_skill_poignancy_prompt(api_key_f, skill_desc):
    """Rate the poignancy (usefulness) of a skill on a 1-10 scale via GPT.

    Args:
        api_key_f: path to the file containing the OpenAI API key.
        skill_desc: textual description of the skill to rate.

    Returns:
        int rating, or None if every attempt fails (callers substitute a
        default of 4 when None is returned).
    """
    # BUG FIX: an unconditional `raise NotImplementedError` previously made
    # everything below unreachable; removed so the implementation runs.
    key = get_key(api_key_f)
    # Retry: the model occasionally returns non-integer text, and the
    # request itself can fail transiently.
    for _ in range(10):
        try:
            return int(run_gpt.gpt_request(key, skill_desc, 'prompt/poignancy_skill.txt'))
        except Exception:
            # NOTE(review): errors are swallowed so we can retry; consider logging.
            continue
    return None
def run_gpt_summary_event_prompt(api_key_f, record):
    """Summarize an event record in a few sentences via GPT.

    Args:
        api_key_f: path to the file containing the OpenAI API key.
        record: textual description of the event to summarize.

    Returns:
        The model's summary text (whatever run_gpt.gpt_request returns).
    """
    # BUG FIX: an unconditional `raise NotImplementedError` previously made
    # everything below unreachable; removed so the implementation runs.
    key = get_key(api_key_f)
    return run_gpt.gpt_request(key, record, 'prompt/summary_event.txt')
def run_gpt_summary_chat_prompt(api_key_f, chat_history):
    """Summarize a conversation in a few sentences via GPT.

    Args:
        api_key_f: path to the file containing the OpenAI API key.
        chat_history: the conversation text to summarize.

    Returns:
        The model's summary text (whatever run_gpt.gpt_request returns).
    """
    # BUG FIX: an unconditional `raise NotImplementedError` previously made
    # everything below unreachable; removed so the implementation runs.
    key = get_key(api_key_f)
    return run_gpt.gpt_request(key, chat_history, 'prompt/summary_chat.txt')
def parse_observations(observation:dict):
description = ''
......@@ -63,7 +87,7 @@ class MemoryAPIs(object):
description = chat_history
code = 'NULL'
poignancy = run_gpt_chat_poignancy_prompt(api_key_f=api_file, chat_history=description)
poignancy = poignancy if poignancy is not None else 4
self.memory_stream.add_record_in_mem(r_type, description, code,
summary, now_time, poignancy)
......@@ -75,7 +99,7 @@ class MemoryAPIs(object):
r_type = 'event'
code = 'NULL'
poignancy = run_gpt_event_poignancy_prompt(api_key_f=api_file, record=description)
poignancy = poignancy if poignancy is not None else 4
self.memory_stream.add_record_in_mem(r_type, description, code,
summary, now_time, poignancy)
pass
......@@ -90,7 +114,7 @@ class MemoryAPIs(object):
code = skill_info['code']
summary = 'NULL'
poignancy = run_gpt_skill_poignancy_prompt(api_key_f=api_file, skill_desc=description)
poignancy = poignancy if poignancy is not None else 4
self.memory_stream.add_record_in_mem(r_type, description, code,
summary, now_time, poignancy)
......
On the scale of 1 to 10, where 1 is purely mundane (e.g., routine greetings) and 10 is extremely poignant (e.g., a conversation about how to dig a diamond), rate the likely poignancy of the following conversation.
Conversation:
!<INPUT 0>!
The output should ONLY contain ONE integer value on the scale of 1 to 10.
Rate (return a number between 1 to 10):
\ No newline at end of file
On the scale of 1 to 10, where 1 is purely mundane (e.g., remain still) and 10 is extremely poignant (e.g., find a diamond, face a zombie), rate the likely poignancy of the following event.
Event:
!<INPUT 0>!
The output should ONLY contain ONE integer value on the scale of 1 to 10.
Rate (return a number between 1 to 10):
\ No newline at end of file
On the scale of 1 to 10, where 1 is rarely used and 10 is extremely poignant (e.g., mining, make a specific tool), rate the likely poignancy of the following skill.
Skill:
!<INPUT 0>!
The output should ONLY contain ONE integer value on the scale of 1 to 10.
Rate (return a number between 1 to 10):
\ No newline at end of file
Conversation:
!<INPUT 0>!
Summarize the conversation above in a few sentences:
This is a conversation about
\ No newline at end of file
Event:
!<INPUT 0>!
Summarize the Event in a few sentences:
This is an event about
\ No newline at end of file
import openai
import json
def generate_prompt(curr_input, prompt_lib_file):
"""
Takes in the current input (e.g. comment that you want to classifiy) and
the path to a prompt file. The prompt file contains the raw str prompt that
will be used, which contains the following substr: !<INPUT>! -- this
function replaces this substr with the actual curr_input to produce the
final promopt that will be sent to the GPT3 server.
ARGS:
curr_input: the input we want to feed in (IF THERE ARE MORE THAN ONE
INPUT, THIS CAN BE A LIST.)
prompt_lib_file: the path to the promopt file.
RETURNS:
a str prompt that will be sent to OpenAI's GPT server.
"""
if type(curr_input) == type("string"):
curr_input = [curr_input]
curr_input = [str(i) for i in curr_input]
f = open(prompt_lib_file, "r")
prompt = f.read()
f.close()
for count, i in enumerate(curr_input):
prompt = prompt.replace(f"!<INPUT {count}>!", i)
if "<commentblockmarker>###</commentblockmarker>" in prompt:
prompt = prompt.split("<commentblockmarker>###</commentblockmarker>")[1]
return prompt.strip()
def gpt_request(key, curr_input, prompt_temp_file):
    """Send a templated prompt to the GPT API and return its response.

    STUB: only the prompt is built; the actual API call is not implemented
    yet, so this currently always returns None. `key` (the API key) is
    accepted but unused until the request code is added.
    """
    prompt = generate_prompt(curr_input, prompt_temp_file)
    # TODO: issue the OpenAI request with `key` and `prompt`, return the text.
    pass
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment