llm_debugger.py (forked from All-Hands-AI/OpenHands)
#!/usr/bin/env python3.11
import warnings

# Silence the noisy warnings that litellm emits at import time.
with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    import litellm
import toml
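# Overview (reconstructed from the code below): this script replays one logged
# LLM step from an OpenHands SWE-bench run and lets you interrogate the model
# about it. It is assumed to be run from the repo root after an eval run, so
# that evaluation/benchmarks/swe_bench/config.toml, config.toml and logs/llm/
# all exist. Flow:
#   1. Read the selected instance id and pick the step and model to debug.
#   2. Load the logged prompt and response for that step.
#   3. Ask the model why it acted that way, then keep an interactive Q&A loop
#      going; entering 'q' replays the original prompt once and exits.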
# Which logged step to inspect (e.g. 001_prompt.log / 001_response.log).
number = 1
# Candidate [llm.<name>] sections from config.toml; the last assignment wins.
model = 'gemini_pro'
model = 'gemini_flash'
model = 'groq'
model = 'ollama'
# The SWE-bench eval config names the selected instance; only the trailing
# number of its id (e.g. 'owner__repo-12345' -> '12345') is used in the log
# folder name.
with open('evaluation/benchmarks/swe_bench/config.toml', 'r') as f:
    environ = f.read()
config = toml.loads(environ)
selection_id = config['selected_ids'][0].split('-')[-1]
folder = f'{model}_{selection_id}'
prompt = f'logs/llm/{folder}/{number:03d}_prompt.log'
response = f'logs/llm/{folder}/{number:03d}_response.log'
with open(prompt, 'r') as file:
    prompt_content = file.read()
with open(response, 'r') as file:
    response_content = file.read()
# Model, credentials and endpoint come from the top-level config.toml [llm] section.
config = 'config.toml'
with open(config, 'r') as file:
    config_content = toml.load(file)['llm']
# In eval mode, use the named [llm.<model>] sub-section instead of [llm] itself.
eval = 1
if eval:
    config_content = config_content[model]
model = config_content['model']
api_key = config_content.get('api_key')
base_url = config_content.get('base_url')
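# Expected config.toml shape for the selected model (illustrative values, not
# taken from the source; only the keys read above are assumed):
#   [llm.ollama]
#   model = "ollama/llama3"
#   base_url = "http://localhost:11434"
#   api_key = "ollama"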
# Scratchpad of debugging questions; only the last assignment is sent.
question = 'Why did you use insert content before line 60?'
question = 'Why are you searching for header_rows?'
question = 'Why did you search for header_rows in ui.py?'
question = '''
Why did you search for Myclass?
'''
inst = '\n\nJust tell only the reason for your action.'
# inst = f'give analysis; then give Step {number}: and produce the solution along with thought process.'
question += inst
new_prompt = f"""
INITIAL PROMPT:
{prompt_content}
INITIAL RESPONSE:
{response_content}
DEBUGGER:
{question}
"""
messages = [
    {
        'role': 'system',
        'content': 'You are the assistant. Your responses are wrong. The debugger will ask you questions and provide you with the initial prompt and initial response. Answer the questions and provide the corrected response.',
    },
    {'role': 'user', 'content': new_prompt},
]
while True:
    response = litellm.completion(
        model=model,
        messages=messages,
        api_key=api_key,
        base_url=base_url,
    )
    resp = response['choices'][0]['message']['content']
    print(resp)
    question = input('> ')
    if question == 'q':
        # On quit, replay the original prompt once with no debugger context
        # and print the model's fresh answer before exiting.
        with open(prompt, 'r') as file:
            prompt_content = file.read()
        response = litellm.completion(
            model=model,
            messages=[{'role': 'user', 'content': prompt_content}],
            api_key=api_key,
            base_url=base_url,
        )
        resp = response['choices'][0]['message']['content']
        print(resp)
        break
    messages.append({'role': 'assistant', 'content': 'Assistant: ' + resp})
    messages.append({'role': 'user', 'content': 'User: ' + question})
    inst = 'Reply in one line.'
    messages.append({'role': 'system', 'content': 'System: ' + inst})
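# Example session (illustrative; actual model output will differ):
#   $ python llm_debugger.py
#   <model explains the action it took in the logged step>
#   > Why did you search for header_rows in ui.py?
#   <one-line answer>
#   > q
#   <model's fresh answer to the original prompt, then exit>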