Merge remote-tracking branch 'upstream/main'

pull/22/head
Alessandro Annini 2023-04-15 10:01:19 +02:00
commit 05803e8592
6 changed files with 90 additions and 35 deletions

.env.sample 100644

@@ -0,0 +1,2 @@
+OPENAI_API_KEY=your_api_key
+#DEFAULT_MODEL=gpt-3.5-turbo
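These two variables are everything the new configuration needs; a minimal sketch of how python-dotenv picks them up, assuming the sample has been copied to `.env`:

```python
# Minimal sketch, assuming .env.sample has been copied to .env:
# python-dotenv loads key=value pairs from .env into the process environment.
import os
from dotenv import load_dotenv

load_dotenv()  # makes OPENAI_API_KEY (and DEFAULT_MODEL, if uncommented) visible
api_key = os.getenv("OPENAI_API_KEY")
model = os.environ.get("DEFAULT_MODEL", "gpt-4")  # commented line above -> gpt-4 fallback
```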

.gitignore vendored

@@ -1,2 +1,5 @@
 venv
-openai_key.txt
+.venv
+.env
+env/
+.vscode/

README.md

@@ -13,8 +13,11 @@ For a quick demonstration see my [demo video on twitter](https://twitter.com/bio
 python3 -m venv venv
 source venv/bin/activate
 pip install -r requirements.txt
+cp .env.sample .env
-Add your openAI api key to `openai_key.txt` - _warning!_ by default this uses GPT-4 and may make many repeated calls to the api.
+Add your openAI api key to `.env`
+_warning!_ By default wolverine uses GPT-4 and may make many repeated calls to the api.
 ## Example Usage
@@ -26,13 +29,21 @@ You can also run with other models, but be warned they may not adhere to the edi
 python wolverine.py --model=gpt-3.5-turbo buggy_script.py "subtract" 20 3
+If you want to use GPT-3.5 by default instead of GPT-4 uncomment the default model line in `.env`:
+DEFAULT_MODEL=gpt-3.5-turbo
 ## Future Plans
 This is just a quick prototype I threw together in a few hours. There are many possible extensions and contributions are welcome:
 - add flags to customize usage, such as asking for user confirmation before running changed code
 - further iterations on the edit format that GPT responds in. Currently it struggles a bit with indentation, but I'm sure that can be improved
-- a suite of example buggy files that we can test prompts on to ensure reliablity and measure improvement
+- a suite of example buggy files that we can test prompts on to ensure reliability and measure improvement
 - multiple files / codebases: send GPT everything that appears in the stacktrace
 - graceful handling of large files - should we just send GPT relevant classes / functions?
 - extension to languages other than python
+## Star History
+[![Star History Chart](https://api.star-history.com/svg?repos=biobootloader/wolverine&type=Date)](https://star-history.com/#biobootloader/wolverine)
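The model selection the README describes resolves in a fixed order: an explicit `--model` flag wins, then `DEFAULT_MODEL` from `.env`, then the hard-coded gpt-4 fallback. A sketch of that precedence (`pick_model` is a hypothetical helper, not code from this commit):

```python
import os

# Hypothetical helper, not in the repo: mirrors the precedence the README
# describes (explicit --model flag, then DEFAULT_MODEL from .env, then gpt-4).
def pick_model(cli_model=None):
    return cli_model or os.environ.get("DEFAULT_MODEL", "gpt-4")

print(pick_model("gpt-3.5-turbo"))  # the flag overrides everything
print(pick_model())                 # .env value if set, otherwise gpt-4
```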

prompt.txt

@@ -4,10 +4,13 @@ Because you are part of an automated system, the format you respond in is very s
 In addition to the changes, please also provide short explanations of the what went wrong. A single explanation is required, but if you think it's helpful, feel free to provide more explanations for groups of more complicated changes. Be careful to use proper indentation and spacing in your changes. An example response could be:
+Be ABSOLUTELY SURE to include the CORRECT INDENTATION when making replacements.
+example response:
 [
 {"explanation": "this is just an example, this would usually be a brief explanation of what went wrong"},
 {"operation": "InsertAfter", "line": 10, "content": "x = 1\ny = 2\nz = x * y"},
 {"operation": "Delete", "line": 15, "content": ""},
-{"operation": "Replace", "line": 18, "content": "x += 1"},
+{"operation": "Replace", "line": 18, "content": "    x += 1"},
 {"operation": "Delete", "line": 20, "content": ""}
 ]
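The three operation types above form a small edit protocol; a toy sketch of applying such a list to a file's lines (invented sample data, simplified relative to wolverine's actual apply_changes):

```python
import json

# Invented sample response in the prompt's format (not from the repo).
edits = json.loads('''[
    {"explanation": "off-by-one in the loop bound"},
    {"operation": "Replace", "line": 2, "content": "    for i in range(n):"},
    {"operation": "Delete", "line": 4, "content": ""}
]''')

lines = ["def f(n):\n", "    for i in range(n + 1):\n",
         "        print(i)\n", "    print('done')\n"]

# Apply from the highest line number down so edits don't shift each other.
ops = [e for e in edits if "operation" in e]
for op in sorted(ops, key=lambda e: e["line"], reverse=True):
    idx = op["line"] - 1  # the prompt uses 1-based line numbers
    if op["operation"] == "Replace":
        lines[idx] = op["content"] + "\n"
    elif op["operation"] == "Delete":
        del lines[idx]
    elif op["operation"] == "InsertAfter":
        lines[idx + 1:idx + 1] = [l + "\n" for l in op["content"].split("\n")]

print("".join(lines), end="")
```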

requirements.txt

@@ -13,6 +13,7 @@ multidict==6.0.4
 openai==0.27.2
 pycodestyle==2.10.0
 pyflakes==3.0.1
+python-dotenv==1.0.0
 requests==2.28.2
 six==1.16.0
 termcolor==2.2.0

wolverine.py

@@ -5,13 +5,20 @@ import os
 import shutil
 import subprocess
 import sys
 import openai
 from termcolor import cprint
+from dotenv import load_dotenv
 # Set up the OpenAI API
-with open("openai_key.txt") as f:
-    openai.api_key = f.read().strip()
+load_dotenv()
+openai.api_key = os.getenv("OPENAI_API_KEY")
+DEFAULT_MODEL = os.environ.get("DEFAULT_MODEL", "gpt-4")
+with open("prompt.txt") as f:
+    SYSTEM_PROMPT = f.read()
 def run_script(script_name, script_args):
@@ -25,18 +32,49 @@ def run_script(script_name, script_args):
     return result.decode("utf-8"), 0
-def run_node(script_name, script_args):
-    script_args = [str(arg) for arg in script_args]
+def json_validated_response(model, messages):
+    """
+    This function is needed because the API can return a non-json response.
+    This will run recursively until a valid json response is returned.
+    todo: might want to stop after a certain number of retries
+    """
+    response = openai.ChatCompletion.create(
+        model=model,
+        messages=messages,
+        temperature=0.5,
+    )
+    messages.append(response.choices[0].message)
+    content = response.choices[0].message.content
+    # see if json can be parsed
     try:
-        result = subprocess.check_output(
-            ["node", script_name, *script_args], stderr=subprocess.STDOUT
+        json_start_index = content.index(
+            "["
+        )  # find the starting position of the JSON data
+        json_data = content[
+            json_start_index:
+        ]  # extract the JSON data from the response string
+        json_response = json.loads(json_data)
+    except (json.decoder.JSONDecodeError, ValueError) as e:
+        cprint(f"{e}. Re-running the query.", "red")
+        # debug
+        cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
+        # append a user message that says the json is invalid
+        messages.append(
+            {
+                "role": "user",
+                "content": "Your response could not be parsed by json.loads. Please restate your last message as pure JSON.",
+            }
         )
-    except subprocess.CalledProcessError as e:
-        return e.output.decode("utf-8"), e.returncode
-    return result.decode("utf-8"), 0
+        # rerun the api call
+        return json_validated_response(model, messages)
+    except Exception as e:
+        cprint(f"Unknown error: {e}", "red")
+        cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
+        raise e
+    return json_response
-def send_error_to_gpt(file_path, args, error_message, model):
+def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
     with open(file_path, "r") as f:
         file_lines = f.readlines()
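The docstring's todo flags that this recursion has no stopping point; one bounded variant could look like this (the `max_retries` parameter and the function name are invented for this sketch, not part of the commit):

```python
import json
import openai

# Sketch of a retry-capped variant of json_validated_response above;
# max_retries is an invented parameter, not part of this commit.
def json_validated_response_bounded(model, messages, max_retries=3):
    if max_retries <= 0:
        raise ValueError("no valid JSON response after several retries")
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0.5,
    )
    messages.append(response.choices[0].message)
    content = response.choices[0].message.content
    try:
        # same extraction as above: parse from the first '[' onward
        return json.loads(content[content.index("["):])
    except (json.decoder.JSONDecodeError, ValueError):
        messages.append(
            {
                "role": "user",
                "content": "Your response could not be parsed by json.loads. "
                "Please restate your last message as pure JSON.",
            }
        )
        # recurse with a smaller budget instead of retrying forever
        return json_validated_response_bounded(model, messages, max_retries - 1)
```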
@@ -45,11 +83,7 @@ def send_error_to_gpt(file_path, args, error_message, model):
         file_with_lines.append(str(i + 1) + ": " + line)
     file_with_lines = "".join(file_with_lines)
-    with open("prompt.txt") as f:
-        initial_prompt_text = f.read()
     prompt = (
-        initial_prompt_text + "\n\n"
         "Here is the script that needs fixing:\n\n"
         f"{file_with_lines}\n\n"
         "Here are the arguments it was provided:\n\n"
@@ -61,27 +95,27 @@ def send_error_to_gpt(file_path, args, error_message, model):
     )
     # print(prompt)
+    messages = [
+        {
+            "role": "system",
+            "content": SYSTEM_PROMPT,
+        },
+        {
+            "role": "user",
+            "content": prompt,
+        },
+    ]
-    response = openai.ChatCompletion.create(
-        model=model,
-        messages=[
-            {
-                "role": "user",
-                "content": prompt,
-            }
-        ],
-        temperature=1.0,
-    )
-    return response.choices[0].message.content.strip()
+    return json_validated_response(model, messages)
-def apply_changes(file_path, changes_json):
+def apply_changes(file_path, changes: list):
+    """
+    Pass changes as loaded json (list of dicts)
+    """
     with open(file_path, "r") as f:
         original_file_lines = f.readlines()
-    changes = json.loads(changes_json)
     # Filter out explanation elements
     operation_changes = [change for change in changes if "operation" in change]
     explanations = [
@@ -124,7 +158,7 @@ def apply_changes(file_path, changes_json):
         print(line, end="")
-def main(script_name, *script_args, revert=False, model="gpt-3.5-turbo"):
+def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL):
     if revert:
         backup_file = script_name + ".bak"
         if os.path.exists(backup_file):
@@ -160,6 +194,7 @@ def main(script_name, *script_args, revert=False, model="gpt-3.5-turbo"):
             error_message=output,
             model=model,
         )
+        apply_changes(script_name, json_response)
         cprint("Changes applied. Rerunning...", "blue")