Mirror of https://github.com/biobootloader/wolverine
Merge branch 'main' into add_unit_tests
commit fb6466a810
@@ -23,7 +23,7 @@ _warning!_ By default wolverine uses GPT-4 and may make many repeated calls to t

To run with gpt-4 (the default, tested option):

python wolverine.py buggy_script.py "subtract" 20 3
python wolverine.py examples/buggy_script.py "subtract" 20 3

You can also run with other models, but be warned they may not adhere to the edit format as well:
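The hunk above ends before the alternate-model command. As an illustration only (the `--model` flag follows from the `model=DEFAULT_MODEL` keyword argument that fire exposes on `main()`, and `gpt-3.5-turbo` is the fallback suggested by `check_model_availability` further down), such an invocation might look like:

python wolverine.py examples/buggy_script.py "subtract" 20 3 --model=gpt-3.5-turbo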
@@ -1,5 +1,9 @@

import sys
import fire
"""
Run With: `wolverine examples/buggy_script.py "subtract" 20 3`
Purpose: Show self-regenerating fixing of subtraction operator
"""

def add_numbers(a, b):
    return a + b
@@ -0,0 +1,27 @@

#!/usr/bin/env python3
import fire

"""
Run With: `python wolverine.py examples/buggy_script_2.py`
Purpose: Fix singleton code bug in Python
"""

class SingletonClass(object):
    def __new__(cls):
        cls.instance = super(SingletonClass, cls).__new__(cls)
        return cls.instance

def check_singleton_works():
    """
    check that singleton pattern is working
    """
    singleton = SingletonClass()
    new_singleton = SingletonClass()
    singleton.a = 1
    new_singleton.a = 2
    should_be_4 = (singleton.a + new_singleton.a)
    assert should_be_4 == 4

if __name__ == "__main__":
    fire.Fire(check_singleton_works)
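A note on the new example above (not part of the diff): `__new__` recreates and reassigns `cls.instance` on every call, so `singleton` and `new_singleton` end up as distinct objects, `singleton.a + new_singleton.a` evaluates to 3, and the assertion fails. That is the deliberate bug wolverine is expected to repair. A minimal sketch of the conventional fix, offered only to show the intended behaviour, not wolverine's output:

class SingletonClass(object):
    def __new__(cls):
        # Create the shared instance once and reuse it afterwards,
        # so every call returns the same object and the assertion holds.
        if not hasattr(cls, "instance"):
            cls.instance = super(SingletonClass, cls).__new__(cls)
        return cls.instance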
wolverine.py

@@ -20,7 +20,6 @@ DEFAULT_MODEL = os.environ.get("DEFAULT_MODEL", "gpt-4")

with open("prompt.txt") as f:
    SYSTEM_PROMPT = f.read()


def run_script(script_name, script_args):
    script_args = [str(arg) for arg in script_args]
    """
@@ -70,7 +69,10 @@ def json_validated_response(model, messages):

    messages.append(
        {
            "role": "user",
            "content": "Your response could not be parsed by json.loads. Please restate your last message as pure JSON.",
            "content": (
                "Your response could not be parsed by json.loads. "
                "Please restate your last message as pure JSON."
            ),
        }
    )
    # rerun the api call
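The hunk above shows only the corrective message. For orientation, a minimal sketch of the retry pattern it belongs to, assuming the pre-1.0 openai-python `ChatCompletion` API that this file's `openai.Model.list()` call implies; the `max_retries` parameter and the assistant-message bookkeeping are illustrative, not wolverine's exact code:

import json
import openai

def json_validated_response(model, messages, max_retries=3):
    # Ask the model for a reply and retry until it parses as JSON.
    for _ in range(max_retries):
        response = openai.ChatCompletion.create(model=model, messages=messages)
        content = response["choices"][0]["message"]["content"]
        try:
            return json.loads(content)
        except json.decoder.JSONDecodeError:
            # Keep the failed reply in context, then ask for pure JSON
            # and rerun the api call.
            messages.append({"role": "assistant", "content": content})
            messages.append(
                {
                    "role": "user",
                    "content": (
                        "Your response could not be parsed by json.loads. "
                        "Please restate your last message as pure JSON."
                    ),
                }
            )
    raise ValueError("model did not return valid JSON")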
@@ -146,9 +148,13 @@ def apply_changes(file_path, changes: list, confirm=False):

    elif operation == "InsertAfter":
        file_lines.insert(line, content + "\n")

    # Ask for user confirmation before writing changes
    print("\nChanges to be made:")
    # Print explanations
    cprint("Explanations:", "blue")
    for explanation in explanations:
        cprint(f"- {explanation}", "blue")

    # Display changes diff
    print("\nChanges to be made:")
    diff = difflib.unified_diff(original_file_lines, file_lines, lineterm="")
    for line in diff:
        if line.startswith("+"):
@@ -158,8 +164,8 @@ def apply_changes(file_path, changes: list, confirm=False):

    else:
        print(line, end="")

    # Checking if user used confirm flag
    if confirm:
        # check if user wants to apply changes or exit
        confirmation = input("Do you want to apply these changes? (y/n): ")
        if confirmation.lower() != "y":
            print("Changes not applied")
@@ -167,27 +173,20 @@ def apply_changes(file_path, changes: list, confirm=False):

    with open(file_path, "w") as f:
        f.writelines(file_lines)

    # Print explanations
    cprint("Explanations:", "blue")
    for explanation in explanations:
        cprint(f"- {explanation}", "blue")

    # Show the diff
    print("\nChanges:")
    diff = difflib.unified_diff(
        original_file_lines, file_lines, lineterm="")
    for line in diff:
        if line.startswith("+"):
            cprint(line, "green", end="")
        elif line.startswith("-"):
            cprint(line, "red", end="")
        else:
            print(line, end="")

    print("Changes applied.")


def check_model_availability(model):
    available_models = [x['id'] for x in openai.Model.list()["data"]]
    if model not in available_models:
        print(
            f"Model {model} is not available. Perhaps try running with "
            "`--model=gpt-3.5-turbo` instead? You can also configure a "
            "default model in the .env"
        )
        exit()


def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=False):
    if revert:
        backup_file = script_name + ".bak"
@@ -199,6 +198,9 @@ def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=False):

            print(f"No backup file found for {script_name}")
            sys.exit(1)

    # check if model is available
    check_model_availability(model)

    # Make a backup of the original script
    shutil.copy(script_name, script_name + ".bak")
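Because `main()` is exposed through `fire.Fire(main)`, the `.bak` file written above can be restored from the command line; a usage sketch (the flag name follows directly from the `revert` keyword argument):

python wolverine.py examples/buggy_script.py --revert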
@@ -209,10 +211,10 @@ def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=False):

            cprint("Script ran successfully.", "blue")
            print("Output:", output)
            break

        else:
            cprint("Script crashed. Trying to fix...", "blue")
            print("Output:", output)

            json_response = send_error_to_gpt(
                file_path=script_name,
                args=script_args,
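The hunk above shows only a fragment of the run-and-fix loop. As orientation, a rough, illustrative sketch of the control flow using the function names visible in this file; the return shape of `run_script` and the trailing arguments of `send_error_to_gpt` are assumptions, not wolverine's exact signatures:

    # Illustrative only: rerun the target script until it stops crashing,
    # asking GPT for edits and applying them after each failure.
    while True:
        output, returncode = run_script(script_name, script_args)  # assumed return shape
        if returncode == 0:
            cprint("Script ran successfully.", "blue")
            print("Output:", output)
            break
        cprint("Script crashed. Trying to fix...", "blue")
        print("Output:", output)
        json_response = send_error_to_gpt(
            file_path=script_name,
            args=script_args,
            error_message=output,  # assumed remaining arguments
            model=model,
        )
        apply_changes(script_name, json_response, confirm=confirm)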
@@ -226,3 +228,4 @@ def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=False):


if __name__ == "__main__":
    fire.Fire(main)