Mirror of https://github.com/biobootloader/wolverine
Compare commits
22 commits
20c3051d5c ... 81143a3852
| Author | SHA1 | Date |
| --- | --- | --- |
| biobootloader | 81143a3852 | |
| BioBootloader | b5bdfe6db9 | |
| BioBootloader | 93db7757cc | |
| biobootloader | 756ddbdd61 | |
| BioBootloader | 660c55a01f | |
| biobootloader | 46171ad713 | |
| BioBootloader | 761bbdb331 | |
| BioBootloader | 42a9986aad | |
| BioBootloader | d71fe5daef | |
| BioBootloader | ef083507d6 | |
| nervousapps | 5fafe7c1fb | |
| nervousapps | 3cb64c623a | |
| nervousapps | afe0db33ce | |
| nervousapps | bcd1db9f7b | |
| nervousapps | adbe5f31de | |
| nervousapps | 489d593156 | |
| nervousapps | 0730ad8c9b | |
| nervousapps | 7447a151c0 | |
| nervousapps | 5eba2d9e1b | |
| nervousapps | 5c91c4ccba | |
| nervousapps | 3f333a4df7 | |
| nervousapps | 9f75fc9123 | |
.flake8 (2 changes)
@@ -1,2 +0,0 @@
-[flake8]
-max-line-length = 120
@@ -0,0 +1,30 @@
+name: Python package
+
+on: [push]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.10", "3.11"]
+
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+      - name: Lint with ruff
+        run: |
+          # stop the build if there are Python syntax errors or undefined names
+          ruff --format=github --select=E9,F63,F7,F82 --target-version=py37 --exclude examples/ .
+          # default set of ruff rules with GitHub Annotations
+          ruff --format=github --target-version=py37 --exclude examples/ .
+      - name: Test with pytest
+        run: |
+          pytest
@@ -37,6 +37,14 @@ You can also use flag `--confirm=True` which will ask you `yes or no` before mak
 
 python -m wolverine examples/buggy_script.py "subtract" 20 3 --confirm=True
 
+## Environment variables
+
+| env name | description | default value |
+| ------------------- | ----------------------------------------------------------------- | ------------- |
+| OPENAI_API_KEY | OpenAI API key | None |
+| DEFAULT_MODEL | GPT model to use | "gpt-4" |
+| VALIDATE_JSON_RETRY | Number of retries when requesting OpenAI API (-1 means unlimited) | -1 |
+
 ## Future Plans
 
 This is just a quick prototype I threw together in a few hours. There are many possible extensions and contributions are welcome:
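The variables above are read at import time in wolverine.py via python-dotenv, so they can live in a local `.env` file. A minimal sketch of how they are consumed, mirroring the loading code further down in this diff (the `.env` values in the comments are illustrative placeholders, not project defaults):

```python
import os

from dotenv import load_dotenv

# Reads a local .env file, e.g.:
#   OPENAI_API_KEY=sk-...          (placeholder)
#   DEFAULT_MODEL=gpt-3.5-turbo
#   VALIDATE_JSON_RETRY=5
load_dotenv()

openai_api_key = os.getenv("OPENAI_API_KEY")                     # default: None
default_model = os.environ.get("DEFAULT_MODEL", "gpt-4")         # default: "gpt-4"
validate_json_retry = int(os.getenv("VALIDATE_JSON_RETRY", -1))  # -1: retry forever
```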
@@ -14,3 +14,5 @@ example response:
 {"operation": "Replace", "line": 18, "content": " x += 1"},
 {"operation": "Delete", "line": 20, "content": ""}
 ]
+
+From now, your response must be only the json object, no talking, no comments.
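The prompt constrains the model to emit a flat JSON list of `Replace` / `Delete` / `InsertAfter` operations. As a rough sketch of how such a list can be applied to a file's lines (a hypothetical helper, not the repository's `apply_changes`, though that function likewise applies operations in reverse line order):

```python
from typing import Dict, List


def apply_operations(lines: List[str], operations: List[Dict]) -> List[str]:
    """Apply Replace/Delete/InsertAfter operations to a list of file lines."""
    # Apply bottom-up so earlier edits don't shift the line numbers of later ones.
    for op in sorted(operations, key=lambda o: o["line"], reverse=True):
        index = op["line"] - 1  # the operations use 1-based line numbers
        if op["operation"] == "Replace":
            lines[index] = op["content"]
        elif op["operation"] == "Delete":
            del lines[index]
        elif op["operation"] == "InsertAfter":
            lines.insert(index + 1, op["content"])
    return lines
```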
@@ -1,2 +0,0 @@
--r requirements.txt
-pytest==7.3.1
@@ -1,22 +1,26 @@
 aiohttp==3.8.4
 aiosignal==1.3.1
 async-timeout==4.0.2
-attrs==22.2.0
+attrs==23.1.0
 certifi==2022.12.7
 charset-normalizer==3.1.0
+exceptiongroup==1.1.1
 fire==0.5.0
-flake8==6.0.0
 frozenlist==1.3.3
 idna==3.4
-mccabe==0.7.0
+iniconfig==2.0.0
 multidict==6.0.4
-openai==0.27.2
-pycodestyle==2.10.0
-pyflakes==3.0.1
+openai==0.27.4
+packaging==23.1
+pluggy==1.0.0
+pytest==7.3.1
+pytest-mock==3.10.0
 python-dotenv==1.0.0
-requests==2.28.2
+requests==2.29.0
+ruff==0.0.263
 six==1.16.0
-termcolor==2.2.0
+termcolor==2.3.0
+tomli==2.0.1
 tqdm==4.65.0
 urllib3==1.26.15
-yarl==1.8.2
+yarl==1.9.2
@@ -0,0 +1,31 @@
+"""
+Conftest
+"""
+import os
+import pytest
+import tempfile
+
+
+TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "test_files")
+
+
+@pytest.fixture(scope='function')
+def temp_file():
+    # Create a temporary file
+    with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
+        f.write("first line\nsecond line\nthird line")
+        file_path = f.name
+    yield file_path
+    # Clean up the temporary file
+    os.remove(file_path)
+
+
+def mock_open_ai_response_object(mocker, content: str):
+    """
+    Mocks the response object from the openai api.
+    """
+    mock_generator_object = mocker.MagicMock()
+    mock_message_object = mocker.MagicMock()
+    mock_message_object.configure_mock(**{"message.content": content})
+    mock_generator_object.configure_mock(**{"choices": [mock_message_object]})
+    return mock_generator_object
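`mock_open_ai_response_object` builds a `MagicMock` shaped like an `openai.ChatCompletion` response, so `response.choices[0].message.content` yields the canned text. A minimal usage sketch with pytest-mock (the test name and canned content here are illustrative, not part of the repository):

```python
import openai

from .conftest import mock_open_ai_response_object


def test_mocked_completion(mocker):
    canned = '[{"operation": "Delete", "line": 1, "content": ""}]'
    # Replace the real API call with the pre-built mock object.
    mocker.patch(
        "openai.ChatCompletion.create",
        return_value=mock_open_ai_response_object(mocker=mocker, content=canned),
    )
    response = openai.ChatCompletion.create(model="gpt-4", messages=[])
    assert response.choices[0].message.content == canned
```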
@@ -0,0 +1,8 @@
+Explanation: The function `subtract_numbers` is never defined in the script, causing a `NameError` when it is called in the `calculate` function.
+
+[
+{"explanation": "The 'subtract_numbers' function is never defined in the script."},
+{"operation": "InsertAfter", "line": 12, "content": "\n# Define subtract_numbers function\ndef subtract_numbers(a, b):\n return a - b\n"},
+{"operation": "Replace", "line": 18, "content": " if operation == \"add\":\n result = add_numbers(num1, num2)\n elif operation == \"subtract\":\n result = subtract_numbers(num1, num2)\n elif operation == \"multiply\":\n result = multiply_numbers(num1, num2)\n elif operation == \"divide\":\n result = divide_numbers(num1, num2)\n else:\n print(\"Invalid operation\")\n"},
+{"operation": "Replace", "line": 30, "content": " return result\n"}
+]
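These fixtures mimic a full chat response: a prose explanation first, then the JSON list. The parsing strategy in `wolverine.py` (visible in the diff below) slices from the first `[` before calling `json.loads`; a minimal sketch of that step (the file path is illustrative):

```python
import json

with open("tests/test_files/cc_resp.txt") as f:  # illustrative path
    content = f.read()

json_start_index = content.index("[")  # find where the JSON list begins
json_response = json.loads(content[json_start_index:])
assert json_response[1]["operation"] == "InsertAfter"
```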
@@ -0,0 +1,7 @@
+Explanation: The function `subtract_numbers` is never defined in the script, causing a `NameError` when it is called in the `calculate` function.
+
+[
+{"explanation": "The 'subtract_numbers' function is never defined in the script."},
+{"operation": "InsertAfter", "line": 12, "content": "\n# Define subtract_numbers function\ndef subtract_numbers(a, b):\n return a - b\n"},
+{"operation": "Replace", "line": 18, "content": " if operation == \"add\":\n result = add_numbers(num1, num2)\n elif operation == \"subtract\":\n result = subtract_numbers(num1, num2)\n elif operation == \"multiply\":\n result = multiply_numbers(num1, num2)\n elif operation == \"divide\":\n result = divide_numbers(num1, num2)\n else:\n print(\"Invalid operation\")\n"},
+{"operation": "Replace", "line": 30, "content": " return result\n"}

(Note that this fixture intentionally omits the closing `]`, so `json.loads` fails and the retry path in `json_validated_response` is exercised.)
@@ -1,19 +1,11 @@
 import os
 import json
 import pytest
-import tempfile
 from wolverine import apply_changes, json_validated_response
-
-
-@pytest.fixture(scope='function')
-def temp_file():
-    # Create a temporary file
-    with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
-        f.write("first line\nsecond line\nthird line")
-        file_path = f.name
-    yield file_path
-    # Clean up the temporary file
-    os.remove(file_path)
+from .conftest import (
+    mock_open_ai_response_object,
+    TEST_FILES_DIR
+)
 
 
 def test_apply_changes_replace(temp_file):
@@ -54,3 +46,40 @@ def test_apply_changes_insert(temp_file):
         content = f.read()
     assert content == 'first line\nsecond line\ninserted line\nthird line'
 
+
+@pytest.mark.parametrize("chat_completion_response, nb_retry, fail", [
+    (os.path.join(TEST_FILES_DIR, "cc_resp.txt"), 3, False),
+    (os.path.join(TEST_FILES_DIR, "cc_resp_fail.txt"), 3, True),
+    (os.path.join(TEST_FILES_DIR, "cc_resp_fail.txt"), 10, True),
+])
+def test_json_validated_response(mocker, chat_completion_response, nb_retry, fail):
+    # Open the test file
+    with open(chat_completion_response, 'r') as file:
+        response = file.read()
+    # Mock the openAi chat completion API call
+    mocker.patch(
+        "openai.ChatCompletion.create",
+        return_value=mock_open_ai_response_object(mocker=mocker, content=response))
+    # ChatCompletion returned an invalid response
+    if fail:
+        with pytest.raises(Exception) as err:
+            json_response = json_validated_response("gpt-4", [
+                {
+                    "role": "user",
+                    "content": "prompt"
+                }
+            ],
+                nb_retry=nb_retry
+            )
+        # Check that the exception is raised after nb_retry time
+        assert err.value == "No valid json response found after 3 tries. Exiting."
+    else:
+        json_response = json_validated_response("gpt-4", [
+            {
+                "role": "user",
+                "content": "prompt"
+            }
+        ],
+            nb_retry=nb_retry
+        )
+        assert json_response
@@ -1 +1 @@
-from .wolverine import apply_changes, json_validated_response
+from .wolverine import apply_changes, json_validated_response  # noqa
@@ -6,21 +6,27 @@ import subprocess
 import sys
 
 import openai
-from dotenv import load_dotenv
-
+from typing import List, Dict
 from termcolor import cprint
+from dotenv import load_dotenv
 
 # Set up the OpenAI API
 load_dotenv()
 openai.api_key = os.getenv("OPENAI_API_KEY")
 
+# Default model is GPT-4
 DEFAULT_MODEL = os.environ.get("DEFAULT_MODEL", "gpt-4")
 
+# Nb retries for json_validated_response, default to -1, infinite
+VALIDATE_JSON_RETRY = int(os.getenv("VALIDATE_JSON_RETRY", -1))
+
-with open("prompt.txt", encoding="utf-8") as file:
-    SYSTEM_PROMPT = file.read()
+# Read the system prompt
+with open(os.path.join(os.path.dirname(__file__), "..", "prompt.txt"), "r") as f:
+    SYSTEM_PROMPT = f.read()
 
 
-def run_script(script_name, script_args):
+def run_script(script_name: str, script_args: List) -> str:
     """
     If script_name.endswith(".py") then run with python
     else run with node
@@ -33,62 +39,71 @@ def run_script(script_name, script_args):
     )
 
     try:
-        result = subprocess.check_output(
-            subprocess_args,
-            stderr=subprocess.STDOUT
-        )
+        result = subprocess.check_output(subprocess_args, stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError as error:
         return error.output.decode("utf-8"), error.returncode
     return result.decode("utf-8"), 0
 
 
-def json_validated_response(model, messages):
+def json_validated_response(
+    model: str, messages: List[Dict], nb_retry: int = VALIDATE_JSON_RETRY
+) -> Dict:
     """
     This function is needed because the API can return a non-json response.
-    This will run recursively until a valid json response is returned.
-    todo: might want to stop after a certain number of retries
+    This will run recursively VALIDATE_JSON_RETRY times.
+    If VALIDATE_JSON_RETRY is -1, it will run recursively until a valid json
+    response is returned.
     """
-    response = openai.ChatCompletion.create(
-        model=model,
-        messages=messages,
-        temperature=0.5,
-    )
-    messages.append(response.choices[0].message)
-    content = response.choices[0].message.content
-    # see if json can be parsed
-    try:
-        json_start_index = content.index(
-            "["
-        )  # find the starting position of the JSON data
-        json_data = content[
-            json_start_index:
-        ]  # extract the JSON data from the response string
-        json_response = json.loads(json_data)
-    except (json.decoder.JSONDecodeError, ValueError) as error:
-        cprint(f"{error}. Re-running the query.", "red")
-        # debug
-        cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
-        # append a user message that says the json is invalid
-        messages.append(
-            {
-                "role": "user",
-                "content": (
-                    "Your response could not be parsed by json.loads. "
-                    "Please restate your last message as pure JSON."
-                ),
-            }
-        )
-        # rerun the api call
-        return json_validated_response(model, messages)
-    except Exception as error:
-        cprint(f"Unknown error: {error}", "red")
-        cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
-        raise error
-    return json_response
+    json_response = {}
+    if nb_retry != 0:
+        response = openai.ChatCompletion.create(
+            model=model,
+            messages=messages,
+            temperature=0.5,
+        )
+        messages.append(response.choices[0].message)
+        content = response.choices[0].message.content
+        # see if json can be parsed
+        try:
+            json_start_index = content.index(
+                "["
+            )  # find the starting position of the JSON data
+            json_data = content[
+                json_start_index:
+            ]  # extract the JSON data from the response string
+            json_response = json.loads(json_data)
+            return json_response
+        except (json.decoder.JSONDecodeError, ValueError) as e:
+            cprint(f"{e}. Re-running the query.", "red")
+            # debug
+            cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
+            # append a user message that says the json is invalid
+            messages.append(
+                {
+                    "role": "user",
+                    "content": (
+                        "Your response could not be parsed by json.loads. "
+                        "Please restate your last message as pure JSON."
+                    ),
+                }
+            )
+            # dec nb_retry
+            nb_retry -= 1
+            # rerun the api call
+            return json_validated_response(model, messages, nb_retry)
+        except Exception as e:
+            cprint(f"Unknown error: {e}", "red")
+            cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
+            raise e
+    raise Exception(
+        f"No valid json response found after {VALIDATE_JSON_RETRY} tries. Exiting."
+    )
 
 
-def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
-    with open(file_path) as f:
+def send_error_to_gpt(
+    file_path: str, args: List, error_message: str, model: str = DEFAULT_MODEL
+) -> Dict:
+    with open(file_path, "r") as f:
         file_lines = f.readlines()
 
     file_with_lines = []
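With the new signature, callers can bound how many times the model is re-queried before the function gives up. A brief usage sketch (the prompt content is illustrative):

```python
# Give the model at most 3 attempts to produce parseable JSON;
# after 3 failed attempts an Exception is raised instead of recursing forever.
messages = [{"role": "user", "content": "Return a JSON list of edits."}]
changes = json_validated_response(DEFAULT_MODEL, messages, nb_retry=3)
```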
@@ -122,7 +137,7 @@ def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
     return json_validated_response(model, messages)
 
 
-def apply_changes(file_path, changes: list, confirm=False):
+def apply_changes(file_path: str, changes: List, confirm: bool = False):
     """
     Pass changes as loaded json (list of dicts)
     """
@@ -180,7 +195,7 @@ def apply_changes(file_path, changes: list, confirm=False):
 
 
 def check_model_availability(model):
-    available_models = [x['id'] for x in openai.Model.list()["data"]]
+    available_models = [x["id"] for x in openai.Model.list()["data"]]
     if model not in available_models:
         print(
             f"Model {model} is not available. Perhaps try running with "