kopia lustrzana https://github.com/hc-psy/blender-gpt
commit
4d5c72db8c
16
README.md
16
README.md
|
@ -1,3 +1,15 @@
|
|||
# blender-gpt-zh
|
||||
# Blender GPT Add-on
|
||||
|
||||
Blender add-on powered by OpenAI ChatGPT — 中文版
|
||||
Introducing our newly developed Blender Add-on that harnesses the potential of OpenAI's latest GPT-4 (and GPT-3.5) to transform simple prompts into remarkable 3D models and animations. No more hassle of manually modeling complex 3D elements — let AI do the work!
|
||||
|
||||
### Key Features
|
||||
|
||||
- **Multi-Lingual Support:** Our user-friendly interface is not confined to a single language. Choose from English, Simplified Chinese, or Traditional Chinese for a comfortable and personalized user experience. Also, you can speak to your software in the language you're comfortable with!
|
||||
- **Choose Your Model:** Take control of the AI you're working with. Our Add-on allows you to toggle between Chat GPT-3 and Chat GPT-4 models based on your preference, task complexity, accessibility, and budget 💰.
|
||||
- **Adjustable Creativity:** Enjoy flexibility in design with adjustable "creativity" settings. This allows you to control the randomness and creativity of the AI, offering results ranging from highly deterministic (0) to wildly creative (1). Having said that, 0 is recommended.
|
||||
- **UI Features:** Our user-friendly interface lets you maintain chat history, delete it at will, view the generated code, and much more. Experience the familiar simplicity and convenience of OpenAI ChatGPT within the comfort of your Blender workspace.
|
||||
- **Speed and Robustness:** We take pride in our add-on's performance. Experience faster inference times compared to similar products, and witness more robust results, ensuring your creativity is never hindered.
|
||||
|
||||
## Get Started
|
||||
|
||||
Install our Add-on and dive into the limitless realm of AI-powered 3D modeling.
|
||||
|
|
|
@ -93,26 +93,30 @@ def chatgpt(context):
|
|||
model=models[lan],
|
||||
messages=messages,
|
||||
temperature=temperatures[lan],
|
||||
stream=True,
|
||||
# stream=True,
|
||||
max_tokens=2000,
|
||||
)
|
||||
|
||||
try:
|
||||
events = []
|
||||
final_txt = ''
|
||||
# events = []
|
||||
# final_txt = ''
|
||||
|
||||
# because stream=True, use delta to concatenate
|
||||
for e in response:
|
||||
if len(e['choices'][0]['delta']) == 0:
|
||||
continue
|
||||
# # because stream=True, use delta to concatenate
|
||||
# for e in response:
|
||||
# if len(e['choices'][0]['delta']) == 0:
|
||||
# continue
|
||||
|
||||
if 'role' in e['choices'][0]['delta']:
|
||||
continue
|
||||
# if 'role' in e['choices'][0]['delta']:
|
||||
# continue
|
||||
|
||||
events.append(e)
|
||||
event_text = e['choices'][0]['delta']['content']
|
||||
final_txt += event_text
|
||||
print(final_txt, flush=True, end='\r')
|
||||
# events.append(e)
|
||||
# event_text = e['choices'][0]['delta']['content']
|
||||
# final_txt += event_text
|
||||
# print(final_txt, flush=True, end='\r')
|
||||
|
||||
# return post_process(final_txt)
|
||||
|
||||
final_txt = response['choices'][0]['message']['content']
|
||||
|
||||
return post_process(final_txt)
|
||||
|
|
@ -95,7 +95,7 @@ class BLENDERGPT_OT_SEND_MSG(Operator):
|
|||
|
||||
# TODO: connect to GPT
|
||||
prf = context.preferences
|
||||
openai.api_key = prf.addons["blendergpt-zh"].preferences.openai_key
|
||||
openai.api_key = prf.addons["blender-gpt"].preferences.openai_key
|
||||
|
||||
if not openai.api_key:
|
||||
if int(context.scene.lan) == 0:
|
||||
|
@ -112,40 +112,33 @@ class BLENDERGPT_OT_SEND_MSG(Operator):
|
|||
scene.on_finish = True
|
||||
# bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
|
||||
|
||||
lan = int(context.scene.lan)
|
||||
prompts = [scene.prompt_input_0,
|
||||
scene.prompt_input_1, scene.prompt_input_2]
|
||||
|
||||
if len(scene.history) == 0 or scene.history[-1].type == 'GPT':
|
||||
if int(context.scene.lan) == 0:
|
||||
if scene.prompt_input_0 == "":
|
||||
if prompts[lan] == "":
|
||||
if lan == 0:
|
||||
self.report({'ERROR'}, f"錯誤: 請輸入指令")
|
||||
scene.on_finish = False
|
||||
return {'CANCELLED'}
|
||||
|
||||
msg = scene.history.add()
|
||||
msg.type = 'USER'
|
||||
msg.content = scene.prompt_input_0
|
||||
elif int(context.scene.lan) == 1:
|
||||
if scene.prompt_input_1 == "":
|
||||
elif lan == 1:
|
||||
self.report({'ERROR'}, f"错误: 请输入指令")
|
||||
scene.on_finish = False
|
||||
return {'CANCELLED'}
|
||||
|
||||
msg = scene.history.add()
|
||||
msg.type = 'USER'
|
||||
msg.content = scene.prompt_input_1
|
||||
else:
|
||||
if scene.prompt_input_2 == "":
|
||||
else:
|
||||
self.report({'ERROR'}, f"Error: Please enter the prompt")
|
||||
scene.on_finish = False
|
||||
return {'CANCELLED'}
|
||||
scene.on_finish = False
|
||||
return {'CANCELLED'}
|
||||
|
||||
msg = scene.history.add()
|
||||
msg.type = 'USER'
|
||||
msg.content = scene.prompt_input_2
|
||||
try:
|
||||
code_exe_blender = chatgpt(context)
|
||||
except Exception as e:
|
||||
self.report({'ERROR'}, f"Error: {e}")
|
||||
scene.on_finish = False
|
||||
return {'CANCELLED'}
|
||||
|
||||
code_exe_blender = chatgpt(context)
|
||||
|
||||
scene.prompt_input_0 = ""
|
||||
scene.prompt_input_1 = ""
|
||||
scene.prompt_input_2 = ""
|
||||
if len(scene.history) == 0 or scene.history[-1].type == 'GPT':
|
||||
msg = scene.history.add()
|
||||
msg.type = 'USER'
|
||||
msg.content = prompts[lan]
|
||||
prompts[lan] = ""
|
||||
|
||||
if code_exe_blender:
|
||||
msg = scene.history.add()
|
|
@ -3,7 +3,7 @@ from bpy.types import AddonPreferences
|
|||
|
||||
|
||||
class BLENDERGPT_AddonPreferences(AddonPreferences):
|
||||
bl_idname = "blendergpt-zh"
|
||||
bl_idname = "blender-gpt"
|
||||
|
||||
openai_key: props.StringProperty(
|
||||
name="OPENAI API Key",
|
Ładowanie…
Reference in New Issue