
Commit cccf633

support workflow command

1 parent c3732d4

6 files changed

Lines changed: 458 additions & 14 deletions


devchat/_cli/prompt.py

Lines changed: 21 additions & 3 deletions
@@ -1,6 +1,8 @@
 import json
+import sys
 from typing import List, Optional
 import rich_click as click
+from devchat.engine import run_command
 from devchat.assistant import Assistant
 from devchat.openai.openai_chat import OpenAIChat, OpenAIChatConfig
 from devchat.store import Store
@@ -24,10 +26,15 @@
               help='Path to a JSON file with functions for the prompt.')
 @click.option('-n', '--function-name',
               help='Specify the function name when the content is the output of a function.')
+@click.option('-s', '--store', is_flag=True, default=False,
+              help='Save the conversation to the store.')
+@click.option('-a', '--auto', is_flag=True, default=True,
+              help='Answer question by function-calling.')
 def prompt(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
            instruct: Optional[List[str]], context: Optional[List[str]],
            model: Optional[str], config_str: Optional[str] = None,
-           functions: Optional[str] = None, function_name: Optional[str] = None):
+           functions: Optional[str] = None, function_name: Optional[str] = None,
+           store: Optional[bool] = False, auto: Optional[bool] = True):
     """
     This command performs interactions with the specified large language model (LLM)
     by sending prompts and receiving responses.
@@ -82,9 +89,9 @@ def prompt(content: Optional[str], parent: Optional[str], reference: Optional[Li
     openai_config = OpenAIChatConfig(model=model, **parameters_data)

     chat = OpenAIChat(openai_config)
-    store = Store(repo_chat_dir, chat)
+    chat_store = Store(repo_chat_dir, chat)

-    assistant = Assistant(chat, store, config.max_input_tokens)
+    assistant = Assistant(chat, chat_store, config.max_input_tokens, store)

     functions_data = None
     if functions is not None:
@@ -94,5 +101,16 @@ def prompt(content: Optional[str], parent: Optional[str], reference: Optional[Li
                           parent=parent, references=reference,
                           function_name=function_name)

+    click.echo(assistant.prompt.formatted_header())
+    command_result = run_command(
+        model,
+        assistant.prompt.messages,
+        content,
+        parent,
+        context_contents,
+        auto)
+    if command_result is not None:
+        sys.exit(command_result[0])
+
     for response in assistant.iterate_response():
         click.echo(response, nl=False)
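To see the two new options from the user's side, here is a hedged smoke-test sketch driven through click's own test runner. The flag names and the prompt entry point come from the diff above; the prompt text and expected behavior are made up for illustration.

# Hypothetical smoke test for the new flags, using click.testing.CliRunner.
# '-s/--store' opts in to persisting the conversation; '-a/--auto' (already
# True by default) lets the engine fill workflow-command parameters via
# function calling.
from click.testing import CliRunner
from devchat._cli.prompt import prompt

runner = CliRunner()
result = runner.invoke(prompt, ["-s", "-a", "how do I cut a new release?"])
print(result.exit_code)   # 0, or the workflow command's return code via sys.exit
print(result.output)

The exit code mirrors the new dispatch: when run_command() finds a matching workflow command it returns a (return_code, output) tuple and the CLI exits with that code; when it returns None, the command falls through to the normal LLM response stream.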

devchat/assistant.py

Lines changed: 12 additions & 8 deletions
@@ -4,6 +4,7 @@
 import openai
 from devchat.message import Message
 from devchat.chat import Chat
+from devchat.openai.openai_prompt import OpenAIPrompt
 from devchat.store import Store
 from devchat.utils import get_logger

@@ -12,7 +13,7 @@


 class Assistant:
-    def __init__(self, chat: Chat, store: Store, max_prompt_tokens: int):
+    def __init__(self, chat: Chat, store: Store, max_prompt_tokens: int, need_store: bool):
         """
         Initializes an Assistant object.

@@ -23,6 +24,11 @@ def __init__(self, chat: Chat, store: Store, max_prompt_tokens: int):
         self._store = store
         self._prompt = None
         self.token_limit = max_prompt_tokens
+        self._need_store = need_store
+
+    @property
+    def prompt(self) -> OpenAIPrompt:
+        return self._prompt

     @property
     def available_tokens(self) -> int:
@@ -92,7 +98,6 @@ def iterate_response(self) -> Iterator[str]:
             Iterator[str]: An iterator over response strings from the chat API.
         """
         if self._chat.config.stream:
-            first_chunk = True
             created_time = int(time.time())
             config_params = self._chat.config.dict(exclude_unset=True)
             for chunk in self._chat.stream_response(self._prompt):
@@ -114,21 +119,20 @@
                     chunk['choices'][0]['delta']['role']='assistant'

                 delta = self._prompt.append_response(json.dumps(chunk))
-                if first_chunk:
-                    first_chunk = False
-                    yield self._prompt.formatted_header()
                 yield delta
             if not self._prompt.responses:
                 raise RuntimeError("No responses returned from the chat API")
-            self._store.store_prompt(self._prompt)
-            yield self._prompt.formatted_footer(0) + '\n'
+            if self._need_store:
+                self._store.store_prompt(self._prompt)
+                yield self._prompt.formatted_footer(0) + '\n'
             for index in range(1, len(self._prompt.responses)):
                 yield self._prompt.formatted_full_response(index) + '\n'
         else:
             response_str = self._chat.complete_response(self._prompt)
             self._prompt.set_response(response_str)
             if not self._prompt.responses:
                 raise RuntimeError("No responses returned from the chat API")
-            self._store.store_prompt(self._prompt)
+            if self._need_store:
+                self._store.store_prompt(self._prompt)
             for index in range(len(self._prompt.responses)):
                 yield self._prompt.formatted_full_response(index) + '\n'
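A minimal sketch of how the changed constructor is wired (assumed usage, mirroring the call in prompt.py above): need_store now decides whether iterate_response() persists the prompt and yields the footer, and the header is read through the new prompt property instead of being yielded with the first chunk.

# Sketch only, under assumed configuration values; the class and constructor
# signature are those shown in the diff above.
from devchat.assistant import Assistant
from devchat.openai.openai_chat import OpenAIChat, OpenAIChatConfig
from devchat.store import Store

chat_dir = "/tmp/devchat-store"   # hypothetical store location
chat = OpenAIChat(OpenAIChatConfig(model="gpt-3.5-turbo-16k"))
assistant = Assistant(chat, Store(chat_dir, chat), 3000, need_store=False)
# ... build the prompt as prompt.py does ...
print(assistant.prompt.formatted_header())   # header now comes from the property
for piece in assistant.iterate_response():   # with need_store=False, store_prompt()
    print(piece, end="")                     # and the footer are skipped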

devchat/engine/__init__.py

Lines changed: 5 additions & 1 deletion
@@ -1,11 +1,15 @@
 from .command_parser import parse_command, Command, CommandParser
 from .namespace import Namespace
 from .recursive_prompter import RecursivePrompter
+from .router import run_command
+from .command_runner import CommandRunner

 __all__ = [
     'parse_command',
     'Command',
     'CommandParser',
     'Namespace',
-    'RecursivePrompter'
+    'RecursivePrompter',
+    'run_command',
+    'CommandRunner'
 ]
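In practical terms the re-export lets callers pick up the new entry points from the package root; run_command is re-exported from the .router module and CommandRunner from the new command_runner module below. A one-line example (assumed, not part of the commit):

from devchat.engine import run_command, CommandRunner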

devchat/engine/command_runner.py

Lines changed: 181 additions & 0 deletions
@@ -0,0 +1,181 @@
+"""
+Run Command with an input text.
+"""
+import os
+import sys
+import json
+import subprocess
+from typing import List
+import shlex
+
+import openai
+
+from devchat.utils import get_logger
+from . import Command
+
+
+logger = get_logger(__name__)
+
+
+# Equivalent of CommandRun in Python, which executes subprocesses
+class CommandRunner:
+    def __init__(self, model_name: str):
+        self.process = None
+        self._model_name = model_name
+
+    def _call_function_by_llm(self,
+                              command_name: str,
+                              command: Command,
+                              history_messages: List[dict]):
+        """
+        The command needs multiple parameters, so each parameter
+        is parsed from input_text by the LLM.
+        """
+        properties = {}
+        required = []
+        for key, value in command.parameters.items():
+            properties[key] = {}
+            for key1, value1 in value.dict().items():
+                if key1 not in ['type', 'description', 'enum'] or value1 is None:
+                    continue
+                properties[key][key1] = value1
+            required.append(key)
+
+        tools = [
+            {
+                "type": "function",
+                "function": {
+                    "name": command_name,
+                    "description": command.description,
+                    "parameters": {
+                        "type": "object",
+                        "properties": properties,
+                        "required": required,
+                    },
+                }
+            }
+        ]
+
+        client = openai.OpenAI(
+            api_key=os.environ.get("OPENAI_API_KEY", None),
+            base_url=os.environ.get("OPENAI_API_BASE", None)
+        )
+
+        connection_error = ''
+        for _1 in range(3):
+            try:
+                response = client.chat.completions.create(
+                    messages=history_messages,
+                    model="gpt-3.5-turbo-16k",
+                    stream=False,
+                    tools=tools,
+                    tool_choice={"type": "function", "function": {"name": command_name}}
+                )
+
+                respose_message = response.dict()["choices"][0]["message"]
+                if not respose_message['tool_calls']:
+                    return None
+                tool_call = respose_message['tool_calls'][0]['function']
+                if tool_call['name'] != command_name:
+                    return None
+                parameters = json.loads(tool_call['arguments'])
+                return parameters
+            except (ConnectionError, openai.APIConnectionError) as err:
+                connection_error = err
+                continue
+            except Exception as err:
+                print("Exception:", err, file=sys.stderr, flush=True)
+                logger.exception("Call command by LLM error: %s", err)
+                return None
+        print("Connect Error:", connection_error, file=sys.stderr, flush=True)
+        return None
+
+
+    def run_command(self,
+                    command_name: str,
+                    command: Command,
+                    history_messages: List[dict],
+                    input_text: str,
+                    parent_hash: str,
+                    context_contents: List[str]):
+        """
+        If the command has parameters, generate them from the input by LLM.
+        If command.input is "required" and the input is null, return an error.
+        """
+        if command.parameters and len(command.parameters) > 0:
+            if not self._model_name.startswith("gpt-"):
+                return None
+
+            arguments = self._call_function_by_llm(command_name, command, history_messages)
+            if not arguments:
+                print("No valid parameters generated by LLM", file=sys.stderr, flush=True)
+                return (-1, "")
+            return self.run_command_with_parameters(
+                command,
+                {
+                    "input": input_text,
+                    **arguments
+                },
+                parent_hash,
+                context_contents)
+
+        return self.run_command_with_parameters(
+            command,
+            {
+                "input": input_text
+            },
+            parent_hash,
+            context_contents)
+
+
+    def run_command_with_parameters(self,
+                                    command: Command,
+                                    parameters: dict[str, str],
+                                    parent_hash: str,
+                                    context_contents: List[str]):
+        """
+        Replace $xxx in command.steps[0].run with parameters[xxx],
+        then run command.steps[0].run.
+        """
+        try:
+            # add environment variables to parameters
+            if parent_hash:
+                os.environ['PARENT_HASH'] = parent_hash
+            if context_contents:
+                os.environ['CONTEXT_CONTENTS'] = json.dumps(context_contents)
+            for env_var in os.environ:
+                parameters[env_var] = os.environ[env_var]
+            parameters["command_python"] = os.environ['command_python']
+
+            command_run = command.steps[0]["run"]
+            # Replace parameters in command run
+            for parameter in parameters:
+                command_run = command_run.replace('$' + parameter, str(parameters[parameter]))
+
+            # Run command_run
+            env = os.environ.copy()
+            if 'PYTHONPATH' in env:
+                del env['PYTHONPATH']
+            # result = subprocess.run(command_run, shell=True, env=env)
+            # return result
+            process = subprocess.Popen(
+                shlex.split(command_run),
+                stdout=subprocess.PIPE,
+                stderr=subprocess.STDOUT,
+                text=True
+            )
+
+            # Read the subprocess output in real time and print it
+            stdout = ''
+            while True:
+                output = process.stdout.readline()
+                if output == '' and process.poll() is not None:
+                    break
+                if output:
+                    stdout += output
+                    print(output, end='\n')
+            rc = process.poll()
+            return (rc, stdout)
+        except Exception as err:
+            print("Exception:", type(err), err, file=sys.stderr, flush=True)
+            return (-1, "")
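A hedged usage sketch for the runner. The stub below only mimics the one field run_command_with_parameters() actually reads, command.steps[0]["run"]; in real use the Command would come from the workflow definitions handled by parse_command/CommandParser, and the command_python environment variable must be set because the runner reads it unconditionally. The stub class and step string are made up for illustration; the CommandRunner API is the one defined in the new file above.

# Illustrative only; StubCommand is a stand-in, not the real Command class.
import os
from dataclasses import dataclass, field
from typing import List

from devchat.engine import CommandRunner


@dataclass
class StubCommand:
    # Only steps[0]["run"] is consumed by run_command_with_parameters().
    steps: List[dict] = field(default_factory=lambda: [
        {"run": "$command_python -c \"print('hello from a workflow step')\""}
    ])


os.environ["command_python"] = "python3"      # substituted for $command_python above
runner = CommandRunner("gpt-3.5-turbo-16k")
return_code, output = runner.run_command_with_parameters(
    StubCommand(), {"input": "say hello"}, parent_hash="", context_contents=[])
print("exit code:", return_code)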
