diff --git a/mindmate/commands/chat.py b/mindmate/commands/chat.py
index c9aa0f3..35a270d 100644
--- a/mindmate/commands/chat.py
+++ b/mindmate/commands/chat.py
@@ -24,7 +24,7 @@ def model_option_callback(ctx, param, value):
 @click.option('-m', '--model', required=True, default='text-davinci-003', show_default=True, type=str, callback=model_option_callback, help='select targeted model to utilize')
 @click.option('-p', '--prompt', required=True, show_default=False, type=str, help='Your prompt to AI')
 @click.option('-s', '--stream', required=False, default=True, show_default=True, type=bool, help='stream AI response on your terminal')
-@click.option('--max-tokens', required=False, default=100, show_default=True, type=int, help='stream AI response on your terminal')
+@click.option('--max-tokens', required=False, default=3000, show_default=True, type=int, help='maximum number of tokens to generate in the AI response')
 def chat(platform, model, prompt, stream, max_tokens):
     """offers text-based response to your prompt"""
     click.echo(help.generic_message())
diff --git a/mindmate/services/openai.py b/mindmate/services/openai.py
index 751cac2..0b90aa6 100644
--- a/mindmate/services/openai.py
+++ b/mindmate/services/openai.py
@@ -80,7 +80,7 @@ def ask_ai_with_stream(self, prompt: Prompt, model: str, max_tokens=5000, n=1) -
         try:
             completion = openai.Completion.create(
                 model=model,
-                temperature=0,
+                temperature=0.2,
                 stream=True,
                 max_tokens=max_tokens,
                 user=self.openai_id,