.env.template
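# Your OpenAI API key.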
OPENAI_API_KEY=<your-api-key>
# Path to your code; defaults to the current directory.
# Use a POSIX-style path even on Windows.
# For example, C:\Users\user\code\project
# becomes C:/Users/user/code/project
CODE_DIR=./
# Comma-separated list of file extensions to process.
FILE_EXTENSIONS_TO_PROCESS=.js,.tsx,.ts,.jsx
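# Comma-separated list of directories to skip while scanning.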
IGNORE_LIST=node_modules,coverage,public,__tests__
# Each model setting below accepts either 'gpt-3.5-turbo' or 'gpt-4' (if you have access to it).
CODER_MODEL=gpt-3.5-turbo
CODE_READER_MODEL=gpt-3.5-turbo
GET_FILES_MODEL=gpt-3.5-turbo
INDEXER_MODEL=gpt-3.5-turbo
REVIEWER_MODEL=gpt-3.5-turbo
TASK_COMPLEXITY_MODEL=gpt-3.5-turbo
OPENAI_MAX_TOKEN_REPLY=null # Max tokens OpenAI may return in a single reply
MODEL_TEMPERATURE=0.01 # range 0-1, 0 being the most conservative, 1 being the most creative
MODEL_PRESENCE_PENALTY=0 # range -2 to 2; positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics
MODEL_FREQUENCY_PENALTY=0 # range -2 to 2; positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim
MODEL_USER=autopilot # Identify this usage
MAX_TOKEN_COUNT_SINGLE_FILE=1800 # Files above this token count will not be processed
MAX_TOKEN_COUNT_SUMMARIES_CHUNK=2000 # Summaries are chunked to this maximum token size and looped over
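For context, here is a minimal sketch of how a Node project could consume a file based on this template via the `dotenv` package. The `parseList` helper and the shape of the `config` object are illustrative assumptions, not this repository's actual loader:

```ts
// loadConfig.ts: illustrative only. Variable names match the template above,
// but the loader itself is an assumption, not this project's real code.
import * as dotenv from "dotenv";

dotenv.config(); // load variables from .env into process.env

// Split comma-separated values (FILE_EXTENSIONS_TO_PROCESS, IGNORE_LIST) into arrays.
function parseList(value: string | undefined): string[] {
  return value ? value.split(",").map((s) => s.trim()).filter(Boolean) : [];
}

const config = {
  openaiApiKey: process.env.OPENAI_API_KEY ?? "",
  codeDir: process.env.CODE_DIR ?? "./",
  fileExtensions: parseList(process.env.FILE_EXTENSIONS_TO_PROCESS),
  ignoreList: parseList(process.env.IGNORE_LIST),
  coderModel: process.env.CODER_MODEL ?? "gpt-3.5-turbo",
  // Treating the template's 'null' as "no explicit cap" is an assumption.
  maxTokenReply:
    !process.env.OPENAI_MAX_TOKEN_REPLY || process.env.OPENAI_MAX_TOKEN_REPLY === "null"
      ? undefined
      : Number(process.env.OPENAI_MAX_TOKEN_REPLY),
  temperature: Number(process.env.MODEL_TEMPERATURE ?? "0.01"),
  maxTokenCountSingleFile: Number(process.env.MAX_TOKEN_COUNT_SINGLE_FILE ?? "1800"),
};

console.log(config.fileExtensions); // e.g. [ '.js', '.tsx', '.ts', '.jsx' ]
```

Note that a loader like this reads values as plain strings, which is why the template values carry no quotes or trailing punctuation: anything after `=` (up to an inline `#` comment, if the parser supports one) becomes part of the value.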