File: .env.example
# OPENAI or AZURE or OPENROUTER
ENDPOINT=OPENAI
# OPENAI_ENDPOINT=https://api.openai.com/v1/chat/completions
OPENAI_ENDPOINT=
OPENAI_API_KEY=
AZURE_API_KEY=
AZURE_ENDPOINT=
OPENROUTER_API_KEY=
# Folders which shouldn't be tracked in workspace (useful to ignore folders created by compiler)
# IGNORE_PATHS=folder1,folder2
# Database
# DATABASE_TYPE=postgres
DB_NAME=gpt-pilot
DB_HOST=
DB_PORT=
DB_USER=
DB_PASSWORD=
# USE_GPTPILOT_FOLDER=true
# Set extra buffer to wait on top of the detected retry time when a rate limit is hit. Defaults to 6.
# RATE_LIMIT_EXTRA_BUFFER=
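For orientation, here is a minimal sketch of how these variables might be consumed at startup, assuming `python-dotenv` is used as elsewhere in this repo; the `get_endpoint_config` helper and the fallback URLs are illustrative assumptions, not the project's actual loader.

```python
# Illustrative only: read the endpoint selection and matching credentials.
import os
from dotenv import load_dotenv

load_dotenv()  # populate os.environ from the .env file above

def get_endpoint_config() -> dict:
    endpoint = os.getenv('ENDPOINT', 'OPENAI')
    if endpoint == 'AZURE':
        return {'endpoint': os.getenv('AZURE_ENDPOINT'), 'api_key': os.getenv('AZURE_API_KEY')}
    if endpoint == 'OPENROUTER':
        return {'endpoint': 'https://openrouter.ai/api/v1/chat/completions',
                'api_key': os.getenv('OPENROUTER_API_KEY')}
    return {'endpoint': os.getenv('OPENAI_ENDPOINT') or 'https://api.openai.com/v1/chat/completions',
            'api_key': os.getenv('OPENAI_API_KEY')}
```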
File: __init__.py
File: db_init.py
from dotenv import load_dotenv
load_dotenv()
from database.database import create_tables, drop_tables
drop_tables()
create_tables()
File: instructions.comment.py
# init CLI
# 1. show the type of the app that needs to be created
# 1.c ask user to press enter if it's ok, or to add the type of the app they want
# if it's not ok, check if the wanted app CAN be created
# if it can, print confirmation message and continue
# if it can't, print error message and exit
# 2. ask user for the main definition of the app
# start the processing queue
File: main.py
import os
import sys
import traceback
try:
    from dotenv import load_dotenv
except ImportError as err:
    raise RuntimeError('Python environment for GPT Pilot is not completely set up: required package "python-dotenv" is missing.') from err
load_dotenv()
def init():
# Check if the "euclid" database exists, if not, create it
if not database_exists():
create_database()
arguments = get_arguments()
return arguments
if __name__ == "__main__":
ask_feedback = True
project = None
run_exit_fn = True
args = init()
try:
# sys.argv.append('--ux-test=' + 'continue_development')
if '--api-key' in args:
os.environ["OPENAI_API_KEY"] = args['--api-key']
if '--api-endpoint' in args:
os.environ["OPENAI_ENDPOINT"] = args['--api-endpoint']
if '--get-created-apps-with-steps' in args:
run_exit_fn = False
File: pytest.ini
[pytest]
testpaths = .
pythonpath = .
python_files = test_*.py
markers =
slow: marks tests as slow (deselect with '-m "not slow"')
uses_tokens: Integration tests which use tokens
ux_test: Tests which are used to test the UX
daily: tests which should be run daily
File: test_main_e2e.py
import os
import builtins
import pytest
from unittest.mock import patch
from dotenv import load_dotenv
load_dotenv()
# Then
for field in ['app_id', 'user_id', 'email']:
assert args[field] is not None
@pytest.mark.slow
@pytest.mark.uses_tokens
@pytest.mark.skip(reason="Uses lots of tokens")
@pytest.mark.parametrize("endpoint, model", [
("OPENAI", "gpt-4"),
("OPENROUTER", "openai/gpt-3.5-turbo"),
("OPENROUTER", "meta-llama/codellama-34b-instruct"),
("OPENROUTER", "google/palm-2-chat-bison"),
("OPENROUTER", "google/palm-2-codechat-bison"),
# TODO: See https://github.com/1rgs/jsonformer-claude/blob/main/jsonformer_claude/main.py
# https://github.com/guidance-ai/guidance - token healing
("OPENROUTER", "anthropic/claude-2"),
])
def test_end_to_end(endpoint, model, monkeypatch):
# Given
monkeypatch.setenv('ENDPOINT', endpoint)
monkeypatch.setenv('MODEL_NAME', model)
create_tables()
args = init()
builtins.print, ipc_client_instance = get_custom_print(args)
project = Project(args)
File: code_execution.py
MAX_COMMAND_DEBUG_TRIES = 3
MAX_RECURSION_LAYER = 3
MIN_COMMAND_RUN_TIME = 2000 # 2sec
MAX_COMMAND_RUN_TIME = 60000 # 1min
MAX_COMMAND_OUTPUT_LENGTH = 50000
MAX_QUESTIONS_FOR_BUG_REPORT = 5
File: common.py
import os
DEFAULT_IGNORE_PATHS = [
'.git',
'.gpt-pilot',
'.idea',
'.vscode',
'.next',
'.DS_Store',
'__pycache__',
"site-packages",
'node_modules',
'package-lock.json',
'venv',
'dist',
'build',
'target',
"*.min.js",
"*.min.css",
"*.svg",
"*.csv",
"*.log",
"go.sum",
]
IGNORE_PATHS = DEFAULT_IGNORE_PATHS + [
folder for folder
in os.environ.get('IGNORE_PATHS', '').split(',')
if folder
]
IGNORE_SIZE_THRESHOLD = 50000  # files larger than this (~50 KB) are ignored by default
PROMPT_DATA_TO_IGNORE = {'directory_tree', 'name'}
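A small sketch of how these constants might be applied when walking the workspace, assuming it lives alongside the definitions above; `should_ignore` is a hypothetical helper, not necessarily how the project filters files.

```python
# Illustrative helper: decide whether a file should be skipped based on
# IGNORE_PATHS (plain names or glob patterns) and IGNORE_SIZE_THRESHOLD.
import fnmatch

def should_ignore(path: str, size: int) -> bool:
    parts = path.replace(os.sep, '/').split('/')
    if any(fnmatch.fnmatch(part, pattern) for part in parts for pattern in IGNORE_PATHS):
        return True
    return size > IGNORE_SIZE_THRESHOLD
```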
File: convert_to_playground_convo.js
let messages = {{messages}}
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}
textarea_to_fill = last_user_document.querySelector('textarea');
textarea_to_fill.focus();
document.execCommand("insertText", false, other_messages[i].content);
await sleep(100);
}
}
fill_playground(messages)
File: function_calls.py
def process_user_stories(stories):
return stories
def process_user_tasks(tasks):
return tasks
def process_os_technologies(technologies):
return technologies
File: ipc.py
MESSAGE_TYPE = {
'verbose': 'verbose',
'stream': 'stream',
'user_input_request': 'user_input_request', # Displayed above the
'hint': 'hint', # Hint text, eg "Do you want to add anything else? If not, just press ENTER."
'info': 'info', # JSON data can be sent to progress `progress_stage`
'local': 'local',
'run_command': 'run_command', # Command to run server needed for extension only
'project_folder_name': 'project_folder_name', # Project folder name for extension only
'button': 'button', # Button text for extension only
'buttons-only': 'buttons-only', # Button text for extension only but input field is disabled
'exit': 'exit', # Exit message to let extension know we are done
'ipc': 'ipc', # Regular print message that is for extension only
'openFile': 'openFile', # Open a file in extension
'loadingFinished': 'loadingFinished', # Marks end of loading project
'loopTrigger': 'loopTrigger', # Trigger loop feedback popup in extension
'progress': 'progress', # Progress bar for extension only
'projectStats': 'projectStats', # Project stats for extension only
'keyExpired': 'keyExpired', # (Free trial) key expired message - for extension only
}
LOCAL_IGNORE_MESSAGE_TYPES = [
'info',
'project_folder_name',
'run_command',
'button',
'buttons-only',
'exit',
'ipc',
'openFile',
'loadingFinished',
'loopTrigger',
'progress',
'projectStats',
'keyExpired',
]
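A brief sketch of how `LOCAL_IGNORE_MESSAGE_TYPES` might be used: when no extension (IPC client) is attached, extension-only message types are dropped rather than printed. The `print_local` function is hypothetical.

```python
def print_local(message: str, message_type: str = 'local', ipc_client=None) -> None:
    # Extension-only messages have no meaning in a plain terminal session.
    if ipc_client is None and message_type in LOCAL_IGNORE_MESSAGE_TYPES:
        return
    print(message)
```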
File: llm.py
import os
MAX_GPT_MODEL_TOKENS = int(os.getenv('MAX_TOKENS', 8192))
MIN_TOKENS_FOR_GPT_RESPONSE = 600
MAX_QUESTIONS = 5
END_RESPONSE = "EVERYTHING_CLEAR"
API_CONNECT_TIMEOUT = 30 # timeout for connecting to the API and sending the request (seconds)
API_READ_TIMEOUT = 300 # timeout for receiving the response (seconds)
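If the HTTP layer uses `requests` (not shown in this excerpt), the two timeouts map naturally onto its `(connect, read)` timeout tuple; the sketch below is an assumption about that wiring.

```python
import requests

def post_chat_completion(url: str, headers: dict, payload: dict) -> requests.Response:
    # Fail fast if the connection cannot be established, but allow long reads
    # while the model produces its response.
    return requests.post(url, headers=headers, json=payload,
                         timeout=(API_CONNECT_TIMEOUT, API_READ_TIMEOUT))
```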
File: messages.py
CHECK_AND_CONTINUE = 'Is everything working? Let me know if something needs to be changed for this task or type "continue" to proceed.'
WHEN_USER_DONE = 'Once you have completed, enter "continue"'
AFFIRMATIVE_ANSWERS = ['', 'y', 'yes', 'ok', 'okay', 'sure', 'absolutely', 'indeed', 'correct', 'affirmative']
NEGATIVE_ANSWERS = ['n', 'no', 'skip', 'negative', 'not now', 'cancel', 'decline', 'stop']
STUCK_IN_LOOP = 'I\'m stuck in a loop'
NONE_OF_THESE = 'none of these'
MAX_PROJECT_NAME_LENGTH = 50
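A minimal example of how the answer lists might be checked against free-form console input; the helpers are hypothetical.

```python
def is_affirmative(answer: str) -> bool:
    # An empty reply (just pressing ENTER) counts as "yes" per AFFIRMATIVE_ANSWERS.
    return answer.strip().lower() in AFFIRMATIVE_ANSWERS

def is_negative(answer: str) -> bool:
    return answer.strip().lower() in NEGATIVE_ANSWERS
```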
File: telemetry.py
LARGE_REQUEST_THRESHOLD = 50000 # tokens
SLOW_REQUEST_THRESHOLD = 300 # seconds
LOOP_THRESHOLD = 3 # number of iterations in task to be considered a loop
File: postgres.py
from peewee import PostgresqlDatabase
from database.config import DB_NAME, DB_HOST, DB_PORT, DB_USER, DB_PASSWORD, DATABASE_TYPE
if DATABASE_TYPE == "postgres":
import psycopg2
from psycopg2.extensions import quote_ident
def get_postgres_database():
return PostgresqlDatabase(DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST, port=DB_PORT)
def create_postgres_database():
conn = psycopg2.connect(
dbname='postgres',
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT
)
conn.autocommit = True
cursor = conn.cursor()
safe_db_name = quote_ident(DB_NAME, conn)
cursor.execute(f"CREATE DATABASE {safe_db_name}")
cursor.close()
conn.close()
File: sqlite.py
from peewee import SqliteDatabase
from database.config import DB_NAME
def get_sqlite_database():
return SqliteDatabase(DB_NAME)
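A sketch of how `DATABASE_TYPE` could select between the two factories above. The import paths for the connection modules are assumptions based on this dump; the real project may wire this up elsewhere (e.g. in `database/database.py`).

```python
from database.config import DATABASE_TYPE
from database.connection.postgres import get_postgres_database  # assumed path
from database.connection.sqlite import get_sqlite_database      # assumed path

def get_database():
    """Return the peewee database object for the configured backend."""
    if DATABASE_TYPE == 'postgres':
        return get_postgres_database()
    return get_sqlite_database()
```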
File: __init__.py
File: app.py
from peewee import ForeignKeyField, CharField
class App(BaseModel):
user = ForeignKeyField(User, backref='apps')
app_type = CharField(null=True)
name = CharField(null=True)
status = CharField(null=True)
File: architecture.py
# from peewee import
from database.config import DATABASE_TYPE
from database.models.components.progress_step import ProgressStep
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField
class Architecture(ProgressStep):
if DATABASE_TYPE == 'postgres':
architecture = BinaryJSONField()
    else:
        architecture = JSONField()  # Custom JSON field for SQLite
File: development.py
from database.models.components.progress_step import ProgressStep
class Development(ProgressStep):
class Meta:
table_name = 'development'
File: development_planning.py
from database.config import DATABASE_TYPE
from database.models.components.progress_step import ProgressStep
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField
class DevelopmentPlanning(ProgressStep):
if DATABASE_TYPE == 'postgres':
development_plan = BinaryJSONField()
else:
development_plan = JSONField() # Custom JSON field for SQLite
class Meta:
table_name = 'development_planning'
File: development_steps.py
from peewee import ForeignKeyField, AutoField, TextField, IntegerField, CharField
from database.config import DATABASE_TYPE
from database.models.components.base_models import BaseModel
from database.models.app import App
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField
class DevelopmentSteps(BaseModel):
id = AutoField() # This will serve as the primary key
app = ForeignKeyField(App, on_delete='CASCADE')
prompt_path = TextField(null=True)
llm_req_num = IntegerField(null=True)
token_limit_exception_raised = TextField(null=True)
if DATABASE_TYPE == 'postgres':
messages = BinaryJSONField(null=True)
llm_response = BinaryJSONField(null=False)
prompt_data = BinaryJSONField(null=True)
else:
messages = JSONField(null=True) # Custom JSON field for SQLite
llm_response = JSONField(null=False) # Custom JSON field for SQLite
prompt_data = JSONField(null=True)
class Meta:
table_name = 'development_steps'
indexes = (
(('app', 'previous_step', 'high_level_step'), True),
)
File: environment_setup.py
from database.models.components.progress_step import ProgressStep
File: file_snapshot.py
import logging
log = logging.getLogger(__name__)
class SmartBlobField(BlobField):
"""
A binary blob field that can also accept/return utf-8 strings.
This is a temporary workaround for the fact that we're passing either binary
or string contents to the database. Once this is cleaned up, we should only
accept binary content and explicitly convert from/to strings as needed.
"""
class FileSnapshot(BaseModel):
app = ForeignKeyField(App, on_delete='CASCADE')
development_step = ForeignKeyField(DevelopmentSteps, backref='files', on_delete='CASCADE')
file = ForeignKeyField(File, on_delete='CASCADE', null=True)
content = SmartBlobField()
class Meta:
table_name = 'file_snapshot'
indexes = (
(('development_step', 'file'), True),
)
File: files.py
from pathlib import Path
from os.path import commonprefix, join, sep
from peewee import AutoField, CharField, TextField, ForeignKeyField
class File(BaseModel):
id = AutoField()
app = ForeignKeyField(App, on_delete='CASCADE')
name = CharField()
path = CharField()
File: project_description.py
from peewee import TextField
from database.models.components.progress_step import ProgressStep
class ProjectDescription(ProgressStep):
prompt = TextField()
summary = TextField()
class Meta:
table_name = 'project_description'
File: user.py
from peewee import CharField
class User(BaseModel):
email = CharField(unique=True)
password = CharField()
File: user_apps.py
from peewee import AutoField, CharField, ForeignKeyField
class UserApps(BaseModel):
id = AutoField()
app = ForeignKeyField(App, on_delete='CASCADE')
user = ForeignKeyField(User, on_delete='CASCADE')
workspace = CharField(null=True)
class Meta:
table_name = 'user_apps'
indexes = (
(('app', 'user'), True),
)
File: user_inputs.py
from peewee import AutoField, ForeignKeyField, TextField, CharField
class UserInputs(BaseModel):
id = AutoField()
app = ForeignKeyField(App, on_delete='CASCADE')
query = TextField(null=True)
user_input = TextField(null=True)
hint = TextField(null=True)
previous_step = ForeignKeyField('self', null=True, column_name='previous_step')
high_level_step = CharField(null=True)
class Meta:
table_name = 'user_inputs'
indexes = (
File: user_tasks.py
from database.config import DATABASE_TYPE
from database.models.components.progress_step import ProgressStep
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField
class UserTasks(ProgressStep):
if DATABASE_TYPE == 'postgres':
user_tasks = BinaryJSONField()
else:
user_tasks = JSONField() # Custom JSON field for SQLite
class Meta:
table_name = 'user_tasks'
File: __init__.py
File: base_models.py
from peewee import Model, UUIDField, DateTimeField
from datetime import datetime
from uuid import uuid4
class BaseModel(Model):
id = UUIDField(primary_key=True, default=uuid4)
created_at = DateTimeField(default=datetime.now)
updated_at = DateTimeField(default=datetime.now)
class Meta:
database = database
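For illustration, any concrete model (such as `App` or `User` above) inherits the UUID primary key and timestamps from `BaseModel`; the `Note` model below is hypothetical and only shows the pattern.

```python
from peewee import CharField

class Note(BaseModel):
    text = CharField()

# Tables are then created through peewee, e.g. database.create_tables([Note]),
# using whatever database object BaseModel.Meta is bound to.
```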
File: progress_step.py
from peewee import ForeignKeyField, CharField, BooleanField, DateTimeField
from database.config import DATABASE_TYPE
from database.models.components.base_models import BaseModel
from database.models.app import App
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField
class ProgressStep(BaseModel):
app = ForeignKeyField(App, primary_key=True, on_delete='CASCADE')
step = CharField()
if DATABASE_TYPE == 'postgres':
app_data = BinaryJSONField()
data = BinaryJSONField(null=True)
messages = BinaryJSONField(null=True)
File: Agent.py
class Agent:
def __init__(self, role, project):
self.role = role
self.project = project
File: AgentConvo.py
import json
import re
import subprocess
import uuid
from os.path import sep
class AgentConvo:
"""
Represents a conversation with an agent.
Args:
agent: An instance of the agent participating in the conversation.
"""
Args:
prompt_path: The path to a prompt.
prompt_data: Data associated with the prompt.
function_calls: Optional function calls to be included in the message.
should_log_message: Flag if final response should be logged.
Returns:
The response from the agent.
"""
# craft message
File: Debugger.py
import platform
import uuid
import re
import traceback
class Debugger:
def __init__(self, agent):
self.agent = agent
self.recursion_layer = 0
Args:
convo (AgentConvo): The conversation object.
command (dict, optional): The command to debug. Default is None.
user_input (str, optional): User input for debugging. Default is None.
Should provide `command` or `user_input`.
issue_description (str, optional): Description of the issue to debug. Default is None.
ask_before_debug (bool, optional): True if we have to ask user for permission to start debugging.
task_steps (list, optional): The steps of the task to debug. Default is None.
step_index (int, optional): The index of the step to debug. Default is None.
Returns:
bool: True if debugging was successful, False otherwise.
"""
logger.info('Debugging %s', command)
self.recursion_layer += 1
self.agent.project.current_task.add_debugging_task(self.recursion_layer, command, user_input, issue_description)
if self.recursion_layer > MAX_RECURSION_LAYER:
self.recursion_layer = 0
# TooDeepRecursionError kills all debugging loops and goes back to the point where first debug was called
# it does not retry initial step but instead calls dev_help_needed()
raise TooDeepRecursionError()
function_uuid = str(uuid.uuid4())
convo.save_branch(function_uuid)
success = False
for i in range(MAX_COMMAND_DEBUG_TRIES):
if success:
break
if ask_before_debug or i > 0:
print('yes/no', type='button')
answer = ask_user(self.agent.project, 'Can I start debugging this issue [Y/n/error details]?', require_some_input=False)
if answer.lower() in NEGATIVE_ANSWERS:
self.recursion_layer -= 1
File: Project.py
import json
import os
from pathlib import Path
from typing import Tuple
import peewee
from playhouse.shortcuts import model_to_dict
class Project:
def __init__(
self,
args,
*,
ipc_client_instance=None,
):
"""
Initialize a project.
Args:
args (dict): Project arguments - app_id, (app_type, name), user_id, email, password, step
name (str, optional): Project name. Default is None.
description (str, optional): Project description. Default is None.
user_stories (list, optional): List of user stories. Default is None.
user_tasks (list, optional): List of user tasks. Default is None.
architecture (str, optional): Project architecture. Default is None.
development_plan (str, optional): Development plan. Default is None.
current_step (str, optional): Current step in the project. Default is None.
"""
self.args = args
self.llm_req_num = 0
self.command_runs_count = 0
File: __init__.py
File: cli.py
import psutil
import subprocess
import os
import signal
import threading
import queue
import time
import platform
from typing import Dict, Union
interrupted = False
Args:
command (str): The command to run.
root_path (str): The directory in which to run the command.
q_stdout (Queue): A queue to capture stdout.
q_stderr (Queue): A queue to capture stderr.
Returns:
subprocess.Popen: The subprocess object.
"""
logger.info(f'Running `{command}` on {platform.system()}')
if platform.system() == 'Windows': # Check the operating system
process = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
cwd=root_path
)
File: exceptions.py
import json

from const.llm import MAX_GPT_MODEL_TOKENS
class ApiKeyNotDefinedError(Exception):
def __init__(self, env_key: str):
self.env_key = env_key
super().__init__(f"API Key has not been configured: {env_key}")
class CommandFinishedEarly(Exception):
def __init__(self, message='Command finished before timeout. Handling early completion...'):
self.message = message
super().__init__(message)
class TokenLimitError(Exception):
def __init__(self, tokens_in_messages, max_tokens=MAX_GPT_MODEL_TOKENS):
self.tokens_in_messages = tokens_in_messages
self.max_tokens = max_tokens
super().__init__(f"Token limit error happened with {tokens_in_messages}/{max_tokens} tokens in messages!")
class TooDeepRecursionError(Exception):
def __init__(self, message='Recursion is too deep!'):
self.message = message
super().__init__(message)
class ApiError(Exception):
def __init__(self, message, response=None):
self.message = message
self.response = response
self.response_json = None
if response and hasattr(response, "text"):
try:
self.response_json = json.loads(response.text)
except Exception: # noqa
pass
super().__init__(message)
class GracefulExit(Exception):
def __init__(self, message='Graceful exit'):
self.message = message
super().__init__(message)
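A hedged sketch of how callers might use these exceptions around an LLM request; `send_request` is a placeholder for the real request function, and the project's actual call sites may differ.

```python
def request_with_guardrails(send_request, messages, tokens_in_messages):
    if tokens_in_messages > MAX_GPT_MODEL_TOKENS:
        raise TokenLimitError(tokens_in_messages)
    try:
        return send_request(messages)
    except ApiError as err:
        # response_json (if any) was parsed in ApiError.__init__ above.
        print(f"LLM API error: {err.message}", err.response_json)
        raise
```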
File: files.py
from pathlib import Path
import os
from typing import Optional, Union
File: ipc.py
import json
import socket

class IPCClient:
def __init__(self, port):
self.ready = False
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Connecting to the external process...")
try:
client.connect(('localhost', int(port)))
self.client = client
print("Connected!")
except ConnectionRefusedError:
self.client = None
print("Connection refused, make sure you started the external process")
def listen(self):
if self.client is None:
print("Not connected to the external process!")
return
while True:
data = b''
while True:
data = data + self.client.recv(512 * 1024)
try:
message = json.loads(data)
break
except json.JSONDecodeError:
# This means we still got an incomplete message, so
# we should continue to receive more data.
continue
if message['type'] == 'response':
# self.client.close()
return message['content']
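A short usage example for `IPCClient`; the port number is illustrative and the real value comes from the CLI arguments.

```python
ipc = IPCClient(port=8125)
if ipc.client is not None:
    content = ipc.listen()  # blocks until a complete JSON 'response' message arrives
    print(content)
```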
File: test_AgentConvo.py
import builtins
import os.path
from dotenv import load_dotenv
from database.database import database
from const.function_calls import IMPLEMENT_TASK
from helpers.agents.Developer import Developer
from helpers.AgentConvo import AgentConvo
File: test_Debugger.py
import builtins
import json
import pytest
from unittest.mock import patch, MagicMock
from dotenv import load_dotenv
load_dotenv()
from utils.custom_print import get_custom_print
from helpers.agents.Developer import Developer
from helpers.AgentConvo import AgentConvo
from helpers.Debugger import Debugger
from helpers.test_Project import create_project
from test.mock_questionary import MockQuestionary
################## NOTE: this test needs to be run in debug mode with breakpoints ##################
@pytest.mark.uses_tokens
@patch('pilot.helpers.AgentConvo.get_saved_development_step')
@patch('pilot.helpers.AgentConvo.save_development_step')
@patch('utils.questionary.save_user_input')
@patch('helpers.cli.run_command')
@patch('helpers.cli.save_command_run')
# @patch('pilot.helpers.cli.execute_command', return_value=('', 'DONE', 0))
def test_debug(
# mock_execute_command,
mock_save_command, mock_run_command,
mock_save_input, mock_save_step, mock_get_saved_step):
# Given
builtins.print, ipc_client_instance = get_custom_print({})
project = create_project()
project.current_step = 'coding'
developer = Developer(project)
project.developer = developer
convo = AgentConvo(developer)
convo.load_branch = lambda x: None
debugger = Debugger(developer)
# TODO: mock agent.project.developer.execute_task
# convo.messages.append()
convo.construct_and_add_message_from_prompt('dev_ops/ran_command.prompt', {
'cli_response': '''
stderr:
```
node:internal/modules/cjs/loader:1080
throw err;
^
File: test_Project.py
def create_project():
os.environ["AUTOFIX_FILE_PATHS"] = "false"
project = Project({
'app_id': 'test-project',
'name': 'TestProject',
'app_type': ''
})
project.set_root_path(test_root)
project.app = 'test'
project.current_step = 'test'
return project
class TestProject:
@pytest.mark.parametrize('file_path, file_name, expected', [
('file.txt', 'file.txt', f'{test_root}/file.txt'),
('', 'file.txt', f'{test_root}/file.txt'),
('path/', 'file.txt', f'{test_root}/path/file.txt'),
('path/to/', 'file.txt', f'{test_root}/path/to/file.txt'),
('path/to/file.txt', 'file.txt', f'{test_root}/path/to/file.txt'),
('./path/to/file.txt', 'to/file.txt', f'{test_root}/path/to/file.txt'),
('./package.json', 'package.json', f'{test_root}/package.json'),
])
def test_get_full_path(self, file_path, file_name, expected):
# Given
project = create_project()
# When
relative_path, absolute_path = project.get_full_file_path(file_path, file_name)
# Then
assert absolute_path == str(Path(expected))
@pytest.mark.parametrize(
("file_path", "file_name", "expected_path", "expected_absolute_path"), [
('', '', '/', f'{test_root}/'),
('', '.', '/', f'{test_root}/'),
('', '.env', '/', f'{test_root}/.env'),
('', '~/', '/', f'{test_root}/'),
('', f'{test_root}/', '/', f'{test_root}/'),
('', f'{test_root}/folder1', '/folder1', f'{test_root}/folder1/'),
('', f'{test_root}/Folder With Space/', '/Folder With Space', f'{test_root}/Folder With Space/'),
('', 'server.js', '/', f'{test_root}/server.js'),
('', 'folder1', '/folder1', f'{test_root}/folder1/'),
('', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('', f'{test_root}/Folder With Space/server.js', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
('', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('', './folder1/server.js', '/folder1', f'{test_root}/folder1/server.js'),
('.', '', '/', f'{test_root}/'),
('.', '.', '/', f'{test_root}/'),
File: test_cli.py
import platform
from unittest.mock import patch, MagicMock, call
import pytest
@pytest.mark.xfail()
@patch("helpers.cli.os")
@patch("helpers.cli.subprocess")
def test_terminate_process_not_running(mock_subprocess, mock_os):
terminate_process(1234, 'not running')
mock_subprocess.run.assert_not_called()
mock_os.killpg.assert_not_called()
# When
cli_response, llm_response, exit_code = execute_command(project, command, timeout, force=True)
# Then
assert cli_response is not None
assert llm_response == 'DONE'
assert exit_code is not None
mock_terminate_process.assert_called_once_with(1234)
@patch('helpers.cli.ask_user', return_value='')
@patch('helpers.cli.run_command')
@patch("helpers.cli.terminate_process")
def test_execute_command_enter(mock_terminate_process, mock_run, mock_ask):
# Given
project = create_project()
command = 'echo hello'
timeout = 1000
mock_run.side_effect = mock_run_command
# When
cli_response, llm_response, exit_code = execute_command(project, command, timeout)
# Then
File: AGENTS.md
Roles are defined in `const.common.ROLES`.
Each agent's role is described to the LLM by a prompt in `pilot/prompts/system_messages/{role}.prompt`.
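A minimal sketch of how a role's system prompt can be resolved from that path convention. The project actually loads prompts via `get_sys_message` in `utils/utils.py` (imported in `prompts.py` below); the helper here is only an illustration.

```python
from pathlib import Path

def load_system_message(role: str) -> str:
    prompt_path = Path('pilot/prompts/system_messages') / f'{role}.prompt'
    return prompt_path.read_text(encoding='utf-8')
```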
## Product Owner
`project_description`, `user_stories`, `user_tasks`
## Architect
`architecture`
TODO:
- README.md
- .gitignore
- .editorconfig
- LICENSE
- CI/CD
- IaC, Dockerfile
## Tech Lead
`development_planning`
## Dev Ops
`environment_setup`
**TODO: no prompt**
## Developer (full_stack_developer)
`create_scripts`, `coding`
## Code Monkey
`create_scripts`, `coding`, `implement_changes`
File: Architect.py
ARCHITECTURE_STEP = 'architecture'
WARN_SYSTEM_DEPS = ["docker", "kubernetes", "microservices"]
WARN_FRAMEWORKS = ["react", "react.js", "next.js", "vue", "vue.js", "svelte", "angular"]
WARN_FRAMEWORKS_URL = "https://github.com/Pythagora-io/gpt-pilot/wiki/Using-GPT-Pilot-with-frontend-frameworks"
class Architect(Agent):
def __init__(self, project):
super().__init__('architect', project)
self.convo_architecture = None
def get_architecture(self):
print(json.dumps({
"project_stage": "architecture"
}), type='info')
self.project.current_step = ARCHITECTURE_STEP
# If this app_id already did this step, just get all data from DB and don't ask user again
step = get_progress_steps(self.project.args['app_id'], ARCHITECTURE_STEP)
if step and not should_execute_step(self.project.args['step'], ARCHITECTURE_STEP):
step_already_finished(self.project.args, step)
self.project.architecture = None
self.project.system_dependencies = None
self.project.package_dependencies = None
self.project.project_template = None
db_data = step["architecture"]
if db_data:
if isinstance(db_data, dict):
self.project.architecture = db_data["architecture"]
self.project.system_dependencies = db_data["system_dependencies"]
self.project.package_dependencies = db_data["package_dependencies"]
self.project.project_template = db_data.get("project_template")
elif isinstance(db_data, list):
self.project.architecture = ""
self.project.system_dependencies = [
{
"name": dep,
"description": "",
"test": "",
"required_locally": False
} for dep in db_data
]
self.project.package_dependencies = []
self.project.project_template = None
return
File: CodeMonkey.py
import os.path
import re
from typing import Optional
from traceback import format_exc
from difflib import unified_diff
# Constant for indicating missing new line at the end of a file in a unified diff
NO_EOL = "\ No newline at end of file"
MAX_REVIEW_RETRIES = 3
class CodeMonkey(Agent):
save_dev_steps = True
def get_original_file(
self,
code_changes_description: str,
step: dict[str, str],
files: list[dict],
) -> tuple[str, str]:
"""
Get the original file content and name.
for f in files:
# Take into account that step path might start with "/"
if (f['path'] == step['path'] or (os.path.sep + f['path'] == step['path'])) and f['name'] == step['name'] and f['content']:
file_content = f['content']
break
else:
# If we didn't have the match (because of incorrect or double use of path separators or similar), fallback to directly loading the file
try:
file_content = get_file_contents(abs_path, self.project.root_path)['content']
if isinstance(file_content, bytes):
File: Developer.py
import platform
import uuid
import re
import json
ENVIRONMENT_SETUP_STEP = 'environment_setup'
class Developer(Agent):
def __init__(self, project):
super().__init__('full_stack_developer', project)
self.review_count = 0
self.run_command = None
self.save_dev_steps = True
self.debugger = Debugger(self)
# DEVELOPMENT
if not self.project.skip_steps:
logger.info("Starting to create the actual code...")
total_tasks = len(self.project.development_plan)
progress_thresholds = [50] # Percentages of progress when documentation is created
documented_thresholds = set()
File: ProductOwner.py
PROJECT_DESCRIPTION_STEP = 'project_description'
USER_STORIES_STEP = 'user_stories'
USER_TASKS_STEP = 'user_tasks'
class ProductOwner(Agent):
def __init__(self, project):
super().__init__('product_owner', project)
# If this app_id already did this step, just get all data from DB and don't ask user again
if self.project.app is not None:
step = get_progress_steps(self.project.args['app_id'], PROJECT_DESCRIPTION_STEP)
if step and not should_execute_step(self.project.args['step'], PROJECT_DESCRIPTION_STEP):
step_already_finished(self.project.args, step)
self.project.set_root_path(setup_workspace(self.project.args))
self.project.project_description = step['summary']
self.project.project_description_messages = step['messages']
self.project.main_prompt = step['prompt']
return
# PROJECT DESCRIPTION
self.project.current_step = PROJECT_DESCRIPTION_STEP
is_example_project = False
self.project.args['name'] = clean_filename(project_name)
File: SpecWriter.py
from helpers.AgentConvo import AgentConvo
from helpers.Agent import Agent
from utils.files import count_lines_of_code
from utils.style import color_green_bold, color_yellow_bold
from prompts.prompts import ask_user
from const.messages import AFFIRMATIVE_ANSWERS
from utils.exit import trace_code_event
INITIAL_PROJECT_HOWTO_URL = "https://github.com/Pythagora-io/gpt-pilot/wiki/How-to-write-a-good-initial-project-description"
class SpecWriter(Agent):
def __init__(self, project):
super().__init__('spec_writer', project)
self.save_dev_steps = True
convo = AgentConvo(self)
convo.construct_and_add_message_from_prompt('spec_writer/ask_questions.prompt', {})
num_questions = 0
skipped = False
user_response = initial_prompt
while True:
llm_response = convo.send_message('utils/python_string.prompt', {
"content": user_response,
})
if not llm_response:
continue
num_questions += 1
llm_response = llm_response.strip()
if len(llm_response) > 500:
print('continue', type='button')
user_response = ask_user(
self.project,
"Can we proceed with this project description? If so, just press ENTER. Otherwise, please tell me what's missing or what you'd like
hint="Does this sound good, and does it capture all the information about your project?",
require_some_input=False
)
if user_response:
user_response = user_response.strip()
if user_response.lower() in AFFIRMATIVE_ANSWERS + ['continue']:
break
else:
print('skip questions', type='button')
user_response = ask_user(self.project, llm_response)
if user_response and user_response.lower() == 'skip questions':
llm_response = convo.send_message(
'utils/python_string.prompt',
{
'content': 'This is enough clarification, you have all the information. Please output the spec now, without additional comments or qu
}
)
File: TechLead.py
from utils.utils import step_already_finished
from helpers.Agent import Agent
from utils.style import color_green_bold
from helpers.AgentConvo import AgentConvo
DEVELOPMENT_PLANNING_STEP = 'development_planning'
class TechLead(Agent):
def __init__(self, project):
super().__init__('tech_lead', project)
self.save_dev_steps = False
self.convo_feature_plan = AgentConvo(self)
def create_development_plan(self):
self.project.current_step = DEVELOPMENT_PLANNING_STEP
self.convo_development_plan = AgentConvo(self)
# If this app_id already did this step, just get all data from DB and don't ask user again
step = get_progress_steps(self.project.args['app_id'], DEVELOPMENT_PLANNING_STEP)
if step and not should_execute_step(self.project.args['step'], DEVELOPMENT_PLANNING_STEP):
step_already_finished(self.project.args, step)
self.project.development_plan = step['development_plan']
return
existing_summary = apply_project_template(self.project)
# DEVELOPMENT PLANNING
print(color_green_bold("Starting to create the action plan for development...\n"), category='agent:tech-lead')
logger.info("Starting to create the action plan for development...")
llm_response = self.convo_development_plan.send_message('development/plan.prompt',
{
"name": self.project.args['name'],
"app_type": self.project.args['app_type'],
"app_summary": self.project.project_description,
"user_stories": self.project.user_stories,
"user_tasks": self.project.user_tasks,
"architecture": self.project.architecture,
"technologies": self.project.system_dependencies + self.project.package_dependencies,
"existing_summary": existing_summary,
"files": self.project.get_all_coded_files(),
"task_type": 'app',
}, DEVELOPMENT_PLAN)
self.project.development_plan = llm_response['plan']
save_progress(self.project.args['app_id'], self.project.current_step, {
"development_plan": self.project.development_plan, "app_data": generate_app_data(self.project.args)
})
return
File: TechnicalWriter.py
class TechnicalWriter(Agent):
def __init__(self, project):
super().__init__('technical_writer', project)
self.save_dev_steps = True
def create_license(self):
# check if LICENSE file exists and if not create one. We want to create it only once.
return
def create_readme(self):
print(color_green('Creating README.md'))
convo = AgentConvo(self)
llm_response = convo.send_message('documentation/create_readme.prompt', {
"name": self.project.args['name'],
"app_type": self.project.args['app_type'],
"app_summary": self.project.project_description,
"user_stories": self.project.user_stories,
"user_tasks": self.project.user_tasks,
"directory_tree": self.project.get_directory_tree(True),
"files": self.project.get_all_coded_files(),
"previous_features": self.project.previous_features,
"current_feature": self.project.current_feature,
}, GET_DOCUMENTATION_FILE)
self.project.save_file(llm_response)
return convo
def create_api_documentation(self):
# create API documentation
return
File: __init__.py
from .Architect import Architect, ARCHITECTURE_STEP
from .Developer import Developer, ENVIRONMENT_SETUP_STEP
from .TechLead import TechLead
File: test_Developer.py
import builtins
import json
import os
File: test_TechLead.py
import builtins
import os
import pytest
from unittest.mock import patch
from dotenv import load_dotenv
load_dotenv()
class TestTechLead:
def setup_method(self):
builtins.print, ipc_client_instance = get_custom_print({})
name = 'TestTechLead'
self.project = Project({
'app_id': 'test-tech-lead',
'name': name,
'app_type': ''
},
name=name,
architecture=[],
user_stories=[]
)
self.project.set_root_path(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../../workspace/TestTechLead')))
self.project.technologies = []
self.project.project_description = '''
The project entails creating a web-based chat application, tentatively named "chat_app."
This application does not require user authentication or chat history storage.
It solely supports one-on-one messaging, excluding group chats or multimedia sharing like photos, videos, or files.
Additionally, there are no specific requirements for real-time functionality, like live typing indicators or read receipts.
The development of this application will strictly follow a monolithic structure, avoiding the use of microservices, as per the client's d
The development process will include the creation of user stories and tasks, based on detailed discussions with the client.
'''
self.project.user_stories = [
'User Story 1: As a user, I can access the web-based "chat_app" directly without needing to authenticate or log in. Do you want to
'User Story 2: As a user, I can start one-on-one conversations with another user on the "chat_app". Do you want to add anything e
'User Story 3: As a user, I can send and receive messages in real-time within my one-on-one conversation on the "chat_app". Do
'User Story 4: As a user, I do not need to worry about deleting or storing my chats because the "chat_app" does not store chat his
'User Story 5: As a user, I will only be able to send text messages, as the "chat_app" does not support any kind of multimedia sha
'User Story 6: As a user, I will not see any live typing indicators or read receipts since the "chat_app" does not provide any addition
]
self.project.architecture = ['Node.js', 'Socket.io', 'Bootstrap', 'JavaScript', 'HTML5', 'CSS3']
self.project.current_step = DEVELOPMENT_PLANNING_STEP
@pytest.mark.uses_tokens
@patch('helpers.AgentConvo.get_saved_development_step', return_value=None)
@patch('helpers.agents.TechLead.save_progress', return_value=None)
@patch('helpers.agents.TechLead.get_progress_steps', return_value=None)
def test_create_development_plan(self, mock_get_saved_step, mock_save_progress, mock_get_progress_steps):
self.techLead = TechLead(self.project)
File: logger.py
import os
import re
import logging
def setup_logger():
# Create a custom format for your logs
log_format = "%(asctime)s [%(filename)s:%(lineno)s - %(funcName)20s() ] %(levelname)s: %(message)s"
if os.getenv('DEBUG') == 'true':
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
return logger
def filter_sensitive_fields(record):
# TODO: also remove escape sequences for colors, bold etc
if isinstance(record.args, dict): # check if args is a dictionary
args = record.args.copy()
for field in sensitive_fields:
if field in args:
args[field] = '*****'
record.args = args
return True
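The excerpt does not show how `sensitive_fields` or the module-level `logger` are defined, so the wiring below is an assumption for illustration: it attaches the filter to a logger and shows a dict-style log call whose matching keys get masked.

```python
import logging

sensitive_fields = ['password', 'OPENAI_API_KEY']  # assumed example values

logger = logging.getLogger('gpt-pilot')
logger.addHandler(logging.StreamHandler())
logger.addFilter(filter_sensitive_fields)

# record.args is the dict below, so 'password' is replaced with '*****' before formatting.
logger.warning('New user: %(email)s / %(password)s',
               {'email': 'a@b.c', 'password': 'hunter2'})
```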
File: __init__.py
File: prompts.py
# prompts/prompts.py
from utils.style import color_white_bold
from const import common
from const.llm import MAX_QUESTIONS, END_RESPONSE
from utils.llm_connection import create_gpt_chat_completion
from utils.utils import get_sys_message, get_prompt
from utils.questionary import styled_select, styled_text
from logger.logger import logger
from helpers.exceptions import ApiError
def ask_for_app_type():
return 'App'
answer = styled_select(
"What type of app do you want to build?",
choices=common.APP_TYPES
)
if answer is None:
print("Exiting application.")
exit(0)
def ask_for_main_app_definition(project):
question = 'Describe your app in as much detail as possible.'
print(question, type='ipc')
description = ask_user(
project,
question
)
if description is None:
print("No input provided!")
return
return description
def ask_user(project, question: str, require_some_input=True, hint: str = None, ignore_user_input_count: bool = False):
while True:
if hint is not None:
print(color_white_bold(hint) + '\n')
File: test_prompts.py
from .prompts import get_prompt
def test_prompt_ran_command_None_exit():
# When
prompt = get_prompt('dev_ops/ran_command.prompt', {
'cli_response': 'stdout:\n```\nsuccess\n```',
'command': './scripts/run_tests',
'additional_message': 'Some additional message\n',
'exit_code': None
})
# Then
assert prompt == '''
Some additional message
stdout:
```
success
```
Think about this output and not any output in previous messages. If the command was successfully executed, respond with `DONE
def test_prompt_ran_command_0_exit():
# When
prompt = get_prompt('dev_ops/ran_command.prompt', {
'cli_response': 'stdout:\n```\nsuccess\n```',
'command': './scripts/run_tests',
'additional_message': 'Some additional message\n',
'exit_code': 0
})
# Then
assert prompt == '''
Some additional message
stdout:
```
success
```
Think about this output and not any output in previous messages. If the command was successfully executed, respond with `DONE
def test_parse_task_no_processes():
# When
prompt = get_prompt('development/parse_task.prompt', {
'running_processes': {}
})
# Then
File: change_in_tech.prompt
Do you want to change any of the technologies listed?
File: technologies.prompt
You're designing the architecture and technical specifications for a new project.
If the project requirements call out for specific technology, use that. Otherwise, if working on a web app, prefer Node.js for the bac
Based on these details, think step by step to design the architecture for the project and choose technologies to use in building it.
{% if templates %}You have an option to use a project template that implements standard boilerplate/scaffolding so you can start f
If no project templates are a good match, don't pick any! It's better to start from scratch than to use a template that is not a good fit
Contains:
{{ tpl['summary']}}
{% endfor %}{% endif %}
*IMPORTANT*: You must follow these rules while creating your project:
* You must only list *system* dependencies, ie. the ones that need to be installed (typically as admin) to set up the programming la
* If there are several popular options (such as Nginx or Apache for web server), pick one that would be more suitable for the app in
* DO NOT include text editors, IDEs, shells, OpenSSL, CLI tools such as git, AWS, or Stripe clients, or other utilities in your list. on
* If a dependency (such as database) has a cloud alternative or can be installed on another computer (ie. isn't required on this com
Output only your response in JSON format like in this example, without other commentary:
```json
{
"architecture": "Detailed description of the architecture of the application",
"system_dependencies": [
{
"name": "Node.js",
"description": "JavaScript runtime for building apps. This is required to be able to run the app you're building.",
"test": "node --version",
"required_locally": true
},
{
"name": "MongoDB",
"description": "NoSQL database. If you don't want to install MongoDB locally, you can use a cloud version such as MongoDB Atlas
"test": "mongosh --version",
"required_locally": false
},
...
],
"package_dependencies": [
{
File: execution_order.prompt
All the steps will be executed in order in which you give them, so it is very important that you think about all steps before you start
File: features_list.prompt
{% if previous_features %}
Here is the list of features that were previously implemented on top of initial high level description of "{{ name }}":
```
{% for feature in previous_features %}
- {{ loop.index }}. {{ feature['summary'] }}
{% endfor %}
```
{% endif %}{% if current_feature %}Here is the feature that you are implementing right now:
```
{{ current_feature }}
```
{% endif %}
File: file_naming.prompt
When creating and naming new files, ensure the file naming (camelCase, kebab-case, underscore_case, etc) is consistent with th
File: file_size_limit.prompt
**IMPORTANT**
When you think about in which file should the new code go to, always try to make files as small as possible and put code in more s
File: files_list.prompt
{% if files|length > 0 %}Here are files that are currently implemented:
---START_OF_FILES---{% for file in files %}
**{{ file.path }}/{{ file.name }}** ({{ file.lines_of_code }} lines of code):
```
{{ file.content }}
```
{% endfor %}
---END_OF_FILES---
{% endif -%}
File: human_intervention_explanation.prompt
**IMPORTANT**
You must not tell me to run a command in the database or anything OS related - only if some dependencies need to be installed. I
Avoid using "Human Intervention" if possible. You should NOT use "Human Intervention" for anything else than steps that you can
Here are a few examples when and how to use "Human Intervention":
------------------------start_of_example_1---------------------------
Here is an example of good response for the situation where it seems like 3rd party API, in this case Facebook, is not working:
* "Human Intervention"
"1. Check latest Facebook API documentation for updates on endpoints, parameters, or authentication.
2. Verify Facebook API key/authentication and request format to ensure they are current and correctly implemented.
3. Use REST client tools like Postman or cURL to directly test the Facebook API endpoints.
4. Check the Facebook API's status page for any reported downtime or service issues.
5. Try calling the Facebook API from a different environment to isolate the issue."
------------------------end_of_example_1---------------------------
------------------------start_of_example_2---------------------------
Here is an example of good response for the situation where the user needs to enable some settings in their Gmail account:
* "Human Intervention"
"To enable sending emails from your Node.js app via your Gmail, account, you need to do the following:
1. Log in to your Gmail account.
2. Go to 'Manage your Google Account' > Security.
File: list_running_processes.prompt
{% if running_processes -%}
Note that the following processes are already running:
command_id: {{ key }}
command: {{ data[0] }}
{%- endfor -%}
{%- endif -%}
File: logs_and_error_handling.prompt
**IMPORTANT**: Logging
Whenever you write code, make sure to log code execution so that when a developer looks at the CLI output, they can understand
File: no_microservices.prompt
**IMPORTANT**
Do not use, create or suggest any microservices. Ensure that the architecture for this task remains strictly monolithic. DO not sugg
File: project_details.prompt
Here is a high level description of "{{ name }}":
```
{{ app_summary }}
```
{% endif %}{% if user_stories %}Here are user stories that specify how users use "{{ name }}":
```
{% for story in user_stories %}
- {{ story }}
{% endfor %}
```
{% endif %}{% if user_tasks %}Here are user tasks that specify what users need to do to interact with "{{ name }}":
```
{% for task in user_tasks %}
- {{ task }}
{% endfor %}
```
{% endif %}{% if technologies %}Here are the technologies that {% if task_type == 'feature' %}were used{% else %}you need
{% for tech in technologies %}
* {{ tech["name"] }} - {{ tech["description"] }}{% endfor %}
{% endif %}
File: project_tasks.prompt
Before we go into the coding part, I want you to split the development process of creating this {{ task_type }} into smaller tasks so
Each task needs to be related only to the development of this {{ task_type }} and nothing else - once the {{ task_type }} is fully wor
**IMPORTANT**
As an experienced tech lead you always follow rules on how to create tasks. Dividing project into tasks is extremely important job
Now, based on the project details provided{% if task_type == 'feature' %} and new feature description{% endif %}, think task by ta
File: single_question.prompt
**IMPORTANT**
Here are the instructions for Asking Additional Questions:
Direct Questions Only: If there are any points that are not clear, you should draft direct questions to clarify them. Do not include an
Concise and Focused: Each question should be concise and focus on one aspect of the project. Do not merge multiple queries int
No Lead-ins or Conclusions: After receiving an answer to a question, proceed directly to the next question without adding any than
Neutral Tone: Ensure that your questions are neutral and don't imply any assumptions. The objective is to gather information, not
Examples:
Instead of "Thank you for that information. My next question is: Should A be bigger than B?", simply ask "Should A be bigger than
Instead of "Based on what you said earlier, do we need to prioritize X over Y?", just ask "Do we need to prioritize X over Y?".
Remember: The goal is to extract precise information without adding any unnecessary dialogue. Your questions should be straigh
I want your response to be only one question at a time. I will ask you again when I am ready for next question.
Ask maximum of {{MAX_QUESTIONS}} questions and after that I want you to respond with "{{END_RESPONSE}}".
If everything is clear before asking those {{MAX_QUESTIONS}} questions, you write the response in the following format:
"{{END_RESPONSE}}"
File: steps_list.prompt
{% if task_steps and step_index is not none -%}
The current task has been split into multiple steps, and each step is one of the following:
* `command` - command to run
* `save_file` - create or update a file
* `human_intervention` - if the human needs to do something
{% if step_index > 0 %}Here is the list of steps that have been executed:
{% for step in task_steps %}{% if loop.index0 < step_index %}
{%- if step.type in ['save_file', 'code_change', 'modify_file'] -%}
{%- set type_content = step.get(step.type, None) -%}
{%- if type_content -%}
{%- if 'content' in type_content -%}
{%- set _ = type_content.update({'content': '...' }) -%}
{%- endif -%}
{%- if 'code_change_description' in type_content -%}
{%- set _ = type_content.update({'code_change_description': '...' }) -%}
{%- endif -%}
{%- else -%}
{%- if 'code_change_description' in step -%}
{%- set _ = step.update({'code_change_description': '...' }) -%}
{%- endif -%}
{%- endif -%}
{%- endif -%}
{{ step }}
{% endif %}{% endfor %}{% endif %}
Here is the step you are currently debugging:
{{ task_steps[step_index] }}
{% if step_index < task_steps|length - 1 %}Here are steps that will be executed once debugging is done:
{% for step in task_steps %}{% if loop.index0 > step_index %}
{%- if step.type in ['save_file', 'code_change', 'modify_file'] -%}
{%- set type_content = step.get(step.type, None) -%}
{%- if type_content -%}
{%- if 'content' in type_content -%}
{%- set _ = type_content.update({'content': '...' }) -%}
{%- endif -%}
File: summary_instructions.prompt
**IMPORTANT**
Here are the instructions for Writing the Summary:
1. **Stick to the Facts**: Every sentence should be informative and relevant. Length is not an issue as long as all pertinent details
2. **Avoid Subjectivity and Mentioning The Client or Any External Entities**: Do not mention phrases like "the client wants" or "the
3. **Use Active Voice**: Use active rather than passive voice. For instance, "The project includes 5 subdomains" instead of "It was
4. **Be Direct**: Replace indirect phrases with direct statements. For example, instead of saying "The client said there might be a
5. **Prioritize Clarity**: Each statement should be clear and easy to understand. Refrain from using jargon unless it's widely recog
6. **Organize Information**: Group related items to ensure a coherent flow in your summary, making it more understandable for re
**Examples**:
- Instead of "The client expressed a preference for blue in our last meeting", write "The primary color is blue".
- Instead of "We've chosen to build on WordPress after reviewing potential platforms", write "The project will be built on WordPress
Remember: The goal of the summary is to provide a concise and accurate overview, focusing strictly on its factual aspects.
File: debug.prompt
{{ steps_list }}
{% if issue_description -%}
You wanted me to check this - `{{ issue_description }}` but there was a problem
{%- else -%}
Now, we need to debug this issue
{%- endif -%}
{% if command %} and we need to be able to execute `{{ command }}` successfully. {% endif %}.
{% if user_input %}I looked into this issue. I want you to take my findings as important part of debugging this issue. Here are my fin
```
{{ user_input }}
```{% endif -%}
I want you to create a list of steps that are needed to debug this issue.
* `command` - command to run (must be able to run on a {{ os }} machine, assume current working directory is project root folder)
* `code_change` - step will change the code and you need to thoroughly describe what needs to be implemented. I will implement
* `human_intervention` - if you need the human to do something, use this type of step and explain in details what you want the hum
{{ execution_order }}
Also, make sure that at least the last step has `check_if_fixed` set to TRUE.
{{ file_size_limit }}
{# After this, you need to decide what to do next. You can rerun the command `{{ command }}` to check if the problem is fixed or ru
File: ran_command.prompt
{{ steps_list }}
{%- if task_steps and step_index is not none -%}
When trying to see if the command was run successfully, take into consideration steps that were previously executed and steps that w
{%- endif %}
{%- if additional_message %}{{ additional_message }}{% endif %}
I ran the command `{{ command }}`. The output was:
{#%- if error_response %}, it {{ error_response }}{% endif %#}
{{ cli_response }}
Think about this output and not any output in previous messages. If the command was successfully executed, respond with `DONE
File: bug_report_summary.prompt
You are working on this app:
```
{{ app_summary }}
```
{% if task_review_description %}
User was given instructions on how to test if the app is working correctly. Here are the instructions:
```
{{ task_review_description }}
```
{% endif %}
Your job is now to write issue explanation that will be sent to developer, strictly following these rules:
- If there are some logs you MUST copy all logs in whole! Do not omit any logs! This is also true for code snippets or stack traces!
- Explanation must be very clear and factual, keep it as short as possible. Do not make assumptions, only state the facts.
- When writing explanation of the issue, it is good to cover all categories that you have information on. If you don't have information
- Omit all information that turns out to be irrelevant for this issue (e.g. after asking additional questions it turns out that first user me
- Write issue explanation as if you are talking directly to developer (in first person). Do not mention "user", talk as if you found the t
- Do not use any subjective thoughts, just facts.
- Write only issue explanation. Do not write any recap, summary or ideas how to solve the problem at the end. Do not write introdu
- Do not write any new code, only if something is provided by user.
- Have in mind that developer is smart and he will understand everything as long as you provide all information that you have and
- Have in mind that issue might not be related to your current development task.
- Do not use user instructions when creating issue explanation, that is only for you to understand the issue better.
File: context.prompt
{%- if directory_tree %}
The project directory tree looks like:
{{ directory_tree }}
{% endif -%}
{% if running_processes -%}
Note that the following processes are already running:
command_id: {{ key }}
command: {{ data[0] }}
{%- endfor -%}
{%- endif -%}
File: define_user_review_goal.prompt
How can a human user test if this task was completed successfully?
Please list actions, step by step, in order, that the user should take to verify the task. After each action, describe what the expected
**IMPORTANT**
Follow these important rules when compiling a list of actions the user will take:
File: error.prompt
I got the following error:
```
{{ error }}
```
Specify what needs to be done to fix this error either in the code or what command (or commands) needs to be run to fix this error
File: feature_plan.prompt
You are working in a software development agency and a project manager and software architect approach you telling you that yo
{{ project_details }}
{{ features_list }}
Here is directory tree that shows current folder structure of project:
```
{{ directory_tree }}
```
App and all its features are already finished and working.
{{ files_list }}
Finally, here is the description of new feature that needs to be added to {{ app_type }} "{{ name }}":
```
{{ feature_description }}
```
{{ project_tasks }}
File: feature_summary.prompt
You are working on a {{ app_type }} called "{{ name }}" and you need to create a summary for a new feature.
Here is a high level description of "{{ name }}":
```
{{ app_summary }}
```
After {{ app_type }} was successfully coded, user asked for an improvement. Here is what user asked:
```
{{ feature_description }}
```
Then a development plan for that feature was created and the feature was then broken down to smaller tasks so that it's easier for
```{% for task in development_tasks %}
- {{ loop.index }}. {{ task['description'] }}
{% endfor %}
```
Your goal is to create summary describing ONLY this feature in as little amount of sentences as possible. Try to keep it as short as
You are not making summary of project or tasks. Only summary of this feature.
{{summary_instructions}}
File: get_alternative_solutions.prompt
You are working on a {{ app_type }} called "{{ name }}" and you need to write code for the entire application.
Pay attention to file paths: if the command or argument is a file or folder from the project, use paths relative to the project root (for
File: get_snippet_from_comment.prompt
Here are previously coded files:
{% for file in previously_coded_files %}
**{{ file.path }}/{{ file.name }}**:
```
{{ file.content }}
```
{% endfor %}
These are placeholders for already written lines of code that can be found in the previously coded files that represent lines of code
**IMPORTANT**
Make sure that you respond with all lines of code that are replaced by these comments, including all control structures, error hand
Under no circumstances should you ever leave any part of the code snippet unwritten. Every single line of code that exists in the p
File: identify_files_to_change.prompt
You're a senior software developer implementing changes in one file in the project.
Based on the provided instructions and full file list, identify the file
that needs to be modified.
{{ files_list }}
Instructions:
{{ code_changes_description }}
Output *ONLY* the file path, relative to project root, in a single Markdown code block,
without any comments or explanation, like this:
```
path/to/file
```
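As an illustration of how a response in this exact format could be consumed, here is a minimal sketch; the helper name and regex are assumptions, not GPT Pilot's actual parser:
```python
import re

def extract_file_path(llm_response: str) -> str:
    # Pull the single file path out of the Markdown code block in the response.
    match = re.search(r"```(?:\w+)?\s*(.+?)\s*```", llm_response, re.DOTALL)
    if not match:
        raise ValueError("No Markdown code block found in the response")
    return match.group(1).strip()

print(extract_file_path("```\npath/to/file\n```"))  # -> path/to/file
```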
File: implement_changes.prompt
I need to modify file `{{ file_name }}` that currently looks like this:
File: iteration.prompt
You are working on a {{ app_type }} called "{{ name }}" and you need to write code for the entire application.
Here are the technologies that you need to use for this project:
{% for tech in technologies %}
* {{ tech["name"] }} - {{ tech["description"] }}{% endfor %}
{% endif %}
A part of the app is already finished.
{{ files_list }}
{% if user_feedback != '' %}
User who was using the app "{{ name }}" sent you this feedback:
```
{{ user_feedback }}
```{% endif %}
{% if user_feedback_qa %}Feedback was not clear enough so you asked user for additional information and got this response:
```{% for row in user_feedback_qa %}
Q: {{ row.question }}
A: {{ row.answer }}
{% endfor %}
```{% endif %}
{% if next_solution_to_try is not none %}
Focus on solving this issue in the following way:
```
{{ next_solution_to_try }}
```
{% endif %}
Now, you have to debug this issue and comply with the additional user feedback.
**IMPORTANT**
Think about all information provided. Your job is to look at big picture by analysing all files to find where the issue is.
Don't reply with any code, your thoughts, or a breakdown of the issue. Respond only with a description of the solution, explaining what sho
Create as few steps as possible to fix the issue. Each step should describe, using sentences and not code, what changes are nee
When there are multiple things that have to be done in one file, write everything as one step and don't split it into multiple steps.
You can assume the environment is already set up and the packages listed in the files are installed, so tell me only the commands neede
**IMPORTANT**
If the report mentions *multiple* issues, treat it as if you got several separate reports: think through each, and provide solutions for eac
{{ execution_order }}
{{ file_size_limit }}
File: parse_task.prompt
Ok, now, take your previous message that starts with `{{ instructions_prefix }}` and ends with `{{ instructions_postfix }}` and conver
File: plan.prompt
You are working in a software development agency and a project manager and software architect approach you telling you that yo
{{ project_details }}
{{ features_list }}
{% if files %}The developers have already used a project scaffolding tool that creates the initial boilerplate for the project:
{{ existing_summary }}
{{ project_tasks }}
File: review_changes.prompt
A developer on your team has been working on the task described in previous message. Based on those instructions, the develop
As you can see, there {% if hunks|length == 1 %}is only one hunk in this diff, and it{% else %}are {{hunks|length}} hunks in this diff
Think carefully about the instructions and review the proposed changes. For each hunk of change, provide a detailed rationale, an
* applied - if the change is correct
* ignored - for example if it is a code deletion or change that wasn't asked for
* reworked - if the change does something correctly but also makes a serious mistake, in which case both applying and ignoring th
Finally, if the changes miss something that was in the instructions, mention that. Keep in mind you're just reviewing one file, `{{ file
Note that the developer may add, modify or delete logging (including `gpt_pilot_debugging_log`) or error handling that's not explici
Here is an example output if 2 of 4 hunks in the change should be applied, one of them should be ignored, one should be reworke
```
{
"hunks": [
{
"number": 1,
"reason": "Some explanation why this part of the change is important. The reasoning here should be as detailed as possible as tha
"decision": "apply"
},
{
"number": 2,
"reason": "Another explanation, for the 2nd hunk",
"decision": "apply"
},
{
"number": 3,
"reason": "This hunk accidentally deletes important code without any useful change",
"decision": "ignore"
},
{
"number": 4,
"reason": "This hunk does something correctly but also does something really wrong. It would be incorrect to either apply or ignore
"decision": "rework"
File: review_feedback.prompt
Your changes have been reviewed.
{% if content != original_content %}
The reviewer approved and applied some of your changes, but requested you rework the others.
{{ rework_feedback }}
Based on this feedback and the original instructions, think carefully, make the correct changes, and output the entire file again. Re
File: review_task.prompt
You are working on an app called "{{ name }}" and you need to write code for the entire application.
{% if user_input -%}While working on this task, your colleague who is testing the app "{{ name }}" sent you some additional info. H
```
{{ user_input }}
```
After this info, you tried to fix it. {% endif %}Files that were modified during implementation of the task are:
```
{{ modified_files }}
```
Now I will show you how those files looked before this task implementation started. If a file is listed as the file that changed but is n
File: cli_response.prompt
Response from the CLI:
{{ cli_response }}
File: install_next_technology.prompt
Let's install `{{ technology }}` - respond with the command that I need to run to check if {{ technology }} is installed and ready to be
When you want to tell me a command I need to run, respond only with the command you want me to run and nothing else.
When the technology is fully installed, respond with `INSTALLED`.
If the previous command was meant to check if the technology is installed and if it wasn't, respond with `NOT_INSTALLED`.
File: specs.prompt
You are working in a software development agency and a project manager and software architect approach you telling you that yo
Here are the technologies that you need to use for this project:
{% for tech in technologies %}
* {{ tech["name"] }} - {{ tech["description"] }}{% endfor %}
Let's set up the environment on my machine. Here are the details about my machine:
```
{{ os_info }}
```
First, filter out the technologies from the list above and tell me which technologies need to be installed on my machine. That is eve
File: unsuccessful_installation.prompt
Ok, let's install {{ technology }} on my machine. You will tell me commands that I need to run and I will tell you the output I got. The
File: break_down_code_changes.prompt
You need to implement the current changes into a codebase:
-- INSTRUCTIONS --
{{ instructions }}
-- END OF INSTRUCTIONS --
First, you need to break down these instructions into actionable steps. There are 2 types of steps. If a step requi
For a step to be actionable, it cannot have a vague description; it must clearly explain what needs to be done to finish that step
BAD STEP: `Set up mongo database`
GOOD STEP: `Inside db.js, add the following code: {code that needs to be added to the file}`
When thinking about steps, first think about what files need to be changed to finish this task. When you determine what changes need
So, each step of type `code_change` can contain ALL changes that need to be made to a single file. If changes need to be made
Remember, all commands will be run from the project root folder.
Now, think step by step and return a list of steps that need to be run.
File: breakdown.prompt
File: next_step.prompt
{% if step_index != 0 %}
So far, steps {{ finished_steps }} are finished so let's do
{% else %}
Let's start with the{% endif %} step #{{ step_index }}. Here is the description of this step:
```
{{ step_description }}
```
Think step by step about what needs to be done to fulfill this step.
{% if step_type == 'COMMAND' %}
Respond with all commands that need to be run to fulfill this step.
{% elif step_type == 'CODE_CHANGE' %}
First, you need to know the code that's currently written so that you can appropriately write new or update the existing code. {# He
```
{{ directory_tree }}
```
#}
Respond with a list of files that you need to see before you can write the code for the current step. This list needs to be in a JSON
{% endif %}
File: request_files_for_code_changes.prompt
{#You need to implement the current changes into a codebase:
-- INSTRUCTIONS --
{{ instructions }}
-- END OF INSTRUCTIONS --
#}{% if step_index != 0 %}So far, steps {{ finished_steps }} are finished so let's do{% else %}Let's start with the{% endif %} step #{
{# I will give you each file that needs to be changed and you will implement changes from the instructions. #}To do this, you will ne
{#
Here is the current folder tree:
```
{{ directory_tree }}
```
#}
Remember, ask for files relative to the project root. For example, if you need a file with path `{project_root}/models/model.py`, you
File: request_test_files.prompt
Ok, now, I will show you the list of all files with automated tests that are written so far and I want you to tell me which automated te
{{ testing_files_tree }}
Remember, ask for files relative to the project root. For example, if you need a file with path `{project_root}/models/model.py`, you
File: step_check.prompt
Now, we need to verify if this change was successfully implemented. We can do that in 2 ways:
{#1. By writing an automated test or by running a previously written test - you write automated tests in Jest and you always try find
1. By running a command (or multiple commands) - this is good for when an automated test is an overkill. For example, if we insta
2. By requesting that a human checks if everything works as expected - this is the last option that we want to avoid but if we can't
Ok, now, tell me how we can verify if this change was successful and respond only with a keyword for the type of test.
File: step_code.prompt
Here are the requested files:
{% for file in files %}
**{{ file.path }}/{{ file.name }}**
```{{ file.language }}
{{ file.content }}
```
File: update_task.prompt
{% if completed_steps|length > 1 -%}
We have a plan for how to debug this and we've already done these steps:
-------------------------list_of_completed_steps-------------------------{% for step in completed_steps[:-1] %}
step #{{ loop.index - 1 }}:
step_type: {{ step.type }}{% if 'command' in step %}
command_to_run: `{{ step.command.command }}`{% elif step.type == 'code_change' %}
code_change_description: `{{ step.code_change_description }}`{% endif %}{% endfor %}
-------------------------END_list_of_completed_steps-------------------------
{% else %}
We have a plan for how to debug this.
{%- endif %}
We've run the command `{{ current_step.command.command }}` and we got the following output:
{{ cli_response }}
{% if next_steps %}
Here is a list of steps that will be executed after the current step:
-------------------------list_of_next_steps-------------------------
{% for step in next_steps %}
--step_{{ loop.index + current_step_index }}--
step_type: {{ step.type }}{% if 'command' in step %}
command_to_run: `{{ step.command.command }}`{% elif step.type == 'code_change' %}
code_change_description: `{{ step.code_change_description }}`{% endif %}
--END_step_{{ loop.index + current_step_index }}--{% endfor %}
-------------------------END_list_of_next_steps-------------------------
{% endif %}
Now, think step by step and create a new list of task steps to complete this task based on the output we got from running `{{ curren
{{ file_naming }}
{{ execution_order }}
{{ file_size_limit }}
File: command_test.prompt
Write a list of commands that should be run that will tell you if the implementation was successful. Write them in a JSON array whe
COMMAND: {command_that_needs_to_be_ran}
File: write_automated_test.prompt
{% if files|length > 0 %}
Here are the requested files:
{% for file in files %}
**{{ file.path }}/{{ file.name }}**
```{# file.language #}
{{ file.content }}
```
{% endfor %}
{% else %}
Currently, no tests are written.
{% endif %}
Now, start with the implementation of the automated test (or tests).
File: create_readme.prompt
You are working on a {{ app_type }} called "{{ name }}" and you need to create detailed documentation for the current state of the projec
{{ project_details }}
{{ features_list }}
{{ files_list }}
DO NOT specify commands to create any folders or files; they will be created automatically - just specify the relative path to the file th
Now, based on the project details provided, think step by step and create a README.md file for this project. The file should have the
# Project name
Short description (a few sentences) of the project based on the project details.
## Overview
Description of the architecture and technologies used in the project, and the project structure.
## Features
## Getting started
### Requirements
### Quickstart
### License
The project is proprietary (not open source); just output the standard "Copyright (c) 2024" template here.
File: answers.prompt
Here are the answers to your questions:
{% for answer in answers %}
{{ answer }}
{% endfor %}
Now, think step by step and ask any questions that you still want to get a better answer on.
**IMPORTANT**
If everything is clear, write the response in the following format:
EVERYTHING_CLEAR
App recap: {{recap of the app after all clarifications}}
File: specs.prompt
I want you to create the {{ app_type }} (let's call it "{{ name }}") that can be described like this:
```
{{ prompt }}
```
File: specs_instruction.prompt
I'm going to show you an overview of tasks that you need to do to lead the process of creating this {{ app_type }} and for each task
Example app description: `Create a script that finds Youtube channels with the word "test" inside the channel name`.
The AI developer prefers working on web apps using the Node/Express/MongoDB/Mongoose/EJS stack, and uses vanilla JS with Boot
Try to avoid the use of Docker, Kubernetes, microservices and single-page app frameworks like React, Next.js, Angular, Vue or Sv
If you identify any missing information or need clarification on any vague or ambiguous parts of the brief, ask the client about it.
Important note: don't ask trivial questions for obvious or unimportant parts of the app, for example:
* Bad questions example 1:
* Client brief: I want to build a hello world web app
* Bad questions:
* What title do you want for the web page that displays "Hello World"?
* What color and font size would you like for the "Hello World" text to be displayed in?
* Should the "Hello World" message be static text served directly from the server, or would you like it implemented via JavaScript o
* Explanation: There's no need to micromanage the developer(s) and designer(s), the client would've specified these details if they
If you ask such trivial questions, the client will think you're stupid and will leave. Don't do that.
Think carefully about what a developer must know to be able to build the app. The specification must address all of this information
When you gather all the information from the client, output the complete specification. Remember, the specification should define b
The UI should use the EJS view engine, Bootstrap for styling, and plain vanilla JavaScript. The design should be simple and look like Hack
Each story has a title (one-line text), a link (optional, URL to an external article being shared on AI News), and text (text to show in
Use the following algorithm to rank top stories, and comments within a story: "score = upvotes - downvotes + comments - sqrt(age
* / - shows the top 20 posted stories, ranked using the scoring algorithm, with a "More" link that shows the next 20 (pagination usin
* /newest - shows the latest 20 posted stories, ranked chronologically (newest first), with a "More" link that shows the next 20 (pag
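The scoring formula above is cut off after `sqrt(age`; purely as an illustrative sketch, assuming the age term means the story's age in hours, the ranking and pagination could look like this (the exact formula in the prompt may differ):
```python
import math
from datetime import datetime, timezone

def story_score(upvotes: int, downvotes: int, comments: int, posted_at: datetime) -> float:
    # Assumed reading of the truncated formula: penalize stories by sqrt of their age (in hours).
    age_hours = (datetime.now(timezone.utc) - posted_at).total_seconds() / 3600
    return upvotes - downvotes + comments - math.sqrt(age_hours)

def top_stories(stories: list, page: int = 0, per_page: int = 20) -> list:
    # "/" would show page 0 of the score-ranked list; the "More" link advances the page.
    ranked = sorted(
        stories,
        key=lambda s: story_score(s["upvotes"], s["downvotes"], s["comments"], s["posted_at"]),
        reverse=True,
    )
    return ranked[page * per_page:(page + 1) * per_page]
```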
File: review_spec.prompt
Your team has taken the client brief and turned it into a project specification.
Your job is to check the specification and identify all the information that is contained in the client brief, but missing from the specif
In your response, output all the information that is present in the client brief but missing from the spec, so it can be appended.
Note: don't output a suggestion for your team to take back to the drawing board. Instead, just output the missing information and the
File: architect.prompt
You are an experienced software architect. Your expertise is in creating an architecture for an MVP (minimum viable product) for
File: code_monkey.prompt
You are a full stack software developer that works in a software development agency. You write modular, clean, maintainable, pro
File: code_updater.prompt
You are a full stack software developer who works in a software development agency. You write very modular code. Your job is to
File: dev_ops.prompt
File: full_stack_developer.prompt
You are an expert full stack software developer who works in a software development agency.
You write modular, well-organized code split across files that are not too big, so that the codebase is maintainable. Your code is cl
Your job is to implement tasks that your tech lead assigns you. Each task has a description of what needs to be implemented.
File: product_owner.prompt
You are an experienced project owner (project manager) who manages the entire process of creating software applications for clie
File: spec_writer.prompt
You are a product owner working in a software development agency.
File: tech_lead.prompt
You are an experienced tech lead in a software development agency and your main task is to break down the project into smaller
File: technical_writer.prompt
You are a technical writer and, as such, you excel in clear, concise communication, skillfully breaking down complex technical conce
File: additional_questions.prompt
File: llm_response_error.prompt
There was an error processing your response:
{{ error }}
File: python_string.prompt
{{ content }}
File: summary.prompt
Based on the following conversation, write a summary:
File: update.prompt
I will show you part of your message to which I want you to make some updates. Please just modify your last message per these
```
{{ changes }}
```
File: __init__.py
import os
from typing import TYPE_CHECKING, Optional
from uuid import uuid4
if TYPE_CHECKING: # noqa
from helpers.Project import Project # noqa
PROJECT_TEMPLATES = {
"node_express_mongoose": NODE_EXPRESS_MONGOOSE,
}
def apply_project_template(
project: "Project",
) -> Optional[str]:
"""
Apply a project template to a new project.
Note: the template summary is injected in the project description, and the
created files are saved to a snapshot of the last development step (LLM request).
"""
template_name = project.project_template
if not template_name or template_name not in PROJECT_TEMPLATES:
logger.warning(f"Project template '{template_name}' not found, ignoring")
return None
File: node_express_mongoose.py
from helpers.cli import execute_command
def install_hook(project):
"""
Command to run to complete the project scaffolding setup.
NODE_EXPRESS_MONGOOSE = {
"path": "node_express_mongoose",
"description": "Node + Express + MongoDB web app with session-based authentication, EJS views and Bootstrap 5",
"summary": "\n".join([
"* initial Node + Express setup",
"* User model in Mongoose ORM with username and password fields, ensuring username is unique and hashing passwords with b
"* session-based authentication using username + password (hashed using bcrypt) in routes/authRoutes.js, using express-session
"* authentication middleware to protect routes that require login",
"* EJS view engine, html head, header and footer EJS partials, with included Bootstrap 5.x CSS and JS",
"* routes and EJS views for login, register, and home (main) page",
"* config loading from environment using dotenv with a placeholder .env.example file: you will need to create a .env file with your o
]),
"install_hook": install_hook,
}
File: render.py
from __future__ import annotations
class Renderer:
"""
Render a Jinja template
Usage:
```
from render import Renderer
r = Renderer('path/to/templates')
output_string = r.render_template('template.html', {'key': 'value'})
output_tree = r.render_tree('tree/root', {'key': 'value'})
```
"""
File: .env
# Port to listen on (example: 3000)
PORT=3000
File: .env.example
# Copy this file to .env and edit the settings
File: package.json
{
"name": "{{ project_name }}",
"version": "1.0.0",
"description": "",
"main": "server.js",
"scripts": {
"start": "node server.js",
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "",
"license": "ISC",
"dependencies": {
"bcrypt": "^5.1.1",
"body-parser": "^1.20.2",
"chart.js": "^4.4.1",
"connect-flash": "^0.1.1",
"csv-writer": "^1.6.0",
"dotenv": "^16.4.1",
"ejs": "^3.1.9",
"express": "^4.18.2",
"express-session": "^1.18.0",
"connect-mongo": "^5.1.0",
"moment": "^2.30.1",
"mongoose": "^8.1.1"
}
}
File: server.js
// Load environment variables
require("dotenv").config();
const mongoose = require("mongoose");
const express = require("express");
const session = require("express-session");
const MongoStore = require('connect-mongo');
const authRoutes = require("./routes/authRoutes");
if (!process.env.DATABASE_URL || !process.env.SESSION_SECRET) {
File: User.js
const mongoose = require('mongoose');
const bcrypt = require('bcrypt');
userSchema.pre('save', function(next) {
const user = this;
if (!user.isModified('password')) return next();
bcrypt.hash(user.password, 10, (err, hash) => {
if (err) {
console.error('Error hashing password:', err);
return next(err);
}
user.password = hash;
next();
});
});
module.exports = User;
File: style.css
/* Placeholder for custom styles */
File: main.js
// Placeholder for future JavaScript code
File: authRoutes.js
const express = require('express');
const User = require('../models/User');
const bcrypt = require('bcrypt');
const router = express.Router();
module.exports = {
isAuthenticated
};
File: index.ejs
<!DOCTYPE html>
<html lang="en">
<%- include('partials/_head.ejs') %>
<body>
<%- include('partials/_header.ejs') %>
<main role="main" class="container mt-4">
<div class="text-center">
<h1>{{ project_name }}</h1>
</div>
</main>
</body>
<%- include('partials/_footer.ejs') %>
<script src="/js/main.js"></script>
</html>
File: login.ejs
<!DOCTYPE html>
<html lang="en">
<%- include('partials/_head.ejs') %>
<body>
<%- include('partials/_header.ejs') %>
<main role="main">
<div class="container mt-5">
<h2>Login</h2>
<form action="/auth/login" method="POST">
<div class="mb-3">
<input type="text" name="username" placeholder="Username" required class="form-control">
</div>
<div class="mb-3">
<input type="password" name="password" placeholder="Password" required class="form-control">
</div>
<div class="mb-3">
<button type="submit" class="btn btn-primary">Login</button>
Don't have an account? <a href="/auth/register">Register</a>
</div>
</form>
</div>
</main>
</body>
<%- include('partials/_footer.ejs') %>
</html>
File: register.ejs
<!DOCTYPE html>
<html lang="en">
<%- include('partials/_head.ejs') %>
<body>
File: _footer.ejs
<footer class="footer fixed-bottom bg-light">
<div class="container text-center my-2">
<span>Copyright © <%= new Date().getFullYear() %> {{ project_name }}</span>
</div>
</footer>
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.min.js" integrity="sha384-BBtl+eGJRgqQAUMxJ7pMwb
File: _head.ejs
<head>
<meta charset="UTF-8">
<title>{{ project_name }}</title>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" integrity="sha384-T3c6CoIi6u
<link rel="stylesheet" href="/css/style.css">
</head>
File: _header.ejs
<nav class="navbar navbar-expand-md navbar-dark bg-dark">
<a class="navbar-brand" href="/">{{ project_name }}</a>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarNav" aria-controls="navbarNav" aria-exp
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarNav">
<ul class="navbar-nav">
<li class="nav-item">
<a class="nav-link" href="/">Home</a>
</li>
<li class="nav-item">
<% if (session && session.userId) { %>
<a class="nav-link" href="/auth/logout">Logout</a>
<% } else { %>
<a class="nav-link" href="/auth/login">Login</a>
<% } %>
</li>
</ul>
</div>
</nav>
File: __init__.py
File: mock_questionary.py
class MockQuestionary:
def __init__(self, answers=None, initial_state='project_description'):
if answers is None:
answers = []
self.answers = iter(answers)
self.state = initial_state
class Style:
def __init__(self, *args, **kwargs):
pass
class TestColorStyle(unittest.TestCase):
def test_initialization(self):
print("\n[INFO] Testing Theme Initialization...")
style_config.set_theme(Theme.DARK)
print(f"[INFO] Set theme to: {Theme.DARK}, Current theme: {style_config.theme}")
self.assertEqual(style_config.theme, Theme.DARK)
style_config.set_theme(Theme.LIGHT)
print(f"[INFO] Set theme to: {Theme.LIGHT}, Current theme: {style_config.theme}")
self.assertEqual(style_config.theme, Theme.LIGHT)
def test_color_function(self):
dark_color_codes = {
ColorName.RED: "\x1b[31m",
ColorName.GREEN: "\x1b[32m",
# ... other colors
}
light_color_codes = {
ColorName.RED: "\x1b[91m",
ColorName.GREEN: "\x1b[92m",
# ... other colors
}
reset = "\x1b[0m"
File: test_utils.py
from unittest.mock import Mock
def mock_terminal_size():
File: test_file_snapshot.py
from base64 import b64decode
EMPTY_PNG = b64decode(
"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJ
)
@pytest.fixture(autouse=True)
def database():
"""
Set up a new empty initialized test database.
This fixture will create all the tables and run the test in an isolated transaction,
which gets rolled back after the test. The fixture also drops all the tables at the
end.
"""
if DATABASE_TYPE == "postgres":
if not DB_NAME:
raise ValueError(
"PostgreSQL database name (DB_NAME) environment variable not set"
)
db = PostgresqlDatabase(
DB_NAME,
host=DB_HOST,
port=DB_PORT,
user=DB_USER,
password=DB_PASSWORD,
)
elif DATABASE_TYPE == "sqlite":
db = SqliteDatabase(":memory:")
else:
raise ValueError(f"Unexpected database type: {DATABASE_TYPE}")
db.bind(TABLES)
class PostgresRollback(Exception):
"""
Mock exception to ensure rollback after each test.
Even though we drop the tables at the end of each test, if the test
fails due to database integrity error, we have to roll back the
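A minimal sketch of the create / yield / rollback / drop flow this fixture describes, assuming peewee; the model list is a stand-in, not the project's actual `TABLES`:
```python
import pytest
from peewee import SqliteDatabase

TABLES = []  # stand-in for the project's list of peewee models

@pytest.fixture
def sqlite_test_db():
    # Bind the models to an in-memory database and create the schema.
    db = SqliteDatabase(":memory:")
    db.bind(TABLES)
    db.connect()
    db.create_tables(TABLES)
    try:
        with db.atomic() as txn:
            yield db
            txn.rollback()  # undo whatever the test wrote
    finally:
        db.drop_tables(TABLES)
        db.close()
```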
File: __init__.py
File: test_files.py
import os
from pathlib import Path
from tempfile import NamedTemporaryFile
from unittest.mock import patch, call
import pytest
@patch("pilot.helpers.files.open")
@patch("pilot.helpers.files.os")
def test_update_file_creates_directories(mock_os, mock_open):
"""Test that update_file creates intermediate directories if they don't exist."""
mock_os.path.dirname = os.path.dirname
update_file("/path/to/file", "content")
mock_os.makedirs.assert_called_once_with("/path/to", exist_ok=True)
@patch("pilot.helpers.files.open")
@patch("pilot.helpers.files.os")
def test_update_file_creates_text_file(mock_os, mock_open):
"""Test that update_file creates intermediate directories if they don't exist."""
update_file("/path/to/file", "■■")
mock_open.assert_called_once_with("/path/to/file", "w", encoding="utf-8")
mock_open.return_value.__enter__.return_value.write.assert_called_once_with("■■")
@patch("pilot.helpers.files.open")
@patch("pilot.helpers.files.os")
def test_update_file_creates_binary_file(mock_os, mock_open):
"""Test that update_file creates intermediate directories if they don't exist."""
update_file("/path/to/file", b"\x00\x00\x00")
mock_open.assert_called_once_with("/path/to/file", "wb", encoding=None)
mock_open.return_value.__enter__.return_value.write.assert_called_once_with(b"\x00\x00\x00")
@pytest.mark.parametrize(
("source", "expected_encoded"),
[
("file.txt", b"file.txt"),
("foo.txt - ■■", b"foo.txt - \xe7\x84\xa1\xe7\x82\xba"),
(b"\xff\xff\xff", b"\xff\xff\xff"),
],
)
def test_update_file_with_encoded_content(source, expected_encoded):
# Can't use NamedTemporaryFile as a context manager here because Windows
# doesn't allow O_TEMPORARY files (with delete=True) to be opened
# twice, defeating the purpose.
file = NamedTemporaryFile(delete=False)
update_file(file.name, source)
assert file.read() == expected_encoded
file.close()
os.remove(file.name)
File: test_ignore.py
from unittest.mock import patch
import pytest
from tempfile import TemporaryDirectory
@pytest.mark.parametrize(
("path", "expected"),
[
(".git", True),
(".gpt-pilot", True),
(".idea", True),
(".vscode", True),
(".DS_Store", True),
(join("subdirectory", ".DS_Store"), True),
("__pycache__", True),
(join("subdirectory", "__pycache__"), True),
("node_modules", True),
(join("subdirectory", "node_modules"), True),
("package-lock.json", True),
("venv", True),
("dist", True),
("build", True),
("target", True),
(".gitignore", False),
("server.js", False),
(join(dirname(__file__), "node_modules"), True),
(join(dirname(__file__), "subdirectory", "node_modules"), True),
]
)
@patch("utils.ignore.os.path.getsize")
@patch("utils.ignore.os.path.isfile")
@patch("utils.ignore.open")
def test_default_ignore(mock_open, mock_isfile, mock_getsize, path, expected):
mock_open.return_value.read.return_value = "fake-content"
mock_isfile.return_value = True
mock_getsize.return_value = 100
matcher = IgnoreMatcher(root_path=dirname(__file__))
assert matcher.ignore(path) == expected
@pytest.mark.parametrize(
("ignore", "path", "expected"),
[
("*.py[co]", "test.pyc", True),
("*.py[co]", "subdir/test.pyo", True),
("*.py[co]", "test.py", False),
("*.min.js", f"public{sep}js{sep}script.min.js", True),
("*.min.js", f"public{sep}js{sep}min.js", False),
]
)
@patch("utils.ignore.os.path.getsize")
@patch("utils.ignore.os.path.isfile")
@patch("utils.ignore.open")
def test_additional_ignore(mock_open, mock_isfile, mock_getsize, ignore, path, expected):
mock_open.return_value.read.return_value = "fake-content"
mock_isfile.return_value = True
mock_getsize.return_value = 100
matcher = IgnoreMatcher([ignore])
assert matcher.ignore(path) == expected
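A minimal sketch of the fnmatch-based matching these tests exercise; the real `IgnoreMatcher` also handles binary and oversized files, which is omitted here:
```python
from fnmatch import fnmatch
import os

def matches_ignore_pattern(pattern: str, path: str) -> bool:
    # Check the full path, the file name, and each path component against the pattern.
    if fnmatch(path, pattern) or fnmatch(os.path.basename(path), pattern):
        return True
    return any(fnmatch(part, pattern) for part in path.split(os.sep))

assert matches_ignore_pattern("*.py[co]", os.path.join("subdir", "test.pyo"))
assert not matches_ignore_pattern("*.py[co]", "test.py")
```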
File: test_settings.py
from io import StringIO
import json
from os.path import expanduser, expandvars, join
from os import getenv
from pathlib import Path
from subprocess import check_output
import sys
from unittest.mock import patch, MagicMock
import pytest
@pytest.fixture
def expected_config_location():
xdg_config_home = getenv("XDG_CONFIG_HOME")
if xdg_config_home:
return join(xdg_config_home, "gpt-pilot", "config.json")
elif sys.platform in ["darwin", "linux"]:
return expanduser("~/.gpt-pilot/config.json")
elif sys.platform == "win32":
return expandvars("%APPDATA%\\GPT Pilot\\config.json")
else:
raise RuntimeError(f"Unknown platform: {sys.platform}")
def test_settings_initializes_known_variables():
settings = Settings()
assert settings.openai_api_key is None
assert settings.telemetry is None
def test_settings_init_ignores_unknown_variables():
settings = Settings(unknown="value")
assert not hasattr(settings, "unknown")
def test_settings_forbids_saving_unknown_variables():
settings = Settings()
with pytest.raises(AttributeError):
settings.unknown = "value"
def test_settings_update():
settings = Settings()
settings.update(openai_api_key="test_key")
assert settings.openai_api_key == "test_key"
def test_settings_to_dict():
settings = Settings()
settings.update(openai_api_key="test_key")
assert dict(settings) == {
"openai_api_key": "test_key",
File: test_telemetry.py
from unittest.mock import patch
@patch("utils.telemetry.settings")
def test_telemetry_constructor_with_telemetry_enabled(mock_settings):
mock_settings.telemetry = {
"id": "test-id",
"endpoint": "test-endpoint",
"enabled": True,
}
telemetry = Telemetry()
assert telemetry.enabled
assert telemetry.telemetry_id == "test-id"
assert telemetry.endpoint == "test-endpoint"
@patch("utils.telemetry.settings")
def test_telemetry_constructor_with_telemetry_disabled(mock_settings):
mock_settings.telemetry = {"id": "existing-id", "enabled": False}
telemetry = Telemetry()
assert not telemetry.enabled
@patch("utils.telemetry.settings")
def test_telemetry_constructor_with_telemetry_not_configured(mock_settings):
mock_settings.telemetry = None
telemetry = Telemetry()
assert not telemetry.enabled
@patch("utils.telemetry.config_path", "/path/to/config")
@patch("utils.telemetry.settings")
def test_telemetry_constructor_logging_enabled(mock_settings, caplog):
caplog.set_level("DEBUG")
mock_settings.telemetry = {
"id": "test-id",
"endpoint": "test-endpoint",
"enabled": True,
}
Telemetry()
assert (
"Anonymous telemetry enabled (id=test-id), configure or disable it in /path/to/config"
in caplog.text
)
@patch("utils.telemetry.sys.platform", "test_platform")
@patch("utils.telemetry.sys.version", "test_version")
@patch("utils.telemetry.version", "test_pilot_version")
def test_clear_data_resets_data():
telemetry = Telemetry()
empty = Telemetry()
telemetry.data = {
"model": "test-model",
"num_llm_requests": 10,
"num_llm_tokens": 100,
"num_steps": 5,
"elapsed_time": 123.45,
File: Dev_continue_development.py
import pytest
from unittest.mock import patch
@pytest.mark.ux_test
@patch('utils.questionary.get_saved_user_input')
@patch('helpers.cli.get_saved_command_run')
@patch('helpers.AgentConvo.get_saved_development_step')
@patch('helpers.AgentConvo.save_development_step')
def test_continue_development(mock_4, mock_3, mock_2, mock_1):
# Given
project = create_project('continue_development', 'hello_world_server')
# execute_command(project, 'npm install', 13000)
developer = Developer(project)
project.developer = developer
convo = AgentConvo(developer)
convo.load_branch = lambda last_branch_name: None
developer.run_command = 'node server.js'
# Note: uncomment the following 2 lines and indent the remaining lines when debugging without console input
mock_questionary = MockQuestionary(['r', 'continue'])
with patch('utils.questionary.questionary', mock_questionary):
# When
# `continue_development` calls `run_command_until_success()` if the user types "r"
developer.continue_development(convo, 'branch_name', 'The web page should say "Hello, World!"')
print('end of "continue_development" scenario')
terminate_running_processes()
File: README.md
The functions in this directory are used to test specific scenarios of the user experience.
File: __init__.py
# from .run_command_until_success import run_command_until_success
from .cli_execute_command import cli_execute_command
from .Dev_continue_development import test_continue_development
from .utils import use_args
tests = {
# 'run_command_until_success': run_command_until_success,
'cli_execute_command': cli_execute_command,
'continue_development': test_continue_development,
}
if test_name in tests:
use_args(args)
return tests[test_name]()
File: arguments.py
import hashlib
import os
import re
import sys
import uuid
from getpass import getuser
from database.database import get_app, get_app_by_user_workspace
from utils.style import color_green_bold, color_red, style_config
from utils.utils import should_execute_step
from const.common import STEPS
def get_arguments():
# The first element in sys.argv is the name of the script itself.
# Any additional elements are the arguments passed from the command line.
args = sys.argv[1:]
app = None
if 'workspace' in arguments:
arguments['workspace'] = os.path.abspath(arguments['workspace'])
app = get_app_by_user_workspace(arguments['user_id'], arguments['workspace'])
if app is not None:
arguments['app_id'] = str(app.id)
arguments['continuing_project'] = True
else:
arguments['workspace'] = None
if 'app_id' in arguments:
if app is None:
try:
app = get_app(arguments['app_id'])
except ValueError as err:
print(color_red(f"Error: {err}"))
sys.exit(-1)
arguments['app_type'] = app.app_type
arguments['name'] = app.name
arguments['status'] = app.status
File: custom_print.py
import builtins
from helpers.ipc import IPCClient
from const.ipc import MESSAGE_TYPE, LOCAL_IGNORE_MESSAGE_TYPES
from utils.print import remove_ansi_codes
def get_custom_print(args):
built_in_print = builtins.print
ipc_client_instance.send({
'type': MESSAGE_TYPE[kwargs['type']],
'category': kwargs['category'] if 'category' in kwargs else '',
'content': remove_ansi_codes(message),
})
if kwargs['type'] == MESSAGE_TYPE['user_input_request']:
return ipc_client_instance.listen()
if 'category' in kwargs:
del kwargs['category']
built_in_print(message, **kwargs)
ipc_client_instance = None
if '--external-log-process-port' in args:
ipc_client_instance = IPCClient(args['--external-log-process-port'])
return print_to_external_process, ipc_client_instance
else:
return local_print, ipc_client_instance
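`local_print` itself is not shown above; as a hedged sketch of what the local (non-IPC) branch could do, it would drop the extra keyword arguments before delegating to the real built-in `print` (this is an assumption, not the project's exact implementation):
```python
import builtins

def make_local_print():
    built_in_print = builtins.print

    def local_print(message, **kwargs):
        # Drop the extra kwargs GPT Pilot attaches to print() calls before
        # delegating to the real built-in print.
        kwargs.pop('type', None)
        kwargs.pop('category', None)
        built_in_print(message, **kwargs)

    return local_print
```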
File: dot_gpt_pilot.py
import json
import os
import yaml
from datetime import datetime
from dotenv import load_dotenv
load_dotenv()
# TODO: Parse files from the `.gpt-pilot` directory to resume a project - `user_stories` may have changed - include checksums for
# TODO: Save a summary at the end of each task/sprint.
class DotGptPilot:
File: exit.py
import requests
try:
response = requests.post("https://api.pythagora.io/telemetry", json=feedback_data)
response.raise_for_status()
except requests.RequestException as err:
print(f"Failed to send feedback data: {err}")
try:
response = requests.post("https://api.pythagora.io/telemetry", json=telemetry_data)
response.raise_for_status()
except: # noqa
pass
def get_path_id():
return telemetry.telemetry_id
def get_parent_folder(folder_name):
current_path = Path(os.path.abspath(__file__)) # get the path of the current script
while current_path.name != folder_name: # while the current folder name is not 'folder_name'
current_path = current_path.parent # go up one level
return current_path.parent
try:
save_user_app(args.get('user_id'), args.get('app_id'), project_path)
except Exception as e:
print(f'Error saving user app: {str(e)}')
print(os.path.basename(project_path), type='project_folder_name')
return project_path
return new_directory_path
def count_lines_of_code(files):
return sum(len(file['content'].splitlines()) for file in files)
File: function_calling.py
import json
import re
from typing import Union, TypeVar, List, Dict, Literal, Optional, TypedDict, Callable
class FunctionParameters(TypedDict):
"""Function parameters"""
type: Literal["object"]
properties: dict[str, JsonType]
required: Optional[list[str]]
File: ignore.py
from fnmatch import fnmatch
import os.path
from typing import Optional
class IgnoreMatcher:
def __init__(self,
ignore_paths: Optional[list[str]] = None,
*,
root_path: Optional[str] = None,
ignore_binaries: bool = True,
ignore_large_files: bool = True,
):
"""
Initialize the IgnoreMatcher object.
if self.is_in_ignore_list(path):
return True
return False
tokenizer = tiktoken.get_encoding("cl100k_base")
if 'parameters' in function:
parameters = function['parameters']
if 'properties' in parameters:
for propertiesKey in parameters['properties']:
function_tokens += len(tokenizer.encode(propertiesKey))
v = parameters['properties'][propertiesKey]
for field in v:
if field == 'type':
function_tokens += 2
function_tokens += len(tokenizer.encode(v['type']))
elif field == 'description':
function_tokens += 2
function_tokens += len(tokenizer.encode(v['description']))
elif field == 'enum':
function_tokens -= 3
for o in v['enum']:
function_tokens += 3
function_tokens += len(tokenizer.encode(o))
function_tokens += 11
num_tokens += function_tokens
num_tokens += 12
return num_tokens
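A simplified, self-contained sketch of the counting idea above, assuming the `tiktoken` cl100k_base encoding (the per-field overhead constants are illustrative):
```python
import tiktoken

def count_property_tokens(parameters: dict) -> int:
    # Sum the token counts of each property name, type and description,
    # plus small per-field overheads (the constants are illustrative).
    enc = tiktoken.get_encoding("cl100k_base")
    tokens = 0
    for name, prop in parameters.get("properties", {}).items():
        tokens += len(enc.encode(name))
        if "type" in prop:
            tokens += 2 + len(enc.encode(prop["type"]))
        if "description" in prop:
            tokens += 2 + len(enc.encode(prop["description"]))
    return tokens

print(count_property_tokens({
    "properties": {
        "command": {"type": "string", "description": "Shell command to run"},
    }
}))
```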
File: print.py
import re
:return: None
"""
print({'task': {
'index': index,
'num_of_tasks': num_of_tasks,
'description': description,
'source': task_source,
'status': status,
'source_index': source_index,
}}, type='progress')
:return: None
"""
print({'step': {
'index': index,
'num_of_steps': num_of_steps,
'step': step,
'source': task_source,
}}, type='progress')
File: questionary.py
import platform
import questionary
import sys
from database.database import save_user_input
from utils.style import style_config
from utils.print import remove_ansi_codes
File: settings.py
import ast
import json
from logging import getLogger
from os import getenv, makedirs
from pathlib import Path
import sys
from typing import Any, Optional
load_dotenv()
log = getLogger(__name__)
AVAILABLE_SETTINGS = [
"telemetry",
"openai_api_key",
]
class Settings:
"""
Application settings
This object holds all the settings for the application, whether they are
loaded from the config file, set via environment variables or the command
line arguments.
To get a setting:
>>> settings.openai_api_key
>>> dict(settings)
Note: updating settings will not save them to the config file.
To do that, use the `loader.save()` method:
def spinner_start(text="Processing..."):
spinner = yaspin(Spinners.line, text=text)
spinner.start()
return spinner
def spinner_stop(spinner):
if spinner is not None:
spinner.stop()
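Usage sketch for the helpers above, assuming the `yaspin`/`Spinners` imports they rely on are in place:
```python
import time

# Uses the spinner_start / spinner_stop helpers defined above.
spinner = spinner_start("Waiting for the LLM response...")
try:
    time.sleep(2)  # stand-in for a long-running call
finally:
    spinner_stop(spinner)
```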
File: style.py
from colorama import Fore, Style as ColoramaStyle, init
from enum import Enum
from questionary import Style
class Theme(Enum):
"""
Enum representing themes, which can be either DARK or LIGHT.
"""
DARK = 'dark'
LIGHT = 'light'
YELLOW = 'yellow'
class ColorName(Enum):
"""
Enum representing color names and their corresponding ANSI color codes.
Each color has a normal and a light version, indicated by the two elements in the tuple.
"""
RED = (Fore.RED, Fore.LIGHTRED_EX)
GREEN = (Fore.GREEN, Fore.LIGHTGREEN_EX)
YELLOW = (Fore.YELLOW, Fore.LIGHTYELLOW_EX)
BLUE = (Fore.BLUE, Fore.LIGHTBLUE_EX)
CYAN = (Fore.CYAN, Fore.LIGHTCYAN_EX)
WHITE = (Fore.WHITE, Fore.LIGHTWHITE_EX)
THEME_STYLES = {
# Style configurations for DARK theme
Theme.DARK: Style.from_dict({
'question': '#FFFFFF bold', # the color and style of the question - White
'answer': '#FF910A bold', # the color and style of the answer - Dark Orange / Pumpkin
'pointer': '#FF4500 bold', # the color and style of the pointer - Orange Red
'highlighted': '#63CD91 bold', # the color and style of the highlighted option - Medium Aquamarine
'instruction': '#FFFF00 bold' # the color and style of the instruction - Yellow
}),
# Style configurations for LIGHT theme
Theme.LIGHT: Style.from_dict({
'question': '#000000 bold', # the color and style of the question - Black
'answer': '#FFB74D bold', # the color and style of the answer - Light Orange
'pointer': '#FF7043 bold', # the color and style of the pointer - Light Red
'highlighted': '#AED581 bold', # the color and style of the highlighted option - Light Green
'instruction': '#757575 bold' # the color and style of the instruction - Grey
File: task.py
import json
from uuid import uuid4
class Task:
"""
Task data structure to store information about the current task. The task data structure is sent to telemetry.
Currently used to trace big loops in the code.
To set a value:
To increment a value:
>>> task.inc('steps')
>>> task.start_new_task('test', 1)
When debugging recursion happens inside a task (see pilot/helpers/Debugger.py), we add a debugging task to the
task data structure. To add a debugging task:
>>> task.add_debugging_task(1, {'command': 'test'}, 'This is not working', 'Command is not working')
>>> task.clear()
>>> task.send()
Note: the task will be sent automatically if the number of steps exceeds the threshold
"""
def __init__(self):
self.initial_data = {
'task_description': '',
'task_number': 0,
'steps': 0,
'iterations': 0,
'debugging': [],
}
self.data = self.initial_data.copy()
self.ping_extension = True
import requests
log = getLogger(__name__)
class Telemetry:
"""
Anonymous telemetry.
This class is a singleton, use the `telemetry` global variable to access it:
>>> telemetry.setup()
>>> telemetry.start()
>>> telemetry.stop()
>>> telemetry.send()
DEFAULT_ENDPOINT = "https://api.pythagora.io/telemetry"
MAX_CRASH_FRAMES = 3
def __init__(self):
self.enabled = False
self.telemetry_id = None
self.endpoint = None
self.clear_data()
def test_email_found_in_gitconfig():
mock_file_content = """
[user]
name = test_user
email = [email protected]
"""
with patch('os.path.exists', return_value=True):
with patch('builtins.open', mock_open(read_data=mock_file_content)):
assert get_email() == "[email protected]"
def test_email_not_found_in_gitconfig():
mock_file_content = """
[user]
name = test_user
"""
mock_uuid = "12345678-1234-5678-1234-567812345678"
def test_gitconfig_not_present():
mock_uuid = "12345678-1234-5678-1234-567812345678"
def test_username_to_uuid():
assert username_to_uuid("test_user") == "31676025-316f-b555-e0bf-a12f0bcfd0ea"
File: test_files.py
import os
from unittest.mock import patch
from utils.files import setup_workspace
def mocked_abspath(file):
return "/root_path/pilot/helpers"
@patch('utils.files.os.makedirs', side_effect=mocked_create_directory)
def test_setup_workspace_with_existing_workspace(mock_makedirs):
args = {'workspace': '/some/directory', 'name': 'sample'}
result = setup_workspace(args)
assert result == '/some/directory'
File: test_function_calling.py
from const.function_calls import ARCHITECTURE, USER_TASKS
from utils.llm_connection import clean_json_response
from .function_calling import parse_agent_response, JsonPrompter
class TestFunctionCalling:
def test_parse_agent_response_text(self):
# Given
response = {'text': 'Hello world!'}
# When
response = parse_agent_response(response, None)
# Then
assert response == 'Hello world!'
def test_parse_agent_response_json(self):
# Given
response = {'text': '{"greeting": "Hello world!"}'}
function_calls = {'definitions': [], 'functions': {}}
# When
response = parse_agent_response(response, function_calls)
# Then
assert response == {'greeting': 'Hello world!'}
def test_parse_agent_response_json_markdown(self):
# Given
response = {'text': '```json\n{"greeting": "Hello world!"}\n```'}
function_calls = {'definitions': [], 'functions': {}}
# When
response['text'] = clean_json_response(response['text'])
response = parse_agent_response(response, function_calls)
# Then
assert response == {'greeting': 'Hello world!'}
def test_parse_agent_response_markdown(self):
# Given
response = {'text': '```\n{"greeting": "Hello world!"}\n```'}
function_calls = {'definitions': [], 'functions': {}}
# When
response['text'] = clean_json_response(response['text'])
response = parse_agent_response(response, function_calls)
# Then
assert response == {'greeting': 'Hello world!'}
def test_parse_agent_response_multiple_args(self):
# Given
response = {'text': '{"greeting": "Hello", "name": "John"}'}
function_calls = {'definitions': [], 'functions': {}}
# When
response = parse_agent_response(response, function_calls)
# Then
assert response['greeting'] == 'Hello'
assert response['name'] == 'John'
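A hedged sketch of the fence-stripping these tests exercise; the project's actual `clean_json_response` also handles other cleanup (for example Python-style booleans, per the test further below):
```python
import re

def clean_json_response_sketch(text: str) -> str:
    # Strip an optional ``` / ```json fence from around the JSON payload.
    text = text.strip()
    text = re.sub(r"^```(?:json)?\s*", "", text)
    text = re.sub(r"\s*```$", "", text)
    return text

assert clean_json_response_sketch('```json\n{"greeting": "Hello world!"}\n```') == '{"greeting": "Hello world!"}'
```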
File: test_llm_connection.py
import builtins
from json import JSONDecodeError
import os
import pytest
from unittest.mock import call, patch, Mock
from dotenv import load_dotenv
from jsonschema import ValidationError
from const.function_calls import ARCHITECTURE, DEVELOPMENT_PLAN
from helpers.AgentConvo import AgentConvo
from helpers.Project import Project
from helpers.agents.Architect import Architect
from helpers.agents.TechLead import TechLead
from utils.function_calling import parse_agent_response, FunctionType
from test.test_utils import assert_non_empty_string
from test.mock_questionary import MockQuestionary
from utils.llm_connection import create_gpt_chat_completion, stream_gpt_completion, \
assert_json_response, assert_json_schema, clean_json_response, retry_on_exception
from main import get_custom_print
load_dotenv()
os.environ.pop("AUTOFIX_FILE_PATHS", None)
def test_clean_json_response_True_False():
# Given a JSON response with Title Case True and False
response = '''
```json
{
"steps": [
{
"type": "command",
"command": {
"command": "git init",
"daemon": False,
"timeout": 3000,
"boolean": False
},
"another_True": True,
"check_if_fixed": True
}
]
}
```
'''
# When
response = clean_json_response(response)
def test_clean_json_response_boolean_in_python():
# Given a JSON response with Python booleans in a content string
response = '''
File: test_utils.py
from .utils import should_execute_step
class TestShouldExecuteStep:
def test_no_step_arg(self):
assert should_execute_step(None, 'project_description') is True
assert should_execute_step(None, 'architecture') is True
assert should_execute_step(None, 'coding') is True
def test_skip_step(self):
assert should_execute_step('architecture', 'project_description') is False
assert should_execute_step('architecture', 'architecture') is True
assert should_execute_step('architecture', 'coding') is True
def test_unknown_step(self):
assert should_execute_step('architecture', 'unknown') is False
assert should_execute_step('unknown', 'project_description') is False
assert should_execute_step('unknown', None) is False
assert should_execute_step(None, None) is False
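A hedged reconstruction of the behaviour these tests pin down; the real `STEPS` list lives in `const.common` and may differ from the shortened list used here:
```python
STEPS_SKETCH = ['project_description', 'architecture', 'development_planning', 'coding']

def should_execute_step_sketch(step_arg, current_step):
    # Unknown current steps never execute.
    if current_step not in STEPS_SKETCH:
        return False
    # No --step argument: run every known step.
    if step_arg is None:
        return True
    # Unknown --step argument: run nothing.
    if step_arg not in STEPS_SKETCH:
        return False
    # Otherwise run the requested step and everything after it.
    return STEPS_SKETCH.index(current_step) >= STEPS_SKETCH.index(step_arg)
```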
File: utils.py
# utils/utils.py
import datetime
import os
import platform
import uuid
import distro
import json
import hashlib
import re
import copy
from jinja2 import Environment, FileSystemLoader
from .style import color_green
def capitalize_first_word_with_underscores(s):
    # Split the string into words based on underscores.
    words = s.split('_')
    # Capitalize only the first word, then rejoin the words with underscores.
    capitalized_string = '_'.join([words[0].capitalize()] + words[1:])
    return capitalized_string
get_prompt_components(data)