Pilot

This document contains configuration files and source code for GPT Pilot, a conversational AI coding assistant. It includes files for database initialization, command-line argument parsing, logging, and interacting with AI models from OpenAI and other providers. The main entry point initializes the database, parses command-line arguments, and obtains a customized print function for communicating with clients.


File: .env.example
# OPENAI or AZURE or OPENROUTER
ENDPOINT=OPENAI

# OPENAI_ENDPOINT=https://api.openai.com/v1/chat/completions
OPENAI_ENDPOINT=
OPENAI_API_KEY=

AZURE_API_KEY=
AZURE_ENDPOINT=

OPENROUTER_API_KEY=

# In case of Azure/OpenRouter endpoint, change this to your deployed model name
MODEL_NAME=gpt-4-turbo-preview
MAX_TOKENS=8192

# Folders which shouldn't be tracked in workspace (useful to ignore folders created by compiler)
# IGNORE_PATHS=folder1,folder2

# Database
# DATABASE_TYPE=postgres

DB_NAME=gpt-pilot
DB_HOST=
DB_PORT=
DB_USER=
DB_PASSWORD=

# USE_GPTPILOT_FOLDER=true

# Load database imported from another location/system - EXPERIMENTAL
# AUTOFIX_FILE_PATHS=false

# Set extra buffer to wait on top of the detected retry time when a rate limit is hit. Defaults to 6
# RATE_LIMIT_EXTRA_BUFFER=
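
These values are read as environment variables once python-dotenv has loaded the file. A minimal sketch of how the endpoint selection above could be consumed at runtime (the variable names come from this file; the snippet itself is illustrative and not part of the repository):

import os
from dotenv import load_dotenv

load_dotenv()

endpoint = os.getenv("ENDPOINT", "OPENAI")  # OPENAI, AZURE or OPENROUTER
model_name = os.getenv("MODEL_NAME", "gpt-4-turbo-preview")
max_tokens = int(os.getenv("MAX_TOKENS", 8192))

# Pick the API key matching the selected endpoint (hypothetical mapping, not repo code)
api_key = {
    "OPENAI": os.getenv("OPENAI_API_KEY"),
    "AZURE": os.getenv("AZURE_API_KEY"),
    "OPENROUTER": os.getenv("OPENROUTER_API_KEY"),
}.get(endpoint)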

File: __init__.py

File: db_init.py
from dotenv import load_dotenv
load_dotenv()
from database.database import create_tables, drop_tables

drop_tables()
create_tables()

File: instructions.comment.py
# init CLI
# 1. show the type of the app that needs to be created
# 1.c ask user to press enter if it's ok, or to add the type of the app they want
# if it's not ok, check if the wanted app CAN be created
# if it can, print confirmation message and continue
# if it can't, print error message and exit
# 2. ask user for the main definition of the app
# start the processing queue

# 2. show the user flow of the app


# 2.c ask user to press enter if it's ok, or to add the user flow they want
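
A minimal sketch of the "press ENTER if it's ok, or type an alternative" pattern these notes describe (illustrative only; the actual flow uses ask_user from prompts/prompts.py and questionary):

def confirm_or_override(question: str, suggestion: str) -> str:
    # Empty input means the user accepted the suggestion; anything else overrides it.
    answer = input(f"{question} [{suggestion}] (press ENTER to accept): ").strip()
    return suggestion if answer == "" else answer

app_type = confirm_or_override("What type of app should be created?", "Web App")
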
File: main.py
# main.py
import builtins
import json
import os

import sys
import traceback

try:
from dotenv import load_dotenv
except ImportError as err:
raise RuntimeError('Python environment for GPT Pilot is not completely set up: required package "python-dotenv" is missing.') from err

load_dotenv()

from utils.style import color_red


from utils.custom_print import get_custom_print
from helpers.Project import Project
from utils.arguments import get_arguments
from utils.exit import exit_gpt_pilot
from logger.logger import logger
from database.database import database_exists, create_database, tables_exist, create_tables, get_created_apps_with_steps

from utils.settings import settings, loader, get_version


from utils.telemetry import telemetry
from helpers.exceptions import ApiError, TokenLimitError, GracefulExit

def init():
# Check if the "euclid" database exists, if not, create it
if not database_exists():
create_database()

# Check if the tables exist, if not, create them


if not tables_exist():
create_tables()

arguments = get_arguments()

logger.info('Starting with args: %s', arguments)

return arguments

if __name__ == "__main__":
ask_feedback = True
project = None
run_exit_fn = True

args = init()

try:
# sys.argv.append('--ux-test=' + 'continue_development')

builtins.print, ipc_client_instance = get_custom_print(args)

if '--api-key' in args:
os.environ["OPENAI_API_KEY"] = args['--api-key']
if '--api-endpoint' in args:
os.environ["OPENAI_ENDPOINT"] = args['--api-endpoint']

if '--get-created-apps-with-steps' in args:
run_exit_fn = False
File: pytest.ini
[pytest]
testpaths = .
pythonpath = .
python_files = test_*.py

markers =
slow: marks tests as slow (deselect with '-m "not slow"')
uses_tokens: Integration tests which use tokens
ux_test: Tests which are used to test the UX
daily: tests which should be run daily
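
These markers let subsets of the suite be deselected; for example, skipping slow and token-consuming tests with pytest's `-m` expression (shown here via pytest.main, illustrative only; running `pytest -m "not slow and not uses_tokens"` from the shell is equivalent):

import pytest

# Deselect slow and token-consuming tests
pytest.main(["-m", "not slow and not uses_tokens"])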

File: test_main_e2e.py
import os
import builtins
import pytest
from unittest.mock import patch
from dotenv import load_dotenv
load_dotenv()

from database.database import create_tables


from helpers.Project import Project
from test.mock_questionary import MockQuestionary
from .main import init, get_custom_print

@pytest.mark.xfail(reason="Reliably fails on CI, reliably works locally")
@patch.dict(os.environ, {"DB_NAME": ":memory:"})
def test_init():
# When
args = init()

# Then
for field in ['app_id', 'user_id', 'email']:
assert args[field] is not None

for field in ['workspace', 'step']:


assert args[field] is None

@pytest.mark.slow
@pytest.mark.uses_tokens
@pytest.mark.skip(reason="Uses lots of tokens")
@pytest.mark.parametrize("endpoint, model", [
("OPENAI", "gpt-4"),
("OPENROUTER", "openai/gpt-3.5-turbo"),
("OPENROUTER", "meta-llama/codellama-34b-instruct"),
("OPENROUTER", "google/palm-2-chat-bison"),
("OPENROUTER", "google/palm-2-codechat-bison"),
# TODO: See https://github.com/1rgs/jsonformer-claude/blob/main/jsonformer_claude/main.py
# https://github.com/guidance-ai/guidance - token healing
("OPENROUTER", "anthropic/claude-2"),
])
def test_end_to_end(endpoint, model, monkeypatch):
# Given
monkeypatch.setenv('ENDPOINT', endpoint)
monkeypatch.setenv('MODEL_NAME', model)

create_tables()
args = init()
builtins.print, ipc_client_instance = get_custom_print(args)
project = Project(args)
File: code_execution.py
MAX_COMMAND_DEBUG_TRIES = 3
MAX_RECURSION_LAYER = 3
MIN_COMMAND_RUN_TIME = 2000 # 2sec
MAX_COMMAND_RUN_TIME = 60000 # 1min
MAX_COMMAND_OUTPUT_LENGTH = 50000
MAX_QUESTIONS_FOR_BUG_REPORT = 5

File: common.py
import os

APP_TYPES = ['Web App', 'Script', 'Mobile App', 'Chrome Extension']


ROLES = {
'product_owner': ['project_description', 'user_stories', 'user_tasks'],
'architect': ['architecture'],
'tech_lead': ['development_planning'],
'full_stack_developer': ['coding'],
'dev_ops': ['environment_setup'],
'code_monkey': ['coding']
}
STEPS = [
'project_description',
'user_stories',
'user_tasks',
'architecture',
'environment_setup',
'development_planning',
'coding',
'finished'
]

DEFAULT_IGNORE_PATHS = [
'.git',
'.gpt-pilot',
'.idea',
'.vscode',
'.next',
'.DS_Store',
'__pycache__',
"site-packages",
'node_modules',
'package-lock.json',
'venv',
'dist',
'build',
'target',
"*.min.js",
"*.min.css",
"*.svg",
"*.csv",
"*.log",
"go.sum",
]
IGNORE_PATHS = DEFAULT_IGNORE_PATHS + [
folder for folder
in os.environ.get('IGNORE_PATHS', '').split(',')
if folder
]
IGNORE_SIZE_THRESHOLD = 50000 # files larger than 50KB are ignored by default
PROMPT_DATA_TO_IGNORE = {'directory_tree', 'name'}
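
IGNORE_PATHS mixes plain directory/file names with glob patterns such as *.min.js. A minimal sketch of how such entries could be matched against a relative path, assuming fnmatch semantics (the repository's actual matching logic lives in utils/ignore.py's IgnoreMatcher):

import fnmatch
import os

def is_ignored(rel_path: str, ignore_paths: list) -> bool:
    parts = rel_path.split(os.sep)
    for pattern in ignore_paths:
        # Match plain names anywhere in the path, or glob patterns on the file name.
        if pattern in parts or fnmatch.fnmatch(parts[-1], pattern):
            return True
    return False

sample = ['node_modules', '.git', '*.min.js']
print(is_ignored(os.path.join('node_modules', 'express', 'index.js'), sample))  # True
print(is_ignored(os.path.join('src', 'app.min.js'), sample))                    # True
print(is_ignored(os.path.join('src', 'app.js'), sample))                        # False
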
File: convert_to_playground_convo.js
let messages = {{messages}}

function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}

async function fill_playground(messages) {


let system_messages = messages.filter(msg => msg.role === 'system');
if (system_messages.length > 0) {
let system_message_textarea = document.querySelector('.chat-pg-instructions').querySelector('textarea');
system_message_textarea.focus();
system_message_textarea.value = '';
document.execCommand("insertText", false, system_messages[0].content);
await sleep(100);
}

// Remove all previous messages


let remove_buttons = document.querySelectorAll('.chat-message-remove-button');
for (let j = 0; j < 10; j++) {
for (let i = 0; i < remove_buttons.length; i++) {
let clickEvent = new Event('click', {
'bubbles': true,
'cancelable': true
});
remove_buttons[i].dispatchEvent(clickEvent);
}
}

let other_messages = messages.filter(msg => msg.role !== 'system');

for (let i = 0; i < other_messages.length; i++) {


document.querySelector('.add-message').click()
await sleep(100);
}

for (let i = 0; i < other_messages.length; i++) {


let all_elements = document.querySelectorAll('.text-input-with-focus');
let last_user_document = all_elements[i];

textarea_to_fill = last_user_document.querySelector('textarea');
textarea_to_fill.focus();
document.execCommand("insertText", false, other_messages[i].content);
await sleep(100);
}
}

fill_playground(messages)

File: function_calls.py
def process_user_stories(stories):
return stories

def process_user_tasks(tasks):
return tasks

def process_os_technologies(technologies):
return technologies
File: ipc.py
MESSAGE_TYPE = {
'verbose': 'verbose',
'stream': 'stream',
'user_input_request': 'user_input_request', # Displayed above the user input field
'hint': 'hint', # Hint text, eg "Do you want to add anything else? If not, just press ENTER."
'info': 'info', # JSON data can be sent to progress `progress_stage`
'local': 'local',
'run_command': 'run_command', # Command to run server needed for extension only
'project_folder_name': 'project_folder_name', # Project folder name for extension only
'button': 'button', # Button text for extension only
'buttons-only': 'buttons-only', # Button text for extension only but input field is disabled
'exit': 'exit', # Exit message to let extension know we are done
'ipc': 'ipc', # Regular print message that is for extension only
'openFile': 'openFile', # Open a file in extension
'loadingFinished': 'loadingFinished', # Marks end of loading project
'loopTrigger': 'loopTrigger', # Trigger loop feedback popup in extension
'progress': 'progress', # Progress bar for extension only
'projectStats': 'projectStats', # Project stats for extension only
'keyExpired': 'keyExpired', # (Free trial) key expired message - for extension only
}

LOCAL_IGNORE_MESSAGE_TYPES = [
'info',
'project_folder_name',
'run_command',
'button',
'buttons-only',
'exit',
'ipc',
'openFile',
'loadingFinished',
'loopTrigger',
'progress',
'projectStats',
'keyExpired',
]
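
A minimal sketch of how a print wrapper might route on these message types when no IPC client is attached, using the LOCAL_IGNORE_MESSAGE_TYPES list above (illustrative only; the real behaviour lives in utils/custom_print.py's get_custom_print):

def local_print(message, type='local'):
    # Extension-only messages are suppressed when GPT Pilot runs in a plain terminal.
    if type in LOCAL_IGNORE_MESSAGE_TYPES:
        return
    print(message)

local_print('42%', type='progress')   # suppressed locally
local_print('Hello from GPT Pilot')   # printed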

File: llm.py
import os
MAX_GPT_MODEL_TOKENS = int(os.getenv('MAX_TOKENS', 8192))
MIN_TOKENS_FOR_GPT_RESPONSE = 600
MAX_QUESTIONS = 5
END_RESPONSE = "EVERYTHING_CLEAR"
API_CONNECT_TIMEOUT = 30 # timeout for connecting to the API and sending the request (seconds)
API_READ_TIMEOUT = 300 # timeout for receiving the response (seconds)
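
The two timeout constants map naturally onto the (connect, read) timeout tuple accepted by the requests library; a hedged sketch of how they could be applied to an LLM API call (the repository's real request handling is in utils/llm_connection.py and may differ):

import requests

def post_completion(url: str, headers: dict, payload: dict) -> requests.Response:
    # requests accepts a (connect_timeout, read_timeout) tuple in seconds
    return requests.post(url, headers=headers, json=payload,
                         timeout=(API_CONNECT_TIMEOUT, API_READ_TIMEOUT))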

File: messages.py
CHECK_AND_CONTINUE = 'Is everything working? Let me know if something needs to be changed for this task or type "continue" to proceed.'
WHEN_USER_DONE = 'Once you have completed, enter "continue"'
AFFIRMATIVE_ANSWERS = ['', 'y', 'yes', 'ok', 'okay', 'sure', 'absolutely', 'indeed', 'correct', 'affirmative']
NEGATIVE_ANSWERS = ['n', 'no', 'skip', 'negative', 'not now', 'cancel', 'decline', 'stop']
STUCK_IN_LOOP = 'I\'m stuck in a loop'
NONE_OF_THESE = 'none of these'
MAX_PROJECT_NAME_LENGTH = 50

File: telemetry.py
LARGE_REQUEST_THRESHOLD = 50000 # tokens
SLOW_REQUEST_THRESHOLD = 300 # seconds
LOOP_THRESHOLD = 3 # number of iterations in task to be considered a loop
File: postgres.py
from peewee import PostgresqlDatabase
from database.config import DB_NAME, DB_HOST, DB_PORT, DB_USER, DB_PASSWORD, DATABASE_TYPE
if DATABASE_TYPE == "postgres":
import psycopg2
from psycopg2.extensions import quote_ident

def get_postgres_database():
return PostgresqlDatabase(DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST, port=DB_PORT)

def create_postgres_database():
conn = psycopg2.connect(
dbname='postgres',
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT
)
conn.autocommit = True
cursor = conn.cursor()
safe_db_name = quote_ident(DB_NAME, conn)
cursor.execute(f"CREATE DATABASE {safe_db_name}")
cursor.close()
conn.close()

File: sqlite.py
from peewee import SqliteDatabase
from database.config import DB_NAME

def get_sqlite_database():
return SqliteDatabase(DB_NAME)

File: __init__.py

File: app.py
from peewee import ForeignKeyField, CharField

from database.models.components.base_models import BaseModel


from database.models.user import User

class App(BaseModel):
user = ForeignKeyField(User, backref='apps')
app_type = CharField(null=True)
name = CharField(null=True)
status = CharField(null=True)

File: architecture.py
# from peewee import
from database.config import DATABASE_TYPE
from database.models.components.progress_step import ProgressStep
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField

class Architecture(ProgressStep):
if DATABASE_TYPE == 'postgres':
architecture = BinaryJSONField()
else:
architecture = JSONField() # Custom JSON field for SQLite

class Meta:
table_name = 'architecture'

File: development.py
from database.models.components.progress_step import ProgressStep

class Development(ProgressStep):
class Meta:
table_name = 'development'

File: development_planning.py
from database.config import DATABASE_TYPE
from database.models.components.progress_step import ProgressStep
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField

class DevelopmentPlanning(ProgressStep):
if DATABASE_TYPE == 'postgres':
development_plan = BinaryJSONField()
else:
development_plan = JSONField() # Custom JSON field for SQLite

class Meta:
table_name = 'development_planning'

File: development_steps.py
from peewee import ForeignKeyField, AutoField, TextField, IntegerField, CharField
from database.config import DATABASE_TYPE
from database.models.components.base_models import BaseModel
from database.models.app import App
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField

class DevelopmentSteps(BaseModel):
id = AutoField() # This will serve as the primary key
app = ForeignKeyField(App, on_delete='CASCADE')
prompt_path = TextField(null=True)
llm_req_num = IntegerField(null=True)
token_limit_exception_raised = TextField(null=True)

if DATABASE_TYPE == 'postgres':
messages = BinaryJSONField(null=True)
llm_response = BinaryJSONField(null=False)
prompt_data = BinaryJSONField(null=True)
else:
messages = JSONField(null=True) # Custom JSON field for SQLite
llm_response = JSONField(null=False) # Custom JSON field for SQLite
prompt_data = JSONField(null=True)

previous_step = ForeignKeyField('self', null=True, column_name='previous_step')


high_level_step = CharField(null=True)

class Meta:
table_name = 'development_steps'
indexes = (
(('app', 'previous_step', 'high_level_step'), True),
)

File: environment_setup.py
from database.models.components.progress_step import ProgressStep

class EnvironmentSetup(ProgressStep):
class Meta:
table_name = 'environment_setup'

File: file_snapshot.py
import logging

from peewee import ForeignKeyField, BlobField

from database.models.components.base_models import BaseModel


from database.models.development_steps import DevelopmentSteps
from database.models.app import App
from database.models.files import File

log = logging.getLogger(__name__)

class SmartBlobField(BlobField):
"""
A binary blob field that can also accept/return utf-8 strings.

This is a temporary workaround for the fact that we're passing either binary
or string contents to the database. Once this is cleaned up, we should only
accept binary content and explicitly convert from/to strings as needed.
"""

def db_value(self, value):


if isinstance(value, str):
log.warning("FileSnapshot content is a string, expected bytes, working around it.")
value = value.encode("utf-8")
return super().db_value(value)

def python_value(self, value):


val = bytes(super().python_value(value))
try:
return val.decode("utf-8")
except UnicodeDecodeError:
return val

class FileSnapshot(BaseModel):
app = ForeignKeyField(App, on_delete='CASCADE')
development_step = ForeignKeyField(DevelopmentSteps, backref='files', on_delete='CASCADE')
file = ForeignKeyField(File, on_delete='CASCADE', null=True)
content = SmartBlobField()

class Meta:
table_name = 'file_snapshot'
indexes = (
(('development_step', 'file'), True),
)

File: files.py
from pathlib import Path
from os.path import commonprefix, join, sep
from peewee import AutoField, CharField, TextField, ForeignKeyField

from database.models.components.base_models import BaseModel


from database.models.app import App

class File(BaseModel):
id = AutoField()
app = ForeignKeyField(App, on_delete='CASCADE')
name = CharField()
path = CharField()
File: project_description.py
from peewee import TextField
from database.models.components.progress_step import ProgressStep

class ProjectDescription(ProgressStep):
prompt = TextField()
summary = TextField()

class Meta:
table_name = 'project_description'

File: user.py
from peewee import CharField

from database.models.components.base_models import BaseModel

class User(BaseModel):
email = CharField(unique=True)
password = CharField()

File: user_apps.py
from peewee import AutoField, CharField, ForeignKeyField

from database.models.components.base_models import BaseModel


from database.models.app import App
from database.models.user import User

class UserApps(BaseModel):
id = AutoField()
app = ForeignKeyField(App, on_delete='CASCADE')
user = ForeignKeyField(User, on_delete='CASCADE')
workspace = CharField(null=True)

class Meta:
table_name = 'user_apps'
indexes = (
(('app', 'user'), True),
)

File: user_inputs.py
from peewee import AutoField, ForeignKeyField, TextField, CharField

from database.models.components.base_models import BaseModel


from database.models.app import App

class UserInputs(BaseModel):
id = AutoField()
app = ForeignKeyField(App, on_delete='CASCADE')
query = TextField(null=True)
user_input = TextField(null=True)
hint = TextField(null=True)
previous_step = ForeignKeyField('self', null=True, column_name='previous_step')
high_level_step = CharField(null=True)

class Meta:
table_name = 'user_inputs'
indexes = (
File: user_tasks.py
from database.config import DATABASE_TYPE
from database.models.components.progress_step import ProgressStep
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField

class UserTasks(ProgressStep):
if DATABASE_TYPE == 'postgres':
user_tasks = BinaryJSONField()
else:
user_tasks = JSONField() # Custom JSON field for SQLite

class Meta:
table_name = 'user_tasks'

File: __init__.py

File: base_models.py
from peewee import Model, UUIDField, DateTimeField
from datetime import datetime
from uuid import uuid4

from database.config import DATABASE_TYPE


from database.connection.postgres import get_postgres_database
from database.connection.sqlite import get_sqlite_database

# Establish connection to the database


if DATABASE_TYPE == "postgres":
database = get_postgres_database()
else:
database = get_sqlite_database()

class BaseModel(Model):
id = UUIDField(primary_key=True, default=uuid4)
created_at = DateTimeField(default=datetime.now)
updated_at = DateTimeField(default=datetime.now)

class Meta:
database = database
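
Because every model inherits from BaseModel, all tables share this database object and can be created in one call; a minimal sketch assuming the models shown elsewhere in this document (the repository's actual helper is create_tables in database/database.py):

from database.models.user import User
from database.models.app import App
from database.models.files import File

def create_tables_sketch():
    # peewee creates any missing tables for the given model list
    database.create_tables([User, App, File])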

File: progress_step.py
from peewee import ForeignKeyField, CharField, BooleanField, DateTimeField
from database.config import DATABASE_TYPE
from database.models.components.base_models import BaseModel
from database.models.app import App
from database.models.components.sqlite_middlewares import JSONField
from playhouse.postgres_ext import BinaryJSONField

class ProgressStep(BaseModel):
app = ForeignKeyField(App, primary_key=True, on_delete='CASCADE')
step = CharField()

if DATABASE_TYPE == 'postgres':
app_data = BinaryJSONField()
data = BinaryJSONField(null=True)
messages = BinaryJSONField(null=True)
File: Agent.py
class Agent:
def __init__(self, role, project):
self.role = role
self.project = project

File: AgentConvo.py
import json
import re
import subprocess
import uuid
from os.path import sep

from utils.style import color_yellow, color_yellow_bold, color_red_bold


from database.database import save_development_step
from helpers.exceptions import TokenLimitError, ApiError
from utils.function_calling import parse_agent_response, FunctionCallSet
from utils.llm_connection import create_gpt_chat_completion
from utils.utils import get_prompt, get_sys_message, capitalize_first_word_with_underscores
from logger.logger import logger
from prompts.prompts import ask_user
from const.llm import END_RESPONSE
from helpers.cli import running_processes
from utils.telemetry import telemetry

class AgentConvo:
"""
Represents a conversation with an agent.

Args:
agent: An instance of the agent participating in the conversation.
"""

def __init__(self, agent, temperature: float = 0.7):


# [{'role': 'system'|'user'|'assistant', 'content': ''}, ...]
self.messages: list[dict] = []
self.branches = {}
self.log_to_user = True
self.agent = agent
self.high_level_step = self.agent.project.current_step
self.temperature = temperature

# add system message


system_message = get_sys_message(self.agent.role, self.agent.project.args)
logger.info('\n>>>>>>>>>> System Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>',
system_message['content'])
self.messages.append(system_message)

def send_message(self, prompt_path=None, prompt_data=None, function_calls: FunctionCallSet = None, should_log_message=True):
"""
Sends a message in the conversation.

Args:
prompt_path: The path to a prompt.
prompt_data: Data associated with the prompt.
function_calls: Optional function calls to be included in the message.
should_log_message: Flag if final response should be logged.
Returns:
The response from the agent.
"""
# craft message
File: Debugger.py
import platform
import uuid
import re
import traceback

from const.code_execution import MAX_COMMAND_DEBUG_TRIES, MAX_RECURSION_LAYER


from const.function_calls import DEBUG_STEPS_BREAKDOWN
from const.messages import AFFIRMATIVE_ANSWERS, NEGATIVE_ANSWERS
from helpers.AgentConvo import AgentConvo
from helpers.exceptions import TokenLimitError
from helpers.exceptions import TooDeepRecursionError
from logger.logger import logger
from prompts.prompts import ask_user
from utils.exit import trace_code_event
from utils.print import print_task_progress

class Debugger:
def __init__(self, agent):
self.agent = agent
self.recursion_layer = 0

def debug(self, convo, command=None, user_input=None, issue_description=None, is_root_task=False,
ask_before_debug=False, task_steps=None, step_index=None):
"""
Debug a conversation.

Args:
convo (AgentConvo): The conversation object.
command (dict, optional): The command to debug. Default is None.
user_input (str, optional): User input for debugging. Default is None.
Should provide `command` or `user_input`.
issue_description (str, optional): Description of the issue to debug. Default is None.
ask_before_debug (bool, optional): True if we have to ask user for permission to start debugging.
task_steps (list, optional): The steps of the task to debug. Default is None.
step_index (int, optional): The index of the step to debug. Default is None.

Returns:
bool: True if debugging was successful, False otherwise.
"""
logger.info('Debugging %s', command)
self.recursion_layer += 1
self.agent.project.current_task.add_debugging_task(self.recursion_layer, command, user_input, issue_description)
if self.recursion_layer > MAX_RECURSION_LAYER:
self.recursion_layer = 0
# TooDeepRecursionError kills all debugging loops and goes back to the point where first debug was called
# it does not retry initial step but instead calls dev_help_needed()
raise TooDeepRecursionError()

function_uuid = str(uuid.uuid4())
convo.save_branch(function_uuid)
success = False

for i in range(MAX_COMMAND_DEBUG_TRIES):
if success:
break

if ask_before_debug or i > 0:
print('yes/no', type='button')
answer = ask_user(self.agent.project, 'Can I start debugging this issue [Y/n/error details]?', require_some_input=False)
if answer.lower() in NEGATIVE_ANSWERS:
self.recursion_layer -= 1
File: Project.py
import json
import os
from pathlib import Path
from typing import Tuple

import peewee
from playhouse.shortcuts import model_to_dict

from const.messages import CHECK_AND_CONTINUE, AFFIRMATIVE_ANSWERS, NEGATIVE_ANSWERS, STUCK_IN_LOOP


from utils.style import color_yellow_bold, color_cyan, color_white_bold, color_red_bold
from const.common import STEPS
from database.database import delete_unconnected_steps_from, delete_all_app_development_data, \
get_all_app_development_steps, delete_all_subsequent_steps, get_features_by_app_id
from const.ipc import MESSAGE_TYPE
from prompts.prompts import ask_user
from helpers.exceptions import TokenLimitError, GracefulExit
from utils.questionary import styled_text
from helpers.files import get_directory_contents, get_file_contents, clear_directory, update_file
from helpers.cli import build_directory_tree
from helpers.agents.TechLead import TechLead
from helpers.agents.Developer import Developer
from helpers.agents.Architect import Architect
from helpers.agents.ProductOwner import ProductOwner
from helpers.agents.TechnicalWriter import TechnicalWriter
from helpers.agents.SpecWriter import SpecWriter

from database.models.development_steps import DevelopmentSteps


from database.models.file_snapshot import FileSnapshot
from database.models.files import File
from logger.logger import logger
from utils.dot_gpt_pilot import DotGptPilot
from utils.llm_connection import test_api_access
from utils.ignore import IgnoreMatcher

from utils.telemetry import telemetry


from utils.task import Task
from utils.utils import remove_lines_with_string

class Project:
def __init__(
self,
args,
*,
ipc_client_instance=None,
):
"""
Initialize a project.

Args:
args (dict): Project arguments - app_id, (app_type, name), user_id, email, password, step
name (str, optional): Project name. Default is None.
description (str, optional): Project description. Default is None.
user_stories (list, optional): List of user stories. Default is None.
user_tasks (list, optional): List of user tasks. Default is None.
architecture (str, optional): Project architecture. Default is None.
development_plan (str, optional): Development plan. Default is None.
current_step (str, optional): Current step in the project. Default is None.
"""
self.args = args
self.llm_req_num = 0
self.command_runs_count = 0
File: __init__.py

File: cli.py
import psutil
import subprocess
import os
import signal
import threading
import queue
import time
import platform
from typing import Dict, Union

from logger.logger import logger


from utils.style import color_green, color_red, color_yellow_bold
from utils.ignore import IgnoreMatcher
from database.database import save_command_run
from helpers.exceptions import TooDeepRecursionError
from helpers.exceptions import TokenLimitError
from helpers.exceptions import CommandFinishedEarly
from prompts.prompts import ask_user
from const.code_execution import MIN_COMMAND_RUN_TIME, MAX_COMMAND_RUN_TIME, MAX_COMMAND_OUTPUT_LENGTH
from const.messages import AFFIRMATIVE_ANSWERS, NEGATIVE_ANSWERS

interrupted = False

running_processes: Dict[str, tuple[str, int]] = {}
"""Maps the `command_id` passed to `execute_command()` to a (command, process ID) tuple."""

def enqueue_output(out, q):


for line in iter(out.readline, ''):
if interrupted: # Check if the flag is set
break
q.put(line)
out.close()

def run_command(command, root_path, q_stdout, q_stderr) -> subprocess.Popen:


"""
Execute a command in a subprocess.

Args:
command (str): The command to run.
root_path (str): The directory in which to run the command.
q_stdout (Queue): A queue to capture stdout.
q_stderr (Queue): A queue to capture stderr.

Returns:
subprocess.Popen: The subprocess object.
"""
logger.info(f'Running `{command}` on {platform.system()}')
if platform.system() == 'Windows': # Check the operating system
process = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
cwd=root_path
)
File: exceptions.py
import json

from const.llm import MAX_GPT_MODEL_TOKENS

class ApiKeyNotDefinedError(Exception):
def __init__(self, env_key: str):
self.env_key = env_key
super().__init__(f"API Key has not been configured: {env_key}")

class CommandFinishedEarly(Exception):
def __init__(self, message='Command finished before timeout. Handling early completion...'):
self.message = message
super().__init__(message)

class TokenLimitError(Exception):
def __init__(self, tokens_in_messages, max_tokens=MAX_GPT_MODEL_TOKENS):
self.tokens_in_messages = tokens_in_messages
self.max_tokens = max_tokens
super().__init__(f"Token limit error happened with {tokens_in_messages}/{max_tokens} tokens in messages!")

class TooDeepRecursionError(Exception):
def __init__(self, message='Recursion is too deep!'):
self.message = message
super().__init__(message)

class ApiError(Exception):
def __init__(self, message, response=None):
self.message = message
self.response = response
self.response_json = None
if response and hasattr(response, "text"):
try:
self.response_json = json.loads(response.text)
except Exception: # noqa
pass

super().__init__(message)

class GracefulExit(Exception):
def __init__(self, message='Graceful exit'):
self.message = message
super().__init__(message)

File: files.py
from pathlib import Path
import os
from typing import Optional, Union

from utils.style import color_green


from utils.ignore import IgnoreMatcher

def update_file(path: str, new_content: Union[str, bytes], project=None):


"""
Update file with the new content.
File: ipc.py
# ipc.py
import socket
import json
import time

from utils.utils import json_serial

class IPCClient:
def __init__(self, port):
self.ready = False
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Connecting to the external process...")
try:
client.connect(('localhost', int(port)))
self.client = client
print("Connected!")
except ConnectionRefusedError:
self.client = None
print("Connection refused, make sure you started the external process")

def handle_request(self, message_content):


print(f"Received request from the external process: {message_content}")
return message_content # For demonstration, we're just echoing back the content

def listen(self):
if self.client is None:
print("Not connected to the external process!")
return

while True:

data = b''
while True:
data = data + self.client.recv(512 * 1024)
try:
message = json.loads(data)
break
except json.JSONDecodeError:
# This means we still got an incomplete message, so
# we should continue to receive more data.
continue

if message['type'] == 'response':
# self.client.close()
return message['content']

def send(self, data):


serialized_data = json.dumps(data, default=json_serial)
data_length = len(serialized_data)
if self.client is not None:
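# Frame the message: a 4-byte big-endian length prefix, then the UTF-8 encoded JSON payload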
self.client.sendall(data_length.to_bytes(4, byteorder='big'))
self.client.sendall(serialized_data.encode('utf-8'))

File: test_AgentConvo.py
import builtins
import os.path
from dotenv import load_dotenv
from database.database import database
from const.function_calls import IMPLEMENT_TASK
from helpers.agents.Developer import Developer
from helpers.AgentConvo import AgentConvo
File: test_Debugger.py
import builtins
import json

import pytest
from unittest.mock import patch, MagicMock
from dotenv import load_dotenv

load_dotenv()
from utils.custom_print import get_custom_print
from helpers.agents.Developer import Developer
from helpers.AgentConvo import AgentConvo
from helpers.Debugger import Debugger
from helpers.test_Project import create_project
from test.mock_questionary import MockQuestionary

################## NOTE: this test needs to be run in debug with breakpoints ##################

@pytest.mark.uses_tokens
@patch('pilot.helpers.AgentConvo.get_saved_development_step')
@patch('pilot.helpers.AgentConvo.save_development_step')
@patch('utils.questionary.save_user_input')
@patch('helpers.cli.run_command')
@patch('helpers.cli.save_command_run')
# @patch('pilot.helpers.cli.execute_command', return_value=('', 'DONE', 0))
def test_debug(
# mock_execute_command,
mock_save_command, mock_run_command,
mock_save_input, mock_save_step, mock_get_saved_step):
# Given
builtins.print, ipc_client_instance = get_custom_print({})
project = create_project()
project.current_step = 'coding'
developer = Developer(project)
project.developer = developer
convo = AgentConvo(developer)
convo.load_branch = lambda x: None

debugger = Debugger(developer)
# TODO: mock agent.project.developer.execute_task

# convo.messages.append()
convo.construct_and_add_message_from_prompt('dev_ops/ran_command.prompt', {
'cli_response': '''
stderr:
```
node:internal/modules/cjs/loader:1080
throw err;
^

Error: Cannot find module 'mime'


Require stack:
- /workspace/chat_app/node_modules/send/index.js
- /workspace/chat_app/node_modules/express/lib/utils.js
- /workspace/chat_app/node_modules/express/lib/application.js
- /workspace/chat_app/node_modules/express/lib/express.js
- /workspace/chat_app/node_modules/express/index.js
- /workspace/chat_app/server.js
at Module._resolveFilename (node:internal/modules/cjs/loader:1077:15)
at Module._load (node:internal/modules/cjs/loader:922:27)
at Module.require (node:internal/modules/cjs/loader:1143:19)
at require (node:internal/modules/cjs/helpers:121:18)
File: test_Project.py
import os
import json
from pathlib import Path
import pytest
from unittest.mock import patch, MagicMock
from helpers.Project import Project

test_root = str(Path(__file__).parent.parent.parent / Path("workspace") / Path("gpt-pilot-test"))

def create_project():
os.environ["AUTOFIX_FILE_PATHS"] = "false"
project = Project({
'app_id': 'test-project',
'name': 'TestProject',
'app_type': ''
})
project.set_root_path(test_root)
project.app = 'test'
project.current_step = 'test'
return project

class TestProject:
@pytest.mark.parametrize('file_path, file_name, expected', [
('file.txt', 'file.txt', f'{test_root}/file.txt'),
('', 'file.txt', f'{test_root}/file.txt'),
('path/', 'file.txt', f'{test_root}/path/file.txt'),
('path/to/', 'file.txt', f'{test_root}/path/to/file.txt'),
('path/to/file.txt', 'file.txt', f'{test_root}/path/to/file.txt'),
('./path/to/file.txt', 'to/file.txt', f'{test_root}/path/to/file.txt'),
('./package.json', 'package.json', f'{test_root}/package.json'),
])
def test_get_full_path(self, file_path, file_name, expected):
# Given
project = create_project()

# When
relative_path, absolute_path = project.get_full_file_path(file_path, file_name)

# Then
assert absolute_path == str(Path(expected))

@pytest.mark.parametrize(
("file_path", "file_name", "expected_path", "expected_absolute_path"), [
('', '', '/', f'{test_root}/'),
('', '.', '/', f'{test_root}/'),
('', '.env', '/', f'{test_root}/.env'),
('', '~/', '/', f'{test_root}/'),
('', f'{test_root}/', '/', f'{test_root}/'),
('', f'{test_root}/folder1', '/folder1', f'{test_root}/folder1/'),
('', f'{test_root}/Folder With Space/', '/Folder With Space', f'{test_root}/Folder With Space/'),
('', 'server.js', '/', f'{test_root}/server.js'),
('', 'folder1', '/folder1', f'{test_root}/folder1/'),
('', 'folder1/folder2', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('', 'folder1/folder2/', '/folder1/folder2', f'{test_root}/folder1/folder2/'),
('', 'folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('', f'{test_root}/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('', f'{test_root}/Folder With Space/server.js', '/Folder With Space', f'{test_root}/Folder With Space/server.js'),
('', '~/folder1/folder2/server.js', '/folder1/folder2', f'{test_root}/folder1/folder2/server.js'),
('', './folder1/server.js', '/folder1', f'{test_root}/folder1/server.js'),
('.', '', '/', f'{test_root}/'),
('.', '.', '/', f'{test_root}/'),
File: test_cli.py
import platform
from unittest.mock import patch, MagicMock, call

import pytest

from helpers.cli import execute_command, terminate_process, run_command_until_success


from helpers.test_Project import create_project

@pytest.mark.xfail()
@patch("helpers.cli.os")
@patch("helpers.cli.subprocess")
def test_terminate_process_not_running(mock_subprocess, mock_os):
terminate_process(1234, 'not running')

mock_subprocess.run.assert_not_called()
mock_os.killpg.assert_not_called()

@patch("helpers.cli.MIN_COMMAND_RUN_TIME", create=True, new=100)


@patch('helpers.cli.run_command')
@patch("helpers.cli.terminate_process")
def test_execute_command_timeout_exit_code(mock_terminate_process, mock_run):
# Given
project = create_project()
command = 'cat'
timeout = 0.1
mock_process = MagicMock()
mock_process.poll.return_value = None
mock_process.pid = 1234
mock_run.return_value = mock_process

# When
cli_response, llm_response, exit_code = execute_command(project, command, timeout, force=True)

# Then
assert cli_response is not None
assert llm_response == 'DONE'
assert exit_code is not None
mock_terminate_process.assert_called_once_with(1234)

def mock_run_command(command, path, q, q_stderr):


q.put('hello')
mock_process = MagicMock()
mock_process.returncode = 0
mock_process.pid = 1234
return mock_process

@patch('helpers.cli.ask_user', return_value='')
@patch('helpers.cli.run_command')
@patch("helpers.cli.terminate_process")
def test_execute_command_enter(mock_terminate_process, mock_run, mock_ask):
# Given
project = create_project()
command = 'echo hello'
timeout = 1000
mock_run.side_effect = mock_run_command

# When
cli_response, llm_response, exit_code = execute_command(project, command, timeout)

# Then
File: AGENTS.md
Roles are defined in `const.common.ROLES`.
Each agent's role is described to the LLM by a prompt in `pilot/prompts/system_messages/{role}.prompt`
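
A hedged sketch of how a role's system prompt could be resolved from that path (the repository's actual helper is get_sys_message in utils/utils.py; this snippet is illustrative only):

from pathlib import Path

def load_role_prompt(role: str, prompts_dir: str = "pilot/prompts/system_messages") -> dict:
    # Read the role's prompt file and wrap it as an OpenAI-style system message
    content = Path(prompts_dir, f"{role}.prompt").read_text(encoding="utf-8")
    return {"role": "system", "content": content}

architect_system_message = load_role_prompt("architect")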

## Product Owner
`project_description`, `user_stories`, `user_tasks`

- Talk to client, ask detailed questions about what client wants
- Give specifications to dev team

## Architect
`architecture`

- Scripts: Node.js, MongoDB, PeeWee ORM
- Testing: Node.js -> Jest, Python -> pytest, E2E -> Cypress **(TODO - BDD?)**
- Frontend: Bootstrap, vanilla Javascript **(TODO - TypeScript, Material/Styled, React/Vue/other?)**
- Other: cronjob, Socket.io

TODO:
- README.md
- .gitignore
- .editorconfig
- LICENSE
- CI/CD
- IaC, Dockerfile

## Tech Lead
`development_planning`

- Break down the project into smaller tasks for devs.
- Specify each task as clearly as possible:
  - Description
  - "Programmatic goal" which determines whether the task can be marked as done,
    e.g. "the server needs to start on port 3000 and accept an API request to
    `http://localhost:3000/ping`, returning status code 200"
  - "User-review goal",
    e.g. "run `npm run start`, open `http://localhost:3000/ping` and see "Hello World" on the screen"

## Dev Ops
`environment_setup`

**TODO: no prompt**

`debug` functions: `run_command`, `implement_changes`

## Developer (full_stack_developer)
`create_scripts`, `coding`

- Implement tasks assigned by tech lead
- Modular code, TDD
- Tasks provided as "programmatic goals" **(TODO: consider BDD)**

## Code Monkey
`create_scripts`, `coding`, `implement_changes`

`implement_changes` functions: `save_files`

- Implement tasks assigned by tech lead

File: Architect.py
from utils.utils import step_already_finished
from helpers.Agent import Agent
import json

from utils.style import color_green_bold, color_yellow_bold


from const.function_calls import ARCHITECTURE
import platform

from utils.utils import should_execute_step, generate_app_data


from database.database import save_progress, get_progress_steps
from logger.logger import logger
from helpers.AgentConvo import AgentConvo
from prompts.prompts import ask_user
from templates import PROJECT_TEMPLATES

ARCHITECTURE_STEP = 'architecture'
WARN_SYSTEM_DEPS = ["docker", "kubernetes", "microservices"]
WARN_FRAMEWORKS = ["react", "react.js", "next.js", "vue", "vue.js", "svelte", "angular"]
WARN_FRAMEWORKS_URL = "https://github.com/Pythagora-io/gpt-pilot/wiki/Using-GPT-Pilot-with-frontend-frameworks"

class Architect(Agent):
def __init__(self, project):
super().__init__('architect', project)
self.convo_architecture = None

def get_architecture(self):
print(json.dumps({
"project_stage": "architecture"
}), type='info')

self.project.current_step = ARCHITECTURE_STEP

# If this app_id already did this step, just get all data from DB and don't ask user again
step = get_progress_steps(self.project.args['app_id'], ARCHITECTURE_STEP)
if step and not should_execute_step(self.project.args['step'], ARCHITECTURE_STEP):
step_already_finished(self.project.args, step)
self.project.architecture = None
self.project.system_dependencies = None
self.project.package_dependencies = None
self.project.project_template = None
db_data = step["architecture"]
if db_data:
if isinstance(db_data, dict):
self.project.architecture = db_data["architecture"]
self.project.system_dependencies = db_data["system_dependencies"]
self.project.package_dependencies = db_data["package_dependencies"]
self.project.project_template = db_data.get("project_template")
elif isinstance(db_data, list):
self.project.architecture = ""
self.project.system_dependencies = [
{
"name": dep,
"description": "",
"test": "",
"required_locally": False
} for dep in db_data
]
self.project.package_dependencies = []
self.project.project_template = None
return
File: CodeMonkey.py
import os.path
import re
from typing import Optional
from traceback import format_exc
from difflib import unified_diff

from helpers.AgentConvo import AgentConvo


from helpers.Agent import Agent
from helpers.files import get_file_contents
from const.function_calls import GET_FILE_TO_MODIFY, REVIEW_CHANGES
from logger.logger import logger

from utils.exit import trace_code_event


from utils.telemetry import telemetry

# Constant for indicating missing new line at the end of a file in a unified diff
NO_EOL = "\ No newline at end of file"

# Regular expression pattern for matching hunk headers


PATCH_HEADER_PATTERN = re.compile(r"^@@ -(\d+),?(\d+)? \+(\d+),?(\d+)? @@")

MAX_REVIEW_RETRIES = 3

class CodeMonkey(Agent):
save_dev_steps = True

def __init__(self, project):


super().__init__('code_monkey', project)

def get_original_file(
self,
code_changes_description: str,
step: dict[str, str],
files: list[dict],
) -> tuple[str, str]:
"""
Get the original file content and name.

:param code_changes_description: description of the code changes


:param step: information about the step being implemented
:param files: list of files to send to the LLM
:return: tuple of (file_name, file_content)
"""
# If we're called as a result of debugging, we don't have the name/path of the file
# to modify so we need to figure that out first.
if 'path' not in step or 'name' not in step:
file_to_change = self.identify_file_to_change(code_changes_description, files)
step['path'] = os.path.dirname(file_to_change)
step['name'] = os.path.basename(file_to_change)

rel_path, abs_path = self.project.get_full_file_path(step['path'], step['name'])

for f in files:
# Take into account that step path might start with "/"
if (f['path'] == step['path'] or (os.path.sep + f['path'] == step['path'])) and f['name'] == step['name'] and f['content']:
file_content = f['content']
break
else:
# If we didn't have the match (because of incorrect or double use of path separators or similar), fallback to directly loading the file
try:
file_content = get_file_contents(abs_path, self.project.root_path)['content']
if isinstance(file_content, bytes):
File: Developer.py
import platform
import uuid
import re
import json

from const.messages import WHEN_USER_DONE, AFFIRMATIVE_ANSWERS, NEGATIVE_ANSWERS, STUCK_IN_LOOP, NONE_OF_THESE


from utils.exit import trace_code_event
from utils.style import (
color_green,
color_green_bold,
color_red,
color_red_bold,
color_yellow_bold,
color_cyan_bold,
color_white_bold
)
from helpers.exceptions import TokenLimitError
from const.code_execution import MAX_COMMAND_DEBUG_TRIES, MAX_QUESTIONS_FOR_BUG_REPORT
from helpers.exceptions import TooDeepRecursionError
from helpers.Debugger import Debugger
from utils.questionary import styled_text
from utils.utils import step_already_finished
from helpers.agents.CodeMonkey import CodeMonkey
from logger.logger import logger
from helpers.Agent import Agent
from helpers.AgentConvo import AgentConvo
from utils.utils import should_execute_step, array_of_objects_to_string, generate_app_data
from helpers.cli import run_command_until_success, execute_command_and_check_cli_response
from const.function_calls import (EXECUTE_COMMANDS, GET_TEST_TYPE, IMPLEMENT_TASK, COMMAND_TO_RUN,
ALTERNATIVE_SOLUTIONS, GET_BUG_REPORT_MISSING_DATA)
from database.database import save_progress, get_progress_steps, update_app_status
from utils.telemetry import telemetry
from prompts.prompts import ask_user
from utils.print import print_task_progress, print_step_progress

ENVIRONMENT_SETUP_STEP = 'environment_setup'

class Developer(Agent):
def __init__(self, project):
super().__init__('full_stack_developer', project)
self.review_count = 0
self.run_command = None
self.save_dev_steps = True
self.debugger = Debugger(self)

def start_coding(self, task_source):


print('Starting development...', type='verbose', category='agent:developer')
if not self.project.finished:
self.project.current_step = 'coding'
update_app_status(self.project.args['app_id'], self.project.current_step)

# DEVELOPMENT
if not self.project.skip_steps:
logger.info("Starting to create the actual code...")

total_tasks = len(self.project.development_plan)
progress_thresholds = [50] # Percentages of progress when documentation is created
documented_thresholds = set()

for i, dev_task in enumerate(self.project.development_plan):


# don't create documentation for features
File: ProductOwner.py
import json
from utils.style import color_green_bold
from helpers.AgentConvo import AgentConvo
from helpers.Agent import Agent
from logger.logger import logger
from database.database import get_app, save_progress, save_app, get_progress_steps
from utils.utils import should_execute_step, generate_app_data, step_already_finished, clean_filename
from utils.files import setup_workspace
from prompts.prompts import ask_for_app_type, ask_for_main_app_definition, ask_user
from const.llm import END_RESPONSE
from const.messages import MAX_PROJECT_NAME_LENGTH
from const.common import EXAMPLE_PROJECT_DESCRIPTION

PROJECT_DESCRIPTION_STEP = 'project_description'
USER_STORIES_STEP = 'user_stories'
USER_TASKS_STEP = 'user_tasks'

class ProductOwner(Agent):
def __init__(self, project):
super().__init__('product_owner', project)

def get_project_description(self, spec_writer):


print(json.dumps({
"project_stage": "project_description"
}), type='info', category='agent:product-owner')

self.project.app = get_app(self.project.args['app_id'], error_if_not_found=False)

# If this app_id already did this step, just get all data from DB and don't ask user again
if self.project.app is not None:
step = get_progress_steps(self.project.args['app_id'], PROJECT_DESCRIPTION_STEP)
if step and not should_execute_step(self.project.args['step'], PROJECT_DESCRIPTION_STEP):
step_already_finished(self.project.args, step)
self.project.set_root_path(setup_workspace(self.project.args))
self.project.project_description = step['summary']
self.project.project_description_messages = step['messages']
self.project.main_prompt = step['prompt']
return

# PROJECT DESCRIPTION
self.project.current_step = PROJECT_DESCRIPTION_STEP
is_example_project = False

if 'app_type' not in self.project.args:


self.project.args['app_type'] = ask_for_app_type()
if 'name' not in self.project.args:
while True:
question = 'What is the project name?'
print(question, type='ipc')
print('start an example project', type='button')
project_name = ask_user(self.project, question)
if len(project_name) <= MAX_PROJECT_NAME_LENGTH:
break
else:
print(f"Hold your horses cowboy! Please, give project NAME with max {MAX_PROJECT_NAME_LENGTH} characters.")

if project_name.lower() == 'start an example project':


is_example_project = True
project_name = 'Example Project'

self.project.args['name'] = clean_filename(project_name)
File: SpecWriter.py
from helpers.AgentConvo import AgentConvo
from helpers.Agent import Agent
from utils.files import count_lines_of_code
from utils.style import color_green_bold, color_yellow_bold
from prompts.prompts import ask_user
from const.messages import AFFIRMATIVE_ANSWERS
from utils.exit import trace_code_event

INITIAL_PROJECT_HOWTO_URL = "https://github.com/Pythagora-io/gpt-pilot/wiki/How-to-write-a-good-initial-project-description"

class SpecWriter(Agent):
def __init__(self, project):
super().__init__('spec_writer', project)
self.save_dev_steps = True

def analyze_project(self, initial_prompt):


msg = (
"Your project description seems a bit short. "
"The better you can describe the project, the better GPT Pilot will understand what you'd like to build.\n\n"
f"Here are some tips on how to better describe the project: {INITIAL_PROJECT_HOWTO_URL}\n\n"
)
print(color_yellow_bold(msg))
print(color_green_bold("Let's start by refining your project idea:"))

convo = AgentConvo(self)
convo.construct_and_add_message_from_prompt('spec_writer/ask_questions.prompt', {})

num_questions = 0
skipped = False
user_response = initial_prompt
while True:
llm_response = convo.send_message('utils/python_string.prompt', {
"content": user_response,
})
if not llm_response:
continue

num_questions += 1
llm_response = llm_response.strip()
if len(llm_response) > 500:
print('continue', type='button')
user_response = ask_user(
self.project,
"Can we proceed with this project description? If so, just press ENTER. Otherwise, please tell me what's missing or what you'd like
hint="Does this sound good, and does it capture all the information about your project?",
require_some_input=False
)
if user_response:
user_response = user_response.strip()
if user_response.lower() in AFFIRMATIVE_ANSWERS + ['continue']:
break
else:
print('skip questions', type='button')
user_response = ask_user(self.project, llm_response)
if user_response and user_response.lower() == 'skip questions':
llm_response = convo.send_message(
'utils/python_string.prompt',
{
'content': 'This is enough clarification, you have all the information. Please output the spec now, without additional comments or questions.'
}
)
File: TechLead.py
from utils.utils import step_already_finished
from helpers.Agent import Agent
from utils.style import color_green_bold
from helpers.AgentConvo import AgentConvo

from utils.utils import should_execute_step, generate_app_data


from database.database import save_progress, get_progress_steps, save_feature, get_features_by_app_id
from logger.logger import logger
from const.function_calls import DEVELOPMENT_PLAN
from templates import apply_project_template
from utils.exit import trace_code_event

DEVELOPMENT_PLANNING_STEP = 'development_planning'

class TechLead(Agent):
def __init__(self, project):
super().__init__('tech_lead', project)
self.save_dev_steps = False
self.convo_feature_plan = AgentConvo(self)

def create_development_plan(self):
self.project.current_step = DEVELOPMENT_PLANNING_STEP
self.convo_development_plan = AgentConvo(self)

# If this app_id already did this step, just get all data from DB and don't ask user again
step = get_progress_steps(self.project.args['app_id'], DEVELOPMENT_PLANNING_STEP)
if step and not should_execute_step(self.project.args['step'], DEVELOPMENT_PLANNING_STEP):
step_already_finished(self.project.args, step)
self.project.development_plan = step['development_plan']
return

existing_summary = apply_project_template(self.project)

# DEVELOPMENT PLANNING
print(color_green_bold("Starting to create the action plan for development...\n"), category='agent:tech-lead')
logger.info("Starting to create the action plan for development...")

llm_response = self.convo_development_plan.send_message('development/plan.prompt',
{
"name": self.project.args['name'],
"app_type": self.project.args['app_type'],
"app_summary": self.project.project_description,
"user_stories": self.project.user_stories,
"user_tasks": self.project.user_tasks,
"architecture": self.project.architecture,
"technologies": self.project.system_dependencies + self.project.package_dependencies,
"existing_summary": existing_summary,
"files": self.project.get_all_coded_files(),
"task_type": 'app',
}, DEVELOPMENT_PLAN)
self.project.development_plan = llm_response['plan']

logger.info('Plan for development is created.')

save_progress(self.project.args['app_id'], self.project.current_step, {
"development_plan": self.project.development_plan, "app_data": generate_app_data(self.project.args)
})

return

def create_feature_plan(self, feature_description):


File: TechnicalWriter.py
from const.function_calls import GET_DOCUMENTATION_FILE
from helpers.AgentConvo import AgentConvo
from helpers.Agent import Agent
from utils.files import count_lines_of_code
from utils.style import color_green_bold, color_green

class TechnicalWriter(Agent):
def __init__(self, project):
super().__init__('technical_writer', project)
self.save_dev_steps = True

def document_project(self, percent):


files = self.project.get_all_coded_files()
print(f'{color_green_bold("CONGRATULATIONS!!!")}', category='success')
print(f'You reached {color_green(str(percent) + "%")} of your project generation!\n\n')
print('For now, you have created:\n')
print(f'{color_green(len(files))} files\n')
print(f'{color_green(count_lines_of_code(files))} lines of code\n\n')
print('Before continuing, GPT Pilot will create some documentation for the project...\n')
print('', type='verbose', category='agent:tech-writer')
self.create_license()
self.create_readme()
self.create_api_documentation()

def create_license(self):
# check if LICENSE file exists and if not create one. We want to create it only once.
return

def create_readme(self):
print(color_green('Creating README.md'))
convo = AgentConvo(self)

llm_response = convo.send_message('documentation/create_readme.prompt', {
"name": self.project.args['name'],
"app_type": self.project.args['app_type'],
"app_summary": self.project.project_description,
"user_stories": self.project.user_stories,
"user_tasks": self.project.user_tasks,
"directory_tree": self.project.get_directory_tree(True),
"files": self.project.get_all_coded_files(),
"previous_features": self.project.previous_features,
"current_feature": self.project.current_feature,
}, GET_DOCUMENTATION_FILE)

self.project.save_file(llm_response)
return convo

def create_api_documentation(self):
# create API documentation
return

File: __init__.py
from .Architect import Architect, ARCHITECTURE_STEP
from .Developer import Developer, ENVIRONMENT_SETUP_STEP
from .TechLead import TechLead

File: test_Developer.py
import builtins
import json
import os
File: test_TechLead.py
import builtins
import os
import pytest
from unittest.mock import patch
from dotenv import load_dotenv
load_dotenv()

from main import get_custom_print


from helpers.agents.TechLead import TechLead, DEVELOPMENT_PLANNING_STEP
from helpers.Project import Project
from test.test_utils import assert_non_empty_string
from test.mock_questionary import MockQuestionary

class TestTechLead:
    def setup_method(self):
        builtins.print, ipc_client_instance = get_custom_print({})

        name = 'TestTechLead'
        self.project = Project({
            'app_id': 'test-tech-lead',
            'name': name,
            'app_type': ''
        },
            name=name,
            architecture=[],
            user_stories=[]
        )

        self.project.set_root_path(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                                                 '../../../workspace/TestTechLead')))
        self.project.technologies = []
        self.project.project_description = '''
The project entails creating a web-based chat application, tentatively named "chat_app."
This application does not require user authentication or chat history storage.
It solely supports one-on-one messaging, excluding group chats or multimedia sharing like photos, videos, or files.
Additionally, there are no specific requirements for real-time functionality, like live typing indicators or read receipts.
The development of this application will strictly follow a monolithic structure, avoiding the use of microservices, as per the client's d
The development process will include the creation of user stories and tasks, based on detailed discussions with the client.
'''
        self.project.user_stories = [
            'User Story 1: As a user, I can access the web-based "chat_app" directly without needing to authenticate or log in. Do you want to
            'User Story 2: As a user, I can start one-on-one conversations with another user on the "chat_app". Do you want to add anything e
            'User Story 3: As a user, I can send and receive messages in real-time within my one-on-one conversation on the "chat_app". Do
            'User Story 4: As a user, I do not need to worry about deleting or storing my chats because the "chat_app" does not store chat his
            'User Story 5: As a user, I will only be able to send text messages, as the "chat_app" does not support any kind of multimedia sha
            'User Story 6: As a user, I will not see any live typing indicators or read receipts since the "chat_app" does not provide any addition
        ]
        self.project.architecture = ['Node.js', 'Socket.io', 'Bootstrap', 'JavaScript', 'HTML5', 'CSS3']
        self.project.current_step = DEVELOPMENT_PLANNING_STEP

    @pytest.mark.uses_tokens
    @patch('helpers.AgentConvo.get_saved_development_step', return_value=None)
    @patch('helpers.agents.TechLead.save_progress', return_value=None)
    @patch('helpers.agents.TechLead.get_progress_steps', return_value=None)
    # Note: @patch decorators are applied bottom-up, so the first mock argument
    # corresponds to the bottom-most decorator (get_progress_steps).
    def test_create_development_plan(self, mock_get_progress_steps, mock_save_progress, mock_get_saved_step):
        self.techLead = TechLead(self.project)

        mock_questionary = MockQuestionary(['', '', 'no'])

        with patch('utils.questionary.questionary', mock_questionary):
            # When
File: __init__.py

File: logger.py
import os
import re
import logging

def setup_logger():
    # Create a custom format for your logs
    log_format = "%(asctime)s [%(filename)s:%(lineno)s - %(funcName)20s() ] %(levelname)s: %(message)s"

    # Create a log handler for file output
    file_handler = logging.FileHandler(
        filename=os.path.join(os.path.dirname(__file__), 'debug.log'),
        mode='w',
        encoding='utf-8',
    )

    # Apply the custom format to the handler
    formatter = logging.Formatter(log_format)
    file_handler.setFormatter(formatter)
    # file_handler.addFilter(lambda record: record.levelno <= logging.INFO)
    file_handler.addFilter(filter_sensitive_fields)

    # Create a logger and add the handler
    logger = logging.getLogger()
    logger.addHandler(file_handler)

    if os.getenv('DEBUG') == 'true':
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    return logger

sensitive_fields = ['--api-key', 'password']

def filter_sensitive_fields(record):
    # TODO: also remove escape sequences for colors, bold etc
    if isinstance(record.args, dict):  # check if args is a dictionary
        args = record.args.copy()
        for field in sensitive_fields:
            if field in args:
                args[field] = '*****'
        record.args = args

    elif isinstance(record.args, tuple):  # check if args is a tuple
        args_list = list(record.args)
        # Convert the tuple to a list and replace sensitive fields
        args_list = ['*****' if arg in sensitive_fields else arg for arg in args_list]
        record.args = tuple(args_list)

    # Remove ANSI escape sequences - colours & bold
    # Peewee passes a tuple as record.msg
    if isinstance(record.msg, str):
        record.msg = re.sub(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])', '', record.msg)

    return True
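
As a quick illustration of how the sensitive-field filter behaves (a hedged sketch, not part of the repository): any dict or tuple argument that matches one of the `sensitive_fields` entries is masked before the record reaches debug.log.

```python
# Hypothetical usage sketch of setup_logger / filter_sensitive_fields.
# Assumes it runs from a module that can import logger.logger.
from logger.logger import setup_logger

log = setup_logger()
# The '--api-key' value is replaced with '*****' by filter_sensitive_fields
# before the record is written to debug.log.
log.info("CLI arguments: %s", {'--api-key': 'sk-secret', 'app_id': '42'})
```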
File: __init__.py

File: prompts.py
# prompts/prompts.py
from utils.style import color_white_bold
from const import common
from const.llm import MAX_QUESTIONS, END_RESPONSE
from utils.llm_connection import create_gpt_chat_completion
from utils.utils import get_sys_message, get_prompt
from utils.questionary import styled_select, styled_text
from logger.logger import logger
from helpers.exceptions import ApiError

def ask_for_app_type():
    # NOTE: currently short-circuited to always return 'App'; the selection
    # logic below is kept but unreachable.
    return 'App'
    answer = styled_select(
        "What type of app do you want to build?",
        choices=common.APP_TYPES
    )

    if answer is None:
        print("Exiting application.")
        exit(0)

    while 'unavailable' in answer:
        print("Sorry, that option is not available.")
        answer = styled_select(
            "What type of app do you want to build?",
            choices=common.APP_TYPES
        )
        if answer is None:
            print("Exiting application.")
            exit(0)

    print("You chose: " + answer)
    logger.info(f"You chose: {answer}")
    return answer

def ask_for_main_app_definition(project):
    question = 'Describe your app in as much detail as possible.'
    print(question, type='ipc')
    description = ask_user(
        project,
        question
    )

    if description is None:
        print("No input provided!")
        return

    logger.info(f"Initial App description done: {description}")

    return description

def ask_user(project, question: str, require_some_input=True, hint: str = None, ignore_user_input_count: bool = False):
    while True:
        if hint is not None:
            print(color_white_bold(hint) + '\n')
File: test_prompts.py
from .prompts import get_prompt

def test_prompt_ran_command_None_exit():
    # When
    prompt = get_prompt('dev_ops/ran_command.prompt', {
        'cli_response': 'stdout:\n```\nsuccess\n```',
        'command': './scripts/run_tests',
        'additional_message': 'Some additional message\n',
        'exit_code': None
    })

    # Then
    assert prompt == '''
Some additional message

I ran the command `./scripts/run_tests`. The output was:

stdout:
```
success
```

Think about this output and not any output in previous messages. If the command was successfully executed, respond with `DONE

Do not respond with anything other than these two keywords.


'''.strip()

def test_prompt_ran_command_0_exit():
    # When
    prompt = get_prompt('dev_ops/ran_command.prompt', {
        'cli_response': 'stdout:\n```\nsuccess\n```',
        'command': './scripts/run_tests',
        'additional_message': 'Some additional message\n',
        'exit_code': 0
    })

    # Then
    assert prompt == '''
Some additional message

I ran the command `./scripts/run_tests`. The output was:

stdout:
```
success
```

Think about this output and not any output in previous messages. If the command was successfully executed, respond with `DONE

Do not respond with anything other than these two keywords.


'''.strip()

def test_parse_task_no_processes():
    # When
    prompt = get_prompt('development/parse_task.prompt', {
        'running_processes': {}
    })

    # Then
File: change_in_tech.prompt
Do you want to change any of the technologies listed?

File: technologies.prompt
You're designing the architecture and technical specifications for a new project.

If the project requirements call out for specific technology, use that. Otherwise, if working on a web app, prefer Node.js for the bac

Here are the details for the new project:


-----------------------------
{{ project_details }}
{{ features_list }}
-----------------------------

Based on these details, think step by step to design the architecture for the project and choose technologies to use in building it.

1. First, design and describe project architecture in general terms


2. Then, list any system dependencies that should be installed on the system prior to start of development. For each system depe
3. Finally, list any other 3rd party packages or libraries that will be used (that will be installed later using a package mana
4. {% if templates %}Optionally, choose a project starter template.{% else %}(for this project there are no available starter/boilerpla

{% if templates %}You have an option to use a project template that implements standard boilerplate/scaffolding so you can start f

If no project templates are a good match, don't pick any! It's better to start from scratch than to use a template that is not a good fit

Here are the available project templates:


{% for name, tpl in templates.items() %}
### {{ name }}
{{ tpl['description']}}

Contains:
{{ tpl['summary']}}
{% endfor %}{% endif %}

*IMPORTANT*: You must follow these rules while creating your project:

* You must only list *system* dependencies, ie. the ones that need to be installed (typically as admin) to set up the programming la
* If there are several popular options (such as Nginx or Apache for web server), pick one that would be more suitable for the app in
* DO NOT include text editors, IDEs, shells, OpenSSL, CLI tools such as git, AWS, or Stripe clients, or other utilities in your list. on
* If a dependency (such as database) has a cloud alternative or can be installed on another computer (ie. isn't required on this com

Output only your response in JSON format like in this example, without other commentary:
```json
{
"architecture": "Detailed description of the architecture of the application",
"system_dependencies": [
{
"name": "Node.js",
"description": "JavaScript runtime for building apps. This is required to be able to run the app you're building.",
"test": "node --version",
"required_locally": true
},
{
"name": "MongoDB",
"description": "NoSQL database. If you don't want to install MongoDB locally, you can use a cloud version such as MongoDB Atlas
"test": "mongosh --version",
"required_locally": false
},
...
],
"package_dependencies": [
{
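
The JSON example above is truncated in this capture. As a hedged illustration of how a caller might consume the architect's response (field names taken from the example; the repository's actual parsing code may differ):

```python
# Hypothetical sketch: parsing the architect's JSON response and checking
# each system dependency with its "test" command.
import json
import subprocess

response = '{"architecture": "...", "system_dependencies": [], "package_dependencies": []}'
spec = json.loads(response)

for dep in spec["system_dependencies"]:
    # Each dependency carries a shell command (e.g. "node --version") that
    # verifies whether it is already installed on this machine.
    result = subprocess.run(dep["test"], shell=True, capture_output=True)
    installed = (result.returncode == 0)
    print(f"{dep['name']}: {'installed' if installed else 'missing'} (required locally: {dep['required_locally']})")
```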
File: execution_order.prompt
All the steps will be executed in order in which you give them, so it is very important that you think about all steps before you start

File: features_list.prompt
{% if previous_features %}
Here is the list of features that were previously implemented on top of initial high level description of "{{ name }}":
```
{% for feature in previous_features %}
- {{ loop.index }}. {{ feature['summary'] }}
{% endfor %}
```

{% endif %}{% if current_feature %}Here is the feature that you are implementing right now:
```
{{ current_feature }}
```
{% endif %}

File: file_naming.prompt
When creating and naming new files, ensure the file naming (camelCase, kebab-case, underscore_case, etc) is consistent with th

File: file_size_limit.prompt
**IMPORTANT**
When you think about in which file should the new code go to, always try to make files as small as possible and put code in more s

File: files_list.prompt
{% if files|length > 0 %}Here are files that are currently implemented:
---START_OF_FILES---{% for file in files %}
**{{ file.path }}/{{ file.name }}** ({{ file.lines_of_code }} lines of code):
```
{{ file.content }}
```
{% endfor %}
---END_OF_FILES---
{% endif -%}
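
As an illustration of how a template like files_list.prompt is filled in (a hedged sketch using plain Jinja2; the repository presumably routes this through its own get_prompt helper, and the template text below is abbreviated from the file above):

```python
# Hypothetical sketch: rendering a files-list style template with Jinja2.
from jinja2 import Template

template_source = (
    "{% if files|length > 0 %}Here are files that are currently implemented:\n"
    "{% for file in files %}**{{ file.path }}/{{ file.name }}** "
    "({{ file.lines_of_code }} lines of code):\n```\n{{ file.content }}\n```\n{% endfor %}"
    "{% endif %}"
)
files = [
    {"path": "routes", "name": "authRoutes.js", "lines_of_code": 42, "content": "// ..."},
]
print(Template(template_source).render(files=files))
```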

File: human_intervention_explanation.prompt
**IMPORTANT**
You must not tell me to run a command in the database or anything OS related - only if some dependencies need to be installed. I
Avoid using "Human Intervention" if possible. You should NOT use "Human Intervention" for anything else than steps that you can

Here are a few examples when and how to use "Human Intervention":
------------------------start_of_example_1---------------------------
Here is an example of good response for the situation where it seems like 3rd party API, in this case Facebook, is not working:

* "Human Intervention"
"1. Check latest Facebook API documentation for updates on endpoints, parameters, or authentication.
2. Verify Facebook API key/authentication and request format to ensure they are current and correctly implemented.
3. Use REST client tools like Postman or cURL to directly test the Facebook API endpoints.
4. Check the Facebook API's status page for any reported downtime or service issues.
5. Try calling the Facebook API from a different environment to isolate the issue."
------------------------end_of_example_1---------------------------

------------------------start_of_example_2---------------------------
Here is an example of good response for the situation where the user needs to enable some settings in their Gmail account:

* "Human Intervention"
"To enable sending emails from your Node.js app via your Gmail, account, you need to do the following:
1. Log in to your Gmail account.
2. Go to 'Manage your Google Account' > Security.
File: list_running_processes.prompt
{% if running_processes -%}
Note that the following processes are already running:

{%- for key, data in running_processes.items() %}

command_id: {{ key }}
command: {{ data[0] }}
{%- endfor -%}
{%- endif -%}

File: logs_and_error_handling.prompt
**IMPORTANT**: Logging
Whenever you write code, make sure to log code execution so that when a developer looks at the CLI output, they can understand

**IMPORTANT**: Error handling


Whenever you write code, make sure to add error handling for all edge cases you can think of because this app will be used in pro

File: no_microservices.prompt
**IMPORTANT**
Do not use, create or suggest any microservices. Ensure that the architecture for this task remains strictly monolithic. Do not sugg

File: project_details.prompt
Here is a high level description of "{{ name }}":
```
{{ app_summary }}
```

{% if architecture %}Here is a short description of the project architecture:


{{ architecture }}

{% endif %}{% if user_stories %}Here are user stories that specify how users use "{{ name }}":
```
{% for story in user_stories %}
- {{ story }}
{% endfor %}
```

{% endif %}{% if user_tasks %}Here are user tasks that specify what users need to do to interact with "{{ name }}":
```
{% for task in user_tasks %}
- {{ task }}
{% endfor %}
```

{% endif %}{% if technologies %}Here are the technologies {% if task_type == 'feature' %}that were used{% else %}you need
{% for tech in technologies %}
* {{ tech["name"] }} - {{ tech["description"] }}{% endfor %}

{% endif %}

File: project_tasks.prompt
Before we go into the coding part, I want you to split the development process of creating this {{ task_type }} into smaller tasks so

Each task needs to be related only to the development of this {{ task_type }} and nothing else - once the {{ task_type }} is fully wor

**IMPORTANT**
As an experienced tech lead you always follow rules on how to create tasks. Dividing project into tasks is extremely important job

Now, based on the project details provided{% if task_type == 'feature' %} and new feature description{% endif %}, think task by ta
File: single_question.prompt
**IMPORTANT**
Here are the instructions for Asking Additional Questions:

Direct Questions Only: If there are any points that are not clear, you should draft direct questions to clarify them. Do not include an

Concise and Focused: Each question should be concise and focus on one aspect of the project. Do not merge multiple queries int

No Lead-ins or Conclusions: After receiving an answer to a question, proceed directly to the next question without adding any than

Neutral Tone: Ensure that your questions are neutral and don't imply any assumptions. The objective is to gather information, not

Examples:
Instead of "Thank you for that information. My next question is: Should A be bigger than B?", simply ask "Should A be bigger than
Instead of "Based on what you said earlier, do we need to prioritize X over Y?", just ask "Do we need to prioritize X over Y?".

Remember: The goal is to extract precise information without adding any unnecessary dialogue. Your questions should be straigh

I want your response to be only one question at a time. I will ask you again when I am ready for next question.

Ask maximum of {{MAX_QUESTIONS}} questions and after that I want you to respond with "{{END_RESPONSE}}".

If everything is clear before asking those {{MAX_QUESTIONS}} questions, you write the response in the following format:
"{{END_RESPONSE}}"

File: steps_list.prompt
{% if task_steps and step_index is not none -%}
The current task has been split into multiple steps, and each step is one of the following:
* `command` - command to run
* `save_file` - create or update a file
* `human_intervention` - if the human needs to do something

{% if step_index > 0 %}Here is the list of steps that have been executed:
{% for step in task_steps %}{% if loop.index0 < step_index %}
{%- if step.type in ['save_file', 'code_change', 'modify_file'] -%}
{%- set type_content = step.get(step.type, None) -%}
{%- if type_content -%}
{%- if 'content' in type_content -%}
{%- set _ = type_content.update({'content': '...' }) -%}
{%- endif -%}
{%- if 'code_change_description' in type_content -%}
{%- set _ = type_content.update({'code_change_description': '...' }) -%}
{%- endif -%}
{%- else -%}
{%- if 'code_change_description' in step -%}
{%- set _ = step.update({'code_change_description': '...' }) -%}
{%- endif -%}
{%- endif -%}
{%- endif -%}
{{ step }}
{% endif %}{% endfor %}{% endif %}
Here is the step you are currently debugging:
{{ task_steps[step_index] }}

{% if step_index < task_steps|length - 1 %}Here are steps that will be executed once debugging is done:
{% for step in task_steps %}{% if loop.index0 > step_index %}
{%- if step.type in ['save_file', 'code_change', 'modify_file'] -%}
{%- set type_content = step.get(step.type, None) -%}
{%- if type_content -%}
{%- if 'content' in type_content -%}
{%- set _ = type_content.update({'content': '...' }) -%}
{%- endif -%}
File: summary_instructions.prompt
**IMPORTANT**
Here are the instructions for Writing the Summary:

1. **Stick to the Facts**: Every sentence should be informative and relevant. Length is not an issue as long as all pertinent details

2. **Avoid Subjectivity and Mentioning The Client or Any External Entities**: Do not mention phrases like "the client wants" or "the

3. **Use Active Voice**: Use active rather than passive voice. For instance, "The project includes 5 subdomains" instead of "It was

4. **Be Direct**: Replace indirect phrases with direct statements. For example, instead of saying "The client said there might be a

5. **Prioritize Clarity**: Each statement should be clear and easy to understand. Refrain from using jargon unless it's widely recog

6. **Organize Information**: Group related items to ensure a coherent flow in your summary, making it more understandable for re

**Examples**:
- Instead of "The client expressed a preference for blue in our last meeting", write "The primary color is blue".
- Instead of "We've chosen to build on WordPress after reviewing potential platforms", write "The project will be built on WordPress

Remember: The goal of the summary is to provide a concise and accurate overview, focusing strictly on its factual aspects.

File: debug.prompt
{{ steps_list }}
{% if issue_description -%}
You wanted me to check this - `{{ issue_description }}` but there was a problem
{%- else -%}
Now, we need to debug this issue
{%- endif -%}
{% if command %} and we need to be able to execute `{{ command }}` successfully. {% endif %}.
{% if user_input %}I looked into this issue. I want you to take my findings as an important part of debugging this issue. Here are my fin
```
{{ user_input }}
```{% endif -%}
I want you to create a list of steps that are needed to debug this issue.

Each step can be either:

* `command` - command to run (must be able to run on a {{ os }} machine, assume current working directory is project root folder)
* `code_change` - step will change the code and you need to thoroughly describe what needs to be implemented. I will implement
* `human_intervention` - if you need the human to do something, use this type of step and explain in details what you want the hum

{{ execution_order }}

Also, make sure that at least the last step has `check_if_fixed` set to TRUE.

{{ file_size_limit }}

{# After this, you need to decide what to do next. You can rerun the command `{{ command }}` to check if the problem is fixed or ru

File: ran_command.prompt
{{ steps_list }}
{%- if task_steps and step_index is not none -%}
When trying to see if the command was run successfully, take into consideration steps that were previously executed and steps that w
{%- endif %}
{%- if additional_message %}{{ additional_message }}{% endif %}
I ran the command `{{ command }}`. The output was:
{#%- if error_response %}, it {{ error_response }}{% endif %#}
{{ cli_response }}

Think about this output and not any output in previous messages. If the command was successfully executed, respond with `DONE
File: bug_report_summary.prompt
You are working on this app:
```
{{ app_summary }}
```

{% if task_review_description %}
User was given instructions on how to test if the app is working correctly. Here are the instructions:
```
{{ task_review_description }}
```
{% endif %}

User came to you with this bug report:


```
{{ user_feedback }}
```
You didn't have enough information to start working on it, so you asked the user to provide more details.
Here are questions and answers that you asked the user:
```{% for row in questions_and_answers %}
Q: {{ row.question }}
A: {{ row.answer }}
{% endfor %}
```

Your job is now to write issue explanation that will be sent to developer, strictly following these rules:
- If there are some logs you MUST copy all logs in whole! Do not omit any logs! This is also true for code snippets or stack traces!
- Explanation must be very clear and factual, keep it as short as possible. Do not make assumptions, only state the facts.
- When writing explanation of the issue, it is good to cover all categories that you have information on. If you don't have information
- Omit all information that turns out to be irrelevant for this issue (e.g. after asking additional questions it turns out that first user me
- Write issue explanation as if you are talking directly to developer (in first person). Do not mention "user", talk as if you found the t
- Do not use any subjective thoughts, just facts.
- Write only issue explanation. Do not write any recap, summary or ideas how to solve the problem at the end. Do not write introdu
- Do not write any new code, only if something is provided by user.
- Have in mind that developer is smart and he will understand everything as long as you provide all information that you have and
- Have in mind that issue might not be related to your current development task.
- Do not use user instructions when creating issue explanation, that is only for you to understand the issue better.

File: context.prompt
{%- if directory_tree %}
The project directory tree looks like:

{{ directory_tree }}
{% endif -%}
{% if running_processes -%}
Note that the following processes are already running:

{%- for key, data in running_processes.items() %}

command_id: {{ key }}
command: {{ data[0] }}
{%- endfor -%}
{%- endif -%}

File: define_user_review_goal.prompt
How can a human user test if this task was completed successfully?

Please list actions, step by step, in order, that the user should take to verify the task. After each action, describe what the expected

**IMPORTANT**

Follow these important rules when compiling a list of actions the user will take:
File: error.prompt
I got the following error:
```
{{ error }}
```

Specify what needs to be done to fix this error either in the code or what command (or commands) needs to be run to fix this error

File: feature_plan.prompt
You are working in a software development agency and a project manager and software architect approach you telling you that yo

{{ project_details }}
{{ features_list }}
Here is directory tree that shows current folder structure of project:
```
{{ directory_tree }}
```

App and all its features are already finished and working.
{{ files_list }}

Finally, here is the description of new feature that needs to be added to {{ app_type }} "{{ name }}":
```
{{ feature_description }}
```

{{ project_tasks }}

File: feature_summary.prompt
You are working on a {{ app_type }} called "{{ name }}" and you need to create a summary for a new feature.
Here is a high level description of "{{ name }}":
```
{{ app_summary }}
```

After {{ app_type }} was successfully coded, user asked for an improvement. Here is what user asked:
```
{{ feature_description }}
```

Then a development plan for that feature was created and the feature was then broken down to smaller tasks so that it's easier for
```{% for task in development_tasks %}
- {{ loop.index }}. {{ task['description'] }}
{% endfor %}
```

Your goal is to create summary describing ONLY this feature in as little amount of sentences as possible. Try to keep it as short as

You are not making summary of project or tasks. Only summary of this feature.

{{summary_instructions}}

File: get_alternative_solutions.prompt
You are working on a {{ app_type }} called "{{ name }}" and you need to write code for the entire application.

Here is a high level description of "{{ name }}":


```
{{ app_summary }}
```
{{ features_list }}
Project architecture:
File: get_run_command.prompt
How can I run this app?
**IMPORTANT**
Do not reply with anything but the command with which I can run this app.
For example, if the command is "python app.py", then your response needs to be only `python app.py` without the `

Pay attention to file paths: if the command or argument is a file or folder from the project, use paths relative to the project root (for

If there is no command to run reply with empty response.


For example, if we only set up package.json and no other files are coded, there is no command to run, so respond with `` without the

File: get_snippet_from_comment.prompt
Here are previously coded files:
{% for file in previously_coded_files %}
**{{ file.path }}/{{ file.name }}**:
```
{{ file.content }}
```
{% endfor %}

And here are the new implementations of these same files:


{% for file in files_with_comments -%}
**{{ file.path }}/{{ file.name }}**:
```
{{ file.content }}
```
{%- endfor %}

Look at the following lines of code:


{% for file in files_with_comments -%}
{% if file.comments|length > 0 -%}
{%- for comment in file['comments'] -%}
`{{ comment }}` inside `{{ file['path'] }}/{{ file['name'] }}`
{% endfor %}
{%- endif -%}
{%- endfor %}

These are placeholders for already written lines of code that can be found in the previously coded files that represent lines of code

**IMPORTANT**
Make sure that you respond with all lines of code that are replaced by these comments, including all control structures, error hand
Under no circumstances should you ever leave any part of the code snippet unwritten. Every single line of code that exists in the p

File: identify_files_to_change.prompt
You're a senior software developer implementing changes in one file in the project.
Based on the provided instructions and full file list, identify the file
that needs to be modified.

{{ files_list }}

Instructions:
{{ code_changes_description }}

Output *ONLY* the file path, relative to project root, in a single Markdown code block,
without any comments or explanation, like this:
```
path/to/file
```

File: implement_changes.prompt
I need to modify file `{{ file_name }}` that currently looks like this:
File: iteration.prompt
You are working on a {{ app_type }} called "{{ name }}" and you need to write code for the entire application.

Here is a high level description of "{{ name }}":


```
{{ app_summary }}
```
{{ features_list }}
Project architecture:
{{ architecture }}

Here are the technologies that you need to use for this project:
{% for tech in technologies %}
* {{ tech["name"] }} - {{ tech["description"] }}{% endfor %}

{% if development_tasks and current_task %}


Development process of this app was split into smaller tasks. Here is the list of all tasks:
```{% for task in development_tasks %}
{{ loop.index }}. {{ task['description'] }}
{% endfor %}
```
You are currently working on task "{{ current_task.description }}" and you have to focus only on that task.

{% endif %}
A part of the app is already finished.
{{ files_list }}

{% if user_feedback != '' %}
User who was using the app "{{ name }}" sent you this feedback:
```
{{ user_feedback }}
```{% endif %}
{% if user_feedback_qa %}Feedback was not clear enough so you asked user for additional information and got this response:
```{% for row in user_feedback_qa %}
Q: {{ row.question }}
A: {{ row.answer }}
{% endfor %}
```{% endif %}
{% if next_solution_to_try is not none %}
Focus on solving this issue in the following way:
```
{{ next_solution_to_try }}
```
{% endif %}
Now, you have to debug this issue and comply with the additional user feedback.

**IMPORTANT**
Think about all the information provided. Your job is to look at the big picture by analysing all files to find where the issue is.
Don't reply with any code, your thoughts or a breakdown of the issue. Respond only with a description of the solution, explaining what sho
Create as few steps as possible to fix the issue. Each step should describe, using sentences and not code, what changes are nee
When there are multiple things that have to be done in one file, write everything as one step and don't split it into multiple steps.
You can assume that the environment was set up previously and the packages listed in files are installed, so tell me only commands neede

**IMPORTANT**
If the report mentions *multiple* issues, treat it as if you got several separate reports: think through each, and provide solutions for eac

{{ execution_order }}

{{ file_size_limit }}

File: parse_task.prompt
Ok, now, take your previous message that starts with `{{ instructions_prefix }}` and ends with `{{ instructions_postfix }}` and conver
File: plan.prompt
You are working in a software development agency and a project manager and software architect approach you telling you that yo

{{ project_details }}
{{ features_list }}
{% if files %}The developers have already used a project scaffolding tool that creates the initial boilerplate for the project:
{{ existing_summary }}

{{ files_list }}{% endif %}

{{ project_tasks }}

File: review_changes.prompt
A developer on your team has been working on the task described in previous message. Based on those instructions, the develop

Here is the original content of this file:


```
{{ old_content }}
```

Here is the diff of the changes:

{% for hunk in hunks %}## Hunk {{ loop.index }}


```diff
{{ hunk }}
```
{% endfor %}

As you can see, there {% if hunks|length == 1 %}is only one hunk in this diff, and it{% else %}are {{hunks|length}} hunks in this diff

Think carefully about the instructions and review the proposed changes. For each hunk of change, provide a detailed rationale, an
* applied - if the change is correct
* ignored - for example if it is a code deletion or change that wasn't asked for
* reworked - if the change does something correctly but also makes a serious mistake, in which case both applying and ignoring th

Finally, if the changes miss something that was in the instructions, mention that. Keep in mind you're just reviewing one file, `{{ file

Note that the developer may add, modify or delete logging (including `gpt_pilot_debugging_log`) or error handling that's not explici

Here is an example output if 2 of 4 hunks in the change should be applied, one of them should be ignored, one should be reworke
```
{
"hunks": [
{
"number": 1,
"reason": "Some explanation why this part of the change is important. The reasoning here should be as detailed as possible as tha
"decision": "apply"
},
{
"number": 2,
"reason": "Another explanation, for the 2nd hunk",
"decision": "apply"
},
{
"number": 3,
"reason": "This hunk accidentally deletes important code without any useful change",
"decision": "ignore"
},
{
"number": 4,
"reason": "This hunk does something correctly but also does something really wrong. It would be incorrect to either apply or ignore
"decision": "rework"
File: review_feedback.prompt
Your changes have been reviewed.
{% if content != original_content %}
The reviewer approved and applied some of your changes, but requested you rework the others.

Here's the file with the approved changes already applied:


```
{{ content }}
```

Here's the reviewer's feedback:


{% else %}
The reviewer requested that you rework your changes, here's the feedback:
{% endif %}

{{ rework_feedback }}

Based on this feedback and the original instructions, think carefully, make the correct changes, and output the entire file again. Re

File: review_task.prompt
You are working on an App called "{{ name }}" and you need to write code for the entire application.

Here is a high level description of "{{ name }}":


```
{{ app_summary }}
```
{{ features_list }}
Development process of this app was split into smaller tasks. Here is the list of all tasks:
```{% for task in tasks %}
{{ loop.index }}. {{ task['description'] }}
{% endfor %}
```
You are currently working on task "{{ current_task }}" and you have to focus only on that task.

A part of the app is already finished.


Here are files that were modified during this task implementation:
{% if files|length > 0 %}---start_of_current_files---
{% for file in files %}
**{{ file.path }}/{{ file.name }}** ({{ file.lines_of_code }} lines of code):
```
{{ file.content }}
```
{% endfor %}
---end_of_current_files---
{% endif -%}

{% if user_input -%}While working on this task, your colleague who is testing the app "{{ name }}" sent you some additional info. H
```
{{ user_input }}
```

After this info, you tried to fix it. {% endif %}Files that were modified during implementation of the task are:
```
{{ modified_files }}
```

Now I will show you how those files looked before this task implementation started. If a file is listed as the file that changed but is n

{% if files_at_start_of_task|length > 0 %}---start_of_files_at_start_of_task---


{% for file in files_at_start_of_task %}
**{{ file.path }}/{{ file.name }}** ({{ file.lines_of_code }} lines of code):
```
File: update_file.prompt

File: cli_response.prompt
Response from the CLI:
{{ cli_response }}

File: install_next_technology.prompt
Let's install `{{ technology }}` - respond with the command that I need to run to check if {{ technology }} is installed and ready to be
When you want to tell me a command I need to run, respond only with the command you want me to run and nothing else.
When the technology is fully installed, respond with `INSTALLED`.
If the previous command was meant to check if the technology is installed and it isn't, respond with `NOT_INSTALLED`.

File: specs.prompt
You are working in a software development agency and a project manager and software architect approach you telling you that yo

Here are the technologies that you need to use for this project:
{% for tech in technologies %}
* {{ tech["name"] }} - {{ tech["description"] }}{% endfor %}

Let's set up the environment on my machine. Here are the details about my machine:
```
{{ os_info }}
```

First, filter out the technologies from the list above and tell me which technologies need to be installed on my machine. That is eve

File: unsuccessful_installation.prompt
Ok, let's install {{ technology }} on my machine. You will tell me commands that I need to run and I will tell you the output I got. The

File: break_down_code_changes.prompt
You need to implement the current changes into a codebase:
-- INSTRUCTIONS --
{{ instructions }}
-- END OF INSTRUCTIONS --

Here is the current folder tree:


```
{{ directory_tree }}
```

Here are technologies that you can use:


{% for tech in technologies %}
* {{ tech["name"] }} - {{ tech["description"] }}{% endfor %}

First, you need to break down these instructions into actionable steps that can be made. There are 2 types of steps. If a step requi
For a step to be actionable, it cannot have a vague description but a clear explanation of what needs to be done to finish that step
BAD STEP: `Set up mongo database`
GOOD STEP: `Inside db.js, add the following code: {code that needs to be added to the file}`

When thinking about steps, first think about what files need to be changed to finish this task. When you determine what changes need

So, each step of type `code_change` can contain ALL changes that need to be made to a single file. If changes need to be made

Remember, all commands will be run from the project root folder.

Now, think step by step and return a list of steps that need to be run.

File: breakdown.prompt
File: next_step.prompt
{% if step_index != 0 %}
So far, steps {{ finished_steps }} are finished so let's do
{% else %}
Let's start with the{% endif %} step #{{ step_index }}. Here is the description of this step:
```
{{ step_description }}
```
Think step by step about what needs to be done to fulfill this step.
{% if step_type == 'COMMAND' %}
Respond with all commands that need to be run to fulfill this step.
{% elif step_type == 'CODE_CHANGE' %}
First, you need to know the code that's currently written so that you can appropriately write new or update the existing code. {# He
```
{{ directory_tree }}
```
#}
Respond with a list of files that you need to see before you can write the code for the current step. This list needs to be in a JSON
{% endif %}

File: request_files_for_code_changes.prompt
{#You need to implement the current changes into a codebase:
-- INSTRUCTIONS --
{{ instructions }}
-- END OF INSTRUCTIONS --
#}{% if step_index != 0 %}So far, steps {{ finished_steps }} are finished so let's do{% else %}Let's start with the{% endif %} step #{

{# I will give you each file that needs to be changed and you will implement changes from the instructions. #}To do this, you will ne
{#
Here is the current folder tree:
```
{{ directory_tree }}
```
#}
Remember, ask for files relative to the project root. For example, if you need a file with path `{project_root}/models/model.py`, you

File: request_test_files.prompt
Ok, now, I will show you the list of all files with automated tests that are written so far and I want you to tell me which automated te
{{ testing_files_tree }}

Remember, ask for files relative to the project root. For example, if you need a file with path `{project_root}/models/model.py`, you

File: step_check.prompt
Now, we need to verify if this change was successfully implemented. We can do that in 2 ways:
{#1. By writing an automated test or by running a previously written test - you write automated tests in Jest and you always try find

1. By running a command (or multiple commands) - this is good for when an automated test is an overkill. For example, if we insta

2. By requesting that a human checks if everything works as expected - this is the last option that we want to avoid but if we can't

Ok, now, tell me how can we verify if this change was successful and respond only with a keyword for a type of test.

File: step_code.prompt
Here are the requested files:
{% for file in files %}
**{{ file.path }}/{{ file.name }}**
```{{ file.language }}
{{ file.content }}
```
File: update_task.prompt
{% if completed_steps|length > 1 -%}
We have a plan for how to debug this and we've already done these steps:
-------------------------list_of_completed_steps-------------------------{% for step in completed_steps[:-1] %}
step #{{ loop.index - 1 }}:
step_type: {{ step.type }}{% if 'command' in step %}
command_to_run: `{{ step.command.command }}`{% elif step.type == 'code_change' %}
code_change_description: `{{ step.code_change_description }}`{% endif %}{% endfor %}
-------------------------END_list_of_completed_steps-------------------------
{% else %}
We have a plan for how to debug this.
{%- endif %}

Right now, we're working on the step #{{ current_step_index }}:


-------------------------current_step-------------------------
step_type: {{ current_step.type }}
command_to_run: `{{ current_step.command.command }}`
-------------------------END_current_step-------------------------

We've run the command `{{ current_step.command.command }}` and we got the following output:
{{ cli_response }}

{% if next_steps %}
Here is a list of steps that will be executed after current step:
-------------------------list_of_next_steps-------------------------
{% for step in next_steps %}
--step_{{ loop.index + current_step_index }}--
step_type: {{ step.type }}{% if 'command' in step %}
command_to_run: `{{ step.command.command }}`{% elif step.type == 'code_change' %}
code_change_description: `{{ step.code_change_description }}`{% endif %}
--END_step_{{ loop.index + current_step_index }}--{% endfor %}
-------------------------END_list_of_next_steps-------------------------

{% endif %}
Now, think step by step and create a new list of task steps to complete this task based on the output we got from running `{{ curren

{{ file_naming }}

{{ execution_order }}

{{ file_size_limit }}

File: command_test.prompt
Write a list of commands that should be run that will tell you if the implementation was successful. Write them in a JSON array whe
COMMAND: {command_that_needs_to_be_ran}

File: write_automated_test.prompt
{% if files|length > 0 %}
Here are the requested files:
{% for file in files %}
**{{ file.path }}/{{ file.name }}**
```{# file.language #}
{{ file.content }}
```

{% endfor %}
{% else %}
Currently, no tests are written.

{% endif %}
Now, start with the implementation of the automated test (or tests).
File: create_readme.prompt
You are working on a {{ app_type }} called "{{ name }}" and you need to create a detailed documentation for current state of projec

{{ project_details }}
{{ features_list }}
{{ files_list }}

DO NOT specify commands to create any folders or files, they will be created automatically - just specify the relative path to file th

Now, based on the project details provided, think step by step and create README.md file for this project. The file should have the

# Project name

Short description (a few sentences) of the project based on the project details.

## Overview

Description of the architecture and technologies used in the project, and the project structure.

## Features

Description of what the app can do and how it can be used.

## Getting started

### Requirements

Required technologies/setup needed on the computer to run the project.

### Quickstart

How to set up the project and run it

### License

The project is proprietary (not open source); just output the standard "Copyright (c) 2024" template here.

File: answers.prompt
Here are the answers to your questions:
{% for answer in answers %}
{{ answer }}
{% endfor %}

Now, think step by step and ask any questions that you still want to get a better answer on.
**IMPORTANT**
If everything is clear, you write the response in the following format:
EVERYTHING_CLEAR
App recap: {{recap of the app after all clarifications}}

File: specs.prompt
I want you to create the {{ app_type }} (let's call it "{{ name }}") that can be described like this:
```
{{ prompt }}
```

File: specs_instruction.prompt
I'm going to show you an overview of tasks that you need to do to lead the process of creating this {{ app_type }} and for each task
Example app description: `Create a script that finds Youtube channels with the word "test" inside the channel name`.

Here is an overview of the tasks that you need to do:


File: ask_questions.prompt
Your task is to talk to a new client and develop a detailed specification for a new application the client wants to build. This specifica

The AI developer prefers working on web apps using the Node/Express/MongoDB/Mongoose/EJS stack, and uses vanilla JS with Boot
Try to avoid the use of Docker, Kubernetes, microservices and single-page app frameworks like React, Next.js, Angular, Vue or Sv

In your work, follow these important rules:


* In your communication with the client, be straightforward, concise, and focused on the task.
* Ask questions ONE BY ONE. This is very important, as the client is easily confused. If you were to ask multiple questions the us
* Ask specific questions, taking into account what you already know about the project. For example, don't ask "what features do yo
* Pay special attention to any documentation or information that the project might require (such as accessing a custom API, etc). B
* This is a prototype project, so it is important to have a small and well-defined scope. If the scope seems to grow too large (beyond a
* Do not address non-functional requirements (performance, deployment, security, budget, timelines, etc...). We are only concerne
* Do not address deployment or hosting, including DevOps tasks to set up a CI/CD pipeline
* Don't address or envision any future development (post proof-of-concept); the scope of your task is only to spec the PoC/prototyp
* If the user provided specific information on how to access 3rd party API or how exactly to implement something, you MUST inclu

Ensure that you have all the information about:


* overall description and goals for the app
* all the features of the application
* functional specification
* how the user will use the app
* enumerate all the parts of the application (eg. pages of the application, background processing if any, etc); for each part, explain
* identify any constraints, business rules, user flows or other important info that affect how the application works or how it is used
* technical specification
* what kind of an application this is and what platform/technologies will be used
* the architecture of the application (what happens on backend, frontend, mobile, background tasks, integration with 3rd party serv
* detailed description of each component of the application architecture
* integration specification
* any 3rd party apps, services, APIs that will be used (eg. for auth, payments, etc..)
* if a custom API is used, precise definitions, with examples, how to use the custom API or do the custom integration

If you identify any missing information or need clarification on any vague or ambiguous parts of the brief, ask the client about it.

Important note: don't ask trivial questions for obvious or unimportant parts of the app, for example:
* Bad questions example 1:
* Client brief: I want to build a hello world web app
* Bad questions:
* What title do you want for the web page that displays "Hello World"?
* What color and font size would you like for the "Hello World" text to be displayed in?
* Should the "Hello World" message be static text served directly from the server, or would you like it implemented via JavaScript o
* Explanation: There's no need to micromanage the developer(s) and designer(s), the client would've specified these details if they

If you ask such trivial questions, the client will think you're stupid and will leave. DON'T DO THAT.

Think carefully about what a developer must know to be able to build the app. The specification must address all of this information

When you gather all the information from the client, output the complete specification. Remember, the specification should define b

Here's an EXAMPLE initial prompt:


---start-of-example-output---
Online forum similar to Hacker News (news.ycombinator.com), with a simple and clean interface, where people can post links or te

The UI should use EJS view engine, Bootstrap for styling and plain vanilla JavaScript. Design should be simple and look like Hack

Each story has a title (one-line text), a link (optional, URL to an external article being shared on AI News), and text (text to show in

Use the following algorithm to rank top stories, and comments within a story: "score = upvotes - downvotes + comments - sqrt(age

Implement the following pages:

* / - shows the top 20 posted stories, ranked using the scoring algorithm, with a "More" link that shows the next 20 (pagination usin
* /newest - shows the latest 20 posted stories, ranked chronologically (newest first), with a "More" link that shows the next 20 (pag
File: review_spec.prompt
Your team has taken the client brief and turned it into a project specification.

Your job is to check the specification and identify all the information that is contained in the client brief, but missing from the specif

This might include:


* details on how the app should work
* information which 3rd party packages or APIs to use or avoid
* concrete examples of API requests/responses, library usage, or other external documentation

Here is the client brief:


---CLIENT-BRIEF-START---
{{ brief }}
---CLIENT-BRIEF-END---

Here is the specification your team came up with:


---SPEC-START---
{{ spec }}
---SPEC-END---

In your response, output all the information that is present in the client brief but missing from the spec, so it can be appended.

Note: don't output a suggestion for your team to take back to the drawing board. Instead, just output the missing information and the

File: architect.prompt
You are an experienced software architect. Your expertise is in creating an architecture for an MVP (minimum viable product) for

File: code_monkey.prompt
You are a full stack software developer that works in a software development agency. You write modular, clean, maintainable, pro

File: code_updater.prompt
You are a full stack software developer who works in a software development agency. You write very modular code. Your job is to

File: dev_ops.prompt

File: full_stack_developer.prompt
You are an expert full stack software developer who works in a software development agency.

You write modular, well-organized code split across files that are not too big, so that the codebase is maintainable. Your code is cl

Your job is to implement tasks that your tech lead assigns you. Each task has a description of what needs to be implemented.

File: product_owner.prompt
You are an experienced project owner (project manager) who manages the entire process of creating software applications for clie

File: spec_writer.prompt
You are a product owner working in a software development agency.

File: tech_lead.prompt
You are an experienced tech lead in a software development agency and your main task is to break down the project into smaller

File: technical_writer.prompt
You are a technical writer and, as such, you excel in clear, concise communication, skillfully breaking down complex technical conce

File: additional_questions.prompt
File: llm_response_error.prompt
There was an error processing your response:

{{ error }}

Please think carefully and try again.

File: python_string.prompt
{{ content }}

File: summary.prompt
Based on the following conversation, write a summary:

``` {{conversation}} ```

File: update.prompt
I will show you part of your message, to which I want you to make some updates. Please just modify your last message per these
```
{{ changes }}
```

File: __init__.py
import os
from typing import TYPE_CHECKING, Optional
from uuid import uuid4

from utils.style import color_green_bold
from logger.logger import logger
from utils.exit import trace_code_event

from .node_express_mongoose import NODE_EXPRESS_MONGOOSE
from .render import Renderer

if TYPE_CHECKING:  # noqa
    from helpers.Project import Project  # noqa

PROJECT_TEMPLATES = {
    "node_express_mongoose": NODE_EXPRESS_MONGOOSE,
}

def apply_project_template(
    project: "Project",
) -> Optional[str]:
    """
    Apply a project template to a new project.

    :param project: the project object
    :return: a summary of the applied template, or None if no template was applied

    If project.project_template is None (not selected), or not one of the supported
    templates, do nothing.

    Note: the template summary is injected in the project description, and the
    created files are saved to a snapshot of the last development step (LLM request).
    """
    template_name = project.project_template
    if not template_name or template_name not in PROJECT_TEMPLATES:
        logger.warning(f"Project template '{template_name}' not found, ignoring")
        return None
File: node_express_mongoose.py
from helpers.cli import execute_command

def install_hook(project):
    """
    Command to run to complete the project scaffolding setup.

    :param project: the project object
    """
    execute_command(project, "npm install")

NODE_EXPRESS_MONGOOSE = {
    "path": "node_express_mongoose",
    "description": "Node + Express + MongoDB web app with session-based authentication, EJS views and Bootstrap 5",
    "summary": "\n".join([
        "* initial Node + Express setup",
        "* User model in Mongoose ORM with username and password fields, ensuring username is unique and hashing passwords with b
        "* session-based authentication using username + password (hashed using bcrypt) in routes/authRoutes.js, using express-session
        "* authentication middleware to protect routes that require login",
        "* EJS view engine, html head, header and footer EJS partials, with included Bootstrap 5.x CSS and JS",
        "* routes and EJS views for login, register, and home (main) page",
        "* config loading from environment using dotenv with a placeholder .env.example file: you will need to create a .env file with your o
    ]),
    "install_hook": install_hook,
}
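
To illustrate how such a template entry is meant to be consumed (a hedged sketch; apply_project_template above is truncated in this capture, so the exact flow may differ):

```python
# Hypothetical sketch of consuming a template entry via the PROJECT_TEMPLATES
# dict defined in the templates __init__.py above; `project` is assumed to be
# a helpers.Project instance.
def run_install_hook(project, template_name: str) -> str:
    template = PROJECT_TEMPLATES[template_name]
    hook = template.get("install_hook")
    if hook:
        hook(project)  # e.g. runs "npm install" for node_express_mongoose
    # Per the docstring above, the summary is what gets injected into the
    # project description.
    return template["summary"]
```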

File: render.py
from __future__ import annotations

from os import walk
from os.path import join, relpath
from typing import Any, Callable

from jinja2 import Environment, FileSystemLoader

class Renderer:
    """
    Render a Jinja template

    Sets up Jinja renderer and renders one or more templates
    using provided context.

    * `render_template` renders a single template
    * `render_tree` renders all templates starting from a predefined
      root folder (which must reside inside templates folder structure)

    Rendered template(s) are returned as strings. Nothing is written
    to disk.

    Usage:

    ```
    from render import Renderer
    r = Renderer('path/to/templates')
    output_string = r.render_template('template.html', {'key': 'value'})
    output_tree = r.render_tree('tree/root', {'key': 'value'})
    ```
    """
File: .env
# Port to listen on (example: 3000)
PORT=3000

# MongoDB database URL (example: mongodb://localhost/dbname)


DATABASE_URL=mongodb://localhost/myDb # INPUT_REQUIRED {insert your MongoDB url here}

# Session secret string (must be unique to your server)


SESSION_SECRET={{ random_secret }}

File: .env.example
# Copy this file to .env and edit the settings

# Port to listen on (example: 3000)


PORT=

# MongoDB database URL (example: mongodb://localhost/dbname)


DATABASE_URL=

# Session secret string (must be unique to your server)


SESSION_SECRET=

File: package.json
{
"name": "{{ project_name }}",
"version": "1.0.0",
"description": "",
"main": "server.js",
"scripts": {
"start": "node server.js",
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "",
"license": "ISC",
"dependencies": {
"bcrypt": "^5.1.1",
"body-parser": "^1.20.2",
"chart.js": "^4.4.1",
"connect-flash": "^0.1.1",
"csv-writer": "^1.6.0",
"dotenv": "^16.4.1",
"ejs": "^3.1.9",
"express": "^4.18.2",
"express-session": "^1.18.0",
"connect-mongo": "^5.1.0",
"moment": "^2.30.1",
"mongoose": "^8.1.1"
}
}

File: server.js
// Load environment variables
require("dotenv").config();
const mongoose = require("mongoose");
const express = require("express");
const session = require("express-session");
const MongoStore = require('connect-mongo');
const authRoutes = require("./routes/authRoutes");

if (!process.env.DATABASE_URL || !process.env.SESSION_SECRET) {
File: User.js
const mongoose = require('mongoose');
const bcrypt = require('bcrypt');

const userSchema = new mongoose.Schema({
  username: { type: String, unique: true, required: true },
  password: { type: String, required: true }
});

userSchema.pre('save', function(next) {
  const user = this;
  if (!user.isModified('password')) return next();
  bcrypt.hash(user.password, 10, (err, hash) => {
    if (err) {
      console.error('Error hashing password:', err);
      return next(err);
    }
    user.password = hash;
    next();
  });
});

const User = mongoose.model('User', userSchema);

module.exports = User;

File: style.css
/* Placeholder for custom styles */

File: main.js
// Placeholder for future JavaScript code

File: authRoutes.js
const express = require('express');
const User = require('../models/User');
const bcrypt = require('bcrypt');
const router = express.Router();

router.get('/auth/register', (req, res) => {
  res.render('register');
});

router.post('/auth/register', async (req, res) => {
  try {
    const { username, password } = req.body;
    // User model will automatically hash the password using bcrypt
    await User.create({ username, password });
    res.redirect('/auth/login');
  } catch (error) {
    console.error('Registration error:', error);
    res.status(500).send(error.message);
  }
});

router.get('/auth/login', (req, res) => {
  res.render('login');
});

router.post('/auth/login', async (req, res) => {
  try {
    const { username, password } = req.body;
File: authMiddleware.js
const isAuthenticated = (req, res, next) => {
  if (req.session && req.session.userId) {
    return next(); // User is authenticated, proceed to the next middleware/route handler
  } else {
    return res.status(401).send('You are not authenticated'); // User is not authenticated
  }
};

module.exports = {
  isAuthenticated
};

File: index.ejs
<!DOCTYPE html>
<html lang="en">
<%- include('partials/_head.ejs') %>
<body>
<%- include('partials/_header.ejs') %>
<main role="main" class="container mt-4">
<div class="text-center">
<h1>{{ project_name }}</h1>
</div>
</main>
</body>
<%- include('partials/_footer.ejs') %>
<script src="/js/main.js"></script>
</html>

File: login.ejs
<!DOCTYPE html>
<html lang="en">
<%- include('partials/_head.ejs') %>
<body>
<%- include('partials/_header.ejs') %>
<main role="main">
<div class="container mt-5">
<h2>Login</h2>
<form action="/auth/login" method="POST">
<div class="mb-3">
<input type="text" name="username" placeholder="Username" required class="form-control">
</div>
<div class="mb-3">
<input type="password" name="password" placeholder="Password" required class="form-control">
</div>
<div class="mb-3">
<button type="submit" class="btn btn-primary">Login</button>
Don't have an account? <a href="/auth/register">Register</a>
</div>
</form>
</div>
</main>
</body>
<%- include('partials/_footer.ejs') %>
</html>

File: register.ejs
<!DOCTYPE html>
<html lang="en">
<%- include('partials/_head.ejs') %>
<body>
File: _footer.ejs
<footer class="footer fixed-bottom bg-light">
<div class="container text-center my-2">
<span>Copyright &copy; <%= 1900 + new Date().getYear() %> {{ project_name }}</span>
</div>
</footer>
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.min.js" integrity="sha384-BBtl+eGJRgqQAUMxJ7pMwb

File: _head.ejs
<head>
<meta charset="UTF-8">
<title>{{ project_name }}</title>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" integrity="sha384-T3c6CoIi6u
<link rel="stylesheet" href="/css/style.css">
</head>

File: _header.ejs
<nav class="navbar navbar-expand-md navbar-dark bg-dark">
<a class="navbar-brand" href="/">{{ project_name }}</a>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarNav" aria-controls="navbarNav" aria-exp
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarNav">
<ul class="navbar-nav">
<li class="nav-item">
<a class="nav-link" href="/">Home</a>
</li>
<li class="nav-item">
<% if (session && session.userId) { %>
<a class="nav-link" href="/auth/logout">Logout</a>
<% } else { %>
<a class="nav-link" href="/auth/login">Login</a>
<% } %>
</li>
</ul>
</div>
</nav>

File: __init__.py

File: mock_questionary.py
class MockQuestionary:
    def __init__(self, answers=None, initial_state='project_description'):
        if answers is None:
            answers = []
        self.answers = iter(answers)
        self.state = initial_state

    class Style:
        def __init__(self, *args, **kwargs):
            pass

    def text(self, question: str, style=None):
        print('AI: ' + question)
        if question.startswith('User Story'):
            self.state = 'user_stories'
        elif question.endswith('write "DONE"'):
            self.state = 'DONE'
        return self
File: test_colors.py
import unittest
from pilot.utils.style import style_config, Theme, ColorName, get_color_function

class TestColorStyle(unittest.TestCase):
def test_initialization(self):
print("\n[INFO] Testing Theme Initialization...")
style_config.set_theme(Theme.DARK)
print(f"[INFO] Set theme to: {Theme.DARK}, Current theme: {style_config.theme}")
self.assertEqual(style_config.theme, Theme.DARK)

style_config.set_theme(Theme.LIGHT)
print(f"[INFO] Set theme to: {Theme.LIGHT}, Current theme: {style_config.theme}")
self.assertEqual(style_config.theme, Theme.LIGHT)

def test_color_function(self):
dark_color_codes = {
ColorName.RED: "\x1b[31m",
ColorName.GREEN: "\x1b[32m",
# ... other colors
}
light_color_codes = {
ColorName.RED: "\x1b[91m",
ColorName.GREEN: "\x1b[92m",
# ... other colors
}
reset = "\x1b[0m"

# Test DARK theme
print("\n[INFO] Testing DARK Theme Colors...")
style_config.set_theme(Theme.DARK)
for color_name, code in dark_color_codes.items():
with self.subTest(color=color_name):
color_func = get_color_function(color_name, bold=False)
print(f"[INFO] Testing color: {color_name}, Expect: {code}Test, Got: {color_func('Test')}")
self.assertEqual(color_func("Test"), f"{code}Test{reset}")

color_func = get_color_function(color_name, bold=True)
print(
f"[INFO] Testing color (bold): {color_name}, Expect: {code}\x1b[1mTest, Got: {color_func('Test')}")
self.assertEqual(color_func("Test"), f"{code}\x1b[1mTest{reset}")

# Test LIGHT theme
print("\n[INFO] Testing LIGHT Theme Colors...")
style_config.set_theme(Theme.LIGHT)
for color_name, code in light_color_codes.items():
with self.subTest(color=color_name):
color_func = get_color_function(color_name, bold=False)
print(f"[INFO] Testing color: {color_name}, Expect: {code}Test, Got: {color_func('Test')}")
self.assertEqual(color_func("Test"), f"{code}Test{reset}")

color_func = get_color_function(color_name, bold=True)
print(
f"[INFO] Testing color (bold): {color_name}, Expect: {code}\x1b[1mTest, Got: {color_func('Test')}")
self.assertEqual(color_func("Test"), f"{code}\x1b[1mTest{reset}")

File: test_utils.py
from unittest.mock import Mock

def mock_terminal_size():
File: test_file_snapshot.py
from base64 import b64decode

from peewee import SqliteDatabase, PostgresqlDatabase

import pytest

from database.config import (
DATABASE_TYPE,
DB_NAME,
DB_HOST,
DB_PORT,
DB_USER,
DB_PASSWORD,
)
from database.database import TABLES
from database.models.user import User
from database.models.app import App
from database.models.file_snapshot import FileSnapshot
from database.models.files import File
from database.models.development_steps import DevelopmentSteps

EMPTY_PNG = b64decode(
"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJ
)

@pytest.fixture(autouse=True)
def database():
"""
Set up a new empty initialized test database.

In case of SQlite, the database is created in-memory. In case of PostgreSQL,


the database should already exist and be empty.

This fixture will create all the tables and run the test in an isolated transaction.
which gets rolled back after the test. The fixture also drops all the tables at the
end.
"""
if DATABASE_TYPE == "postgres":
if not DB_NAME:
raise ValueError(
"PostgreSQL database name (DB_NAME) environment variable not set"
)
db = PostgresqlDatabase(
DB_NAME,
host=DB_HOST,
port=DB_PORT,
user=DB_USER,
password=DB_PASSWORD,
)
elif DATABASE_TYPE == "sqlite":
db = SqliteDatabase(":memory:")
else:
raise ValueError(f"Unexpected database type: {DATABASE_TYPE}")

db.bind(TABLES)

class PostgresRollback(Exception):
"""
Mock exception to ensure rollback after each test.

Even though we drop the tables at the end of each test, if the test
fails due to database integrity error, we have to roll back the
File: __init__.py

File: test_files.py
import os
from pathlib import Path
from tempfile import NamedTemporaryFile
from unittest.mock import patch, call

import pytest

from pilot.helpers.files import get_file_contents, get_directory_contents, update_file

@patch("pilot.helpers.files.open")
@patch("pilot.helpers.files.os")
def test_update_file_creates_directories(mock_os, mock_open):
"""Test that update_file creates intermediate directories if they don't exist."""

mock_os.path.dirname = os.path.dirname
update_file("/path/to/file", "content")
mock_os.makedirs.assert_called_once_with("/path/to", exist_ok=True)

@patch("pilot.helpers.files.open")
@patch("pilot.helpers.files.os")
def test_update_file_creates_text_file(mock_os, mock_open):
"""Test that update_file creates intermediate directories if they don't exist."""

update_file("/path/to/file", "■■")
mock_open.assert_called_once_with("/path/to/file", "w", encoding="utf-8")
mock_open.return_value.__enter__.return_value.write.assert_called_once_with("■■")

@patch("pilot.helpers.files.open")
@patch("pilot.helpers.files.os")
def test_update_file_creates_binary_file(mock_os, mock_open):
"""Test that update_file creates intermediate directories if they don't exist."""

update_file("/path/to/file", b"\x00\x00\x00")
mock_open.assert_called_once_with("/path/to/file", "wb", encoding=None)
mock_open.return_value.__enter__.return_value.write.assert_called_once_with(b"\x00\x00\x00")

@pytest.mark.parametrize(
("source", "expected_encoded"),
[
("file.txt", b"file.txt"),
("foo.txt - ■■", b"foo.txt - \xe7\x84\xa1\xe7\x82\xba"),
(b"\xff\xff\xff", b"\xff\xff\xff"),
],
)
def test_update_file_with_encoded_content(source, expected_encoded):
# Can't use NamedTemporaryFile as a context manager here because Windows
# doesn't allow O_TEMPORARY files (with delete=True) to be opened
# twice, defeating the purpose.
file = NamedTemporaryFile(delete=False)
update_file(file.name, source)
assert file.read() == expected_encoded

file.close()
os.remove(file.name)
File: test_ignore.py
from unittest.mock import patch
import pytest
from tempfile import TemporaryDirectory

from utils.ignore import IgnoreMatcher
from os.path import sep, join, dirname

@pytest.mark.parametrize(
("path", "expected"),
[
(".git", True),
(".gpt-pilot", True),
(".idea", True),
(".vscode", True),
(".DS_Store", True),
(join("subdirectory", ".DS_Store"), True),
("__pycache__", True),
(join("subdirectory", "__pycache__"), True),
("node_modules", True),
(join("subdirectory", "node_modules"), True),
("package-lock.json", True),
("venv", True),
("dist", True),
("build", True),
("target", True),
(".gitignore", False),
("server.js", False),
(join(dirname(__file__), "node_modules"), True),
(join(dirname(__file__), "subdirectory", "node_modules"), True),
]
)
@patch("utils.ignore.os.path.getsize")
@patch("utils.ignore.os.path.isfile")
@patch("utils.ignore.open")
def test_default_ignore(mock_open, mock_isfile, mock_getsize, path, expected):
mock_open.return_value.read.return_value = "fake-content"
mock_isfile.return_value = True
mock_getsize.return_value = 100
matcher = IgnoreMatcher(root_path=dirname(__file__))
assert matcher.ignore(path) == expected

@pytest.mark.parametrize(
("ignore", "path", "expected"),
[
("*.py[co]", "test.pyc", True),
("*.py[co]", "subdir/test.pyo", True),
("*.py[co]", "test.py", False),
("*.min.js", f"public{sep}js{sep}script.min.js", True),
("*.min.js", f"public{sep}js{sep}min.js", False),
]
)
@patch("utils.ignore.os.path.getsize")
@patch("utils.ignore.os.path.isfile")
@patch("utils.ignore.open")
def test_additional_ignore(mock_open, mock_isfile, mock_getsize, ignore, path, expected):
mock_open.return_value.read.return_value = "fake-content"
mock_isfile.return_value = True
mock_getsize.return_value = 100
matcher = IgnoreMatcher([ignore])
assert matcher.ignore(path) == expected
File: test_settings.py
from io import StringIO
import json
from os.path import expanduser, expandvars, join
from os import getenv
from pathlib import Path
from subprocess import check_output
import sys
from unittest.mock import patch, MagicMock

import pytest

from utils.settings import (
Loader,
Settings,
get_git_commit,
get_package_version,
get_version,
)

@pytest.fixture
def expected_config_location():
xdg_config_home = getenv("XDG_CONFIG_HOME")
if xdg_config_home:
return join(xdg_config_home, "gpt-pilot", "config.json")
elif sys.platform in ["darwin", "linux"]:
return expanduser("~/.gpt-pilot/config.json")
elif sys.platform == "win32":
return expandvars("%APPDATA%\\GPT Pilot\\config.json")
else:
raise RuntimeError(f"Unknown platform: {sys.platform}")

def test_settings_initializes_known_variables():
settings = Settings()
assert settings.openai_api_key is None
assert settings.telemetry is None

def test_settings_init_ignores_unknown_variables():
settings = Settings(unknown="value")
assert not hasattr(settings, "unknown")

def test_settings_forbids_saving_unknown_variables():
settings = Settings()

with pytest.raises(AttributeError):
settings.unknown = "value"

def test_settings_update():
settings = Settings()
settings.update(openai_api_key="test_key")
assert settings.openai_api_key == "test_key"

def test_settings_to_dict():
settings = Settings()
settings.update(openai_api_key="test_key")
assert dict(settings) == {
"openai_api_key": "test_key",
File: test_telemetry.py
from unittest.mock import patch

from utils.telemetry import Telemetry

@patch("utils.telemetry.settings")
def test_telemetry_constructor_with_telemetry_enabled(mock_settings):
mock_settings.telemetry = {
"id": "test-id",
"endpoint": "test-endpoint",
"enabled": True,
}
telemetry = Telemetry()
assert telemetry.enabled
assert telemetry.telemetry_id == "test-id"
assert telemetry.endpoint == "test-endpoint"

@patch("utils.telemetry.settings")
def test_telemetry_constructor_with_telemetry_disabled(mock_settings):
mock_settings.telemetry = {"id": "existing-id", "enabled": False}
telemetry = Telemetry()
assert not telemetry.enabled

@patch("utils.telemetry.settings")
def test_telemetry_constructor_with_telemetry_not_configured(mock_settings):
mock_settings.telemetry = None
telemetry = Telemetry()
assert not telemetry.enabled

@patch("utils.telemetry.config_path", "/path/to/config")
@patch("utils.telemetry.settings")
def test_telemetry_constructor_logging_enabled(mock_settings, caplog):
caplog.set_level("DEBUG")
mock_settings.telemetry = {
"id": "test-id",
"endpoint": "test-endpoint",
"enabled": True,
}
Telemetry()
assert (
"Anonymous telemetry enabled (id=test-id), configure or disable it in /path/to/config"
in caplog.text
)

@patch("utils.telemetry.sys.platform", "test_platform")
@patch("utils.telemetry.sys.version", "test_version")
@patch("utils.telemetry.version", "test_pilot_version")
def test_clear_data_resets_data():
telemetry = Telemetry()
empty = Telemetry()

telemetry.data = {
"model": "test-model",
"num_llm_requests": 10,
"num_llm_tokens": 100,
"num_steps": 5,
"elapsed_time": 123.45,
File: Dev_continue_development.py
import pytest
from unittest.mock import patch

from helpers.AgentConvo import AgentConvo
from helpers.agents import Developer
from .utils import create_project
from helpers.cli import terminate_running_processes
from test.mock_questionary import MockQuestionary

@pytest.mark.ux_test
@patch('utils.questionary.get_saved_user_input')
@patch('helpers.cli.get_saved_command_run')
@patch('helpers.AgentConvo.get_saved_development_step')
@patch('helpers.AgentConvo.save_development_step')
def test_continue_development(mock_4, mock_3, mock_2, mock_1):
# Given
project = create_project('continue_development', 'hello_world_server')
# execute_command(project, 'npm install', 13000)

developer = Developer(project)
project.developer = developer
convo = AgentConvo(developer)
convo.load_branch = lambda last_branch_name: None
developer.run_command = 'node server.js'

# Note: uncomment the following 2 lines and indent the remaining lines when debugging without console input
mock_questionary = MockQuestionary(['r', 'continue'])
with patch('utils.questionary.questionary', mock_questionary):

# When
# `continue_development` calls `run_command_until_success()` if the user types "r"
developer.continue_development(convo, 'branch_name', 'The web page should say "Hello, World!"')
print('end of "continue_development" scenario')

terminate_running_processes()

File: README.md
The functions in this directory are used to test specific scenarios of the user experience.

File: __init__.py
# from .run_command_until_success import run_command_until_success
from .cli_execute_command import cli_execute_command
from .Dev_continue_development import test_continue_development
from .utils import use_args

def run_test(test_name: str, args):
print(f'Running UX test "{test_name}"...')

tests = {
# 'run_command_until_success': run_command_until_success,
'cli_execute_command': cli_execute_command,
'continue_development': test_continue_development,
}

if test_name in tests:
use_args(args)
return tests[test_name]()

print(f'UX test "{test_name}" not found')


File: __init__.py

File: arguments.py
import hashlib
import os
import re
import sys
import uuid
from getpass import getuser
from database.database import get_app, get_app_by_user_workspace
from utils.style import color_green_bold, color_red, style_config
from utils.utils import should_execute_step
from const.common import STEPS

def get_arguments():
# The first element in sys.argv is the name of the script itself.
# Any additional elements are the arguments passed from the command line.
args = sys.argv[1:]

# Create an empty dictionary to store the key-value pairs.
arguments = {
'continuing_project': False
}

# Loop through the arguments and parse them as key-value pairs.
for arg in args:
if '=' in arg:
key, value = arg.split('=', 1)
arguments[key] = value
else:
arguments[arg] = True

theme_mapping = {'light': style_config.theme.LIGHT, 'dark': style_config.theme.DARK}
theme_value = arguments.get('theme', 'dark')
style_config.set_theme(theme=theme_mapping.get(theme_value, style_config.theme.DARK))

if 'user_id' not in arguments:
arguments['user_id'] = username_to_uuid(getuser())

app = None
if 'workspace' in arguments:
arguments['workspace'] = os.path.abspath(arguments['workspace'])
app = get_app_by_user_workspace(arguments['user_id'], arguments['workspace'])
if app is not None:
arguments['app_id'] = str(app.id)
arguments['continuing_project'] = True
else:
arguments['workspace'] = None

if 'app_id' in arguments:
if app is None:
try:
app = get_app(arguments['app_id'])
except ValueError as err:
print(color_red(f"Error: {err}"))
sys.exit(-1)

arguments['app_type'] = app.app_type
arguments['name'] = app.name
arguments['status'] = app.status
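
A minimal, standalone illustration of the key=value parsing above; this is a hedged sketch, not the full get_arguments, which also resolves the user, workspace, theme and app settings.

# Sketch of the CLI parsing loop above; the argument names in the example are illustrative.
def parse_cli_pairs(argv):
    arguments = {'continuing_project': False}
    for arg in argv:
        if '=' in arg:
            key, value = arg.split('=', 1)   # split on the first '=' only
            arguments[key] = value
        else:
            arguments[arg] = True            # bare flags become boolean True
    return arguments

# parse_cli_pairs(['app_id=123', 'theme=light', 'some_flag'])
# -> {'continuing_project': False, 'app_id': '123', 'theme': 'light', 'some_flag': True}
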
File: custom_print.py
import builtins
from helpers.ipc import IPCClient
from const.ipc import MESSAGE_TYPE, LOCAL_IGNORE_MESSAGE_TYPES
from utils.print import remove_ansi_codes

def get_custom_print(args):
built_in_print = builtins.print

def print_to_external_process(*args, **kwargs):
# message = " ".join(map(str, args))
message = args[0]

if 'type' not in kwargs:
kwargs['type'] = 'verbose'
elif kwargs['type'] == MESSAGE_TYPE['local']:
local_print(*args, **kwargs)
return

ipc_client_instance.send({
'type': MESSAGE_TYPE[kwargs['type']],
'category': kwargs['category'] if 'category' in kwargs else '',
'content': remove_ansi_codes(message),
})
if kwargs['type'] == MESSAGE_TYPE['user_input_request']:
return ipc_client_instance.listen()

def local_print(*args, **kwargs):
message = " ".join(map(str, args))
if 'type' in kwargs:
if kwargs['type'] in LOCAL_IGNORE_MESSAGE_TYPES:
return
del kwargs['type']

if 'category' in kwargs:
del kwargs['category']

built_in_print(message, **kwargs)

ipc_client_instance = None
if '--external-log-process-port' in args:
ipc_client_instance = IPCClient(args['--external-log-process-port'])
return print_to_external_process, ipc_client_instance
else:
return local_print, ipc_client_instance
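
A hedged usage sketch: a caller such as main.py can install the returned function as the global print; only get_custom_print itself comes from this file, and the args value below is illustrative.

import builtins
from utils.custom_print import get_custom_print

args = {}  # parsed CLI arguments; including '--external-log-process-port' selects the IPC path
builtins.print, ipc_client_instance = get_custom_print(args)
print("GPT Pilot started")  # routed through local_print (or the IPC client, if configured)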

File: dot_gpt_pilot.py
import json
import os
import yaml
from datetime import datetime
from dotenv import load_dotenv

load_dotenv()

USE_GPTPILOT_FOLDER = os.getenv('USE_GPTPILOT_FOLDER') == 'true'

# TODO: Parse files from the `.gpt-pilot` directory to resume a project - `user_stories` may have changed - include checksums for
# TODO: Save a summary at the end of each task/sprint.
class DotGptPilot:
File: exit.py
import requests

from helpers.cli import terminate_running_processes
from prompts.prompts import ask_user

from utils.telemetry import telemetry

def send_feedback(feedback, path_id):
"""Send the collected feedback to the endpoint."""
# Prepare the feedback data (you can adjust the structure as per your backend needs)
feedback_data = {
"pathId": path_id,
"data": feedback,
"event": "pilot-feedback"
}

try:
response = requests.post("https://api.pythagora.io/telemetry", json=feedback_data)
response.raise_for_status()
except requests.RequestException as err:
print(f"Failed to send feedback data: {err}")

def trace_code_event(name: str, data: dict):
"""
Record a code event to trace potential logic bugs.

:param name: name of the event
:param data: data to send with the event
"""
path_id = get_path_id()

# Prepare the telemetry data
telemetry_data = {
"pathId": path_id,
"event": f"trace-{name}",
"data": data,
}

try:
response = requests.post("https://api.pythagora.io/telemetry", json=telemetry_data)
response.raise_for_status()
except: # noqa
pass

def get_path_id():
return telemetry.telemetry_id

def ask_to_store_prompt(project, path_id):
init_prompt = project.main_prompt if project is not None and project.main_prompt else None
if init_prompt is None:
return

# Prepare the prompt data
telemetry_data = {
"pathId": path_id,
"event": "pilot-prompt",
"data": init_prompt
}
question = ('We would appreciate if you let us store your initial app prompt. If you are OK with that, please just '
File: files.py
import os
from pathlib import Path
from database.database import save_user_app

def get_parent_folder(folder_name):
current_path = Path(os.path.abspath(__file__)) # get the path of the current script

while current_path.name != folder_name: # while the current folder name is not 'folder_name'
current_path = current_path.parent # go up one level

return current_path.parent

def setup_workspace(args) -> str:
"""
Creates & returns the path to the project workspace.
:param args: may contain 'root' key
"""
workspace = args.get('workspace')
if workspace:
project_path = workspace
else:
root = args.get('root') or get_parent_folder('pilot')
name = args.get('name', 'default_project_name')
project_path = create_directory(os.path.join(root, 'workspace'), name)

try:
save_user_app(args.get('user_id'), args.get('app_id'), project_path)
except Exception as e:
print(f'Error saving user app: {str(e)}')

print(os.path.basename(project_path), type='project_folder_name')
return project_path

def create_directory(parent_directory, new_directory):
new_directory_path = os.path.join(parent_directory, new_directory)
os.makedirs(new_directory_path, exist_ok=True)

return new_directory_path

def count_lines_of_code(files):
return sum(len(file['content'].splitlines()) for file in files)
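
A hedged usage sketch for setup_workspace above; it assumes the custom print from utils/custom_print.py is installed (the built-in print does not accept the type= keyword used inside setup_workspace), and the ids below are illustrative.

args = {'user_id': 'some-user-uuid', 'app_id': 'some-app-uuid', 'name': 'my_app'}
project_path = setup_workspace(args)   # <parent of the pilot folder>/workspace/my_app, created if missing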

File: function_calling.py
import json
import re
from typing import Union, TypeVar, List, Dict, Literal, Optional, TypedDict, Callable

JsonTypeBase = Union[str, int, float, bool, None, List["JsonType"], Dict[str, "JsonType"]]
JsonType = TypeVar("JsonType", bound=JsonTypeBase)

class FunctionParameters(TypedDict):
"""Function parameters"""

type: Literal["object"]
properties: dict[str, JsonType]
required: Optional[list[str]]
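
A hedged example of a value matching the FunctionParameters TypedDict above; the property names are purely illustrative.

example_parameters: FunctionParameters = {
    "type": "object",
    "properties": {
        "name": {"type": "string", "description": "Name of the file to create"},
        "content": {"type": "string", "description": "Full contents of the file"},
    },
    "required": ["name", "content"],
}
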
File: ignore.py
from fnmatch import fnmatch
import os.path
from typing import Optional

from const.common import IGNORE_PATHS, IGNORE_SIZE_THRESHOLD

class IgnoreMatcher:
def __init__(self,
ignore_paths: Optional[list[str]] = None,
*,
root_path: Optional[str] = None,
ignore_binaries: bool = True,
ignore_large_files: bool = True,
):
"""
Initialize the IgnoreMatcher object.

The passed paths (optional) are *added* to the list of
ignore paths from `const.common.IGNORE_PATHS`.

:param ignore_paths: List of paths to ignore (optional)
"""
if ignore_paths is None:
ignore_paths = []

self.ignore_paths = ignore_paths + IGNORE_PATHS
self.ignore_binaries = ignore_binaries
self.ignore_large_files = ignore_large_files
self.root_path = root_path

def ignore(self, path: str) -> bool:
"""
Check if the given path matches any of the ignore patterns.

Specified path can be either the full path, or a relative path
(if root_path was set in the constructor).

:param path: Path to the file or directory to check
:return: True if the path matches any of the ignore patterns, False otherwise
"""

# Turn into absolute (full) path
if self.root_path and not path.startswith(self.root_path):
path = os.path.join(self.root_path, path)

if self.is_in_ignore_list(path):
return True

if self.ignore_large_files and self.is_large_file(path):
return True

if self.ignore_binaries and self.is_binary(path):
return True

return False

def is_in_ignore_list(self, path: str) -> bool:
"""
Check if the given path matches any of the ignore patterns.

:param path: The path to the file or directory to check
File: llm_connection.py
import re
import requests
import os
import sys
import time
import json
import tiktoken
from prompt_toolkit.styles import Style

from jsonschema import validate, ValidationError
from utils.style import color_red, color_yellow
from typing import List
from const.llm import MAX_GPT_MODEL_TOKENS, API_CONNECT_TIMEOUT, API_READ_TIMEOUT
from const.messages import AFFIRMATIVE_ANSWERS
from logger.logger import logger, logging
from helpers.exceptions import TokenLimitError, ApiKeyNotDefinedError, ApiError
from utils.utils import fix_json, get_prompt
from utils.function_calling import add_function_calls_to_request, FunctionCallSet, FunctionType
from utils.questionary import styled_text

from .telemetry import telemetry

tokenizer = tiktoken.get_encoding("cl100k_base")

def get_tokens_in_messages(messages: List[dict]) -> int:
tokenized_messages = [tokenizer.encode(message['content']) for message in messages]
return sum(len(tokens) for tokens in tokenized_messages)

# TODO: not used anywhere
def num_tokens_from_functions(functions):
"""Return the number of tokens used by a list of functions."""
num_tokens = 0
for function in functions:
function_tokens = len(tokenizer.encode(function['name']))
function_tokens += len(tokenizer.encode(function['description']))

if 'parameters' in function:
parameters = function['parameters']
if 'properties' in parameters:
for propertiesKey in parameters['properties']:
function_tokens += len(tokenizer.encode(propertiesKey))
v = parameters['properties'][propertiesKey]
for field in v:
if field == 'type':
function_tokens += 2
function_tokens += len(tokenizer.encode(v['type']))
elif field == 'description':
function_tokens += 2
function_tokens += len(tokenizer.encode(v['description']))
elif field == 'enum':
function_tokens -= 3
for o in v['enum']:
function_tokens += 3
function_tokens += len(tokenizer.encode(o))
function_tokens += 11

num_tokens += function_tokens

num_tokens += 12
return num_tokens
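
A hedged usage sketch for get_tokens_in_messages above, using the cl100k_base tokenizer already initialized in this file; the message contents are illustrative.

messages = [
    {"role": "system", "content": "You are a helpful coding assistant."},
    {"role": "user", "content": "Write a hello-world Express server."},
]
print(get_tokens_in_messages(messages))  # counts tokens in the 'content' fields only
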
File: print.py
import re

def print_task_progress(index, num_of_tasks, description, task_source, status, source_index=1):
"""
Print task progress in extension.

:param index: Index of the task.
:param num_of_tasks: Number of tasks.
:param description: Description of the task.
:param task_source: Source of the task, one of: 'app', 'feature', 'debugger', 'troubleshooting', 'review'.
:param status: Status of the task, can be 'in_progress' or 'done'.
:param source_index: Index of the source.

:return: None
"""
print({'task': {
'index': index,
'num_of_tasks': num_of_tasks,
'description': description,
'source': task_source,
'status': status,
'source_index': source_index,
}}, type='progress')

def print_step_progress(index, num_of_steps, step, task_source):
"""
Print step progress in extension.

:param index: Index of the step.
:param num_of_steps: Number of steps.
:param step: Name of the step.
:param task_source: Source of the task, one of: 'app', 'feature', 'debugger', 'troubleshooting', 'review'.

:return: None
"""
print({'step': {
'index': index,
'num_of_steps': num_of_steps,
'step': step,
'source': task_source,
}}, type='progress')

def remove_ansi_codes(s: str) -> str:
ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
# Check if the input is a string
if isinstance(s, str):
return ansi_escape.sub('', s)
else:
# If the input is not a string, return the input as is
return s
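
Hedged usage sketches for the helpers above; the two progress calls assume the custom print from utils/custom_print.py is installed, since the built-in print does not accept the type= keyword.

print_task_progress(1, 5, "Set up the project skeleton", "app", "in_progress")
print_step_progress(1, 3, "create_files", "app")
remove_ansi_codes("\x1b[31mError:\x1b[0m boom")   # -> "Error: boom"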

File: questionary.py
import platform
import questionary
import sys
from database.database import save_user_input
from utils.style import style_config
from utils.print import remove_ansi_codes
File: settings.py
import ast
import json
from logging import getLogger
from os import getenv, makedirs
from pathlib import Path
import sys
from typing import Any, Optional

from dotenv import load_dotenv

load_dotenv()

log = getLogger(__name__)

AVAILABLE_SETTINGS = [
"telemetry",
"openai_api_key",
]

class Settings:
"""
Application settings

This object holds all the settings for the application, whether they are
loaded from the config file, set via environment variables or the command
line arguments.

Available settings are listed in the `AVAILABLE_SETTINGS` list.

This is a singleton object, use it by importing the instance
directly from the module:

>>> from utils.settings import settings

To get a setting:

>>> settings.openai_api_key

To get all settings as a dictionary:

>>> dict(settings)

To set (update) one setting:

>>> settings.openai_api_key = "test_key"

To update multiple settings at once:

>>> settings.update(openai_api_key="test_key", telemetry=None)

Note: updating settings will not save them to the config file.
To do that, use the `loader.save()` method:

>>> from utils.settings import loader
>>> loader.save("openai_api_key", "telemetry")

To see all available settings:

>>> from utils.settings import AVAILABLE_SETTINGS
>>> print(AVAILABLE_SETTINGS)
"""
File: spinner.py
from yaspin import yaspin
from yaspin.spinners import Spinners

def spinner_start(text="Processing..."):
spinner = yaspin(Spinners.line, text=text)
spinner.start()
return spinner

def spinner_stop(spinner):
if spinner is not None:
spinner.stop()
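
A hedged usage sketch for the yaspin-based helpers above.

import time

spinner = spinner_start("Installing dependencies...")
try:
    time.sleep(2)            # stand-in for the real long-running work
finally:
    spinner_stop(spinner)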

File: style.py
from colorama import Fore, Style as ColoramaStyle, init
from enum import Enum
from questionary import Style

# Initialize colorama. Ensures that ANSI codes work on Windows systems.
init(autoreset=True)

class Theme(Enum):
"""
Enum representing themes: DARK, LIGHT or YELLOW.
"""
DARK = 'dark'
LIGHT = 'light'
YELLOW = 'yellow'

class ColorName(Enum):
"""
Enum representing color names and their corresponding ANSI color codes.
Each color has a normal and a light version, indicated by the two elements in the tuple.
"""
RED = (Fore.RED, Fore.LIGHTRED_EX)
GREEN = (Fore.GREEN, Fore.LIGHTGREEN_EX)
YELLOW = (Fore.YELLOW, Fore.LIGHTYELLOW_EX)
BLUE = (Fore.BLUE, Fore.LIGHTBLUE_EX)
CYAN = (Fore.CYAN, Fore.LIGHTCYAN_EX)
WHITE = (Fore.WHITE, Fore.LIGHTWHITE_EX)

THEME_STYLES = {
# Style configurations for DARK theme
Theme.DARK: Style.from_dict({
'question': '#FFFFFF bold', # the color and style of the question - White
'answer': '#FF910A bold', # the color and style of the answer - Dark Orange / Pumpkin
'pointer': '#FF4500 bold', # the color and style of the pointer - Orange Red
'highlighted': '#63CD91 bold', # the color and style of the highlighted option - Medium Aquamarine
'instruction': '#FFFF00 bold' # the color and style of the instruction - Yellow
}),
# Style configurations for LIGHT theme
Theme.LIGHT: Style.from_dict({
'question': '#000000 bold', # the color and style of the question - Black
'answer': '#FFB74D bold', # the color and style of the answer - Light Orange
'pointer': '#FF7043 bold', # the color and style of the pointer - Light Red
'highlighted': '#AED581 bold', # the color and style of the highlighted option - Light Green
'instruction': '#757575 bold' # the color and style of the instruction - Grey
File: task.py
import json
from uuid import uuid4

from utils.telemetry import telemetry
from utils.exit import trace_code_event
from const.telemetry import LOOP_THRESHOLD

class Task:
"""
Task data structure to store information about the current task. The task data structure is sent to telemetry.
Currently used to trace big loops in the code.

>>> from utils.task import Task

To set up a new task:

>>> task = Task()

To set a value:

>>> task.set('task_description', 'test')

To increment a value:

>>> task.inc('steps')

To start a new task:

>>> task.start_new_task('test', 1)

When debugging recursion happens inside a task (see pilot/helpers/Debugger.py) we add a debugging task to the
task data structure. To add a debugging task:

>>> task.add_debugging_task(1, {'command': 'test'}, 'This is not working', 'Command is not working')

To clear the task:

>>> task.clear()

To send the task:

>>> task.send()

Note: the task will be sent automatically if the number of steps exceeds the threshold
"""

def __init__(self):
self.initial_data = {
'task_description': '',
'task_number': 0,
'steps': 0,
'iterations': 0,
'debugging': [],
}
self.data = self.initial_data.copy()
self.ping_extension = True

def set(self, key: str, value: any):
"""
Set a value in the task data
File: telemetry.py
from logging import getLogger
from pathlib import Path
import sys
import time
import traceback
from typing import Any
from uuid import uuid4

import requests

from .settings import settings, version, config_path
from const.telemetry import LARGE_REQUEST_THRESHOLD, SLOW_REQUEST_THRESHOLD

log = getLogger(__name__)

class Telemetry:
"""
Anonymous telemetry.

See ../../docs/TELEMETRY.md for more information on what is collected
and how to disable it on a configuration level.

This class is a singleton, use the `telemetry` global variable to access it:

>>> from utils.telemetry import telemetry

To set up telemetry (only once, at GPT-Pilot setup), use the
`telemetry.setup()` method:

>>> telemetry.setup()

To record start of application creation process:

>>> telemetry.start()

To record data or increase counters:

>>> telemetry.set("model", "gpt-4")


>>> telemetry.inc("num_llm_requests", 5)

To stop recording and send the data:

>>> telemetry.stop()
>>> telemetry.send()

Note: all methods are no-ops if telemetry is not enabled.
"""

DEFAULT_ENDPOINT = "https://api.pythagora.io/telemetry"
MAX_CRASH_FRAMES = 3

def __init__(self):
self.enabled = False
self.telemetry_id = None
self.endpoint = None
self.clear_data()

if settings.telemetry is not None:
self.enabled = settings.telemetry.get("enabled", False)
self.telemetry_id = settings.telemetry.get("id")
self.endpoint = settings.telemetry.get("endpoint")
File: test_arguments.py
import pytest
from unittest.mock import patch, mock_open
import uuid
from .arguments import get_email, username_to_uuid

def test_email_found_in_gitconfig():
mock_file_content = """
[user]
name = test_user
email = [email protected]
"""
with patch('os.path.exists', return_value=True):
with patch('builtins.open', mock_open(read_data=mock_file_content)):
assert get_email() == "[email protected]"

def test_email_not_found_in_gitconfig():
mock_file_content = """
[user]
name = test_user
"""
mock_uuid = "12345678-1234-5678-1234-567812345678"

with patch('os.path.exists', return_value=True):
with patch('builtins.open', mock_open(read_data=mock_file_content)):
with patch.object(uuid, "uuid4", return_value=mock_uuid):
assert get_email() == mock_uuid

def test_gitconfig_not_present():
mock_uuid = "12345678-1234-5678-1234-567812345678"

with patch('os.path.exists', return_value=False):
with patch.object(uuid, "uuid4", return_value=mock_uuid):
assert get_email() == mock_uuid

def test_username_to_uuid():
assert username_to_uuid("test_user") == "31676025-316f-b555-e0bf-a12f0bcfd0ea"

File: test_files.py
import os
from unittest.mock import patch
from utils.files import setup_workspace

def mocked_create_directory(path, exist_ok=True):
return

def mocked_abspath(file):
return "/root_path/pilot/helpers"

@patch('utils.files.os.makedirs', side_effect=mocked_create_directory)
def test_setup_workspace_with_existing_workspace(mock_makedirs):
args = {'workspace': '/some/directory', 'name': 'sample'}
result = setup_workspace(args)
assert result == '/some/directory'
File: test_function_calling.py
from const.function_calls import ARCHITECTURE, USER_TASKS
from utils.llm_connection import clean_json_response
from .function_calling import parse_agent_response, JsonPrompter

class TestFunctionCalling:
def test_parse_agent_response_text(self):
# Given
response = {'text': 'Hello world!'}

# When
response = parse_agent_response(response, None)

# Then
assert response == 'Hello world!'

def test_parse_agent_response_json(self):
# Given
response = {'text': '{"greeting": "Hello world!"}'}
function_calls = {'definitions': [], 'functions': {}}

# When
response = parse_agent_response(response, function_calls)

# Then
assert response == {'greeting': 'Hello world!'}

def test_parse_agent_response_json_markdown(self):
# Given
response = {'text': '```json\n{"greeting": "Hello world!"}\n```'}
function_calls = {'definitions': [], 'functions': {}}

# When
response['text'] = clean_json_response(response['text'])
response = parse_agent_response(response, function_calls)

# Then
assert response == {'greeting': 'Hello world!'}

def test_parse_agent_response_markdown(self):
# Given
response = {'text': '```\n{"greeting": "Hello world!"}\n```'}
function_calls = {'definitions': [], 'functions': {}}

# When
response['text'] = clean_json_response(response['text'])
response = parse_agent_response(response, function_calls)

# Then
assert response == {'greeting': 'Hello world!'}

def test_parse_agent_response_multiple_args(self):
# Given
response = {'text': '{"greeting": "Hello", "name": "John"}'}
function_calls = {'definitions': [], 'functions': {}}

# When
response = parse_agent_response(response, function_calls)

# Then
assert response['greeting'] == 'Hello'
assert response['name'] == 'John'
File: test_llm_connection.py
import builtins
from json import JSONDecodeError
import os

import pytest
from unittest.mock import call, patch, Mock
from dotenv import load_dotenv
from jsonschema import ValidationError
from const.function_calls import ARCHITECTURE, DEVELOPMENT_PLAN
from helpers.AgentConvo import AgentConvo
from helpers.Project import Project
from helpers.agents.Architect import Architect
from helpers.agents.TechLead import TechLead
from utils.function_calling import parse_agent_response, FunctionType
from test.test_utils import assert_non_empty_string
from test.mock_questionary import MockQuestionary
from utils.llm_connection import create_gpt_chat_completion, stream_gpt_completion, \
assert_json_response, assert_json_schema, clean_json_response, retry_on_exception
from main import get_custom_print

load_dotenv()
os.environ.pop("AUTOFIX_FILE_PATHS", None)

def test_clean_json_response_True_False():
# Given a JSON response with Title Case True and False
response = '''
```json
{
"steps": [
{
"type": "command",
"command": {
"command": "git init",
"daemon": False,
"timeout": 3000,
"boolean": False
},
"another_True": True,
"check_if_fixed": True
}
]
}
```
'''

# When
response = clean_json_response(response)

# Then the markdown is removed
assert response.startswith('{')
assert response.endswith('}')
# And the booleans are converted to lowercase
assert '"daemon":false,' in response
assert '"boolean":false' in response
assert '"another_True":true,' in response
assert '"check_if_fixed":true' in response

def test_clean_json_response_boolean_in_python():
# Given a JSON response with Python booleans in a content string
response = '''
File: test_utils.py
from .utils import should_execute_step

class TestShouldExecuteStep:
def test_no_step_arg(self):
assert should_execute_step(None, 'project_description') is True
assert should_execute_step(None, 'architecture') is True
assert should_execute_step(None, 'coding') is True

def test_skip_step(self):
assert should_execute_step('architecture', 'project_description') is False
assert should_execute_step('architecture', 'architecture') is True
assert should_execute_step('architecture', 'coding') is True

def test_unknown_step(self):
assert should_execute_step('architecture', 'unknown') is False
assert should_execute_step('unknown', 'project_description') is False
assert should_execute_step('unknown', None) is False
assert should_execute_step(None, None) is False
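
A hedged sketch, inferred purely from the assertions above, of how should_execute_step could behave against an ordered STEPS list; the real implementation in utils/utils.py may differ.

STEPS = ['project_description', 'architecture', 'coding']   # truncated stand-in for const.common.STEPS

def should_execute_step_sketch(arg_step, current_step):
    if current_step not in STEPS:
        return False                     # unknown or missing current step is never executed
    if arg_step is None:
        return True                      # no step argument: execute every known step
    if arg_step not in STEPS:
        return False                     # unknown step argument skips everything
    return STEPS.index(current_step) >= STEPS.index(arg_step)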

File: utils.py
# utils/utils.py

import datetime
import os
import platform
import uuid
import distro
import json
import hashlib
import re
import copy
from jinja2 import Environment, FileSystemLoader
from .style import color_green

from const.llm import MAX_QUESTIONS, END_RESPONSE
from const.common import ROLES, STEPS
from logger.logger import logger

prompts_path = os.path.join(os.path.dirname(__file__), '..', 'prompts')
file_loader = FileSystemLoader(prompts_path)
env = Environment(loader=file_loader)

def capitalize_first_word_with_underscores(s):
# Split the string into words based on underscores.
words = s.split('_')

# Capitalize the first word and leave the rest unchanged.
words[0] = words[0].capitalize()

# Join the words back into a string with underscores.
capitalized_string = '_'.join(words)

return capitalized_string
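
A quick usage example for capitalize_first_word_with_underscores above:

assert capitalize_first_word_with_underscores('user_stories') == 'User_stories'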

def get_prompt(prompt_name, original_data=None):
data = copy.deepcopy(original_data) if original_data is not None else {}

get_prompt_components(data)
