###################### OpenHands Configuration Example ###################### | |
# | |
# All settings have default values, so you only need to uncomment and | |
# modify what you want to change | |
# The fields within each section are grouped by related functionality.
# | |
############################################################################## | |
#################################### Core #################################### | |
# General core configurations | |
############################################################################## | |
[core] | |
# API key for E2B | |
#e2b_api_key = "" | |
# API key for Modal | |
#modal_api_token_id = "" | |
#modal_api_token_secret = "" | |
# Base path for the workspace | |
workspace_base = "./workspace" | |
# Cache directory path | |
#cache_dir = "/tmp/cache" | |
# Reasoning effort for o1 models (low, medium, high, or not set) | |
#reasoning_effort = "medium" | |
# Debugging enabled | |
#debug = false | |
# Disable color in terminal output | |
#disable_color = false | |
# Enable saving and restoring the session when run from CLI | |
#enable_cli_session = false | |
# Path to store trajectories, can be a folder or a file | |
# If it's a folder, the session id will be used as the file name | |
#save_trajectory_path = "./trajectories"
# Path to replay a trajectory, must be a file path | |
# If provided, trajectory will be loaded and replayed before the | |
# agent responds to any user instruction | |
#replay_trajectory_path = "" | |
# File store path | |
#file_store_path = "/tmp/file_store" | |
# File store type | |
#file_store = "memory" | |
# List of allowed file extensions for uploads | |
#file_uploads_allowed_extensions = [".*"] | |
# Maximum file size for uploads, in megabytes | |
#file_uploads_max_file_size_mb = 0 | |
# Maximum budget per task, 0.0 means no limit | |
#max_budget_per_task = 0.0 | |
# Maximum number of iterations | |
#max_iterations = 100 | |
# Path to mount the workspace in the sandbox | |
#workspace_mount_path_in_sandbox = "/workspace" | |
# Path to mount the workspace | |
#workspace_mount_path = "" | |
# Path to rewrite the workspace mount path to | |
#workspace_mount_rewrite = "" | |
# Run as openhands | |
#run_as_openhands = true | |
# Runtime environment | |
#runtime = "eventstream" | |
# Name of the default agent | |
#default_agent = "CodeActAgent" | |
# JWT secret for authentication | |
#jwt_secret = "" | |
# Restrict file types for file uploads
#file_uploads_restrict_file_types = false
#################################### LLM ##################################### | |
# Configuration for LLM models (group name starts with 'llm') | |
# use 'llm' for the default LLM config | |
############################################################################## | |
[llm] | |
# AWS access key ID | |
#aws_access_key_id = "" | |
# AWS region name | |
#aws_region_name = "" | |
# AWS secret access key | |
#aws_secret_access_key = "" | |
# API key to use (For Headless / CLI only - In Web this is overridden by Session Init) | |
api_key = "your-api-key" | |
# API base URL (For Headless / CLI only - In Web this is overridden by Session Init) | |
#base_url = "" | |
# API version | |
#api_version = "" | |
# Cost per input token | |
#input_cost_per_token = 0.0 | |
# Cost per output token | |
#output_cost_per_token = 0.0 | |
# Custom LLM provider | |
#custom_llm_provider = "" | |
# Embedding API base URL | |
#embedding_base_url = "" | |
# Embedding deployment name | |
#embedding_deployment_name = "" | |
# Embedding model to use | |
embedding_model = "local" | |
# Maximum number of characters in an observation's content | |
#max_message_chars = 10000 | |
# Maximum number of input tokens | |
#max_input_tokens = 0 | |
# Maximum number of output tokens | |
#max_output_tokens = 0 | |
# Model to use. (For Headless / CLI only - In Web this is overridden by Session Init) | |
model = "gpt-4o" | |
# Number of retries to attempt when an operation fails with the LLM. | |
# Increase this value to allow more attempts before giving up | |
#num_retries = 8 | |
# Maximum wait time (in seconds) between retry attempts | |
# This caps the exponential backoff to prevent excessively long wait times
#retry_max_wait = 120 | |
# Minimum wait time (in seconds) between retry attempts | |
# This sets the initial delay before the first retry | |
#retry_min_wait = 15 | |
# Multiplier for exponential backoff calculation | |
# The wait time increases by this factor after each failed attempt | |
# A value of 2.0 means each retry waits twice as long as the previous one | |
#retry_multiplier = 2.0 | |
# Drop any unmapped (unsupported) params without causing an exception | |
#drop_params = false | |
# Modify params for litellm to do transformations like adding a default message, when a message is empty. | |
# Note: this setting is global, unlike drop_params, it cannot be overridden in each call to litellm. | |
#modify_params = true | |
# Using the prompt caching feature if provided by the LLM and supported | |
#caching_prompt = true | |
# Base URL for the OLLAMA API | |
#ollama_base_url = "" | |
# Temperature for the API | |
#temperature = 0.0 | |
# Timeout for the API | |
#timeout = 0 | |
# Top p for the API | |
#top_p = 1.0 | |
# If model is vision capable, this option allows to disable image processing (useful for cost reduction). | |
#disable_vision = true | |
# Custom tokenizer to use for token counting | |
# https://docs.litellm.ai/docs/completion/token_usage | |
#custom_tokenizer = "" | |
# Whether to use native tool calling if supported by the model. Can be true or false;
# leave the key unset (commented out) for the default, which chooses the model's
# behavior based on evaluation. (TOML has no None/null value, so an unset key is
# how "None" is expressed here.)
# ATTENTION: Based on evaluation, enabling native function calling may lead to worse results
# in some scenarios. Use with caution and consider testing with your specific use case.
# https://github.com/All-Hands-AI/OpenHands/pull/4711
#native_tool_calling = true
[llm.gpt4o-mini]
api_key = "your-api-key"
model = "gpt-4o-mini"
#################################### Agent ################################### | |
# Configuration for agents (group name starts with 'agent') | |
# Use 'agent' for the default agent config | |
# otherwise, group name must be `agent.<agent_name>` (case-sensitive), e.g. | |
# agent.CodeActAgent | |
############################################################################## | |
[agent] | |
# whether the browsing tool is enabled | |
codeact_enable_browsing = true | |
# whether the LLM draft editor is enabled | |
codeact_enable_llm_editor = false | |
# whether the IPython tool is enabled | |
codeact_enable_jupyter = true | |
# Name of the micro agent to use for this agent | |
#micro_agent_name = "" | |
# Memory enabled | |
#memory_enabled = false | |
# Memory maximum threads | |
#memory_max_threads = 3 | |
# LLM config group to use | |
#llm_config = 'your-llm-config-group' | |
# Whether to use prompt extension (e.g., microagent, repo/runtime info) at all | |
#enable_prompt_extensions = true | |
# List of microagents to disable | |
#disabled_microagents = [] | |
[agent.RepoExplorerAgent] | |
# Example: use a cheaper model for RepoExplorerAgent to reduce cost, especially | |
# useful when an agent doesn't demand high quality but uses a lot of tokens | |
llm_config = 'gpt3' | |
#################################### Sandbox ################################### | |
# Configuration for the sandbox | |
############################################################################## | |
[sandbox] | |
# Sandbox timeout in seconds | |
#timeout = 120 | |
# Sandbox user ID | |
#user_id = 1000 | |
# Container image to use for the sandbox | |
#base_container_image = "nikolaik/python-nodejs:python3.12-nodejs22" | |
# Use host network | |
#use_host_network = false | |
# runtime extra build args | |
#runtime_extra_build_args = ["--network=host", "--add-host=host.docker.internal:host-gateway"] | |
# Enable auto linting after editing | |
#enable_auto_lint = false | |
# Whether to initialize plugins | |
#initialize_plugins = true | |
# Extra dependencies to install in the runtime image | |
#runtime_extra_deps = "" | |
# Environment variables to set at the launch of the runtime | |
#runtime_startup_env_vars = {} | |
# BrowserGym environment to use for evaluation | |
#browsergym_eval_env = "" | |
#################################### Security ################################### | |
# Configuration for security features | |
############################################################################## | |
[security] | |
# Enable confirmation mode (For Headless / CLI only - In Web this is overridden by Session Init) | |
#confirmation_mode = false | |
# The security analyzer to use (For Headless / CLI only - In Web this is overridden by Session Init) | |
#security_analyzer = "" | |
#################################### Eval #################################### | |
# Configuration for the evaluation, please refer to the specific evaluation | |
# plugin for the available options | |
############################################################################## | |