{ "env_data_dir": ".smart-env", "new_user": false, "legacy_transformers": false, "enable_mobile": false, "actions": { "lookup": true }, "system_prompts_folder": "smart prompts", "smart_chat_folder": "smart-chats", "smart_chat_folder_last": "smart-chats", "chat_model_platform_key": "custom_local", "open_router": { "api_key": "", "model_name": "meta-llama/llama-3-70b-instruct", "description": "Meta: Llama 3 70B Instruct", "type": "API", "endpoint": "https://openrouter.ai/api/v1/chat/completions", "streaming": true, "adapter": "OpenRouter", "fetch_models": true, "default_model": "mistralai/mistral-7b-instruct:free", "signup_url": "https://accounts.openrouter.ai/sign-up?redirect_url=https%3A%2F%2Fopenrouter.ai%2Fkeys", "key": "meta-llama/llama-3-70b-instruct", "max_input_tokens": 8192, "actions": false, "multimodal": false, "raw": { "id": "meta-llama/llama-3-70b-instruct", "name": "Meta: Llama 3 70B Instruct", "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 70B instruct-tuned version was optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).", "pricing": { "prompt": "0.00000051", "completion": "0.00000074", "image": "0", "request": "0" }, "context_length": 8192, "architecture": { "modality": "text->text", "tokenizer": "Llama3", "instruct_type": "llama3" }, "top_provider": { "max_completion_tokens": 8192, "is_moderated": false }, "per_request_limits": null } }, "api_key": "", "excluded_headings": "", "show_full_path": false, "expanded_view": false, "language": "en", "version": "2.2.76", "chat_folder": "smart chat", "local_embedding_max_tokens": 2048, "embedding_file_per_note": false, "embed_input_min_chars": 100, "multi_heading_blocks": true, "log_render": false, "log_render_files": false, "recently_sent_retry_notice": false, "free_chat_uses": 2, "custom_local": { "model_name": "mistral:latest", "description": "Custom Local (OpenAI format)", "type": "API", "hostname": "localhost", "port": 11434, "protocol": "http", "streaming": false, "path": "/api/chat", "max_input_tokens": 8000 }, "env_data_dir_last": ".smart-env" }