---
model_url: https://huggingface.co/openaccess-ai-collective/minotaur-13b

# typing-effect delay (seconds) for streamed output in the UI
typer:
  delay: 0.1

# RunPod serverless endpoint that serves the model
runpod:
  endpoint_id: bibqwcb2lynbsb
  prefer_async: true

# generation parameters passed to the LLM backend
llm:
  top_k: 40
  top_p: 0.9
  temperature: 0.8
  repetition_penalty:
  last_n_tokens:
  seed: -1        # -1 picks a random seed per request
  batch_size: 8
  threads: -1     # -1 auto-selects the CPU thread count
  stop:
    - ""

# request queue settings for the UI
queue:
  max_size: 16
  concurrency_count: 3 # recommend setting this no larger than your current
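
# A minimal sketch of how an app could read this file, assuming Python with
# PyYAML; the filename and key access below are illustrative assumptions, not
# the project's actual entry point:
#
#   import yaml
#
#   with open("config.yml") as f:          # hypothetical config path
#       cfg = yaml.safe_load(f)
#
#   sampling = cfg["llm"]                   # e.g. sampling["temperature"] == 0.8
#   endpoint_id = cfg["runpod"]["endpoint_id"]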