submission_id: cognitivecomputations-do_9214_v1
developer_uid: Azazelle
alignment_samples: 0
best_of: 4
celo_rating: 1082.67
display_name: dolphin-2_9_3-mistral-nemo-12b
formatter: {'memory_template': '<|im_start|>system\n{memory}<|im_end|>\n', 'prompt_template': '<|im_start|>user\n{prompt}<|im_end|>\n', 'bot_template': '<|im_start|>assistant\n{bot_name}: {message}<|im_end|>\n', 'user_template': '<|im_start|>user\n{user_name}: {message}<|im_end|>\n', 'response_template': '<|im_start|>assistant\n{bot_name}:', 'truncate_by_message': False}
generation_params: {'temperature': 0.7, 'top_p': 1.0, 'min_p': 0.07, 'top_k': 1024, 'presence_penalty': 0.03, 'frequency_penalty': 0.01, 'stopping_words': ['\n', '<|im_end|>'], 'max_input_tokens': 512, 'best_of': 4, 'max_output_tokens': 64, 'reward_max_token_input': 1024}
is_internal_developer: false
language_model: cognitivecomputations/dolphin-2.9.3-mistral-nemo-12b
max_input_tokens: 512
max_output_tokens: 64
model_architecture: MistralForCausalLM
model_group: cognitivecomputations/do
model_name: dolphin-2_9_3-mistral-nemo-12b
model_num_parameters: 12772090880.0
model_repo: cognitivecomputations/dolphin-2.9.3-mistral-nemo-12b
model_size: 13B
num_battles: 13036
num_wins: 4457
propriety_score: 0.7294573643410853
propriety_total_count: 1290.0
ranking_group: single
reward_formatter: {'bot_template': '{bot_name}: {message}\\n', 'memory_template': "{bot_name}'s Persona: {memory}\\n####\\n", 'prompt_template': '{prompt}\\n<START>\\n', 'response_template': '{bot_name}:', 'truncate_by_message': False, 'user_template': '{user_name}: {message}\\n'}
reward_repo: ChaiML/reward_gpt2_medium_preference_24m_e2
status: torndown
submission_type: basic
timestamp: "2024-07-28T18:51:09+00:00"
us_pacific_date: "2024-07-28"
win_ratio: 0.3418993556305615
# Resubmit model