submission_id: undi95-meta-llama-3-70b_6209_v30
developer_uid: Jellywibble
alignment_samples: 0
best_of: 2
celo_rating: 1212.68
display_name: meta-llama-1500k
formatter: {'memory_template': "<|im_start|>system\n{bot_name}'s Persona: {memory}<|im_end|>\n", 'prompt_template': '<|im_start|>system\n{prompt}<|im_end|>\n', 'bot_template': '<|im_start|>assistant\n{bot_name}: {message}<|im_end|>\n', 'user_template': '<|im_start|>user\n{user_name}: {message}<|im_end|>\n', 'response_template': '<|im_start|>assistant\n{bot_name}:', 'truncate_by_message': False}
generation_params: {'temperature': 1.0, 'top_p': 1.0, 'min_p': 0.0, 'top_k': 50, 'presence_penalty': 0.0, 'frequency_penalty': 0.0, 'stopping_words': ['\n', '<|im_end|>', '<|im_start|>', '\n\n'], 'max_input_tokens': 1500, 'best_of': 2, 'max_output_tokens': 64, 'reward_max_token_input': 256}
is_internal_developer: True
language_model: Undi95/Meta-Llama-3-70B-Instruct-hf
max_input_tokens: 1500
max_output_tokens: 64
model_architecture: LlamaForCausalLM
model_group: Undi95/Meta-Llama-3-70B-
model_name: meta-llama-1500k
model_num_parameters: 70553706496.0
model_repo: Undi95/Meta-Llama-3-70B-Instruct-hf
model_size: 71B
num_battles: 177504
num_wins: 85959
propriety_score: 0.7321251241310824
propriety_total_count: 16112.0
ranking_group: single
reward_formatter: {'bot_template': '{bot_name}: {message}\n', 'memory_template': "''", 'prompt_template': "''", 'response_template': '{bot_name}:', 'truncate_by_message': False, 'user_template': '{user_name}: {message}\n'}
reward_repo: ChaiML/gpt2_xl_pairwise_89m_step_347634
status: torndown
submission_type: basic
timestamp: "2024-07-25T17:49:13+00:00"
us_pacific_date: "2024-07-25"
win_ratio: 0.4842651433207139
# Resubmit model