submission_id: nousresearch-meta-llama_4939_v61
developer_uid: end_to_end_test
best_of: 4
celo_rating: 1185.41
display_name: nousresearch-meta-llama_4939_v61
family_friendly_score: 0.0
formatter: {'memory_template': "{bot_name}'s Persona: {memory}\n####\n", 'prompt_template': '{prompt}\n<START>\n', 'bot_template': '{bot_name}: {message}\n', 'user_template': '{user_name}: {message}\n', 'response_template': '{bot_name}:', 'truncate_by_message': False}
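A minimal sketch of how these templates could be assembled into a single model prompt, assuming the parts are applied in the order persona memory, scenario prompt, chat history, response prefix (the order is not documented here; bot_name, user_name, memory and the example messages are illustrative):

    formatter = {
        "memory_template": "{bot_name}'s Persona: {memory}\n####\n",
        "prompt_template": "{prompt}\n<START>\n",
        "bot_template": "{bot_name}: {message}\n",
        "user_template": "{user_name}: {message}\n",
        "response_template": "{bot_name}:",
    }

    def build_prompt(bot_name, user_name, memory, prompt, messages):
        # messages is a list of (speaker, text) pairs, speaker in {"bot", "user"}.
        parts = [
            formatter["memory_template"].format(bot_name=bot_name, memory=memory),
            formatter["prompt_template"].format(prompt=prompt),
        ]
        for speaker, text in messages:
            template = formatter["bot_template"] if speaker == "bot" else formatter["user_template"]
            parts.append(template.format(bot_name=bot_name, user_name=user_name, message=text))
        # The response template leaves the prompt hanging on "{bot_name}:" so the
        # model completes the bot's next turn.
        parts.append(formatter["response_template"].format(bot_name=bot_name))
        return "".join(parts)

    print(build_prompt("Eva", "Anon", "a curious android", "Casual roleplay chat.",
                       [("user", "Hi there!"), ("bot", "Hello! How are you?")]))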
generation_params: {'temperature': 1.0, 'top_p': 0.99, 'min_p': 0.1, 'top_k': 40, 'presence_penalty': 0.0, 'frequency_penalty': 0.0, 'stopping_words': ['\n'], 'max_input_tokens': 512, 'best_of': 4, 'max_output_tokens': 64}
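Purely as an illustration, these sampling settings map one-to-one onto vLLM's SamplingParams; the model is actually served through MK1's flywheel engine (see the MKMLizer logs below), so this is a sketch of an equivalent configuration rather than the deployed code:

    from vllm import LLM, SamplingParams

    # Settings copied from generation_params above. With n=1 and best_of=4,
    # vLLM samples four candidates and keeps the one with the highest
    # cumulative log-probability; the production stack may rank candidates
    # differently (for example with a reward model).
    params = SamplingParams(
        n=1,
        best_of=4,
        temperature=1.0,
        top_p=0.99,
        min_p=0.1,
        top_k=40,
        presence_penalty=0.0,
        frequency_penalty=0.0,
        stop=["\n"],
        max_tokens=64,
    )

    # max_model_len sized to max_input_tokens (512) + max_output_tokens (64).
    llm = LLM(model="NousResearch/Meta-Llama-3.1-8B-Instruct", max_model_len=576)
    print(llm.generate(["Eva's Persona: ..."], params)[0].outputs[0].text)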
gpu_counts: {'NVIDIA RTX A5000': 1}
ineligible_reason: model is only for e2e test
is_internal_developer: True
language_model: NousResearch/Meta-Llama-3.1-8B-Instruct
latencies: [{'batch_size': 1, 'throughput': 1.026795401597768, 'latency_mean': 0.9738072383403779, 'latency_p50': 0.9779274463653564, 'latency_p90': 1.094647717475891}, {'batch_size': 5, 'throughput': 3.076657707485817, 'latency_mean': 1.609240289926529, 'latency_p50': 1.6065727472305298, 'latency_p90': 1.8021990299224853}, {'batch_size': 10, 'throughput': 4.1353577219297035, 'latency_mean': 2.397553449869156, 'latency_p50': 2.407555937767029, 'latency_p90': 2.6940777778625487}, {'batch_size': 15, 'throughput': 4.5044044545180855, 'latency_mean': 3.277079097032547, 'latency_p50': 3.2826091051101685, 'latency_p90': 3.7991820335388184}, {'batch_size': 20, 'throughput': 4.633745663904793, 'latency_mean': 4.247963181734085, 'latency_p50': 4.275354027748108, 'latency_p90': 4.838976311683655}, {'batch_size': 25, 'throughput': 4.667055875069499, 'latency_mean': 5.245150204896927, 'latency_p50': 5.247867107391357, 'latency_p90': 5.967671465873718}]
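The throughput_3p7s field below is presumably the throughput this deployment can sustain at a 3.7 s latency target. A rough sketch of reading such a number off a latency sweep by linear interpolation, assuming p90 latency is the target metric (the exact definition is not documented here, and the full profiling run later in the log sweeps many more batch sizes, so this six-point subset will not reproduce 4.64 exactly):

    # (throughput, p90 latency) points rounded from the latencies field above.
    points = [
        (1.027, 1.095), (3.077, 1.802), (4.135, 2.694),
        (4.504, 3.799), (4.634, 4.839), (4.667, 5.968),
    ]

    def throughput_at(target_latency, points):
        # Linear interpolation of throughput at the requested latency.
        points = sorted(points, key=lambda p: p[1])
        for (thr_lo, lat_lo), (thr_hi, lat_hi) in zip(points, points[1:]):
            if lat_lo <= target_latency <= lat_hi:
                frac = (target_latency - lat_lo) / (lat_hi - lat_lo)
                return thr_lo + frac * (thr_hi - thr_lo)
        return points[-1][0]  # beyond the measured range

    print(throughput_at(3.7, points))  # ~4.47 from these six points alone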
max_input_tokens: 512
max_output_tokens: 64
model_architecture: LlamaForCausalLM
model_group: NousResearch/Meta-Llama-
model_name: nousresearch-meta-llama_4939_v61
model_num_parameters: 8030261248.0
model_repo: NousResearch/Meta-Llama-3.1-8B-Instruct
model_size: 8B
num_battles: 12104
num_wins: 5113
ranking_group: single
status: torndown
submission_type: basic
throughput_3p7s: 4.64
timestamp: 2024-09-03T03:00:59+00:00
us_pacific_date: 2024-09-02
win_ratio: 0.42242233972240584
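Note: win_ratio is simply num_wins / num_battles, i.e. 5113 / 12104 ≈ 0.4224.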
run pipeline stage MKMLizer
Running pipeline stage MKMLizer
Starting job with name nousresearch-meta-llama-4939-v61-mkmlizer
Waiting for job on nousresearch-meta-llama-4939-v61-mkmlizer to finish
nousresearch-meta-llama-4939-v61-mkmlizer: ╔═════════════════════════════════════════════════════════════════════╗
nousresearch-meta-llama-4939-v61-mkmlizer: ║ [flywheel ASCII-art wordmark] ║
nousresearch-meta-llama-4939-v61-mkmlizer: ║ ║
nousresearch-meta-llama-4939-v61-mkmlizer: ║ Version: 0.10.1 ║
nousresearch-meta-llama-4939-v61-mkmlizer: ║ Copyright 2023 MK ONE TECHNOLOGIES Inc. ║
nousresearch-meta-llama-4939-v61-mkmlizer: ║ https://mk1.ai ║
nousresearch-meta-llama-4939-v61-mkmlizer: ║ ║
nousresearch-meta-llama-4939-v61-mkmlizer: ║ The license key for the current software has been verified as ║
nousresearch-meta-llama-4939-v61-mkmlizer: ║ belonging to: ║
nousresearch-meta-llama-4939-v61-mkmlizer: ║ ║
nousresearch-meta-llama-4939-v61-mkmlizer: ║ Chai Research Corp. ║
nousresearch-meta-llama-4939-v61-mkmlizer: ║ Account ID: 7997a29f-0ceb-4cc7-9adf-840c57b4ae6f ║
nousresearch-meta-llama-4939-v61-mkmlizer: ║ Expiration: 2024-10-15 23:59:59 ║
nousresearch-meta-llama-4939-v61-mkmlizer: ║ ║
nousresearch-meta-llama-4939-v61-mkmlizer: ╚═════════════════════════════════════════════════════════════════════╝
nousresearch-meta-llama-4939-v61-mkmlizer: Downloaded to shared memory in 46.300s
nousresearch-meta-llama-4939-v61-mkmlizer: quantizing model to /dev/shm/model_cache, profile:s0, folder:/tmp/tmpmevrxxg5, device:0
nousresearch-meta-llama-4939-v61-mkmlizer: Saving flywheel model at /dev/shm/model_cache
nousresearch-meta-llama-4939-v61-mkmlizer: quantized model in 26.204s
nousresearch-meta-llama-4939-v61-mkmlizer: Processed model NousResearch/Meta-Llama-3.1-8B-Instruct in 72.505s
nousresearch-meta-llama-4939-v61-mkmlizer: creating bucket guanaco-mkml-models
nousresearch-meta-llama-4939-v61-mkmlizer: Bucket 's3://guanaco-mkml-models/' created
nousresearch-meta-llama-4939-v61-mkmlizer: uploading /dev/shm/model_cache to s3://guanaco-mkml-models/nousresearch-meta-llama-4939-v61
nousresearch-meta-llama-4939-v61-mkmlizer: cp /dev/shm/model_cache/config.json s3://guanaco-mkml-models/nousresearch-meta-llama-4939-v61/config.json
nousresearch-meta-llama-4939-v61-mkmlizer: cp /dev/shm/model_cache/special_tokens_map.json s3://guanaco-mkml-models/nousresearch-meta-llama-4939-v61/special_tokens_map.json
nousresearch-meta-llama-4939-v61-mkmlizer: cp /dev/shm/model_cache/tokenizer_config.json s3://guanaco-mkml-models/nousresearch-meta-llama-4939-v61/tokenizer_config.json
nousresearch-meta-llama-4939-v61-mkmlizer: cp /dev/shm/model_cache/tokenizer.json s3://guanaco-mkml-models/nousresearch-meta-llama-4939-v61/tokenizer.json
nousresearch-meta-llama-4939-v61-mkmlizer: cp /dev/shm/model_cache/flywheel_model.0.safetensors s3://guanaco-mkml-models/nousresearch-meta-llama-4939-v61/flywheel_model.0.safetensors
nousresearch-meta-llama-4939-v61-mkmlizer: Loading 0: 0%| | 0/291 [00:00<?, ?it/s] ... Loading 0: 99%|█████████▉| 288/291 [00:11<00:00, 3.09it/s]
Job nousresearch-meta-llama-4939-v61-mkmlizer completed after 99.49s with status: succeeded
Stopping job with name nousresearch-meta-llama-4939-v61-mkmlizer
Pipeline stage MKMLizer completed in 100.45s
run pipeline stage MKMLTemplater
Running pipeline stage MKMLTemplater
Pipeline stage MKMLTemplater completed in 0.29s
run pipeline stage MKMLDeployer
Running pipeline stage MKMLDeployer
Creating inference service nousresearch-meta-llama-4939-v61
Waiting for inference service nousresearch-meta-llama-4939-v61 to be ready
Inference service nousresearch-meta-llama-4939-v61 ready after 152.49332809448242s
Pipeline stage MKMLDeployer completed in 153.56s
run pipeline stage StressChecker
Running pipeline stage StressChecker
Received healthy response to inference request in 8.004071950912476s
Received healthy response to inference request in 1.2482969760894775s
Received healthy response to inference request in 1.5721850395202637s
Received healthy response to inference request in 1.7663238048553467s
Received healthy response to inference request in 1.3705179691314697s
5 requests
0 failed requests
5th percentile: 1.272741174697876
10th percentile: 1.2971853733062744
20th percentile: 1.3460737705230712
30th percentile: 1.4108513832092284
40th percentile: 1.491518211364746
50th percentile: 1.5721850395202637
60th percentile: 1.6498405456542968
70th percentile: 1.72749605178833
80th percentile: 3.013873434066774
90th percentile: 5.508972692489625
95th percentile: 6.756522321701048
99th percentile: 7.75456202507019
mean time: 2.7922791481018066
Pipeline stage StressChecker completed in 16.13s
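The StressChecker statistics above are consistent with plain linear interpolation over the five sorted response times; a minimal sketch that reproduces them, using numpy's default linear interpolation:

    import numpy as np

    # The five StressChecker response times, in seconds.
    times = [
        8.004071950912476,
        1.2482969760894775,
        1.5721850395202637,
        1.7663238048553467,
        1.3705179691314697,
    ]

    for p in (5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99):
        print(f"{p}th percentile: {np.percentile(times, p)}")
    print("mean time:", np.mean(times))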
run pipeline stage TriggerMKMLProfilingPipeline
Running pipeline stage TriggerMKMLProfilingPipeline
run_pipeline:run_in_cloud %s
starting trigger_guanaco_pipeline args=%s
Pipeline stage TriggerMKMLProfilingPipeline completed in 6.13s
nousresearch-meta-llama_4939_v61 status is now deployed due to DeploymentManager action
run pipeline %s
run pipeline stage MKMLProfilerTemplater
Running pipeline stage MKMLProfilerTemplater
Pipeline stage MKMLProfilerTemplater completed in 0.13s
run pipeline stage MKMLProfilerDeployer
Running pipeline stage MKMLProfilerDeployer
Creating inference service nousresearch-meta-llama-4939-v61-profiler
Waiting for inference service nousresearch-meta-llama-4939-v61-profiler to be ready
Inference service nousresearch-meta-llama-4939-v61-profiler ready after 150.36352825164795s
Pipeline stage MKMLProfilerDeployer completed in 150.87s
run pipeline stage MKMLProfilerRunner
Running pipeline stage MKMLProfilerRunner
kubectl cp /code/guanaco/guanaco_inference_services/src/inference_scripts tenant-chaiml-guanaco/nousresearch-meta-ll81e44021b3ef0de99ed1f314152725da-deploccp84:/code/chaiverse_profiler_1725332943 --namespace tenant-chaiml-guanaco
kubectl exec -it nousresearch-meta-ll81e44021b3ef0de99ed1f314152725da-deploccp84 --namespace tenant-chaiml-guanaco -- sh -c 'cd /code/chaiverse_profiler_1725332943 && chmod +x profiles.py && python profiles.py profile --best_of_n 4 --batches 1,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195 --samples 200 --input_tokens 512 --output_tokens 64 --summary /code/chaiverse_profiler_1725332943/summary.json'
kubectl exec -it nousresearch-meta-ll81e44021b3ef0de99ed1f314152725da-deploccp84 --namespace tenant-chaiml-guanaco -- bash -c 'cat /code/chaiverse_profiler_1725332943/summary.json'
Pipeline stage MKMLProfilerRunner completed in 444.10s
run pipeline stage MKMLProfilerDeleter
Running pipeline stage MKMLProfilerDeleter
Checking if service nousresearch-meta-llama-4939-v61-profiler is running
Tearing down inference service nousresearch-meta-llama-4939-v61-profiler
Service nousresearch-meta-llama-4939-v61-profiler has been torndown
Pipeline stage MKMLProfilerDeleter completed in 1.60s
nousresearch-meta-llama_4939_v61 status is now inactive due to auto deactivation of underperforming models
nousresearch-meta-llama_4939_v61 status is now torndown due to DeploymentManager action