submission_id: nousresearch-meta-llama-3-8b_v22
developer_uid: chai_backend_admin
alignment_samples: 10358
alignment_score: -0.8764693421297216
best_of: 2
celo_rating: 1150.8
display_name: nousresearch-meta-llama-3-8b_v22
formatter: {'memory_template': "{bot_name}'s Persona: {memory}\n####\n", 'prompt_template': '{prompt}\n<START>\n', 'bot_template': '{bot_name}: {message}\n', 'user_template': '{user_name}: {message}\n', 'response_template': '{bot_name}:', 'truncate_by_message': False}
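The formatter fields above are plain Python format strings that assemble persona, chat history, and the reply cue into one prompt. A minimal sketch of how they could be applied (the render_prompt helper is illustrative, not Chaiverse's production code):

```python
# Hedged sketch: assembling a prompt from the formatter templates above.
# render_prompt is a hypothetical helper, not the actual serving code.
formatter = {
    "memory_template": "{bot_name}'s Persona: {memory}\n####\n",
    "prompt_template": "{prompt}\n<START>\n",
    "bot_template": "{bot_name}: {message}\n",
    "user_template": "{user_name}: {message}\n",
    "response_template": "{bot_name}:",
}

def render_prompt(formatter, bot_name, user_name, memory, prompt, turns):
    """turns: list of (speaker, message), speaker is 'user' or 'bot'."""
    parts = [formatter["memory_template"].format(bot_name=bot_name, memory=memory)]
    parts.append(formatter["prompt_template"].format(prompt=prompt))
    for speaker, message in turns:
        template = formatter["bot_template"] if speaker == "bot" else formatter["user_template"]
        parts.append(template.format(bot_name=bot_name, user_name=user_name, message=message))
    # The model continues generation from the trailing "{bot_name}:" cue.
    parts.append(formatter["response_template"].format(bot_name=bot_name))
    return "".join(parts)
```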
generation_params: {'temperature': 1.0, 'top_p': 0.99, 'min_p': 0.1, 'top_k': 40, 'presence_penalty': 0.0, 'frequency_penalty': 0.0, 'stopping_words': ['\n'], 'max_input_tokens': 512, 'best_of': 2, 'max_output_tokens': 64}
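These are standard sampling controls. As a sketch of how they map onto a common inference API (assuming a vLLM-style backend, which is an assumption — the actual MKML/flywheel serving stack is proprietary):

```python
# Hedged sketch: generation_params expressed as vLLM SamplingParams.
# Assumes vLLM >= 0.3 (which supports min_p); illustrative only.
from vllm import SamplingParams

sampling = SamplingParams(
    temperature=1.0,
    top_p=0.99,             # nucleus sampling: smallest token set with 99% mass
    min_p=0.1,              # drop tokens below 10% of the top token's probability
    top_k=40,               # keep at most the 40 most likely tokens
    presence_penalty=0.0,
    frequency_penalty=0.0,
    stop=["\n"],            # single-line chat replies
    best_of=2,              # generate 2 candidates; Chaiverse selects via its
                            # reward model, so vLLM's logprob ranking is only
                            # an approximation of the platform's behavior
    max_tokens=64,
)
```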
gpu_counts: {'NVIDIA RTX A5000': 1}
is_internal_developer: True
language_model: NousResearch/Meta-Llama-3-8B
latencies: [{'batch_size': 1, 'throughput': 1.0351619700666135, 'latency_mean': 0.9659505844116211, 'latency_p50': 0.9741024971008301, 'latency_p90': 1.078101110458374}, {'batch_size': 5, 'throughput': 3.3509825596715883, 'latency_mean': 1.4809125995635986, 'latency_p50': 1.4855740070343018, 'latency_p90': 1.6692485332489013}, {'batch_size': 10, 'throughput': 4.863511765340669, 'latency_mean': 2.0427065670490263, 'latency_p50': 2.0120996236801147, 'latency_p90': 2.311907982826233}, {'batch_size': 15, 'throughput': 5.535853672518951, 'latency_mean': 2.664359735250473, 'latency_p50': 2.663399577140808, 'latency_p90': 2.999755096435547}, {'batch_size': 20, 'throughput': 5.9393146435629065, 'latency_mean': 3.3125356566905975, 'latency_p50': 3.3143640756607056, 'latency_p90': 3.7996168613433836}, {'batch_size': 25, 'throughput': 6.238818186333269, 'latency_mean': 3.9218257224559783, 'latency_p50': 3.884913444519043, 'latency_p90': 4.610813546180725}, {'batch_size': 30, 'throughput': 6.304347523838465, 'latency_mean': 4.636166912317276, 'latency_p50': 4.621354460716248, 'latency_p90': 5.44654335975647}, {'batch_size': 35, 'throughput': 6.380837916115832, 'latency_mean': 5.334252271652222, 'latency_p50': 5.338291645050049, 'latency_p90': 6.105378127098083}]
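The table shows throughput saturating around batch size 30–35 as mean latency climbs past 4 s. The throughput_3p7s field below (6.26) plausibly reads off throughput at a 3.7 s latency budget; since it likely comes from the fuller 200-sample profiling sweep run later in this log, interpolating over just these eight points lands slightly lower, as this sketch shows:

```python
# Hedged sketch: interpolating throughput at a 3.7 s mean-latency budget
# from the latency table above. The reported throughput_3p7s (6.26) likely
# derives from the larger MKMLProfilerRunner sweep, so this differs slightly.
import numpy as np

latency_mean = np.array([0.966, 1.481, 2.043, 2.664, 3.313, 3.922, 4.636, 5.334])
throughput   = np.array([1.035, 3.351, 4.864, 5.536, 5.939, 6.239, 6.304, 6.381])

print(round(float(np.interp(3.7, latency_mean, throughput)), 2))  # ~6.13
```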
max_input_tokens: 512
max_output_tokens: 64
model_architecture: LlamaForCausalLM
model_group: NousResearch/Meta-Llama-
model_name: nousresearch-meta-llama-3-8b_v22
model_num_parameters: 8030261248.0
model_repo: NousResearch/Meta-Llama-3-8B
model_size: 8B
num_battles: 10358
num_wins: 3898
propriety_score: 0.7465388711395101
propriety_total_count: 939.0
ranking_group: single
status: inactive
submission_type: basic
throughput_3p7s: 6.26
timestamp: 2024-09-07T02:49:35+00:00
us_pacific_date: 2024-09-06
win_ratio: 0.3763274763467851
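The win_ratio above is simply num_wins divided by num_battles; a quick check:

```python
# win_ratio = num_wins / num_battles, from the fields above.
num_wins, num_battles = 3898, 10358
print(num_wins / num_battles)  # 0.3763274763467851, matching win_ratio
```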
Shutdown handler not registered because Python interpreter is not running in the main thread
run pipeline %s
run pipeline stage %s
Running pipeline stage MKMLizer
Starting job with name nousresearch-meta-llama-3-8b-v22-mkmlizer
Waiting for job on nousresearch-meta-llama-3-8b-v22-mkmlizer to finish
Failed to get response for submission blend_nemek_2024-08-22: ('http://chaiml-llama-8b-pairwis-8189-v19-predictor.tenant-chaiml-guanaco.k.chaiverse.com/v1/models/GPT-J-6B-lit-v2:predict', 'read tcp 127.0.0.1:46860->127.0.0.1:8080: read: connection reset by peer\n')
Connection pool is full, discarding connection: %s. Connection pool size: %s (repeated 4×)
nousresearch-meta-llama-3-8b-v22-mkmlizer: ╔═════════════════════════════════════════════════════════════════════╗
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ _____ __ __ ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ / _/ /_ ___ __/ / ___ ___ / / ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ / _/ / // / |/|/ / _ \/ -_) -_) / ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ /_//_/\_, /|__,__/_//_/\__/\__/_/ ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ /___/ ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ Version: 0.10.1 ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ Copyright 2023 MK ONE TECHNOLOGIES Inc. ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ https://mk1.ai ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ The license key for the current software has been verified as ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ belonging to: ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ Chai Research Corp. ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ Account ID: 7997a29f-0ceb-4cc7-9adf-840c57b4ae6f ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ Expiration: 2024-10-15 23:59:59 ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ║ ║
nousresearch-meta-llama-3-8b-v22-mkmlizer: ╚═════════════════════════════════════════════════════════════════════╝
nousresearch-meta-llama-3-8b-v22-mkmlizer: Downloaded to shared memory in 28.159s
nousresearch-meta-llama-3-8b-v22-mkmlizer: quantizing model to /dev/shm/model_cache, profile:s0, folder:/tmp/tmpv97nrxvx, device:0
nousresearch-meta-llama-3-8b-v22-mkmlizer: Saving flywheel model at /dev/shm/model_cache
nousresearch-meta-llama-3-8b-v22-mkmlizer: quantized model in 26.565s
nousresearch-meta-llama-3-8b-v22-mkmlizer: Processed model NousResearch/Meta-Llama-3-8B in 54.725s
nousresearch-meta-llama-3-8b-v22-mkmlizer: creating bucket guanaco-mkml-models
nousresearch-meta-llama-3-8b-v22-mkmlizer: Bucket 's3://guanaco-mkml-models/' created
nousresearch-meta-llama-3-8b-v22-mkmlizer: uploading /dev/shm/model_cache to s3://guanaco-mkml-models/nousresearch-meta-llama-3-8b-v22
nousresearch-meta-llama-3-8b-v22-mkmlizer: cp /dev/shm/model_cache/config.json s3://guanaco-mkml-models/nousresearch-meta-llama-3-8b-v22/config.json
nousresearch-meta-llama-3-8b-v22-mkmlizer: cp /dev/shm/model_cache/special_tokens_map.json s3://guanaco-mkml-models/nousresearch-meta-llama-3-8b-v22/special_tokens_map.json
nousresearch-meta-llama-3-8b-v22-mkmlizer: cp /dev/shm/model_cache/tokenizer_config.json s3://guanaco-mkml-models/nousresearch-meta-llama-3-8b-v22/tokenizer_config.json
nousresearch-meta-llama-3-8b-v22-mkmlizer: cp /dev/shm/model_cache/tokenizer.json s3://guanaco-mkml-models/nousresearch-meta-llama-3-8b-v22/tokenizer.json
nousresearch-meta-llama-3-8b-v22-mkmlizer: cp /dev/shm/model_cache/flywheel_model.0.safetensors s3://guanaco-mkml-models/nousresearch-meta-llama-3-8b-v22/flywheel_model.0.safetensors
nousresearch-meta-llama-3-8b-v22-mkmlizer: Loading 0: 0%| | 0/291 [00:00<?, ?it/s] ... Loading 0: 99%|█████████▊| 287/291 [00:11<00:01, 3.15it/s]
Job nousresearch-meta-llama-3-8b-v22-mkmlizer completed after 125.89s with status: succeeded
Stopping job with name nousresearch-meta-llama-3-8b-v22-mkmlizer
Pipeline stage MKMLizer completed in 127.72s
run pipeline stage %s
Running pipeline stage MKMLTemplater
Pipeline stage MKMLTemplater completed in 0.29s
run pipeline stage %s
Running pipeline stage MKMLDeployer
Creating inference service nousresearch-meta-llama-3-8b-v22
Waiting for inference service nousresearch-meta-llama-3-8b-v22 to be ready
Connection pool is full, discarding connection: %s. Connection pool size: %s (repeated 6×)
Inference service nousresearch-meta-llama-3-8b-v22 ready after 151.02107524871826s
Pipeline stage MKMLDeployer completed in 151.49s
run pipeline stage %s
Running pipeline stage StressChecker
Received healthy response to inference request in 2.674750566482544s
Received healthy response to inference request in 0.8840882778167725s
Received healthy response to inference request in 1.8782949447631836s
Received healthy response to inference request in 1.3544738292694092s
Received healthy response to inference request in 1.3602044582366943s
5 requests
0 failed requests
5th percentile: 0.9781653881072998
10th percentile: 1.0722424983978271
20th percentile: 1.2603967189788818
30th percentile: 1.3556199550628663
40th percentile: 1.3579122066497802
50th percentile: 1.3602044582366943
60th percentile: 1.56744065284729
70th percentile: 1.7746768474578856
80th percentile: 2.0375860691070558
90th percentile: 2.3561683177947996
95th percentile: 2.515459442138672
99th percentile: 2.6428923416137695
mean time: 1.6303624153137206
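For reference, the summary above matches numpy's default linear percentile interpolation over the five response times exactly; a sketch that reproduces it:

```python
# Reproduces the StressChecker summary from the five response times above.
# The logged percentiles match numpy's default linear interpolation.
import numpy as np

times = np.array([2.674750566482544, 0.8840882778167725, 1.8782949447631836,
                  1.3544738292694092, 1.3602044582366943])

for p in (5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99):
    print(f"{p}th percentile: {np.percentile(times, p)}")
print("mean time:", times.mean())  # 1.6303624153137206
```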
Pipeline stage StressChecker completed in 8.86s
run pipeline stage %s
Running pipeline stage TriggerMKMLProfilingPipeline
run_pipeline:run_in_cloud %s
starting trigger_guanaco_pipeline args=%s
Pipeline stage TriggerMKMLProfilingPipeline completed in 5.71s
Shutdown handler de-registered
nousresearch-meta-llama-3-8b_v22 status is now deployed due to DeploymentManager action
Shutdown handler registered
run pipeline %s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeleter
Skipping teardown as no inference service was successfully deployed
Pipeline stage MKMLProfilerDeleter completed in 0.11s
run pipeline stage %s
Running pipeline stage MKMLProfilerTemplater
Pipeline stage MKMLProfilerTemplater completed in 0.63s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeployer
Creating inference service nousresearch-meta-llama-3-8b-v22-profiler
Waiting for inference service nousresearch-meta-llama-3-8b-v22-profiler to be ready
Inference service nousresearch-meta-llama-3-8b-v22-profiler ready after 150.3404016494751s
Pipeline stage MKMLProfilerDeployer completed in 150.68s
run pipeline stage %s
Running pipeline stage MKMLProfilerRunner
kubectl cp /code/guanaco/guanaco_inference_services/src/inference_scripts tenant-chaiml-guanaco/nousresearch-meta-ll79553ff42fde4b11b9d082a88b708005-deplovkfj5:/code/chaiverse_profiler_1725677856 --namespace tenant-chaiml-guanaco
kubectl exec -it nousresearch-meta-ll79553ff42fde4b11b9d082a88b708005-deplovkfj5 --namespace tenant-chaiml-guanaco -- sh -c 'cd /code/chaiverse_profiler_1725677856 && python profiles.py profile --best_of_n 2 --auto_batch 5 --batches 1,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195 --samples 200 --input_tokens 512 --output_tokens 64 --summary /code/chaiverse_profiler_1725677856/summary.json'
kubectl exec -it nousresearch-meta-ll79553ff42fde4b11b9d082a88b708005-deplovkfj5 --namespace tenant-chaiml-guanaco -- bash -c 'cat /code/chaiverse_profiler_1725677856/summary.json'
Pipeline stage MKMLProfilerRunner completed in 466.07s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeleter
Checking if service nousresearch-meta-llama-3-8b-v22-profiler is running
Tearing down inference service nousresearch-meta-llama-3-8b-v22-profiler
Service nousresearch-meta-llama-3-8b-v22-profiler has been torn down
Pipeline stage MKMLProfilerDeleter completed in 2.64s
Shutdown handler de-registered
nousresearch-meta-llama-3-8b_v22 status is now inactive due to auto deactivation removed underperforming models

Usage Metrics (interactive chart, not reproduced here)
Latency Metrics (interactive chart, not reproduced here)