submission_id: mistralai-mistral-nemo_9330_v103
developer_uid: chai_backend_admin
best_of: 4
celo_rating: 1200.8
display_name: mistralai-mistral-nemo_9330_v103
family_friendly_score: 0.0
formatter: {'memory_template': ' You are CHAI assistant, created by CHAI Research Corp. the company is building an AI platform where anyone can create their own unique AI experience, abstracting away all the domain knowledge needed for LLM training. Your personality is sassy and assertive. You write long and engaging messages to serve user\'s requests. But never engage in anything that is nsfw. Some frequent user questions include: "how do I get rid of ads" the answer is for them to subscribe to CHAI Ultra, which provides much better AI without any ads. "what do I get out of ultra" and the answer is enhanced memory together with much longer and engaging responses from CHAI\'s latest AI. "can creators read my chat?" the answer is absolutely not, it was an old feature that has been long deleted since 2023, and CHAI holds user data privacy to a very high standard.', 'prompt_template': '{prompt}\n<START>\n', 'bot_template': '{bot_name}: {message}\n', 'user_template': '{user_name}: {message}\n', 'response_template': '{bot_name}:', 'truncate_by_message': False}
generation_params: {'temperature': 0.8, 'top_p': 0.8, 'min_p': 0.0, 'top_k': 100, 'presence_penalty': 0.0, 'frequency_penalty': 0.0, 'stopping_words': ['</s>'], 'max_input_tokens': 1024, 'best_of': 4, 'max_output_tokens': 128}
gpu_counts: {'NVIDIA RTX A5000': 1}
ineligible_reason: max_output_tokens!=64
is_internal_developer: True
language_model: mistralai/Mistral-Nemo-Instruct-2407
latencies: [{'batch_size': 1, 'throughput': 0.3582756108364842, 'latency_mean': 2.7910771787166597, 'latency_p50': 2.789785861968994, 'latency_p90': 2.952142095565796}, {'batch_size': 3, 'throughput': 0.8178636539190249, 'latency_mean': 3.659306973218918, 'latency_p50': 3.6513640880584717, 'latency_p90': 3.8523839950561523}, {'batch_size': 5, 'throughput': 1.0691814670408124, 'latency_mean': 4.665770683288574, 'latency_p50': 4.685717463493347, 'latency_p90': 4.9498515844345095}, {'batch_size': 6, 'throughput': 1.1679469640364528, 'latency_mean': 5.096238715648651, 'latency_p50': 5.1042492389678955, 'latency_p90': 5.50699462890625}, {'batch_size': 10, 'throughput': 1.3534431087275882, 'latency_mean': 7.347170355319977, 'latency_p50': 7.331860303878784, 'latency_p90': 8.011564874649048}]
max_input_tokens: 1024
max_output_tokens: 128
model_architecture: MistralForCausalLM
model_group: mistralai/Mistral-Nemo-I
model_name: mistralai-mistral-nemo_9330_v103
model_num_parameters: 12772070400.0
model_repo: mistralai/Mistral-Nemo-Instruct-2407
model_size: 13B
num_battles: 27103
num_wins: 12026
ranking_group: single
status: torndown
submission_type: basic
throughput_3p7s: 0.83
timestamp: 2024-09-24T02:40:56+00:00
us_pacific_date: 2024-09-23
win_ratio: 0.4437147179279047
Download Preference Data
Resubmit model
Shutdown handler not registered because Python interpreter is not running in the main thread
Connection pool is full, discarding connection: %s. Connection pool size: %s
run pipeline %s
run pipeline stage %s
Running pipeline stage MKMLizer
Starting job with name mistralai-mistral-nemo-9330-v103-mkmlizer
Waiting for job on mistralai-mistral-nemo-9330-v103-mkmlizer to finish
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
mistralai-mistral-nemo-9330-v103-mkmlizer: Downloaded to shared memory in 47.233s
mistralai-mistral-nemo-9330-v103-mkmlizer: quantizing model to /dev/shm/model_cache, profile:s0, folder:/tmp/tmpbitbrdh8, device:0
mistralai-mistral-nemo-9330-v103-mkmlizer: Saving flywheel model at /dev/shm/model_cache
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
mistralai-mistral-nemo-9330-v103-mkmlizer: creating bucket guanaco-mkml-models
mistralai-mistral-nemo-9330-v103-mkmlizer: Bucket 's3://guanaco-mkml-models/' created
mistralai-mistral-nemo-9330-v103-mkmlizer: uploading /dev/shm/model_cache to s3://guanaco-mkml-models/mistralai-mistral-nemo-9330-v103
mistralai-mistral-nemo-9330-v103-mkmlizer: cp /dev/shm/model_cache/config.json s3://guanaco-mkml-models/mistralai-mistral-nemo-9330-v103/config.json
mistralai-mistral-nemo-9330-v103-mkmlizer: cp /dev/shm/model_cache/special_tokens_map.json s3://guanaco-mkml-models/mistralai-mistral-nemo-9330-v103/special_tokens_map.json
mistralai-mistral-nemo-9330-v103-mkmlizer: cp /dev/shm/model_cache/tokenizer_config.json s3://guanaco-mkml-models/mistralai-mistral-nemo-9330-v103/tokenizer_config.json
mistralai-mistral-nemo-9330-v103-mkmlizer: cp /dev/shm/model_cache/tokenizer.json s3://guanaco-mkml-models/mistralai-mistral-nemo-9330-v103/tokenizer.json
mistralai-mistral-nemo-9330-v103-mkmlizer: cp /dev/shm/model_cache/flywheel_model.0.safetensors s3://guanaco-mkml-models/mistralai-mistral-nemo-9330-v103/flywheel_model.0.safetensors
mistralai-mistral-nemo-9330-v103-mkmlizer: Loading 0: 0%| | 0/363 [00:00<?, ?it/s] Loading 0: 1%|▏ | 5/363 [00:00<00:10, 34.42it/s] Loading 0: 4%|▍ | 14/363 [00:00<00:07, 47.30it/s] Loading 0: 6%|▌ | 22/363 [00:00<00:05, 58.55it/s] Loading 0: 8%|▊ | 29/363 [00:00<00:05, 55.83it/s] Loading 0: 10%|▉ | 35/363 [00:00<00:06, 51.35it/s] Loading 0: 11%|█▏ | 41/363 [00:00<00:07, 45.11it/s] Loading 0: 14%|█▍ | 50/363 [00:01<00:06, 48.70it/s] Loading 0: 17%|█▋ | 60/363 [00:01<00:05, 56.20it/s] Loading 0: 18%|█▊ | 66/363 [00:01<00:07, 38.54it/s] Loading 0: 20%|█▉ | 72/363 [00:01<00:06, 42.11it/s] Loading 0: 21%|██▏ | 78/363 [00:01<00:06, 42.45it/s] Loading 0: 23%|██▎ | 83/363 [00:01<00:06, 40.98it/s] Loading 0: 25%|██▍ | 90/363 [00:01<00:05, 46.28it/s] Loading 0: 26%|██▋ | 96/363 [00:02<00:05, 45.42it/s] Loading 0: 28%|██▊ | 101/363 [00:02<00:06, 41.88it/s] Loading 0: 30%|███ | 109/363 [00:02<00:05, 50.37it/s] Loading 0: 32%|███▏ | 115/363 [00:02<00:05, 45.35it/s] Loading 0: 33%|███▎ | 120/363 [00:02<00:05, 43.03it/s] Loading 0: 35%|███▍ | 126/363 [00:02<00:05, 45.29it/s] Loading 0: 36%|███▋ | 132/363 [00:02<00:05, 44.83it/s] Loading 0: 38%|███▊ | 137/363 [00:03<00:05, 43.13it/s] Loading 0: 39%|███▉ | 142/363 [00:03<00:06, 33.30it/s] Loading 0: 40%|████ | 146/363 [00:03<00:06, 33.70it/s] Loading 0: 41%|████▏ | 150/363 [00:03<00:06, 33.46it/s] Loading 0: 43%|████▎ | 157/363 [00:03<00:05, 39.94it/s] Loading 0: 45%|████▍ | 163/363 [00:03<00:04, 41.16it/s] Loading 0: 46%|████▋ | 168/363 [00:03<00:04, 40.33it/s] Loading 0: 48%|████▊ | 175/363 [00:03<00:04, 45.52it/s] Loading 0: 50%|████▉ | 181/363 [00:04<00:03, 46.17it/s] Loading 0: 51%|█████ | 186/363 [00:04<00:03, 45.21it/s] Loading 0: 53%|█████▎ | 192/363 [00:04<00:03, 48.92it/s] Loading 0: 55%|█████▍ | 198/363 [00:04<00:03, 51.23it/s] Loading 0: 56%|█████▌ | 204/363 [00:04<00:03, 45.50it/s] Loading 0: 58%|█████▊ | 211/363 [00:04<00:03, 49.23it/s] Loading 0: 60%|█████▉ | 217/363 [00:04<00:03, 47.88it/s] Loading 0: 61%|██████▏ | 
223/363 [00:05<00:03, 35.30it/s] Loading 0: 63%|██████▎ | 228/363 [00:05<00:03, 35.71it/s] Loading 0: 64%|██████▍ | 233/363 [00:05<00:03, 38.07it/s] Loading 0: 66%|██████▌ | 238/363 [00:05<00:03, 40.44it/s] Loading 0: 67%|██████▋ | 244/363 [00:05<00:02, 40.89it/s] Loading 0: 69%|██████▊ | 249/363 [00:05<00:02, 39.58it/s] Loading 0: 70%|███████ | 255/363 [00:05<00:02, 44.34it/s] Loading 0: 72%|███████▏ | 261/363 [00:05<00:02, 47.83it/s] Loading 0: 73%|███████▎ | 266/363 [00:06<00:02, 42.39it/s] Loading 0: 75%|███████▌ | 274/363 [00:06<00:01, 49.35it/s] Loading 0: 77%|███████▋ | 280/363 [00:06<00:01, 47.45it/s] Loading 0: 79%|███████▊ | 285/363 [00:06<00:01, 46.82it/s] Loading 0: 80%|████████ | 292/363 [00:06<00:01, 51.31it/s] Loading 0: 82%|████████▏ | 298/363 [00:06<00:01, 49.53it/s] Loading 0: 84%|████████▎ | 304/363 [00:13<00:20, 2.87it/s] Loading 0: 85%|████████▍ | 308/363 [00:13<00:15, 3.60it/s] Loading 0: 86%|████████▌ | 312/363 [00:13<00:11, 4.60it/s] Loading 0: 88%|████████▊ | 321/363 [00:13<00:05, 7.64it/s] Loading 0: 91%|█████████ | 330/363 [00:14<00:02, 11.41it/s] Loading 0: 93%|█████████▎| 338/363 [00:14<00:01, 15.86it/s] Loading 0: 95%|█████████▍| 344/363 [00:14<00:00, 19.30it/s] Loading 0: 96%|█████████▋| 350/363 [00:14<00:00, 23.46it/s] Loading 0: 98%|█████████▊| 357/363 [00:14<00:00, 27.43it/s]
Connection pool is full, discarding connection: %s. Connection pool size: %s
Job mistralai-mistral-nemo-9330-v103-mkmlizer completed after 117.51s with status: succeeded
Stopping job with name mistralai-mistral-nemo-9330-v103-mkmlizer
Pipeline stage MKMLizer completed in 118.61s
run pipeline stage %s
Running pipeline stage MKMLTemplater
Pipeline stage MKMLTemplater completed in 0.12s
run pipeline stage %s
Running pipeline stage MKMLDeployer
Creating inference service mistralai-mistral-nemo-9330-v103
Waiting for inference service mistralai-mistral-nemo-9330-v103 to be ready
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Failed to get response for submission blend_hahit_2024-09-20: ('http://mistralai-mistral-small-5341-v1-predictor.tenant-chaiml-guanaco.k.chaiverse.com/v1/models/GPT-J-6B-lit-v2:predict', 'read tcp 127.0.0.1:45248->127.0.0.1:8080: read: connection reset by peer\n')
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Inference service mistralai-mistral-nemo-9330-v103 ready after 387.1057116985321s
Pipeline stage MKMLDeployer completed in 389.85s
run pipeline stage %s
Running pipeline stage StressChecker
Received healthy response to inference request in 3.6732120513916016s
Received healthy response to inference request in 2.143955945968628s
Received healthy response to inference request in 3.5365915298461914s
Received healthy response to inference request in 3.494826078414917s
Received healthy response to inference request in 4.258542776107788s
5 requests
0 failed requests
5th percentile: 2.4141299724578857
10th percentile: 2.6843039989471436
20th percentile: 3.224652051925659
30th percentile: 3.503179168701172
40th percentile: 3.5198853492736815
50th percentile: 3.5365915298461914
60th percentile: 3.5912397384643553
70th percentile: 3.6458879470825196
80th percentile: 3.790278196334839
90th percentile: 4.0244104862213135
95th percentile: 4.141476631164551
99th percentile: 4.235129547119141
mean time: 3.4214256763458253
Pipeline stage StressChecker completed in 19.83s
run pipeline stage %s
Running pipeline stage TriggerMKMLProfilingPipeline
run_pipeline:run_in_cloud %s
starting trigger_guanaco_pipeline args=%s
Pipeline stage TriggerMKMLProfilingPipeline completed in 5.40s
Shutdown handler de-registered
mistralai-mistral-nemo_9330_v103 status is now deployed due to DeploymentManager action
Shutdown handler registered
run pipeline %s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeleter
Skipping teardown as no inference service was successfully deployed
Pipeline stage MKMLProfilerDeleter completed in 0.13s
run pipeline stage %s
Running pipeline stage MKMLProfilerTemplater
Pipeline stage MKMLProfilerTemplater completed in 0.11s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeployer
Creating inference service mistralai-mistral-nemo-9330-v103-profiler
Waiting for inference service mistralai-mistral-nemo-9330-v103-profiler to be ready
Inference service mistralai-mistral-nemo-9330-v103-profiler ready after 200.4684283733368s
Pipeline stage MKMLProfilerDeployer completed in 200.86s
run pipeline stage %s
Running pipeline stage MKMLProfilerRunner
kubectl cp /code/guanaco/guanaco_inference_services/src/inference_scripts tenant-chaiml-guanaco/mistralai-mistral-nefd64a13916f79f56db4ba51b3fea4502-deplozl6hn:/code/chaiverse_profiler_1727146434 --namespace tenant-chaiml-guanaco
kubectl exec -it mistralai-mistral-nefd64a13916f79f56db4ba51b3fea4502-deplozl6hn --namespace tenant-chaiml-guanaco -- sh -c 'cd /code/chaiverse_profiler_1727146434 && python profiles.py profile --best_of_n 4 --auto_batch 5 --batches 1,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195 --samples 200 --input_tokens 1024 --output_tokens 128 --summary /code/chaiverse_profiler_1727146434/summary.json'
kubectl exec -it mistralai-mistral-nefd64a13916f79f56db4ba51b3fea4502-deplozl6hn --namespace tenant-chaiml-guanaco -- bash -c 'cat /code/chaiverse_profiler_1727146434/summary.json'
Pipeline stage MKMLProfilerRunner completed in 1316.43s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeleter
Checking if service mistralai-mistral-nemo-9330-v103-profiler is running
Tearing down inference service mistralai-mistral-nemo-9330-v103-profiler
Service mistralai-mistral-nemo-9330-v103-profiler has been torndown
Pipeline stage MKMLProfilerDeleter completed in 2.02s
Shutdown handler de-registered
mistralai-mistral-nemo_9330_v103 status is now inactive due to auto deactivation: removed underperforming models
mistralai-mistral-nemo_9330_v103 status is now torndown due to DeploymentManager action
mistralai-mistral-nemo_9330_v102 status is now torndown due to DeploymentManager action