submission_id: trace2333-mistral-trail9-1476_v2
developer_uid: Trace2333
alignment_samples: 12093
alignment_score: -0.4342335099783315
best_of: 8
celo_rating: 1247.27
display_name: trace2333-mistral-trail9-1476_v2
formatter: {'memory_template': "{bot_name}'s Persona: {memory}\n####\n", 'prompt_template': '{prompt}\n<START>\n', 'bot_template': '{bot_name}: {message}\n', 'user_template': '{user_name}: {message}\n', 'response_template': '{bot_name}:', 'truncate_by_message': False}
generation_params: {'temperature': 1.0, 'top_p': 1.0, 'min_p': 0.06, 'top_k': 80, 'presence_penalty': 0.0, 'frequency_penalty': 0.0, 'stopping_words': ['\n', '</s>'], 'max_input_tokens': 512, 'best_of': 8, 'max_output_tokens': 64}
gpu_counts: {'NVIDIA RTX A5000': 1}
is_internal_developer: False
language_model: Trace2333/mistral_trail9_1476
latencies: [{'batch_size': 1, 'throughput': 0.6996644192919137, 'latency_mean': 1.4291820156574249, 'latency_p50': 1.4357500076293945, 'latency_p90': 1.584079384803772}, {'batch_size': 3, 'throughput': 1.3257962108206403, 'latency_mean': 2.25396889090538, 'latency_p50': 2.2553192377090454, 'latency_p90': 2.472990894317627}, {'batch_size': 5, 'throughput': 1.5534097746284485, 'latency_mean': 3.193175584077835, 'latency_p50': 3.208712100982666, 'latency_p90': 3.587921690940857}, {'batch_size': 6, 'throughput': 1.6100887003019055, 'latency_mean': 3.701253155469894, 'latency_p50': 3.7101744413375854, 'latency_p90': 4.243804836273194}, {'batch_size': 8, 'throughput': 1.5928264720755072, 'latency_mean': 4.991180132627488, 'latency_p50': 5.021739363670349, 'latency_p90': 5.67339859008789}, {'batch_size': 10, 'throughput': 1.5513855795776053, 'latency_mean': 6.39257887840271, 'latency_p50': 6.467413783073425, 'latency_p90': 7.318268632888794}]
max_input_tokens: 512
max_output_tokens: 64
model_architecture: MistralForCausalLM
model_group: Trace2333/mistral_trail9
model_name: trace2333-mistral-trail9-1476_v2
model_num_parameters: 12772070400.0
model_repo: Trace2333/mistral_trail9_1476
model_size: 13B
num_battles: 12090
num_wins: 6112
propriety_score: 0.7643622200584226
propriety_total_count: 1027.0
ranking_group: single
status: inactive
submission_type: basic
throughput_3p7s: 1.62
timestamp: 2024-09-10T16:43:08+00:00
us_pacific_date: 2024-09-10
win_ratio: 0.5055417700578991
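For reference, a minimal sketch in Python of how the formatter and generation_params fields above would assemble a prompt and the sampling settings. The persona, user name and messages below are hypothetical placeholders; only the templates, sampling values and the win counts come from the record above.

# Sketch only: sample persona/messages are hypothetical; templates and
# sampling values are copied from the submission record above.
formatter = {
    "memory_template": "{bot_name}'s Persona: {memory}\n####\n",
    "prompt_template": "{prompt}\n<START>\n",
    "bot_template": "{bot_name}: {message}\n",
    "user_template": "{user_name}: {message}\n",
    "response_template": "{bot_name}:",
}
generation_params = {
    "temperature": 1.0, "top_p": 1.0, "min_p": 0.06, "top_k": 80,
    "stopping_words": ["\n", "</s>"], "best_of": 8,
    "max_input_tokens": 512, "max_output_tokens": 64,
}

bot_name, user_name = "Bot", "User"                        # hypothetical
memory, prompt = "A friendly companion.", "Casual chat."   # hypothetical
history = [("user", "Hi!"), ("bot", "Hey, how are you?"), ("user", "Good, you?")]

text = formatter["memory_template"].format(bot_name=bot_name, memory=memory)
text += formatter["prompt_template"].format(prompt=prompt)
for role, message in history:
    tpl = formatter["user_template"] if role == "user" else formatter["bot_template"]
    text += tpl.format(user_name=user_name, bot_name=bot_name, message=message)
text += formatter["response_template"].format(bot_name=bot_name)
print(text)  # input is capped at max_input_tokens (512), output at max_output_tokens (64)

# Consistency check on the leaderboard fields above:
# win_ratio = num_wins / num_battles = 6112 / 12090 ≈ 0.5055417700578991

The response_template leaves the prompt hanging on "{bot_name}:" so the model completes the bot's next turn, and generation stops at the first "\n" or "</s>" per stopping_words.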
Shutdown handler not registered because Python interpreter is not running in the main thread
run pipeline %s
run pipeline stage %s
Running pipeline stage MKMLizer
Starting job with name trace2333-mistral-trail9-1476-v2-mkmlizer
Waiting for job on trace2333-mistral-trail9-1476-v2-mkmlizer to finish
trace2333-mistral-trail9-1476-v2-mkmlizer: ╔═════════════════════════════════════════════════════════════════════╗
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ _____ __ __ ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ / _/ /_ ___ __/ / ___ ___ / / ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ / _/ / // / |/|/ / _ \/ -_) -_) / ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ /_//_/\_, /|__,__/_//_/\__/\__/_/ ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ /___/ ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ Version: 0.10.1 ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ Copyright 2023 MK ONE TECHNOLOGIES Inc. ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ https://mk1.ai ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ The license key for the current software has been verified as ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ belonging to: ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ Chai Research Corp. ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ Account ID: 7997a29f-0ceb-4cc7-9adf-840c57b4ae6f ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ Expiration: 2024-10-15 23:59:59 ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ║ ║
trace2333-mistral-trail9-1476-v2-mkmlizer: ╚═════════════════════════════════════════════════════════════════════╝
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
trace2333-mistral-trail9-1476-v2-mkmlizer: Downloaded to shared memory in 29.755s
trace2333-mistral-trail9-1476-v2-mkmlizer: quantizing model to /dev/shm/model_cache, profile:s0, folder:/tmp/tmpf59celph, device:0
trace2333-mistral-trail9-1476-v2-mkmlizer: Saving flywheel model at /dev/shm/model_cache
trace2333-mistral-trail9-1476-v2-mkmlizer: quantized model in 36.834s
trace2333-mistral-trail9-1476-v2-mkmlizer: Processed model Trace2333/mistral_trail9_1476 in 66.590s
trace2333-mistral-trail9-1476-v2-mkmlizer: Bucket 's3://guanaco-mkml-models/' created
trace2333-mistral-trail9-1476-v2-mkmlizer: uploading /dev/shm/model_cache to s3://guanaco-mkml-models/trace2333-mistral-trail9-1476-v2
trace2333-mistral-trail9-1476-v2-mkmlizer: cp /dev/shm/model_cache/config.json s3://guanaco-mkml-models/trace2333-mistral-trail9-1476-v2/config.json
trace2333-mistral-trail9-1476-v2-mkmlizer: cp /dev/shm/model_cache/special_tokens_map.json s3://guanaco-mkml-models/trace2333-mistral-trail9-1476-v2/special_tokens_map.json
trace2333-mistral-trail9-1476-v2-mkmlizer: cp /dev/shm/model_cache/tokenizer_config.json s3://guanaco-mkml-models/trace2333-mistral-trail9-1476-v2/tokenizer_config.json
trace2333-mistral-trail9-1476-v2-mkmlizer: cp /dev/shm/model_cache/tokenizer.json s3://guanaco-mkml-models/trace2333-mistral-trail9-1476-v2/tokenizer.json
trace2333-mistral-trail9-1476-v2-mkmlizer: cp /dev/shm/model_cache/flywheel_model.0.safetensors s3://guanaco-mkml-models/trace2333-mistral-trail9-1476-v2/flywheel_model.0.safetensors
trace2333-mistral-trail9-1476-v2-mkmlizer: Loading 0: 0%| | 0/363 [00:00<?, ?it/s] ... Loading 0: 99%|█████████▊| 358/363 [00:09<00:00, 54.01it/s]
Job trace2333-mistral-trail9-1476-v2-mkmlizer completed after 94.72s with status: succeeded
Stopping job with name trace2333-mistral-trail9-1476-v2-mkmlizer
Pipeline stage MKMLizer completed in 95.99s
run pipeline stage %s
Running pipeline stage MKMLTemplater
Pipeline stage MKMLTemplater completed in 0.09s
run pipeline stage %s
Running pipeline stage MKMLDeployer
Creating inference service trace2333-mistral-trail9-1476-v2
Waiting for inference service trace2333-mistral-trail9-1476-v2 to be ready
Failed to get response for submission chaiml-llama-8b-pairwis_8189_v19: ('http://chaiml-llama-8b-pairwis-8189-v19-predictor.tenant-chaiml-guanaco.k.chaiverse.com/v1/models/GPT-J-6B-lit-v2:predict', 'read tcp 127.0.0.1:36744->127.0.0.1:8080: read: connection reset by peer\n')
Failed to get response for submission function_sabet_2024-09-10: ('http://chaiml-llama-8b-pairwis-8189-v19-predictor.tenant-chaiml-guanaco.k.chaiverse.com/v1/models/GPT-J-6B-lit-v2:predict', 'readfrom tcp 127.0.0.1:47036->127.0.0.1:8080: write tcp 127.0.0.1:47036->127.0.0.1:8080: use of closed network connection\n')
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Failed to get response for submission blend_sanen_2024-09-09: ('http://zonemercy-lexical-nemo-1518-v18-predictor.tenant-chaiml-guanaco.k.chaiverse.com/v1/models/GPT-J-6B-lit-v2:predict', 'read tcp 127.0.0.1:56686->127.0.0.1:8080: read: connection reset by peer\n')
Failed to get response for submission blend_siken_2024-09-09: ('http://zonemercy-lexical-nemo-1518-v18-predictor.tenant-chaiml-guanaco.k.chaiverse.com/v1/models/GPT-J-6B-lit-v2:predict', 'read tcp 127.0.0.1:35604->127.0.0.1:8080: read: connection reset by peer\n')
Inference service trace2333-mistral-trail9-1476-v2 ready after 240.68230271339417s
Pipeline stage MKMLDeployer completed in 241.00s
run pipeline stage %s
Running pipeline stage StressChecker
Received healthy response to inference request in 3.045686721801758s
Received healthy response to inference request in 2.6070258617401123s
Received healthy response to inference request in 2.4983909130096436s
Received healthy response to inference request in 2.390974760055542s
Received healthy response to inference request in 1.850240707397461s
5 requests
0 failed requests
5th percentile: 1.958387517929077
10th percentile: 2.066534328460693
20th percentile: 2.282827949523926
30th percentile: 2.412457990646362
40th percentile: 2.455424451828003
50th percentile: 2.4983909130096436
60th percentile: 2.541844892501831
70th percentile: 2.5852988719940186
80th percentile: 2.6947580337524415
90th percentile: 2.8702223777770994
95th percentile: 2.9579545497894286
99th percentile: 3.028140287399292
mean time: 2.4784637928009032
Pipeline stage StressChecker completed in 13.46s
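The percentile summary above is consistent with simple linear interpolation over the five healthy response times. A minimal sketch that reproduces the reported figures, assuming NumPy's default linear-interpolation percentiles:

import numpy as np

# The five healthy response times (seconds) reported by the StressChecker stage above.
times = [3.045686721801758, 2.6070258617401123, 2.4983909130096436,
         2.390974760055542, 1.850240707397461]

for p in (5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99):
    print(f"{p}th percentile: {np.percentile(times, p)}")
print(f"mean time: {np.mean(times)}")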
run pipeline stage %s
Running pipeline stage TriggerMKMLProfilingPipeline
run_pipeline:run_in_cloud %s
starting trigger_guanaco_pipeline args=%s
Pipeline stage TriggerMKMLProfilingPipeline completed in 5.22s
Shutdown handler de-registered
trace2333-mistral-trail9-1476_v2 status is now deployed due to DeploymentManager action
Shutdown handler registered
run pipeline %s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeleter
Skipping teardown as no inference service was successfully deployed
Pipeline stage MKMLProfilerDeleter completed in 0.11s
run pipeline stage %s
Running pipeline stage MKMLProfilerTemplater
Pipeline stage MKMLProfilerTemplater completed in 0.11s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeployer
Creating inference service trace2333-mistral-trail9-1476-v2-profiler
Waiting for inference service trace2333-mistral-trail9-1476-v2-profiler to be ready
Inference service trace2333-mistral-trail9-1476-v2-profiler ready after 491.2933852672577s
Pipeline stage MKMLProfilerDeployer completed in 491.67s
run pipeline stage %s
Running pipeline stage MKMLProfilerRunner
kubectl cp /code/guanaco/guanaco_inference_services/src/inference_scripts tenant-chaiml-guanaco/trace2333-mistral-tr246c04e6801c8748a2a4baf53fbda549-deplo6qr2g:/code/chaiverse_profiler_1725987476 --namespace tenant-chaiml-guanaco
kubectl exec -it trace2333-mistral-tr246c04e6801c8748a2a4baf53fbda549-deplo6qr2g --namespace tenant-chaiml-guanaco -- sh -c 'cd /code/chaiverse_profiler_1725987476 && python profiles.py profile --best_of_n 8 --auto_batch 5 --batches 1,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195 --samples 200 --input_tokens 512 --output_tokens 64 --summary /code/chaiverse_profiler_1725987476/summary.json'
kubectl exec -it trace2333-mistral-tr246c04e6801c8748a2a4baf53fbda549-deplo6qr2g --namespace tenant-chaiml-guanaco -- bash -c 'cat /code/chaiverse_profiler_1725987476/summary.json'
Pipeline stage MKMLProfilerRunner completed in 949.32s
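The latencies field in the metadata above is presumably distilled from a profiling run like this one. A minimal sketch of post-processing such a result, assuming summary.json shares the structure of the latencies entries (batch_size, throughput, latency_mean, latency_p50, latency_p90):

import json

# Hypothetical post-processing; assumes the profiler summary has the same
# shape as the "latencies" field shown in the submission metadata.
with open("summary.json") as f:
    runs = json.load(f)

best = max(runs, key=lambda r: r["throughput"])
print(f"peak throughput {best['throughput']:.2f} req/s "
      f"at batch_size {best['batch_size']} "
      f"(p90 latency {best['latency_p90']:.2f}s)")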
run pipeline stage %s
Running pipeline stage MKMLProfilerDeleter
Checking if service trace2333-mistral-trail9-1476-v2-profiler is running
Tearing down inference service trace2333-mistral-trail9-1476-v2-profiler
Service trace2333-mistral-trail9-1476-v2-profiler has been torn down
Pipeline stage MKMLProfilerDeleter completed in 1.82s
Shutdown handler de-registered
trace2333-mistral-trail9-1476_v2 status is now inactive due to auto deactivation of underperforming models