submission_id: sao10k-hanami-1_v2
developer_uid: sao10k
best_of: 8
celo_rating: 1252.14
display_name: Hanami1
family_friendly_score: 0.0
formatter: {'memory_template': "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{bot_name}'s Persona: {memory}\n\n", 'prompt_template': '{prompt}<|eot_id|>', 'bot_template': '<|start_header_id|>assistant<|end_header_id|>\n\n{bot_name}: {message}<|eot_id|>', 'user_template': '<|start_header_id|>user<|end_header_id|>\n\n{user_name}: {message}<|eot_id|>', 'response_template': '<|start_header_id|>assistant<|end_header_id|>\n\n{bot_name}:', 'truncate_by_message': False}
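The formatter above assembles Llama-3-style chat prompts. A minimal sketch of how those templates could compose, assuming a plain concatenation renderer (the production formatter is not shown in this log); the persona and messages are hypothetical:

    formatter = {
        "memory_template": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{bot_name}'s Persona: {memory}\n\n",
        "prompt_template": "{prompt}<|eot_id|>",
        "bot_template": "<|start_header_id|>assistant<|end_header_id|>\n\n{bot_name}: {message}<|eot_id|>",
        "user_template": "<|start_header_id|>user<|end_header_id|>\n\n{user_name}: {message}<|eot_id|>",
        "response_template": "<|start_header_id|>assistant<|end_header_id|>\n\n{bot_name}:",
    }

    def render_prompt(bot_name, user_name, memory, prompt, turns):
        # System header with persona, then the scenario prompt, then the chat
        # turns, ending with the open assistant header the model completes.
        out = formatter["memory_template"].format(bot_name=bot_name, memory=memory)
        out += formatter["prompt_template"].format(prompt=prompt)
        for role, message in turns:  # role is "user" or "bot"
            tpl = formatter["user_template"] if role == "user" else formatter["bot_template"]
            out += tpl.format(bot_name=bot_name, user_name=user_name, message=message)
        return out + formatter["response_template"].format(bot_name=bot_name)

    print(render_prompt("Hanami", "You", "A quiet gardener.", "A rainy afternoon.",
                        [("user", "Hello!")]))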
generation_params: {'temperature': 1.2, 'top_p': 1.0, 'min_p': 0.2, 'top_k': 50, 'presence_penalty': 0.0, 'frequency_penalty': 0.0, 'stopping_words': ['\n', '\n\n', '<|eot_id|>', '\n\n{user_name}', '\nYou:', '<|end_header_id|>'], 'max_input_tokens': 1024, 'best_of': 8, 'max_output_tokens': 64}
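Temperature 1.2 with min_p 0.2 and top_k 50 shapes each sampling step (top_p 1.0 leaves nucleus filtering disabled), and best_of 8 draws eight candidate completions from which one is kept. A minimal sketch of a single sampling step under these settings, assuming the standard top-k and min-p filters rather than Chai's actual decoder:

    import numpy as np

    def sample_next(logits, temperature=1.2, top_k=50, min_p=0.2,
                    rng=np.random.default_rng(0)):
        logits = np.asarray(logits, dtype=np.float64) / temperature
        probs = np.exp(logits - logits.max())   # softmax, numerically stable
        probs /= probs.sum()
        keep = np.argsort(probs)[::-1][:top_k]  # top_k: keep the 50 most likely tokens
        mask = np.zeros(len(probs), dtype=bool)
        mask[keep] = True
        mask &= probs >= min_p * probs.max()    # min_p: drop tokens below 0.2x the top prob
        probs = np.where(mask, probs, 0.0)      # top_p=1.0 would be a no-op here
        probs /= probs.sum()
        return int(rng.choice(len(probs), p=probs))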
gpu_counts: {'NVIDIA RTX A5000': 1}
is_internal_developer: False
language_model: Sao10K/Hanami-1
latencies: [{'batch_size': 1, 'throughput': 0.8750346291917347, 'latency_mean': 1.1427462375164033, 'latency_p50': 1.1416246891021729, 'latency_p90': 1.2613062143325806}, {'batch_size': 4, 'throughput': 1.8672577843964218, 'latency_mean': 2.1377370989322664, 'latency_p50': 2.1320830583572388, 'latency_p90': 2.424647903442383}, {'batch_size': 5, 'throughput': 2.000367713811642, 'latency_mean': 2.487578624486923, 'latency_p50': 2.491390347480774, 'latency_p90': 2.773827600479126}, {'batch_size': 8, 'throughput': 2.158974599278588, 'latency_mean': 3.6742761278152467, 'latency_p50': 3.67004132270813, 'latency_p90': 4.122018265724182}, {'batch_size': 10, 'throughput': 2.211193450921814, 'latency_mean': 4.485945956707001, 'latency_p50': 4.47420334815979, 'latency_p90': 5.075998902320862}, {'batch_size': 12, 'throughput': 2.217418985022801, 'latency_mean': 5.360130703449249, 'latency_p50': 5.394302725791931, 'latency_p90': 6.0384762525558475}, {'batch_size': 15, 'throughput': 2.177376936082954, 'latency_mean': 6.806864657402039, 'latency_p50': 6.811468839645386, 'latency_p90': 7.6050426959991455}]
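The throughput_3p7s field below appears to summarize this table as sustained throughput at roughly a 3.7 s latency budget. One hypothetical reading is linear interpolation over latency_mean, sketched here; the profiler's exact method is not shown in this log, and this reading lands near, not exactly at, the reported 2.18:

    import numpy as np

    # latency_mean and throughput columns from the table above.
    latency_mean = [1.1427462375164033, 2.1377370989322664, 2.487578624486923,
                    3.6742761278152467, 4.485945956707001, 5.360130703449249,
                    6.806864657402039]
    throughput = [0.8750346291917347, 1.8672577843964218, 2.000367713811642,
                  2.158974599278588, 2.211193450921814, 2.217418985022801,
                  2.177376936082954]
    print(round(float(np.interp(3.7, latency_mean, throughput)), 2))  # ~2.16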
max_input_tokens: 1024
max_output_tokens: 64
model_architecture: LlamaForCausalLM
model_group: Sao10K/Hanami-1
model_name: Hanami1
model_num_parameters: 8030261248.0
model_repo: Sao10K/Hanami-1
model_size: 8B
num_battles: 11150
num_wins: 5531
ranking_group: single
status: torndown
submission_type: basic
throughput_3p7s: 2.18
timestamp: 2024-09-14T21:21:09+00:00
us_pacific_date: 2024-09-14
win_ratio: 0.4960538116591928
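As a consistency check, win_ratio is num_wins divided by num_battles from the fields above:

    num_wins, num_battles = 5531, 11150
    print(num_wins / num_battles)  # 0.4960538116591928, matching win_ratio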
Shutdown handler not registered because Python interpreter is not running in the main thread
run pipeline %s
run pipeline stage %s
Running pipeline stage MKMLizer
Starting job with name sao10k-hanami-1-v2-mkmlizer
Waiting for job on sao10k-hanami-1-v2-mkmlizer to finish
sao10k-hanami-1-v2-mkmlizer: ╔═════════════════════════════════════════════════════════════════════╗
sao10k-hanami-1-v2-mkmlizer: ║ _____ __ __ ║
sao10k-hanami-1-v2-mkmlizer: ║ / _/ /_ ___ __/ / ___ ___ / / ║
sao10k-hanami-1-v2-mkmlizer: ║ / _/ / // / |/|/ / _ \/ -_) -_) / ║
sao10k-hanami-1-v2-mkmlizer: ║ /_//_/\_, /|__,__/_//_/\__/\__/_/ ║
sao10k-hanami-1-v2-mkmlizer: ║ /___/ ║
sao10k-hanami-1-v2-mkmlizer: ║ ║
sao10k-hanami-1-v2-mkmlizer: ║ Version: 0.10.1 ║
sao10k-hanami-1-v2-mkmlizer: ║ Copyright 2023 MK ONE TECHNOLOGIES Inc. ║
sao10k-hanami-1-v2-mkmlizer: ║ https://mk1.ai ║
sao10k-hanami-1-v2-mkmlizer: ║ ║
sao10k-hanami-1-v2-mkmlizer: ║ The license key for the current software has been verified as ║
sao10k-hanami-1-v2-mkmlizer: ║ belonging to: ║
sao10k-hanami-1-v2-mkmlizer: ║ ║
sao10k-hanami-1-v2-mkmlizer: ║ Chai Research Corp. ║
sao10k-hanami-1-v2-mkmlizer: ║ Account ID: 7997a29f-0ceb-4cc7-9adf-840c57b4ae6f ║
sao10k-hanami-1-v2-mkmlizer: ║ Expiration: 2024-10-15 23:59:59 ║
sao10k-hanami-1-v2-mkmlizer: ║ ║
sao10k-hanami-1-v2-mkmlizer: ╚═════════════════════════════════════════════════════════════════════╝
sao10k-hanami-1-v2-mkmlizer: Downloaded to shared memory in 33.191s
sao10k-hanami-1-v2-mkmlizer: quantizing model to /dev/shm/model_cache, profile:s0, folder:/tmp/tmp6n_ktma1, device:0
sao10k-hanami-1-v2-mkmlizer: Saving flywheel model at /dev/shm/model_cache
Connection pool is full, discarding connection: %s. Connection pool size: %s
sao10k-hanami-1-v2-mkmlizer: quantized model in 25.335s
sao10k-hanami-1-v2-mkmlizer: Processed model Sao10K/Hanami-1 in 58.526s
sao10k-hanami-1-v2-mkmlizer: creating bucket guanaco-mkml-models
sao10k-hanami-1-v2-mkmlizer: Bucket 's3://guanaco-mkml-models/' created
sao10k-hanami-1-v2-mkmlizer: uploading /dev/shm/model_cache to s3://guanaco-mkml-models/sao10k-hanami-1-v2
sao10k-hanami-1-v2-mkmlizer: cp /dev/shm/model_cache/config.json s3://guanaco-mkml-models/sao10k-hanami-1-v2/config.json
sao10k-hanami-1-v2-mkmlizer: cp /dev/shm/model_cache/special_tokens_map.json s3://guanaco-mkml-models/sao10k-hanami-1-v2/special_tokens_map.json
sao10k-hanami-1-v2-mkmlizer: cp /dev/shm/model_cache/tokenizer_config.json s3://guanaco-mkml-models/sao10k-hanami-1-v2/tokenizer_config.json
sao10k-hanami-1-v2-mkmlizer: cp /dev/shm/model_cache/tokenizer.json s3://guanaco-mkml-models/sao10k-hanami-1-v2/tokenizer.json
sao10k-hanami-1-v2-mkmlizer: cp /dev/shm/model_cache/flywheel_model.0.safetensors s3://guanaco-mkml-models/sao10k-hanami-1-v2/flywheel_model.0.safetensors
sao10k-hanami-1-v2-mkmlizer: Loading 0: 0%| | 0/291 [00:00<?, ?it/s] ... Loading 0: 98%|█████████▊| 285/291 [00:10<00:00, 48.68it/s]
Job sao10k-hanami-1-v2-mkmlizer completed after 82.59s with status: succeeded
Stopping job with name sao10k-hanami-1-v2-mkmlizer
Pipeline stage MKMLizer completed in 83.06s
run pipeline stage %s
Running pipeline stage MKMLTemplater
Pipeline stage MKMLTemplater completed in 0.08s
run pipeline stage %s
Running pipeline stage MKMLDeployer
Creating inference service sao10k-hanami-1-v2
Waiting for inference service sao10k-hanami-1-v2 to be ready
Failed to get response for submission blend_hokok_2024-09-09: ('http://neversleep-noromaid-v0-8068-v150-predictor.tenant-chaiml-guanaco.k.chaiverse.com/v1/models/GPT-J-6B-lit-v2:predict', '')
Failed to get response for submission blend_hokok_2024-09-09: ('http://neversleep-noromaid-v0-8068-v150-predictor.tenant-chaiml-guanaco.k.chaiverse.com/v1/models/GPT-J-6B-lit-v2:predict', '')
Inference service sao10k-hanami-1-v2 ready after 170.69670248031616s
Pipeline stage MKMLDeployer completed in 171.42s
run pipeline stage %s
Running pipeline stage StressChecker
Received healthy response to inference request in 4.582271575927734s
Received healthy response to inference request in 1.8826463222503662s
Received healthy response to inference request in 2.057023286819458s
Received healthy response to inference request in 3.832284927368164s
Failed to get response for submission blend_hokok_2024-09-09: ('http://neversleep-noromaid-v0-8068-v150-predictor.tenant-chaiml-guanaco.k.chaiverse.com/v1/models/GPT-J-6B-lit-v2:predict', '')
Received healthy response to inference request in 2.006648063659668s
5 requests
0 failed requests
5th percentile: 1.9074466705322266
10th percentile: 1.932247018814087
20th percentile: 1.9818477153778076
30th percentile: 2.016723108291626
40th percentile: 2.036873197555542
50th percentile: 2.057023286819458
60th percentile: 2.76712794303894
70th percentile: 3.4772325992584223
80th percentile: 3.982282257080078
90th percentile: 4.282276916503906
95th percentile: 4.43227424621582
99th percentile: 4.552272109985352
mean time: 2.872174835205078
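The percentile table above matches linear interpolation over the five logged samples; a sketch that reproduces it, assuming numpy's default percentile method (the checker's own implementation is not shown here):

    import numpy as np

    # The five healthy-response latencies logged above, in seconds.
    latencies = [4.582271575927734, 1.8826463222503662, 2.057023286819458,
                 3.832284927368164, 2.006648063659668]
    for q in (5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99):
        print(f"{q}th percentile: {np.percentile(latencies, q)}")
    print("mean time:", np.mean(latencies))  # 2.872174835205078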
Pipeline stage StressChecker completed in 17.64s
run pipeline stage %s
Running pipeline stage TriggerMKMLProfilingPipeline
run_pipeline:run_in_cloud %s
starting trigger_guanaco_pipeline args=%s
Pipeline stage TriggerMKMLProfilingPipeline completed in 5.69s
Shutdown handler de-registered
sao10k-hanami-1_v2 status is now deployed due to DeploymentManager action
Shutdown handler registered
run pipeline %s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeleter
Skipping teardown as no inference service was successfully deployed
Pipeline stage MKMLProfilerDeleter completed in 0.11s
run pipeline stage %s
Running pipeline stage MKMLProfilerTemplater
Pipeline stage MKMLProfilerTemplater completed in 0.10s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeployer
Creating inference service sao10k-hanami-1-v2-profiler
Waiting for inference service sao10k-hanami-1-v2-profiler to be ready
Inference service sao10k-hanami-1-v2-profiler ready after 160.37365436553955s
Pipeline stage MKMLProfilerDeployer completed in 160.72s
run pipeline stage %s
Running pipeline stage MKMLProfilerRunner
kubectl cp /code/guanaco/guanaco_inference_services/src/inference_scripts tenant-chaiml-guanaco/sao10k-hanami-1-v2-profiler-predictor-00001-deployment-7d7br47z:/code/chaiverse_profiler_1726349351 --namespace tenant-chaiml-guanaco
kubectl exec -it sao10k-hanami-1-v2-profiler-predictor-00001-deployment-7d7br47z --namespace tenant-chaiml-guanaco -- sh -c 'cd /code/chaiverse_profiler_1726349351 && python profiles.py profile --best_of_n 8 --auto_batch 5 --batches 1,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195 --samples 200 --input_tokens 1024 --output_tokens 64 --summary /code/chaiverse_profiler_1726349351/summary.json'
kubectl exec -it sao10k-hanami-1-v2-profiler-predictor-00001-deployment-7d7br47z --namespace tenant-chaiml-guanaco -- bash -c 'cat /code/chaiverse_profiler_1726349351/summary.json'
Pipeline stage MKMLProfilerRunner completed in 808.65s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeleter
Checking if service sao10k-hanami-1-v2-profiler is running
Tearing down inference service sao10k-hanami-1-v2-profiler
Service sao10k-hanami-1-v2-profiler has been torndown
Pipeline stage MKMLProfilerDeleter completed in 1.89s
Shutdown handler de-registered
sao10k-hanami-1_v2 status is now inactive due to auto deactivation (underperforming models are removed)
sao10k-hanami-1_v2 status is now torndown due to DeploymentManager action