submission_id: sao10k-hanami-2_v1
developer_uid: sao10k
alignment_samples: 11435
alignment_score: -0.49550024439766155
best_of: 8
celo_rating: 1219.06
display_name: Hanami2
formatter: {'memory_template': "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{bot_name}'s Persona: {memory}\n\n", 'prompt_template': '{prompt}<|eot_id|>', 'bot_template': '<|start_header_id|>assistant<|end_header_id|>\n\n{bot_name}: {message}<|eot_id|>', 'user_template': '<|start_header_id|>user<|end_header_id|>\n\n{user_name}: {message}<|eot_id|>', 'response_template': '<|start_header_id|>assistant<|end_header_id|>\n\n{bot_name}:', 'truncate_by_message': False}
generation_params: {'temperature': 1.2, 'top_p': 1.0, 'min_p': 0.2, 'top_k': 50, 'presence_penalty': 0.0, 'frequency_penalty': 0.0, 'stopping_words': ['\n', '\n\n', '<|eot_id|>', '\n\n{user_name}', '\nYou:', '<|end_header_id|>'], 'max_input_tokens': 1024, 'best_of': 8, 'max_output_tokens': 64}
gpu_counts: {'NVIDIA RTX A5000': 1}
is_internal_developer: False
language_model: Sao10K/Hanami-2
latencies: [{'batch_size': 1, 'throughput': 0.8668801245244013, 'latency_mean': 1.153506624698639, 'latency_p50': 1.1504459381103516, 'latency_p90': 1.2616971492767333}, {'batch_size': 4, 'throughput': 1.8157872181845123, 'latency_mean': 2.1920435988903044, 'latency_p50': 2.1939196586608887, 'latency_p90': 2.4789340496063232}, {'batch_size': 5, 'throughput': 1.948476383555466, 'latency_mean': 2.5493593287467955, 'latency_p50': 2.5506266355514526, 'latency_p90': 2.8575411319732664}, {'batch_size': 8, 'throughput': 2.105749189595719, 'latency_mean': 3.783366450071335, 'latency_p50': 3.8174272775650024, 'latency_p90': 4.229527735710144}, {'batch_size': 10, 'throughput': 2.153707502928161, 'latency_mean': 4.609490240812302, 'latency_p50': 4.602207064628601, 'latency_p90': 5.258481478691101}, {'batch_size': 12, 'throughput': 2.1522378718986963, 'latency_mean': 5.525738024711609, 'latency_p50': 5.571789860725403, 'latency_p90': 6.228268551826477}, {'batch_size': 15, 'throughput': 2.1511105704833082, 'latency_mean': 6.888236154317855, 'latency_p50': 6.920414090156555, 'latency_p90': 7.693304705619812}]
max_input_tokens: 1024
max_output_tokens: 64
model_architecture: LlamaForCausalLM
model_group: Sao10K/Hanami-2
model_name: Hanami2
model_num_parameters: 8030261248.0
model_repo: Sao10K/Hanami-2
model_size: 8B
num_battles: 11434
num_wins: 5352
propriety_score: 0.7539906103286385
propriety_total_count: 1065.0
ranking_group: single
status: inactive
submission_type: basic
throughput_3p7s: 2.11
timestamp: 2024-09-07T18:02:09+00:00
us_pacific_date: 2024-09-07
win_ratio: 0.46807766311002275
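The win_ratio field above is simply num_wins / num_battles, and the formatter block is a set of Python-style format strings that serialize the persona (memory), scenario prompt, and chat turns into a single Llama-3-style prompt. Below is a minimal sketch of that assembly, assuming plain str.format substitution; build_prompt, the turn encoding, and the final assertion are illustrative only, not the platform's actual serving code.

# Minimal sketch (assumption: plain str.format substitution; not the
# platform's serving code) of how the formatter templates compose a prompt.
FORMATTER = {
    "memory_template": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{bot_name}'s Persona: {memory}\n\n",
    "prompt_template": "{prompt}<|eot_id|>",
    "bot_template": "<|start_header_id|>assistant<|end_header_id|>\n\n{bot_name}: {message}<|eot_id|>",
    "user_template": "<|start_header_id|>user<|end_header_id|>\n\n{user_name}: {message}<|eot_id|>",
    "response_template": "<|start_header_id|>assistant<|end_header_id|>\n\n{bot_name}:",
}

def build_prompt(bot_name, user_name, memory, prompt, turns):
    """Serialize persona, scenario prompt, and chat turns into one string."""
    parts = [
        FORMATTER["memory_template"].format(bot_name=bot_name, memory=memory),
        FORMATTER["prompt_template"].format(prompt=prompt),
    ]
    for speaker, message in turns:  # turns: list of ("user" | "bot", text) pairs
        if speaker == "user":
            parts.append(FORMATTER["user_template"].format(user_name=user_name, message=message))
        else:
            parts.append(FORMATTER["bot_template"].format(bot_name=bot_name, message=message))
    # Open the assistant header so the model completes the bot's next turn.
    parts.append(FORMATTER["response_template"].format(bot_name=bot_name))
    return "".join(parts)

# Sanity check on the reported win rate: num_wins / num_battles.
assert abs(5352 / 11434 - 0.46807766311002275) < 1e-12

Generation then runs with the generation_params shown above (temperature 1.2, min_p 0.2, top_k 50, best_of 8, 64 max output tokens), with the serialized prompt truncated to 1024 input tokens.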
Shutdown handler not registered because Python interpreter is not running in the main thread
run pipeline %s
run pipeline stage %s
Running pipeline stage MKMLizer
Starting job with name sao10k-hanami-2-v1-mkmlizer
Waiting for job on sao10k-hanami-2-v1-mkmlizer to finish
sao10k-hanami-2-v1-mkmlizer: ╔═════════════════════════════════════════════════════════════════════╗
sao10k-hanami-2-v1-mkmlizer: ║ _____ __ __ ║
sao10k-hanami-2-v1-mkmlizer: ║ / _/ /_ ___ __/ / ___ ___ / / ║
sao10k-hanami-2-v1-mkmlizer: ║ / _/ / // / |/|/ / _ \/ -_) -_) / ║
sao10k-hanami-2-v1-mkmlizer: ║ /_//_/\_, /|__,__/_//_/\__/\__/_/ ║
sao10k-hanami-2-v1-mkmlizer: ║ /___/ ║
sao10k-hanami-2-v1-mkmlizer: ║ ║
sao10k-hanami-2-v1-mkmlizer: ║ Version: 0.10.1 ║
sao10k-hanami-2-v1-mkmlizer: ║ Copyright 2023 MK ONE TECHNOLOGIES Inc. ║
sao10k-hanami-2-v1-mkmlizer: ║ https://mk1.ai ║
sao10k-hanami-2-v1-mkmlizer: ║ ║
sao10k-hanami-2-v1-mkmlizer: ║ The license key for the current software has been verified as ║
sao10k-hanami-2-v1-mkmlizer: ║ belonging to: ║
sao10k-hanami-2-v1-mkmlizer: ║ ║
sao10k-hanami-2-v1-mkmlizer: ║ Chai Research Corp. ║
sao10k-hanami-2-v1-mkmlizer: ║ Account ID: 7997a29f-0ceb-4cc7-9adf-840c57b4ae6f ║
sao10k-hanami-2-v1-mkmlizer: ║ Expiration: 2024-10-15 23:59:59 ║
sao10k-hanami-2-v1-mkmlizer: ║ ║
sao10k-hanami-2-v1-mkmlizer: ╚═════════════════════════════════════════════════════════════════════╝
sao10k-hanami-2-v1-mkmlizer: Downloaded to shared memory in 33.154s
sao10k-hanami-2-v1-mkmlizer: quantizing model to /dev/shm/model_cache, profile:s0, folder:/tmp/tmp8uat40nz, device:0
sao10k-hanami-2-v1-mkmlizer: Saving flywheel model at /dev/shm/model_cache
Failed to get response for submission blend_sehof_2024-08-22: ('http://mistralai-mixtral-8x7b-3473-v130-predictor.tenant-chaiml-guanaco.k.chaiverse.com/v1/models/GPT-J-6B-lit-v2:predict', 'read tcp 127.0.0.1:49226->127.0.0.1:8080: read: connection reset by peer\n')
sao10k-hanami-2-v1-mkmlizer: quantized model in 26.580s
sao10k-hanami-2-v1-mkmlizer: Processed model Sao10K/Hanami-2 in 59.734s
sao10k-hanami-2-v1-mkmlizer: creating bucket guanaco-mkml-models
sao10k-hanami-2-v1-mkmlizer: Bucket 's3://guanaco-mkml-models/' created
sao10k-hanami-2-v1-mkmlizer: uploading /dev/shm/model_cache to s3://guanaco-mkml-models/sao10k-hanami-2-v1
sao10k-hanami-2-v1-mkmlizer: cp /dev/shm/model_cache/config.json s3://guanaco-mkml-models/sao10k-hanami-2-v1/config.json
sao10k-hanami-2-v1-mkmlizer: cp /dev/shm/model_cache/special_tokens_map.json s3://guanaco-mkml-models/sao10k-hanami-2-v1/special_tokens_map.json
sao10k-hanami-2-v1-mkmlizer: cp /dev/shm/model_cache/tokenizer_config.json s3://guanaco-mkml-models/sao10k-hanami-2-v1/tokenizer_config.json
sao10k-hanami-2-v1-mkmlizer: cp /dev/shm/model_cache/tokenizer.json s3://guanaco-mkml-models/sao10k-hanami-2-v1/tokenizer.json
sao10k-hanami-2-v1-mkmlizer: cp /dev/shm/model_cache/flywheel_model.0.safetensors s3://guanaco-mkml-models/sao10k-hanami-2-v1/flywheel_model.0.safetensors
sao10k-hanami-2-v1-mkmlizer: Loading 0: 0%| | 0/291 [00:00<?, ?it/s] Loading 0: 1%| | 2/291 [00:04<11:35, 2.41s/it] Loading 0: 2%|▏ | 6/291 [00:04<03:05, 1.54it/s] Loading 0: 4%|▍ | 13/291 [00:05<01:06, 4.21it/s] Loading 0: 6%|▌ | 18/291 [00:05<00:41, 6.62it/s] Loading 0: 8%|▊ | 23/291 [00:05<00:27, 9.62it/s] Loading 0: 10%|▉ | 28/291 [00:05<00:19, 13.27it/s] Loading 0: 11%|█▏ | 33/291 [00:05<00:16, 15.91it/s] Loading 0: 14%|█▎ | 40/291 [00:05<00:10, 22.90it/s] Loading 0: 15%|█▌ | 45/291 [00:05<00:09, 26.94it/s] Loading 0: 18%|█▊ | 51/291 [00:05<00:08, 28.92it/s] Loading 0: 20%|██ | 59/291 [00:06<00:06, 38.08it/s] Loading 0: 22%|██▏ | 65/291 [00:06<00:05, 38.09it/s] Loading 0: 24%|██▍ | 70/291 [00:06<00:05, 39.10it/s] Loading 0: 26%|██▌ | 76/291 [00:06<00:04, 43.30it/s] Loading 0: 28%|██▊ | 82/291 [00:06<00:04, 46.13it/s] Loading 0: 30%|███ | 88/291 [00:06<00:04, 42.20it/s] Loading 0: 33%|███▎ | 95/291 [00:06<00:04, 48.16it/s] Loading 0: 35%|███▍ | 101/291 [00:06<00:04, 43.37it/s] Loading 0: 36%|███▋ | 106/291 [00:07<00:06, 29.76it/s] Loading 0: 39%|███▉ | 113/291 [00:07<00:04, 36.33it/s] Loading 0: 41%|████ | 119/291 [00:07<00:04, 36.83it/s] Loading 0: 43%|████▎ | 124/291 [00:07<00:04, 38.16it/s] Loading 0: 45%|████▌ | 131/291 [00:07<00:03, 44.15it/s] Loading 0: 47%|████▋ | 137/291 [00:07<00:03, 42.20it/s] Loading 0: 49%|████▉ | 142/291 [00:08<00:03, 41.88it/s] Loading 0: 51%|█████ | 149/291 [00:08<00:02, 47.39it/s] Loading 0: 53%|█████▎ | 155/291 [00:08<00:03, 44.64it/s] Loading 0: 55%|█████▍ | 160/291 [00:08<00:02, 43.88it/s] Loading 0: 57%|█████▋ | 167/291 [00:08<00:02, 49.51it/s] Loading 0: 59%|█████▉ | 173/291 [00:08<00:02, 46.30it/s] Loading 0: 61%|██████ | 178/291 [00:08<00:02, 44.85it/s] Loading 0: 64%|██████▎ | 185/291 [00:08<00:02, 49.19it/s] Loading 0: 66%|██████▌ | 191/291 [00:09<00:02, 44.11it/s] Loading 0: 67%|██████▋ | 196/291 [00:09<00:02, 42.59it/s] Loading 0: 70%|██████▉ | 203/291 [00:09<00:01, 47.49it/s] Loading 0: 71%|███████▏ | 208/291 [00:09<00:01, 47.58it/s] Loading 0: 73%|███████▎ | 213/291 [00:09<00:01, 39.88it/s] Loading 0: 76%|███████▌ | 220/291 [00:09<00:01, 46.50it/s] Loading 0: 77%|███████▋ | 225/291 [00:09<00:01, 46.31it/s] Loading 0: 79%|███████▉ | 231/291 [00:10<00:01, 41.43it/s] Loading 0: 82%|████████▏ | 239/291 [00:10<00:01, 49.08it/s] Loading 0: 84%|████████▍ | 245/291 [00:10<00:01, 44.99it/s] Loading 0: 86%|████████▌ | 250/291 [00:10<00:01, 30.37it/s] Loading 0: 88%|████████▊ | 257/291 [00:10<00:00, 36.69it/s] Loading 0: 90%|█████████ | 262/291 [00:10<00:00, 39.35it/s] Loading 0: 92%|█████████▏| 267/291 [00:11<00:00, 34.60it/s] Loading 0: 95%|█████████▍| 275/291 [00:11<00:00, 43.16it/s] Loading 0: 97%|█████████▋| 281/291 [00:11<00:00, 40.54it/s] Loading 0: 98%|█████████▊| 286/291 [00:11<00:00, 40.09it/s]
Job sao10k-hanami-2-v1-mkmlizer completed after 85.5s with status: succeeded
Stopping job with name sao10k-hanami-2-v1-mkmlizer
Pipeline stage MKMLizer completed in 86.59s
run pipeline stage %s
Running pipeline stage MKMLTemplater
Pipeline stage MKMLTemplater completed in 0.08s
run pipeline stage %s
Running pipeline stage MKMLDeployer
Creating inference service sao10k-hanami-2-v1
Waiting for inference service sao10k-hanami-2-v1 to be ready
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Failed to get response for submission blend_remul_2024-08-22: ('http://chaiml-llama-8b-pairwis-8189-v19-predictor.tenant-chaiml-guanaco.k.chaiverse.com/v1/models/GPT-J-6B-lit-v2:predict', 'read tcp 127.0.0.1:33798->127.0.0.1:8080: read: connection reset by peer\n')
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Inference service sao10k-hanami-2-v1 ready after 150.56451106071472s
Pipeline stage MKMLDeployer completed in 151.50s
run pipeline stage %s
Running pipeline stage StressChecker
Received healthy response to inference request in 2.1175765991210938s
Received healthy response to inference request in 1.6966838836669922s
Received healthy response to inference request in 1.9430243968963623s
Received healthy response to inference request in 1.4487237930297852s
Received healthy response to inference request in 2.0333638191223145s
5 requests
0 failed requests
5th percentile: 1.4983158111572266
10th percentile: 1.547907829284668
20th percentile: 1.6470918655395508
30th percentile: 1.7459519863128663
40th percentile: 1.8444881916046143
50th percentile: 1.9430243968963623
60th percentile: 1.9791601657867433
70th percentile: 2.015295934677124
80th percentile: 2.0502063751220705
90th percentile: 2.083891487121582
95th percentile: 2.1007340431213377
99th percentile: 2.1142080879211425
mean time: 1.8478744983673097
Pipeline stage StressChecker completed in 10.87s
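The StressChecker summary above can be reproduced exactly from the five response times; a minimal sketch, assuming linear interpolation between order statistics (numpy's default percentile method):

import numpy as np

# The five "healthy response" latencies reported above, in seconds.
latencies = [
    2.1175765991210938,
    1.6966838836669922,
    1.9430243968963623,
    1.4487237930297852,
    2.0333638191223145,
]

for p in (5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99):
    print(f"{p}th percentile: {np.percentile(latencies, p)}")
print(f"mean time: {np.mean(latencies)}")

With only five samples, each percentile is an interpolation between neighbouring requests, so these figures are best read as a smoke test rather than a latency distribution.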
run pipeline stage %s
Running pipeline stage TriggerMKMLProfilingPipeline
run_pipeline:run_in_cloud %s
starting trigger_guanaco_pipeline args=%s
Pipeline stage TriggerMKMLProfilingPipeline completed in 5.68s
Shutdown handler de-registered
sao10k-hanami-2_v1 status is now deployed due to DeploymentManager action
Shutdown handler registered
run pipeline %s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeleter
Skipping teardown as no inference service was successfully deployed
Pipeline stage MKMLProfilerDeleter completed in 0.10s
run pipeline stage %s
Running pipeline stage MKMLProfilerTemplater
Pipeline stage MKMLProfilerTemplater completed in 0.10s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeployer
Creating inference service sao10k-hanami-2-v1-profiler
Waiting for inference service sao10k-hanami-2-v1-profiler to be ready
Inference service sao10k-hanami-2-v1-profiler ready after 150.34396767616272s
Pipeline stage MKMLProfilerDeployer completed in 150.70s
run pipeline stage %s
Running pipeline stage MKMLProfilerRunner
kubectl cp /code/guanaco/guanaco_inference_services/src/inference_scripts tenant-chaiml-guanaco/sao10k-hanami-2-v1-profiler-predictor-00001-deployment-7ffzdk5v:/code/chaiverse_profiler_1725732586 --namespace tenant-chaiml-guanaco
kubectl exec -it sao10k-hanami-2-v1-profiler-predictor-00001-deployment-7ffzdk5v --namespace tenant-chaiml-guanaco -- sh -c 'cd /code/chaiverse_profiler_1725732586 && python profiles.py profile --best_of_n 8 --auto_batch 5 --batches 1,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195 --samples 200 --input_tokens 1024 --output_tokens 64 --summary /code/chaiverse_profiler_1725732586/summary.json'
kubectl exec -it sao10k-hanami-2-v1-profiler-predictor-00001-deployment-7ffzdk5v --namespace tenant-chaiml-guanaco -- bash -c 'cat /code/chaiverse_profiler_1725732586/summary.json'
Pipeline stage MKMLProfilerRunner completed in 824.27s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeleter
Checking if service sao10k-hanami-2-v1-profiler is running
Tearing down inference service sao10k-hanami-2-v1-profiler
Service sao10k-hanami-2-v1-profiler has been torn down
Pipeline stage MKMLProfilerDeleter completed in 1.73s
Shutdown handler de-registered
sao10k-hanami-2_v1 status is now inactive due to auto deactivation of underperforming models
