submission_id: cycy233-l3-p-v1-c4_v4
developer_uid: shiroe40
alignment_samples: 12271
alignment_score: 0.4316246554269542
best_of: 4
celo_rating: 1219.89
display_name: auto
formatter: {'memory_template': "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{bot_name}'s Persona: {memory}\n\n", 'prompt_template': '{prompt}<|eot_id|>', 'bot_template': '<|start_header_id|>assistant<|end_header_id|>\n\n{bot_name}: {message}<|eot_id|>', 'user_template': '<|start_header_id|>user<|end_header_id|>\n\n{user_name}: {message}<|eot_id|>', 'response_template': '<|start_header_id|>assistant<|end_header_id|>\n\n{bot_name}:', 'truncate_by_message': False}
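The formatter above is a set of Llama-3 chat templates. As a minimal sketch (the concatenation order and the placeholder names bot_name, user_name, memory, and turns are assumptions for illustration; the actual serving code is not part of this log), the templates could be assembled into a single prompt like this:

    # Assembles a Llama-3 style prompt from the formatter templates above.
    formatter = {
        'memory_template': "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{bot_name}'s Persona: {memory}\n\n",
        'prompt_template': '{prompt}<|eot_id|>',
        'user_template': '<|start_header_id|>user<|end_header_id|>\n\n{user_name}: {message}<|eot_id|>',
        'bot_template': '<|start_header_id|>assistant<|end_header_id|>\n\n{bot_name}: {message}<|eot_id|>',
        'response_template': '<|start_header_id|>assistant<|end_header_id|>\n\n{bot_name}:',
    }

    def build_prompt(bot_name, user_name, memory, prompt, turns):
        # turns is a list of (speaker, message) pairs, oldest first.
        text = formatter['memory_template'].format(bot_name=bot_name, memory=memory)
        text += formatter['prompt_template'].format(prompt=prompt)
        for speaker, message in turns:
            if speaker == 'user':
                text += formatter['user_template'].format(user_name=user_name, message=message)
            else:
                text += formatter['bot_template'].format(bot_name=bot_name, message=message)
        # response_template leaves the assistant header open for the model to complete.
        return text + formatter['response_template'].format(bot_name=bot_name)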
generation_params: {'temperature': 1.0, 'top_p': 0.9, 'min_p': 0.05, 'top_k': 80, 'presence_penalty': 0.0, 'frequency_penalty': 0.0, 'stopping_words': ['<|end_header_id|>', '<|eot_id|>'], 'max_input_tokens': 512, 'best_of': 4, 'max_output_tokens': 64}
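The log does not name the inference engine that consumes these generation_params. Purely as an illustrative sketch, assuming a vLLM-style server (an assumption, not stated in the log), the same settings would map onto sampling parameters like this:

    # Hypothetical mapping of generation_params onto vLLM sampling settings.
    from vllm import SamplingParams

    sampling_params = SamplingParams(
        temperature=1.0,
        top_p=0.9,
        min_p=0.05,
        top_k=80,
        presence_penalty=0.0,
        frequency_penalty=0.0,
        stop=['<|end_header_id|>', '<|eot_id|>'],
        best_of=4,       # matches best_of: 4 above
        max_tokens=64,   # max_output_tokens
    )
    # max_input_tokens: 512 would be enforced by truncating the prompt before generation.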
gpu_counts: {'NVIDIA RTX A5000': 1}
is_internal_developer: False
language_model: cycy233/L3-p-v1-c4
latencies: [{'batch_size': 1, 'throughput': 1.0035656022806794, 'latency_mean': 0.9962413454055786, 'latency_p50': 1.0026366710662842, 'latency_p90': 1.1026429653167724}, {'batch_size': 5, 'throughput': 3.1185769517850606, 'latency_mean': 1.5939801609516144, 'latency_p50': 1.5888537168502808, 'latency_p90': 1.7936667442321776}, {'batch_size': 10, 'throughput': 4.232919677506588, 'latency_mean': 2.3380055940151214, 'latency_p50': 2.328342318534851, 'latency_p90': 2.636258292198181}, {'batch_size': 15, 'throughput': 4.649907003057253, 'latency_mean': 3.1847725677490235, 'latency_p50': 3.1700278520584106, 'latency_p90': 3.6573745965957642}, {'batch_size': 20, 'throughput': 4.824884124898246, 'latency_mean': 4.066748658418655, 'latency_p50': 4.053651452064514, 'latency_p90': 4.608044147491455}, {'batch_size': 25, 'throughput': 4.902400932290997, 'latency_mean': 4.99918382525444, 'latency_p50': 5.047520995140076, 'latency_p90': 5.687427926063537}, {'batch_size': 30, 'throughput': 4.989539203057516, 'latency_mean': 5.842621558904648, 'latency_p50': 5.773520350456238, 'latency_p90': 6.6702505350112915}]
max_input_tokens: 512
max_output_tokens: 64
model_architecture: LlamaForCausalLM
model_group: cycy233/L3-p-v1-c4
model_name: auto
model_num_parameters: 8030261248.0
model_repo: cycy233/L3-p-v1-c4
model_size: 8B
num_battles: 12270
num_wins: 5728
propriety_score: 0.7252336448598131
propriety_total_count: 1070.0
ranking_group: single
status: inactive
submission_type: basic
throughput_3p7s: 4.84
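throughput_3p7s is not defined in this log; one plausible reading is the throughput interpolated at a roughly 3.7 s latency target from the batch-size sweep in the latencies field above. The sketch below illustrates only that assumption; a plain linear interpolation over mean latency gives about 4.75, so the recorded 4.84 was presumably derived somewhat differently:

    # Interpolate throughput at a 3.7 s mean-latency target from the latency sweep above.
    import numpy as np

    latency_mean = [0.996, 1.594, 2.338, 3.185, 4.067, 4.999, 5.843]   # batch sizes 1..30
    throughput   = [1.004, 3.119, 4.233, 4.650, 4.825, 4.902, 4.990]

    print(round(float(np.interp(3.7, latency_mean, throughput)), 2))   # ~4.75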
timestamp: 2024-09-11T08:35:37+00:00
us_pacific_date: 2024-09-11
win_ratio: 0.46682966585167074
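The win ratio is simply the fraction of battles won: win_ratio = num_wins / num_battles = 5728 / 12270 ≈ 0.46683.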
Shutdown handler not registered because Python interpreter is not running in the main thread
run pipeline %s
run pipeline stage %s
Running pipeline stage MKMLizer
Starting job with name cycy233-l3-p-v1-c4-v4-mkmlizer
Waiting for job on cycy233-l3-p-v1-c4-v4-mkmlizer to finish
cycy233-l3-p-v1-c4-v4-mkmlizer: ╔═════════════════════════════════════════════════════════════════════╗
cycy233-l3-p-v1-c4-v4-mkmlizer: ║ [flywheel ASCII-art banner] ║
cycy233-l3-p-v1-c4-v4-mkmlizer: ║ ║
cycy233-l3-p-v1-c4-v4-mkmlizer: ║ Version: 0.10.1 ║
cycy233-l3-p-v1-c4-v4-mkmlizer: ║ Copyright 2023 MK ONE TECHNOLOGIES Inc. ║
cycy233-l3-p-v1-c4-v4-mkmlizer: ║ https://mk1.ai ║
cycy233-l3-p-v1-c4-v4-mkmlizer: ║ ║
cycy233-l3-p-v1-c4-v4-mkmlizer: ║ The license key for the current software has been verified as ║
cycy233-l3-p-v1-c4-v4-mkmlizer: ║ belonging to: ║
cycy233-l3-p-v1-c4-v4-mkmlizer: ║ ║
cycy233-l3-p-v1-c4-v4-mkmlizer: ║ Chai Research Corp. ║
cycy233-l3-p-v1-c4-v4-mkmlizer: ║ Account ID: 7997a29f-0ceb-4cc7-9adf-840c57b4ae6f ║
cycy233-l3-p-v1-c4-v4-mkmlizer: ║ Expiration: 2024-10-15 23:59:59 ║
cycy233-l3-p-v1-c4-v4-mkmlizer: ║ ║
cycy233-l3-p-v1-c4-v4-mkmlizer: ╚═════════════════════════════════════════════════════════════════════╝
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
cycy233-l3-p-v1-c4-v4-mkmlizer: Downloaded to shared memory in 22.355s
cycy233-l3-p-v1-c4-v4-mkmlizer: quantizing model to /dev/shm/model_cache, profile:s0, folder:/tmp/tmp0naae383, device:0
cycy233-l3-p-v1-c4-v4-mkmlizer: Saving flywheel model at /dev/shm/model_cache
cycy233-l3-p-v1-c4-v4-mkmlizer: quantized model in 25.789s
cycy233-l3-p-v1-c4-v4-mkmlizer: Processed model cycy233/L3-p-v1-c4 in 48.144s
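(The 48.144 s total is the sum of the two stages above: 22.355 s to download plus 25.789 s to quantize.)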
cycy233-l3-p-v1-c4-v4-mkmlizer: creating bucket guanaco-mkml-models
cycy233-l3-p-v1-c4-v4-mkmlizer: Bucket 's3://guanaco-mkml-models/' created
cycy233-l3-p-v1-c4-v4-mkmlizer: uploading /dev/shm/model_cache to s3://guanaco-mkml-models/cycy233-l3-p-v1-c4-v4
cycy233-l3-p-v1-c4-v4-mkmlizer: cp /dev/shm/model_cache/config.json s3://guanaco-mkml-models/cycy233-l3-p-v1-c4-v4/config.json
cycy233-l3-p-v1-c4-v4-mkmlizer: cp /dev/shm/model_cache/special_tokens_map.json s3://guanaco-mkml-models/cycy233-l3-p-v1-c4-v4/special_tokens_map.json
cycy233-l3-p-v1-c4-v4-mkmlizer: cp /dev/shm/model_cache/tokenizer_config.json s3://guanaco-mkml-models/cycy233-l3-p-v1-c4-v4/tokenizer_config.json
cycy233-l3-p-v1-c4-v4-mkmlizer: cp /dev/shm/model_cache/tokenizer.json s3://guanaco-mkml-models/cycy233-l3-p-v1-c4-v4/tokenizer.json
cycy233-l3-p-v1-c4-v4-mkmlizer: cp /dev/shm/model_cache/flywheel_model.0.safetensors s3://guanaco-mkml-models/cycy233-l3-p-v1-c4-v4/flywheel_model.0.safetensors
cycy233-l3-p-v1-c4-v4-mkmlizer: Loading 0: 0%| | 0/291 [00:00<?, ?it/s] ... Loading 0: 98%|█████████▊| 286/291 [00:05<00:00, 78.83it/s] (intermediate progress-bar updates elided)
Job cycy233-l3-p-v1-c4-v4-mkmlizer completed after 65.08s with status: succeeded
Stopping job with name cycy233-l3-p-v1-c4-v4-mkmlizer
Pipeline stage MKMLizer completed in 66.44s
run pipeline stage %s
Running pipeline stage MKMLTemplater
Pipeline stage MKMLTemplater completed in 0.07s
run pipeline stage %s
Running pipeline stage MKMLDeployer
Creating inference service cycy233-l3-p-v1-c4-v4
Waiting for inference service cycy233-l3-p-v1-c4-v4 to be ready
Failed to get response for submission blend_jugel_2024-09-09: ('http://chaiml-elo-alignment-run-3-v34-predictor.tenant-chaiml-guanaco.k.chaiverse.com/v1/models/GPT-J-6B-lit-v2:predict', '{"error":"ValueError : [TypeError(\\"\'numpy.int64\' object is not iterable\\"), TypeError(\'vars() argument must have __dict__ attribute\')]"}')
Inference service cycy233-l3-p-v1-c4-v4 ready after 160.77799654006958s
Pipeline stage MKMLDeployer completed in 161.22s
run pipeline stage %s
Running pipeline stage StressChecker
Received healthy response to inference request in 1.7239856719970703s
Received healthy response to inference request in 1.3135442733764648s
Received healthy response to inference request in 1.571608066558838s
Received healthy response to inference request in 1.4296529293060303s
Received healthy response to inference request in 1.940007209777832s
5 requests
0 failed requests
5th percentile: 1.336766004562378
10th percentile: 1.359987735748291
20th percentile: 1.4064311981201172
30th percentile: 1.4580439567565917
40th percentile: 1.5148260116577148
50th percentile: 1.571608066558838
60th percentile: 1.632559108734131
70th percentile: 1.6935101509094237
80th percentile: 1.7671899795532227
90th percentile: 1.8535985946655273
95th percentile: 1.8968029022216797
99th percentile: 1.9313663482666015
mean time: 1.595759630203247
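The percentile figures above are consistent with linearly interpolated percentiles over the five healthy response times; the small sketch below reproduces them (the use of NumPy and its default linear interpolation is an assumption, the log does not say how they were computed):

    # Reproduce the StressChecker percentiles from the five healthy response times above.
    import numpy as np

    latencies = [1.7239856719970703, 1.3135442733764648, 1.571608066558838,
                 1.4296529293060303, 1.940007209777832]

    for p in (5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99):
        print(f'{p}th percentile: {np.percentile(latencies, p)}')
    print('mean time:', np.mean(latencies))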
Pipeline stage StressChecker completed in 8.58s
run pipeline stage %s
Running pipeline stage TriggerMKMLProfilingPipeline
run_pipeline:run_in_cloud %s
starting trigger_guanaco_pipeline args=%s
Pipeline stage TriggerMKMLProfilingPipeline completed in 5.74s
Shutdown handler de-registered
cycy233-l3-p-v1-c4_v4 status is now deployed due to DeploymentManager action
Shutdown handler registered
run pipeline %s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeleter
Skipping teardown as no inference service was successfully deployed
Pipeline stage MKMLProfilerDeleter completed in 0.11s
run pipeline stage %s
Running pipeline stage MKMLProfilerTemplater
Pipeline stage MKMLProfilerTemplater completed in 0.12s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeployer
Creating inference service cycy233-l3-p-v1-c4-v4-profiler
Waiting for inference service cycy233-l3-p-v1-c4-v4-profiler to be ready
Inference service cycy233-l3-p-v1-c4-v4-profiler ready after 160.37858152389526s
Pipeline stage MKMLProfilerDeployer completed in 160.74s
run pipeline stage %s
Running pipeline stage MKMLProfilerRunner
kubectl cp /code/guanaco/guanaco_inference_services/src/inference_scripts tenant-chaiml-guanaco/cycy233-l3-p-v1-c4-v4-profiler-predictor-00001-deployment-8cbgc:/code/chaiverse_profiler_1726044182 --namespace tenant-chaiml-guanaco
kubectl exec -it cycy233-l3-p-v1-c4-v4-profiler-predictor-00001-deployment-8cbgc --namespace tenant-chaiml-guanaco -- sh -c 'cd /code/chaiverse_profiler_1726044182 && python profiles.py profile --best_of_n 4 --auto_batch 5 --batches 1,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195 --samples 200 --input_tokens 512 --output_tokens 64 --summary /code/chaiverse_profiler_1726044182/summary.json'
kubectl exec -it cycy233-l3-p-v1-c4-v4-profiler-predictor-00001-deployment-8cbgc --namespace tenant-chaiml-guanaco -- bash -c 'cat /code/chaiverse_profiler_1726044182/summary.json'
Pipeline stage MKMLProfilerRunner completed in 483.43s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeleter
Checking if service cycy233-l3-p-v1-c4-v4-profiler is running
Tearing down inference service cycy233-l3-p-v1-c4-v4-profiler
Service cycy233-l3-p-v1-c4-v4-profiler has been torn down
Pipeline stage MKMLProfilerDeleter completed in 2.19s
Shutdown handler de-registered
cycy233-l3-p-v1-c4_v4 status is now inactive due to auto deactivation (removed underperforming models)