submission_id: sanchuanhehe-llama3-1-lora_v1
developer_uid: sanchuanhehe
alignment_samples: 10969
alignment_score: -1.2294006107721147
best_of: 16
celo_rating: 1216.98
display_name: sanchuanhehe-llama3-1-lora_v1
formatter: {'memory_template': "{bot_name}'s Persona: {memory}\n####\n", 'prompt_template': '{prompt}\n<START>\n', 'bot_template': '{bot_name}: {message}\n', 'user_template': '{user_name}: {message}\n', 'response_template': '{bot_name}:', 'truncate_by_message': False}
generation_params: {'temperature': 1.0, 'top_p': 1.0, 'min_p': 0.0, 'top_k': 40, 'presence_penalty': 0.0, 'frequency_penalty': 0.0, 'stopping_words': ['\n'], 'max_input_tokens': 512, 'best_of': 16, 'max_output_tokens': 64}
gpu_counts: {'NVIDIA RTX A5000': 1}
is_internal_developer: False
language_model: sanchuanhehe/llama3.1_lora
latencies: [{'batch_size': 1, 'throughput': 0.9051557123203465, 'latency_mean': 1.1046868503093719, 'latency_p50': 1.1035336256027222, 'latency_p90': 1.2243805170059203}, {'batch_size': 3, 'throughput': 1.6063124130435547, 'latency_mean': 1.8603183794021607, 'latency_p50': 1.861351728439331, 'latency_p90': 2.0880107641220094}, {'batch_size': 5, 'throughput': 1.760081465135349, 'latency_mean': 2.826321997642517, 'latency_p50': 2.8347920179367065, 'latency_p90': 3.123083448410034}, {'batch_size': 6, 'throughput': 1.7652493680868457, 'latency_mean': 3.3777331471443177, 'latency_p50': 3.3891477584838867, 'latency_p90': 3.7784100770950317}, {'batch_size': 8, 'throughput': 1.772358878789678, 'latency_mean': 4.48729099392891, 'latency_p50': 4.500192403793335, 'latency_p90': 5.102722072601319}, {'batch_size': 10, 'throughput': 1.7611759990314655, 'latency_mean': 5.626819614171982, 'latency_p50': 5.658068537712097, 'latency_p90': 6.45403094291687}]
max_input_tokens: 512
max_output_tokens: 64
model_architecture: LlamaForCausalLM
model_group: sanchuanhehe/llama3.1_lo
model_name: sanchuanhehe-llama3-1-lora_v1
model_num_parameters: 8030261248.0
model_repo: sanchuanhehe/llama3.1_lora
model_size: 8B
num_battles: 10969
num_wins: 5086
propriety_score: 0.7034261241970021
propriety_total_count: 934.0
ranking_group: single
status: inactive
submission_type: basic
throughput_3p7s: 1.78
timestamp: 2024-09-11T05:45:02+00:00
us_pacific_date: 2024-09-10
win_ratio: 0.4636703436958702
Download Preference Data
Resubmit model
Shutdown handler not registered because Python interpreter is not running in the main thread
run pipeline %s
run pipeline stage %s
Running pipeline stage MKMLizer
Starting job with name sanchuanhehe-llama3-1-lora-v1-mkmlizer
Waiting for job on sanchuanhehe-llama3-1-lora-v1-mkmlizer to finish
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ╔═════════════════════════════════════════════════════════════════════╗
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ _____ __ __ ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ / _/ /_ ___ __/ / ___ ___ / / ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ / _/ / // / |/|/ / _ \/ -_) -_) / ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ /_//_/\_, /|__,__/_//_/\__/\__/_/ ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ /___/ ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ Version: 0.10.1 ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ Copyright 2023 MK ONE TECHNOLOGIES Inc. ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ https://mk1.ai ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ The license key for the current software has been verified as ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ belonging to: ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ Chai Research Corp. ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ Account ID: 7997a29f-0ceb-4cc7-9adf-840c57b4ae6f ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ Expiration: 2024-10-15 23:59:59 ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ║ ║
sanchuanhehe-llama3-1-lora-v1-mkmlizer: ╚═════════════════════════════════════════════════════════════════════╝
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
sanchuanhehe-llama3-1-lora-v1-mkmlizer: Downloaded to shared memory in 34.003s
sanchuanhehe-llama3-1-lora-v1-mkmlizer: quantizing model to /dev/shm/model_cache, profile:s0, folder:/tmp/tmpxep35xfn, device:0
sanchuanhehe-llama3-1-lora-v1-mkmlizer: Saving flywheel model at /dev/shm/model_cache
sanchuanhehe-llama3-1-lora-v1-mkmlizer: quantized model in 25.524s
sanchuanhehe-llama3-1-lora-v1-mkmlizer: Processed model sanchuanhehe/llama3.1_lora in 59.527s
sanchuanhehe-llama3-1-lora-v1-mkmlizer: creating bucket guanaco-mkml-models
sanchuanhehe-llama3-1-lora-v1-mkmlizer: Bucket 's3://guanaco-mkml-models/' created
sanchuanhehe-llama3-1-lora-v1-mkmlizer: uploading /dev/shm/model_cache to s3://guanaco-mkml-models/sanchuanhehe-llama3-1-lora-v1
sanchuanhehe-llama3-1-lora-v1-mkmlizer: cp /dev/shm/model_cache/config.json s3://guanaco-mkml-models/sanchuanhehe-llama3-1-lora-v1/config.json
sanchuanhehe-llama3-1-lora-v1-mkmlizer: cp /dev/shm/model_cache/special_tokens_map.json s3://guanaco-mkml-models/sanchuanhehe-llama3-1-lora-v1/special_tokens_map.json
sanchuanhehe-llama3-1-lora-v1-mkmlizer: cp /dev/shm/model_cache/tokenizer_config.json s3://guanaco-mkml-models/sanchuanhehe-llama3-1-lora-v1/tokenizer_config.json
sanchuanhehe-llama3-1-lora-v1-mkmlizer: cp /dev/shm/model_cache/tokenizer.json s3://guanaco-mkml-models/sanchuanhehe-llama3-1-lora-v1/tokenizer.json
sanchuanhehe-llama3-1-lora-v1-mkmlizer: cp /dev/shm/model_cache/flywheel_model.0.safetensors s3://guanaco-mkml-models/sanchuanhehe-llama3-1-lora-v1/flywheel_model.0.safetensors
sanchuanhehe-llama3-1-lora-v1-mkmlizer: Loading 0: 0%| | 0/291 [00:00<?, ?it/s] Loading 0: 2%|▏ | 7/291 [00:00<00:05, 54.12it/s] Loading 0: 5%|▌ | 16/291 [00:00<00:03, 68.92it/s] Loading 0: 9%|▊ | 25/291 [00:00<00:03, 73.88it/s] Loading 0: 14%|█▎ | 40/291 [00:00<00:02, 89.80it/s] Loading 0: 18%|█▊ | 52/291 [00:00<00:02, 90.12it/s] Loading 0: 23%|██▎ | 67/291 [00:00<00:02, 98.16it/s] Loading 0: 26%|██▋ | 77/291 [00:00<00:02, 98.03it/s] Loading 0: 30%|██▉ | 87/291 [00:01<00:08, 25.33it/s] Loading 0: 33%|███▎ | 97/291 [00:02<00:06, 31.22it/s] Loading 0: 37%|███▋ | 107/291 [00:02<00:04, 39.04it/s] Loading 0: 42%|████▏ | 121/291 [00:02<00:03, 50.85it/s] Loading 0: 45%|████▌ | 132/291 [00:02<00:02, 60.30it/s] Loading 0: 49%|████▉ | 142/291 [00:02<00:02, 56.84it/s] Loading 0: 54%|█████▍ | 157/291 [00:02<00:01, 70.01it/s] Loading 0: 58%|█████▊ | 169/291 [00:02<00:01, 74.90it/s] Loading 0: 63%|██████▎ | 184/291 [00:03<00:01, 84.20it/s] Loading 0: 67%|██████▋ | 194/291 [00:04<00:03, 26.72it/s] Loading 0: 69%|██████▉ | 202/291 [00:04<00:02, 30.81it/s] Loading 0: 73%|███████▎ | 212/291 [00:04<00:02, 38.31it/s] Loading 0: 77%|███████▋ | 223/291 [00:04<00:01, 45.63it/s] Loading 0: 82%|████████▏ | 238/291 [00:04<00:00, 58.92it/s] Loading 0: 86%|████████▌ | 250/291 [00:04<00:00, 64.71it/s] Loading 0: 89%|████████▉ | 259/291 [00:04<00:00, 66.13it/s] Loading 0: 94%|█████████▍| 274/291 [00:05<00:00, 77.37it/s] Loading 0: 98%|█████████▊| 286/291 [00:05<00:00, 79.99it/s]
Job sanchuanhehe-llama3-1-lora-v1-mkmlizer completed after 84.17s with status: succeeded
Stopping job with name sanchuanhehe-llama3-1-lora-v1-mkmlizer
Pipeline stage MKMLizer completed in 85.32s
run pipeline stage %s
Running pipeline stage MKMLTemplater
Pipeline stage MKMLTemplater completed in 0.08s
run pipeline stage %s
Running pipeline stage MKMLDeployer
Creating inference service sanchuanhehe-llama3-1-lora-v1
Waiting for inference service sanchuanhehe-llama3-1-lora-v1 to be ready
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Connection pool is full, discarding connection: %s. Connection pool size: %s
Inference service sanchuanhehe-llama3-1-lora-v1 ready after 161.01156044006348s
Pipeline stage MKMLDeployer completed in 161.47s
run pipeline stage %s
Running pipeline stage StressChecker
Failed to get response for submission neversleep-noromaid-v0_8068_v150: ('http://chaiml-llama-8b-pairwis-8189-v19-predictor.tenant-chaiml-guanaco.k.chaiverse.com/v1/models/GPT-J-6B-lit-v2:predict', 'read tcp 127.0.0.1:60012->127.0.0.1:8080: read: connection reset by peer\n')
Received healthy response to inference request in 2.703917980194092s
Received healthy response to inference request in 1.7196557521820068s
Received healthy response to inference request in 1.378427267074585s
Received healthy response to inference request in 1.4005863666534424s
Received healthy response to inference request in 1.8207643032073975s
5 requests
0 failed requests
5th percentile: 1.3828590869903565
10th percentile: 1.3872909069061279
20th percentile: 1.3961545467376708
30th percentile: 1.4644002437591552
40th percentile: 1.5920279979705811
50th percentile: 1.7196557521820068
60th percentile: 1.760099172592163
70th percentile: 1.8005425930023193
80th percentile: 1.9973950386047366
90th percentile: 2.350656509399414
95th percentile: 2.527287244796753
99th percentile: 2.668591833114624
mean time: 1.8046703338623047
Pipeline stage StressChecker completed in 12.33s
run pipeline stage %s
Running pipeline stage TriggerMKMLProfilingPipeline
run_pipeline:run_in_cloud %s
starting trigger_guanaco_pipeline args=%s
Pipeline stage TriggerMKMLProfilingPipeline completed in 6.10s
Shutdown handler de-registered
sanchuanhehe-llama3-1-lora_v1 status is now deployed due to DeploymentManager action
Shutdown handler registered
run pipeline %s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeleter
Skipping teardown as no inference service was successfully deployed
Pipeline stage MKMLProfilerDeleter completed in 0.11s
run pipeline stage %s
Running pipeline stage MKMLProfilerTemplater
Pipeline stage MKMLProfilerTemplater completed in 0.10s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeployer
Creating inference service sanchuanhehe-llama3-1-lora-v1-profiler
Waiting for inference service sanchuanhehe-llama3-1-lora-v1-profiler to be ready
Inference service sanchuanhehe-llama3-1-lora-v1-profiler ready after 160.37351417541504s
Pipeline stage MKMLProfilerDeployer completed in 160.76s
run pipeline stage %s
Running pipeline stage MKMLProfilerRunner
kubectl cp /code/guanaco/guanaco_inference_services/src/inference_scripts tenant-chaiml-guanaco/sanchuanhehe-llama3-826aaa0079fd3718ac1c912f3a91234d-deplongptb:/code/chaiverse_profiler_1726033968 --namespace tenant-chaiml-guanaco
kubectl exec -it sanchuanhehe-llama3-826aaa0079fd3718ac1c912f3a91234d-deplongptb --namespace tenant-chaiml-guanaco -- sh -c 'cd /code/chaiverse_profiler_1726033968 && python profiles.py profile --best_of_n 16 --auto_batch 5 --batches 1,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195 --samples 200 --input_tokens 512 --output_tokens 64 --summary /code/chaiverse_profiler_1726033968/summary.json'
kubectl exec -it sanchuanhehe-llama3-826aaa0079fd3718ac1c912f3a91234d-deplongptb --namespace tenant-chaiml-guanaco -- bash -c 'cat /code/chaiverse_profiler_1726033968/summary.json'
Pipeline stage MKMLProfilerRunner completed in 803.91s
run pipeline stage %s
Running pipeline stage MKMLProfilerDeleter
Checking if service sanchuanhehe-llama3-1-lora-v1-profiler is running
Tearing down inference service sanchuanhehe-llama3-1-lora-v1-profiler
Service sanchuanhehe-llama3-1-lora-v1-profiler has been torn down
Pipeline stage MKMLProfilerDeleter completed in 1.82s
Shutdown handler de-registered
sanchuanhehe-llama3-1-lora_v1 status is now inactive due to auto-deactivation, which removed underperforming models