submission_id: stark2000s-ult-du-gpt-v1-7_v1
developer_uid: stark2000s
alignment_samples: 11360
alignment_score: -0.022708953778949718
best_of: 16
celo_rating: 1179.66
display_name: stark2000s-ult-du-gpt-v1-7_v1
formatter: {'memory_template': "{bot_name}'s Persona: {memory}\n####\n", 'prompt_template': '{prompt}\n<START>\n', 'bot_template': '{bot_name}: {message}\n', 'user_template': '{user_name}: {message}\n', 'response_template': '{bot_name}:', 'truncate_by_message': False}
generation_params: {'temperature': 1.15, 'top_p': 0.95, 'min_p': 0.0, 'top_k': 40, 'presence_penalty': 0.0, 'frequency_penalty': 0.0, 'stopping_words': ['\n'], 'max_input_tokens': 512, 'best_of': 16, 'max_output_tokens': 64}
gpu_counts: {'NVIDIA RTX A5000': 1}
is_internal_developer: False
language_model: stark2000s/ult-du-gpt-v1.7
latencies: [{'batch_size': 1, 'throughput': 0.9014080632564649, 'latency_mean': 1.1092844820022583, 'latency_p50': 1.100347638130188, 'latency_p90': 1.2646196842193604}, {'batch_size': 4, 'throughput': 1.79869735419625, 'latency_mean': 2.217616169452667, 'latency_p50': 2.2185858488082886, 'latency_p90': 2.4661107063293457}, {'batch_size': 5, 'throughput': 1.8895171418398908, 'latency_mean': 2.632147508859634, 'latency_p50': 2.61978542804718, 'latency_p90': 2.9481388568878173}, {'batch_size': 8, 'throughput': 2.016394531945917, 'latency_mean': 3.9393192660808563, 'latency_p50': 3.9383301734924316, 'latency_p90': 4.438633131980896}, {'batch_size': 10, 'throughput': 2.0390389713145463, 'latency_mean': 4.851014424562454, 'latency_p50': 4.817586064338684, 'latency_p90': 5.681129813194275}, {'batch_size': 12, 'throughput': 2.0446700930169226, 'latency_mean': 5.80317134141922, 'latency_p50': 5.791596293449402, 'latency_p90': 6.7988077878952025}, {'batch_size': 15, 'throughput': 2.0184201900921424, 'latency_mean': 7.277418428659439, 'latency_p50': 7.381770968437195, 'latency_p90': 8.15075261592865}]
max_input_tokens: 512
max_output_tokens: 64
model_architecture: LlamaForCausalLM
model_group: stark2000s/ult-du-gpt-v1
model_name: stark2000s-ult-du-gpt-v1-7_v1
model_num_parameters: 8030261248.0
model_repo: stark2000s/ult-du-gpt-v1.7
model_size: 8B
num_battles: 11360
num_wins: 4676
propriety_score: 0.7471967380224261
propriety_total_count: 981.0
ranking_group: single
status: inactive
submission_type: basic
throughput_3p7s: 2.01
timestamp: 2024-09-09T08:57:56+00:00
us_pacific_date: 2024-09-09
win_ratio: 0.41161971830985916
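The formatter field above fully determines how a conversation is flattened into the model's input string. Below is a minimal sketch of that assembly, assuming the conventional order (persona memory, then scenario prompt, then alternating turns, closed by the open-ended response template); the helper name is hypothetical, and only the template strings themselves come from the record:

    def build_prompt(bot_name, user_name, memory, prompt, history):
        """Assemble model input from the formatter templates (sketch only)."""
        # memory_template, then prompt_template
        text = "{bot_name}'s Persona: {memory}\n####\n".format(bot_name=bot_name, memory=memory)
        text += "{prompt}\n<START>\n".format(prompt=prompt)
        # alternating user_template / bot_template turns
        for speaker, message in history:
            if speaker == "bot":
                text += "{bot_name}: {message}\n".format(bot_name=bot_name, message=message)
            else:
                text += "{user_name}: {message}\n".format(user_name=user_name, message=message)
        # response_template leaves the bot's turn open for the model to complete
        return text + "{bot_name}:".format(bot_name=bot_name)

With truncate_by_message set to False, the assembled string is presumably clipped at the token level to max_input_tokens (512) rather than dropped message-by-message.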
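The generation_params are standard top-k/nucleus sampling with a mild temperature. Purely as an illustration of what these knobs do (a generic sketch, not the serving stack's actual code); note that min_p is 0.0 here, so that filter is a no-op:

    import numpy as np

    def sample_token(logits, temperature=1.15, top_k=40, top_p=0.95, min_p=0.0, rng=None):
        """Generic temperature / top-k / min-p / top-p filtering (sketch only)."""
        rng = rng or np.random.default_rng()
        logits = np.asarray(logits, dtype=np.float64) / temperature
        probs = np.exp(logits - logits.max())
        probs /= probs.sum()
        if 0 < top_k < len(probs):                  # keep the 40 most likely tokens
            probs[probs < np.sort(probs)[-top_k]] = 0.0
        probs[probs < min_p * probs.max()] = 0.0    # no-op at min_p = 0.0
        probs /= probs.sum()
        order = np.argsort(probs)[::-1]             # nucleus: smallest set of tokens
        cum = np.cumsum(probs[order])               # with cumulative mass >= top_p
        keep = order[: np.searchsorted(cum, top_p) + 1]
        filtered = np.zeros_like(probs)
        filtered[keep] = probs[keep]
        return rng.choice(len(probs), p=filtered / filtered.sum())

best_of: 16 means sixteen candidate completions are drawn per request and one is kept; the selection criterion is not part of this record. Generation stops at a newline ('\n') or after 64 output tokens.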
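The throughput_3p7s field (2.01) appears to be read off the latencies table at a 3.7 s mean latency; that interpretation is an assumption, and simple linear interpolation lands close to, though not exactly on, the reported value:

    # Assumption: throughput_3p7s is throughput at a 3.7 s mean latency,
    # linearly interpolated from the (latency_mean, throughput) pairs above.
    lat = [1.109, 2.218, 2.632, 3.939, 4.851, 5.803, 7.277]
    tput = [0.901, 1.799, 1.890, 2.016, 2.039, 2.045, 2.018]

    def throughput_at(t):
        for i in range(len(lat) - 1):
            if lat[i] <= t <= lat[i + 1]:
                frac = (t - lat[i]) / (lat[i + 1] - lat[i])
                return tput[i] + frac * (tput[i + 1] - tput[i])

    print(round(throughput_at(3.7), 2))  # 1.99 vs. the reported 2.01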
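Two of the aggregate fields are directly recomputable from the others: win_ratio is exactly num_wins / num_battles, and model_num_parameters rounds to the stated 8B model_size.

    num_wins, num_battles = 4676, 11360
    print(num_wins / num_battles)   # 0.41161971830985916, the win_ratio above
    print(round(8030261248 / 1e9))  # 8, matching model_size: 8B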
Shutdown handler not registered because Python interpreter is not running in the main thread
Running pipeline stage MKMLizer
Starting job with name stark2000s-ult-du-gpt-v1-7-v1-mkmlizer
Waiting for job on stark2000s-ult-du-gpt-v1-7-v1-mkmlizer to finish
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ╔═════════════════════════════════════════════════════════════════════╗
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ║ (flywheel ASCII-art logo)                                           ║
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ║ ║
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ║ Version: 0.10.1 ║
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ║ Copyright 2023 MK ONE TECHNOLOGIES Inc. ║
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ║ https://mk1.ai ║
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ║ ║
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ║ The license key for the current software has been verified as ║
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ║ belonging to: ║
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ║ ║
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ║ Chai Research Corp. ║
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ║ Account ID: 7997a29f-0ceb-4cc7-9adf-840c57b4ae6f ║
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ║ Expiration: 2024-10-15 23:59:59 ║
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ║ ║
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: ╚═════════════════════════════════════════════════════════════════════╝
Connection pool is full, discarding connection: %s. Connection pool size: %s
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: Downloaded to shared memory in 28.509s
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: quantizing model to /dev/shm/model_cache, profile:s0, folder:/tmp/tmprq4o9d9_, device:0
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: Saving flywheel model at /dev/shm/model_cache
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: quantized model in 25.434s
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: Processed model stark2000s/ult-du-gpt-v1.7 in 53.943s
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: creating bucket guanaco-mkml-models
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: Bucket 's3://guanaco-mkml-models/' created
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: uploading /dev/shm/model_cache to s3://guanaco-mkml-models/stark2000s-ult-du-gpt-v1-7-v1
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: cp /dev/shm/model_cache/config.json s3://guanaco-mkml-models/stark2000s-ult-du-gpt-v1-7-v1/config.json
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: cp /dev/shm/model_cache/special_tokens_map.json s3://guanaco-mkml-models/stark2000s-ult-du-gpt-v1-7-v1/special_tokens_map.json
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: cp /dev/shm/model_cache/tokenizer_config.json s3://guanaco-mkml-models/stark2000s-ult-du-gpt-v1-7-v1/tokenizer_config.json
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: cp /dev/shm/model_cache/tokenizer.json s3://guanaco-mkml-models/stark2000s-ult-du-gpt-v1-7-v1/tokenizer.json
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: cp /dev/shm/model_cache/flywheel_model.0.safetensors s3://guanaco-mkml-models/stark2000s-ult-du-gpt-v1-7-v1/flywheel_model.0.safetensors
stark2000s-ult-du-gpt-v1-7-v1-mkmlizer: Loading 0: 0%| | 0/291 [00:00<?, ?it/s] ... Loading 0: 98%|█████████▊| 286/291 [00:05<00:00, 81.25it/s]
Job stark2000s-ult-du-gpt-v1-7-v1-mkmlizer completed after 75.79s with status: succeeded
Stopping job with name stark2000s-ult-du-gpt-v1-7-v1-mkmlizer
Pipeline stage MKMLizer completed in 77.17s
Running pipeline stage MKMLTemplater
Pipeline stage MKMLTemplater completed in 0.08s
Running pipeline stage MKMLDeployer
Creating inference service stark2000s-ult-du-gpt-v1-7-v1
Waiting for inference service stark2000s-ult-du-gpt-v1-7-v1 to be ready
Connection pool is full, discarding connection: %s. Connection pool size: %s
Inference service stark2000s-ult-du-gpt-v1-7-v1 ready after 151.33684992790222s
Pipeline stage MKMLDeployer completed in 151.77s
Running pipeline stage StressChecker
Received healthy response to inference request in 2.185231924057007s
Received healthy response to inference request in 1.7156200408935547s
Received healthy response to inference request in 1.908773422241211s
Received healthy response to inference request in 1.3985576629638672s
Received healthy response to inference request in 1.5444238185882568s
5 requests
0 failed requests
5th percentile: 1.4277308940887452
10th percentile: 1.456904125213623
20th percentile: 1.5152505874633788
30th percentile: 1.5786630630493164
40th percentile: 1.6471415519714356
50th percentile: 1.7156200408935547
60th percentile: 1.792881393432617
70th percentile: 1.8701427459716797
80th percentile: 1.9640651226043702
90th percentile: 2.0746485233306884
95th percentile: 2.1299402236938474
99th percentile: 2.1741735839843748
mean time: 1.7505213737487793
Pipeline stage StressChecker completed in 9.71s
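The StressChecker statistics above follow from linear interpolation over the five sorted response times; numpy's default percentile method reproduces them to within float rounding:

    import numpy as np

    # the five healthy response times, as logged above
    times = [2.185231924057007, 1.7156200408935547, 1.908773422241211,
             1.3985576629638672, 1.5444238185882568]

    print(np.percentile(times, 5))   # 1.4277308940887452
    print(np.percentile(times, 50))  # 1.7156200408935547
    print(np.percentile(times, 99))  # 2.1741735839843748
    print(np.mean(times))            # 1.7505213737487793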
Running pipeline stage TriggerMKMLProfilingPipeline
run_pipeline:run_in_cloud %s
starting trigger_guanaco_pipeline args=%s
Pipeline stage TriggerMKMLProfilingPipeline completed in 5.63s
Shutdown handler de-registered
stark2000s-ult-du-gpt-v1-7_v1 status is now deployed due to DeploymentManager action
Shutdown handler registered
Running pipeline stage MKMLProfilerDeleter
Skipping teardown as no inference service was successfully deployed
Pipeline stage MKMLProfilerDeleter completed in 0.12s
Running pipeline stage MKMLProfilerTemplater
Pipeline stage MKMLProfilerTemplater completed in 0.10s
Running pipeline stage MKMLProfilerDeployer
Creating inference service stark2000s-ult-du-gpt-v1-7-v1-profiler
Waiting for inference service stark2000s-ult-du-gpt-v1-7-v1-profiler to be ready
Inference service stark2000s-ult-du-gpt-v1-7-v1-profiler ready after 150.34919953346252s
Pipeline stage MKMLProfilerDeployer completed in 150.69s
Running pipeline stage MKMLProfilerRunner
kubectl cp /code/guanaco/guanaco_inference_services/src/inference_scripts tenant-chaiml-guanaco/stark2000s-ult-du-gp57e3d694b27027fe6201b3a8c0f812a8-deplocsmbx:/code/chaiverse_profiler_1725872711 --namespace tenant-chaiml-guanaco
kubectl exec -it stark2000s-ult-du-gp57e3d694b27027fe6201b3a8c0f812a8-deplocsmbx --namespace tenant-chaiml-guanaco -- sh -c 'cd /code/chaiverse_profiler_1725872711 && python profiles.py profile --best_of_n 16 --auto_batch 5 --batches 1,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195 --samples 200 --input_tokens 512 --output_tokens 64 --summary /code/chaiverse_profiler_1725872711/summary.json'
kubectl exec -it stark2000s-ult-du-gp57e3d694b27027fe6201b3a8c0f812a8-deplocsmbx --namespace tenant-chaiml-guanaco -- bash -c 'cat /code/chaiverse_profiler_1725872711/summary.json'
Pipeline stage MKMLProfilerRunner completed in 838.43s
Running pipeline stage MKMLProfilerDeleter
Checking if service stark2000s-ult-du-gpt-v1-7-v1-profiler is running
Tearing down inference service stark2000s-ult-du-gpt-v1-7-v1-profiler
Service stark2000s-ult-du-gpt-v1-7-v1-profiler has been torn down
Pipeline stage MKMLProfilerDeleter completed in 1.52s
Shutdown handler de-registered
stark2000s-ult-du-gpt-v1-7_v1 status is now inactive due to auto deactivation of underperforming models