-
-
Notifications
You must be signed in to change notification settings - Fork 197
Expand file tree
/
Copy pathharbor.sh
More file actions
executable file
·5888 lines (5300 loc) · 167 KB
/
harbor.sh
File metadata and controls
executable file
·5888 lines (5300 loc) · 167 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env bash
# Exit on the first failing command; a pipeline fails if any stage fails.
# NOTE(review): '-u' (nounset) is not enabled — much of this script relies
# on unset variables expanding to empty strings.
set -eo pipefail
# ========================================================================
# == Functions
# ========================================================================
show_version() {
    # Print the Harbor CLI version (reads the script-level $version).
    printf '%s\n' "Harbor CLI version: $version"
}
show_help() {
    # Print the full CLI usage text, grouped by command category.
    # All lines below are user-facing output — keep them byte-stable.
    show_version
    echo "Usage: $0 <command> [options]"
    echo
    echo "Compose Setup Commands:"
    echo "  up|u|start|s [handle(s)]  - Start the service(s)"
    echo "    up --tail               - Start and tail the logs"
    echo "    up --open               - Start and open in the browser"
    echo "    up --no-defaults        - Do not include default services"
    echo "  down|d                    - Stop and remove the containers"
    echo "  restart|r [handle]        - Down then up"
    echo "  ps                        - List the running containers"
    echo "  logs|l <handle>           - View the logs of the containers"
    echo "  exec <handle> [command]   - Execute a command in a running service"
    echo "  pull <handle>             - Pull the latest images or models"
    echo "    pull <service>          - Pull Docker images for a service"
    echo "    pull <model>            - Pull Ollama model or llama.cpp HF model"
    echo "  dive <handle>             - Run the Dive CLI to inspect Docker images"
    echo "  run <alias>               - Run a command defined as an alias"
    echo "  run <handle> [command]    - Run a one-off command in a service container"
    echo "  shell <handle>            - Load shell in the given service main container"
    echo "  build <handle>            - Build the given service"
    echo "  stats                     - Show resource usage statistics"
    echo "  attach <handle>           - Attach to a running service container"
    echo "  cmd <handle>              - Print the docker compose command"
    echo
    echo "Setup Management Commands:"
    echo "  webui          - Configure Open WebUI Service"
    echo "  llamacpp       - Configure llamacpp service"
    echo "  tgi            - Configure text-generation-inference service"
    echo "  litellm        - Configure LiteLLM service"
    echo "  langflow       - Configure Langflow UI Service"
    echo "  openai         - Configure OpenAI API keys and URLs"
    echo "  vllm           - Configure VLLM service"
    echo "  aphrodite      - Configure Aphrodite service"
    echo "  tabbyapi       - Configure TabbyAPI service"
    echo "  mistralrs      - Configure mistral.rs service"
    echo "  cfd            - Run cloudflared CLI"
    echo "  airllm         - Configure AirLLM service"
    echo "  txtai          - Configure txtai service"
    echo "  chatui         - Configure HuggingFace ChatUI service"
    echo "  comfyui        - Configure ComfyUI service"
    echo "  parler         - Configure Parler service"
    echo "  sglang         - Configure SGLang CLI"
    echo "  omnichain      - Work with Omnichain service"
    echo "  jupyter        - Configure Jupyter service"
    echo "  ol1            - Configure ol1 service"
    echo "  ktransformers  - Configure ktransformers service"
    echo "  kobold         - Configure Koboldcpp service"
    echo "  morphic        - Configure Morphic service"
    echo "  modularmax     - Configure Modular MAX service"
    echo "  boost          - Configure Harbor Boost service"
    echo "  hermes         - Configure Hermes Agent service"
    echo "  stt            - Configure Speech-to-Text service"
    echo "  speaches       - Configure Speaches service"
    echo "  webtop         - Configure Webtop service"
    echo "  mcp            - Configure MCP service"
    echo "  oterm          - Configure oterm service"
    echo
    echo "Service CLIs:"
    echo "  ollama         - Run Ollama CLI (docker). Service should be running."
    echo "  aider          - Launch Aider CLI"
    echo "  aichat         - Run aichat CLI"
    echo "  interpreter|opint - Launch Open Interpreter CLI"
    echo "  fabric         - Run Fabric CLI"
    echo "  plandex        - Launch Plandex CLI"
    echo "  cmdh           - Run cmdh CLI"
    echo "  parllama       - Launch Parllama - TUI for chatting with Ollama models"
    echo "  bench          - Run and manage Harbor Bench"
    echo "  lmeval|lm_eval - Run LM Evaluation Harness"
    echo "  openhands|oh   - Run OpenHands service"
    echo "  repopack       - Run the Repopack CLI"
    echo "  nexa           - Run the Nexa CLI, configure the service"
    echo "  gptme          - Run gptme CLI, configure the service"
    echo "  nanobot        - Run nanobot CLI"
    echo "  promptfoo|pf   - Run promptfoo CLI for LLM testing and evaluation"
    echo "  hf             - Run the Harbor's Hugging Face CLI. Expanded with a few additional commands."
    echo "    hf dl        - HuggingFaceModelDownloader CLI"
    echo "    hf parse-url - Parse file URL from Hugging Face"
    echo "    hf token     - Get/set the Hugging Face Hub token"
    echo "    hf cache     - Get/set the path to Hugging Face cache"
    echo "    hf find <query> - Open HF Hub with a query (trending by default)"
    echo "    hf path <spec>  - Print a folder in HF cache for a given model spec"
    echo "    hf *         - Anything else is passed to the official Hugging Face CLI"
    echo "  models         - Manage models across Ollama and HuggingFace caches"
    echo "  k6             - Run K6 CLI"
    echo
    echo "Harbor CLI Commands:"
    echo "  open <handle>            - Open a service in the default browser"
    echo
    echo "  url <handle>             - Get the URL for a service"
    echo "    url <handle>           - Url on the local host"
    echo "    url [-a|--addressable|--lan] <handle> - (supposed) LAN URL"
    echo "    url [-i|--internal] <handle> - URL within Harbor's docker network"
    echo
    echo "  qr <handle>              - Print a QR code for a service"
    echo
    echo "  t|tunnel <handle>        - Expose given service to the internet"
    echo "    tunnel down|stop|d|s   - Stop all running tunnels (including auto)"
    echo "  tunnels [ls|rm|add]      - Manage services that will be tunneled on 'up'"
    echo "    tunnels rm <handle|index> - Remove, also accepts handle or index"
    echo "    tunnels add <handle>   - Add a service to the tunnel list"
    echo
    echo "  config [get|set|ls]      - Manage the Harbor environment configuration"
    echo "    config ls              - All config values in ENV format"
    echo "    config get <field>     - Get a specific config value"
    echo "    config set <field> <value> - Set a specific config value"
    echo "    config reset           - Reset Harbor configuration to default .env"
    echo "    config update          - Merge upstream config changes from default .env"
    echo "    config search <query>  - Search config keys and values"
    echo
    echo "  env <service> [key] [value] - Manage override.env variables for a service"
    echo "    env <service>          - List all variables for a service"
    echo "    env <service> <key>    - Get a specific variable for a service"
    echo "    env <service> <key> <value> - Set a specific variable for a service"
    echo "    env <service> get <key>     - Get a specific variable (explicit form)"
    echo "    env <service> unset <key>   - Remove a specific variable for a service"
    echo
    echo "  profile|profiles|p [ls|rm|add] - Manage Harbor profiles"
    echo "    profile ls|list        - List all profiles"
    echo "    profile rm|remove <name> - Remove a profile"
    echo "    profile add|save <name>  - Add current config as a profile"
    echo "    profile set|use|load <name> - Use a profile"
    echo
    echo "  alias|aliases|a [ls|get|set|rm] - Manage Harbor aliases"
    echo "    alias ls|list          - List all aliases"
    echo "    alias get <name>       - Get an alias"
    echo "    alias set <name> <command> - Set an alias"
    echo "    alias rm|remove <name> - Remove an alias"
    echo
    echo "  history|h [ls|rm|add]    - Harbor command history."
    echo "                             When run without arguments, launches interactive selector."
    echo "    history clear          - Clear the history"
    echo "    history size           - Get/set the history size"
    echo "    history list|ls        - List recorded history"
    echo
    echo "  defaults [ls|rm|add]     - List default services"
    echo "    defaults rm <handle|index> - Remove, also accepts handle or index"
    echo "    defaults add <handle>  - Add"
    echo
    echo "  find <file>              - Find a file in the caches visible to Harbor"
    echo "  ls|list [--active|-a]    - List available/active Harbor services"
    echo "  ln|link [--short]        - Create a symlink to the CLI, --short for 'h' link"
    echo "  unlink                   - Remove CLI symlinks"
    echo "  eject                    - Eject the Compose configuration, accepts same options as 'up'"
    echo "  help|--help|-h           - Show this help message"
    echo "  version|--version|-v     - Show the CLI version"
    echo "  gum                      - Run the Gum terminal commands"
    echo "  update [-l|--latest]     - Update Harbor. --latest for the dev version"
    echo "  info                     - Show system information for debug/issues"
    echo "  doctor                   - Tiny troubleshooting script"
    echo "  how                      - Ask questions about Harbor CLI, uses cmdh under the hood"
    echo "  smi                      - Show NVIDIA GPU information"
    echo "  top                      - Run nvtop to monitor GPU usage"
    echo "  size                     - Print the size of caches Harbor is aware of"
    echo "  eval                     - Run promptfoo evaluation"
    echo "  routine                  - Run internal Harbor routines"
    echo "  dev <script>             - Run Harbor development scripts"
    echo "  tools                    - Run Harbor development tools"
    echo
    echo "Harbor Workspace Commands:"
    echo "  home     - Show path to the Harbor workspace"
    echo "  vscode   - Open Harbor Workspace in VS Code"
    echo "  fixfs    - Fix file system ACLs for service volumes"
}
run_harbor_doctor() {
    # Run environment sanity checks (Docker, Compose v2, workspace files,
    # CLI symlink, GPU toolkits) and report a pass/fail line for each.
    # Returns 0 when all required checks pass, 1 otherwise.
    log_info "Running Harbor Doctor..."
    # Intentionally not 'local' — kept as in the original implementation.
    has_errors=false

    # Check if Docker is installed
    if command -v docker &>/dev/null; then
        log_info "${ok} Docker is installed"

        # Check if Docker can be called without sudo.
        # The assignment is used directly as the 'if' condition so that a
        # failing 'docker info' cannot trip 'set -e' before we report it
        # (previously the bare assignment + '$?' check could abort early).
        local docker_access_output
        if docker_access_output=$(docker info 2>&1); then
            log_info "${ok} Docker can be called without sudo"
            log_info "${ok} Docker daemon is running"
        else
            if echo "$docker_access_output" | grep -qi "permission denied\|got permission denied while trying to connect to the docker daemon socket"; then
                log_error "${nok} Docker requires sudo for this user. Add your user to the 'docker' group and re-login."
            else
                log_error "${nok} Docker daemon is not running or not reachable. Please start Docker."
            fi
            has_errors=true
        fi
    else
        log_error "${nok} Docker is not installed. Please install Docker."
        has_errors=true
    fi

    # Check if Docker Compose (v2) is installed
    if command -v docker &>/dev/null && docker compose version &>/dev/null; then
        log_info "${ok} Docker Compose (v2) is installed"
    else
        log_error "${nok} Docker Compose (v2) is not installed. Please install Docker Compose (v2)."
        has_errors=true
    fi

    # Check the minimum supported Compose version
    if ! has_modern_compose; then
        log_error "${nok} Docker Compose version is older than $desired_compose_major.$desired_compose_minor.$desired_compose_patch. Please update Docker Compose (v2)."
        has_errors=true
    else
        log_info "${ok} Docker Compose (v2) version is newer than $desired_compose_major.$desired_compose_minor.$desired_compose_patch"
    fi

    # Check if the Harbor workspace directory exists
    if [ -d "$harbor_home" ]; then
        log_info "${ok} Harbor home: $harbor_home"
    else
        log_error "${nok} Harbor home does not exist or is not reachable."
        has_errors=true
    fi

    # Check if the default profile file exists and is readable.
    # Quoted — the unquoted form (SC2086) broke on paths with spaces.
    if [ -f "$default_profile" ] && [ -r "$default_profile" ]; then
        log_info "${ok} Default profile exists and is readable"
    else
        log_error "${nok} Default profile is missing or not readable. Please ensure it exists and has the correct permissions."
        has_errors=true
    fi

    # Check if the .env file exists and is readable
    if [ -f ".env" ] && [ -r ".env" ]; then
        log_info "${ok} Current profile (.env) exists and is readable"
    else
        log_error "${nok} Current profile (.env) is missing or not readable. Please ensure it exists and has the correct permissions."
        has_errors=true
    fi

    # Check if CLI is linked.
    # 'eval echo' expands a possible '~' stored in cli.path.
    if [ -L "$(eval echo "$(env_manager get cli.path)")/$(env_manager get cli.name)" ]; then
        log_info "${ok} CLI is linked"
    else
        log_error "${nok} CLI is not linked. Run 'harbor link' to create a symlink."
        has_errors=true
    fi

    # GPU checks are warnings only — they never fail the doctor run.
    if has_nvidia; then
        log_info "${ok} NVIDIA GPU is available"
    else
        log_warn "${nok} NVIDIA GPU is not available. NVIDIA GPU support may not work."
    fi

    # Check if nvidia-container-toolkit is installed
    if has_nvidia_ctk; then
        log_info "${ok} NVIDIA Container Toolkit is installed"
    else
        log_warn "${nok} NVIDIA Container Toolkit is not installed. NVIDIA GPU support may not work."
    fi

    # Check if rocm is installed
    if has_rocm; then
        log_info "${ok} ROCm is installed"
    else
        # Fixed typo: "in not" -> "is not".
        log_warn "${nok} ROCm is not installed. AMD GPU support may not work."
    fi

    if $has_errors; then
        log_error "Harbor Doctor checks failed. Please resolve the issues above."
        return 1
    else
        log_info "Harbor Doctor checks completed successfully."
        return 0
    fi
}
has_nvidia() {
    # True when the NVIDIA driver utility (nvidia-smi) is on PATH.
    command -v nvidia-smi >/dev/null 2>&1
}
has_nvidia_ctk() {
    # True when the NVIDIA Container Toolkit binary is on PATH.
    command -v nvidia-container-toolkit >/dev/null 2>&1
}
has_nvidia_cdi() {
    # True when an NVIDIA CDI spec exists in one of the standard locations
    # (/etc/cdi or /var/run/cdi).
    local spec
    for spec in /etc/cdi/nvidia.yaml /var/run/cdi/nvidia.yaml /var/run/cdi/nvidia-container-toolkit.json; do
        if [ -f "$spec" ]; then
            return 0
        fi
    done
    return 1
}
has_rocm() {
    # 1. Hardware/Kernel check: the AMD KFD device node must exist.
    if [[ ! -e "/dev/kfd" ]]; then
        return 1
    fi
    # 2. Docker configuration check: 'amd' must be listed in docker runtimes.
    # The pipeline's status becomes the function's status.
    docker info 2>/dev/null | grep -i "runtimes" | grep -q "amd"
}
has_modern_compose() {
    # Check that Docker Compose (v2) is at least the version pinned in the
    # $desired_compose_major/minor/patch globals.
    local raw
    raw=$(docker compose version --short 2>/dev/null | sed -e 's/-desktop//')

    if [ -z "$raw" ]; then
        log_debug "Could not detect Docker Compose version"
        return 1
    fi

    # Drop a leading 'v' if present.
    local ver=${raw#v}

    if [ "$ver" = "dev" ]; then
        log_debug "Docker Compose reports version 'dev'; assuming it is modern"
        return 0
    fi

    local maj min pat
    if [[ "$ver" =~ ^([0-9]+)\.([0-9]+)\.([0-9]+) ]]; then
        maj=${BASH_REMATCH[1]}
        min=${BASH_REMATCH[2]}
        pat=${BASH_REMATCH[3]}
    else
        log_debug "Unrecognized Docker Compose version '$raw'; skipping numeric comparison"
        return 0
    fi

    # Lexicographic compare: major first, then minor, then patch.
    if ((maj != desired_compose_major)); then
        ((maj > desired_compose_major)) && return 0
        log_debug "Major version is less than $desired_compose_major"
        return 1
    fi

    if ((min != desired_compose_minor)); then
        ((min > desired_compose_minor)) && return 0
        log_debug "Minor version is less than $desired_compose_minor"
        return 1
    fi

    if ((pat < desired_compose_patch)); then
        log_debug "Patch version is less than $desired_compose_patch"
        return 1
    fi

    return 0
}
# Marker variable used only as an editor/navigation anchor; never read.
# shellcheck disable=SC2034
__anchor_fns=true
resolve_compose_files() {
    # Emit all top-level service compose files (no recursion), ordered so
    # that filenames with fewer dots (more generic) come before more
    # specific ones. Sorting key: the dot count prefixed to each path,
    # which is stripped again after the numeric sort.
    find "$harbor_home/services" -maxdepth 1 -name "*.yml" |
        awk -F'.' '{ printf "%d %s\n", NF - 1, $0 }' |
        sort -n |
        cut -d' ' -f2-
}
run_routine() {
    # Run a Deno-based Harbor routine (from $harbor_home/routines) inside
    # the routine runtime container.
    # $1 - routine name, with or without extension (defaults to '.ts');
    # remaining args are forwarded to the routine.
    # Returns 1 when the name is missing or does not resolve to a file.
    local routine_name="$1"
    local routine_path

    if [ -z "$routine_name" ]; then
        log_error "run_routine requires a routine name"
        return 1
    fi

    # A name containing a dot is treated as a literal file name;
    # otherwise assume the TypeScript extension.
    if [[ "$routine_name" == *.* ]]; then
        routine_path="$harbor_home/routines/$routine_name"
    else
        routine_path="$harbor_home/routines/$routine_name.ts"
    fi

    # Quoted — the unquoted form (SC2086) broke on paths with spaces.
    if [ ! -f "$routine_path" ]; then
        log_error "Routine '$routine_name' not identified"
        return 1
    fi

    shift
    log_debug "Running routine: $routine_name"

    # $default_routine_runtime is intentionally left unquoted: it may
    # expand to the image name plus extra docker arguments.
    docker run --rm \
        -v "$harbor_home:$harbor_home" \
        -v harbor-deno-cache:/deno-dir:rw \
        -w "$harbor_home" \
        -e "HARBOR_LOG_LEVEL=$default_log_level" \
        -e "HARBOR_COMPOSE_CACHE=$HARBOR_COMPOSE_CACHE" \
        $default_routine_runtime \
        "$routine_path" "$@"
}
routine_compose_with_options() {
    # Delegate compose-file merging to the 'mergeComposeFiles' routine,
    # appending auto-detected capability tokens when enabled.
    local caps=()

    if [ "$default_auto_capabilities" = "true" ]; then
        # NVIDIA via toolkit takes precedence over CDI detection.
        if has_nvidia && has_nvidia_ctk; then
            caps+=("nvidia")
        elif has_nvidia_cdi; then
            caps+=("cdi")
        fi

        has_rocm && caps+=("rocm")
        has_modern_compose && caps+=("mdc")
    fi

    run_routine mergeComposeFiles "$@" "${caps[@]}"
}
compose_with_options() {
    # Build and print the 'docker compose -f ... -f ...' command string for
    # the requested service handles / capability tokens.
    # "*" selects every non-capability service file; compose.x.* "cross"
    # files are included only when all of their named parts are selected.
    # Modern path: delegate to the Deno merge routine unless legacy mode.
    if [[ $default_legacy_cli == 'false' ]]; then
        routine_compose_with_options "$@"
        return
    fi

    local base_dir="$harbor_home"
    local compose_files=("$base_dir/compose.yml") # Always include the base compose file
    local options=("${default_options[@]}" "${default_capabilities[@]}")

    # Parse arguments
    while [[ $# -gt 0 ]]; do
        case $1 in
        --dir=*)
            base_dir="${1#*=}"
            shift
            ;;
        --no-defaults)
            # Drop the preconfigured defaults; only explicit handles remain.
            options=()
            shift
            ;;
        *)
            options+=("$1")
            shift
            ;;
        esac
    done

    # Auto-append hardware/compose capability tokens when enabled.
    if [ "$default_auto_capabilities" = "true" ]; then
        if has_nvidia && has_nvidia_ctk; then
            options+=("nvidia")
        elif has_nvidia_cdi; then
            options+=("cdi")
        fi

        if has_rocm; then
            options+=("rocm")
        fi

        if has_modern_compose; then
            options+=("mdc")
        fi
    fi

    for file in $(resolve_compose_files); do
        if [ -f "$file" ]; then
            local filename=$(basename "$file")
            local match=false

            # This is a "cross" file, only to be included
            # if we're running all the mentioned services
            if [[ $filename == *".x."* ]]; then
                local cross="${filename#compose.x.}"
                cross="${cross%.yml}"

                # Convert dot notation to array
                # (intentionally unquoted: word-splits on spaces)
                local filename_parts=(${cross//./ })
                local all_matched=true

                for part in "${filename_parts[@]}"; do
                    # Skip capability files for wildcard match
                    if is_capability "$part"; then
                        # Capabilities must match exactly, no wildcards
                        if [[ ! " ${options[*]} " =~ " ${part} " ]]; then
                            all_matched=false
                            break
                        fi
                    else
                        if [[ ! " ${options[*]} " =~ " ${part} " ]] && [[ ! " ${options[*]} " =~ " * " ]]; then
                            all_matched=false
                            break
                        fi
                    fi
                done

                if $all_matched; then
                    compose_files+=("$file")
                fi

                # Either way, the processing
                # for this file is done
                continue
            fi

            # Check if file matches any of the options
            for option in "${options[@]}"; do
                if [[ $option == "*" ]]; then
                    # Capabilities should not be matched by "*", otherwise
                    # we'll run "nvidia" or "mdc" or "cdi" when we don't want to
                    if ! is_capability_file "$filename"; then
                        match=true
                    fi

                    break
                fi

                if [[ $filename == *".$option."* ]]; then
                    match=true
                    break
                fi
            done

            if $match; then
                compose_files+=("$file")
            fi
        fi
    done

    # Prepare docker compose command
    local cmd="docker compose"

    for file in "${compose_files[@]}"; do
        cmd+=" -f $file"
    done

    # Log amount of matched files
    log_debug "Matched compose files: ${#compose_files[@]}"

    # Return the command string
    echo "$cmd"
}
is_capability() {
    # True when $1 is a known capability token (built-in or from the
    # configured $default_capabilities).
    local needle="$1"
    local cap

    for cap in "nvidia" "mdc" "cdi" "rocm" "build" "${default_capabilities[@]}"; do
        if [ "$cap" = "$needle" ]; then
            return 0
        fi
    done

    return 1
}
is_capability_file() {
    # True when filename $1 contains any capability token as a ".cap."
    # segment (e.g. compose.webui.nvidia.yml).
    local fname="$1"
    local cap

    for cap in "nvidia" "mdc" "cdi" "rocm" "build" "${default_capabilities[@]}"; do
        case "$fname" in
        *".$cap."*) return 0 ;;
        esac
    done

    return 1
}
service_compose_exists() {
    # True when a compose definition exists for service $1 in the services
    # directory (optional $2, defaults to $harbor_home/services) — either a
    # direct compose.<svc>.yml/.ts file or any compose.<svc>.*.{yml,ts}
    # capability/cross variant.
    local svc="$1"
    local dir="${2:-$harbor_home/services}"
    local candidate

    for candidate in "$dir/compose.$svc.yml" "$dir/compose.$svc.ts"; do
        if [ -f "$candidate" ]; then
            return 0
        fi
    done

    if compgen -G "$dir/compose.$svc.*.yml" >/dev/null || compgen -G "$dir/compose.$svc.*.ts" >/dev/null; then
        return 0
    fi

    return 1
}
resolve_compose_command() {
    # Validate requested services/capabilities and print the resulting
    # 'docker compose' command line.
    # --human|-h - pretty-print with one compose file per line.
    # Returns 1 when a non-flag argument is neither a capability nor an
    # existing service; otherwise propagates compose_with_options' status.
    local is_human=false

    case "$1" in
    --human | -h)
        shift
        is_human=true
        ;;
    esac

    local arg
    for arg in "$@"; do
        # Long flags are forwarded to compose_with_options untouched.
        if [[ "$arg" == --* ]]; then
            continue
        fi

        if ! is_capability "$arg" && ! service_compose_exists "$arg"; then
            log_error "Service '$arg' not found."
            return 1
        fi
    done

    # Declaration split from assignment so a failure inside
    # compose_with_options is not masked by 'local' (SC2155).
    local cmd
    cmd=$(compose_with_options --no-merge "$@") || return $?

    if $is_human; then
        echo "$cmd" | sed "s|-f $harbor_home/|\n - |g"
    else
        echo "$cmd"
    fi
}
run_up() {
    # Start service(s) and report their URLs.
    # Flags: --no-defaults  - don't include default services
    #        --open|-o      - open in the browser afterwards
    #        --tail|-t      - tail the logs afterwards
    #        --attach|-a    - attach to the container afterwards
    # Remaining args are service handles / capability tokens.
    # Returns compose's exit code on failure, 1 for an unknown service.
    local should_tail=false
    local should_open=false
    local should_attach=false
    local no_defaults=false
    local filtered_args=()
    local up_args=()

    for arg in "$@"; do
        case "$arg" in
        --no-defaults)
            no_defaults=true
            up_args+=("$arg")
            ;;
        --open | -o)
            should_open=true
            ;;
        --tail | -t)
            should_tail=true
            ;;
        --attach | -a)
            should_attach=true
            ;;
        *)
            filtered_args+=("$arg")
            ;;
        esac
    done

    local display_services=("${filtered_args[@]}")

    if [ ${#display_services[@]} -eq 0 ] && ! $no_defaults; then
        display_services=("${default_options[@]}")
    fi

    # Verify that requested services exist
    for service in "${filtered_args[@]}"; do
        if is_capability "$service"; then
            continue
        fi

        if ! service_compose_exists "$service"; then
            log_error "Service '$service' not found."
            return 1
        fi
    done

    if [ ${#display_services[@]} -gt 0 ]; then
        log_info "Starting services: ${display_services[*]}"
    else
        log_info "Starting services..."
    fi

    log_debug "Running 'up' for services: ${up_args[@]} ${filtered_args[@]}"
    # '|| up_exit=$?' keeps a compose failure from tripping 'set -e'
    # before we can report the exit code.
    local up_exit=0
    $(compose_with_options "${up_args[@]}" "${filtered_args[@]}") up -d --wait || up_exit=$?

    if [ $up_exit -ne 0 ]; then
        log_error "Failed to start services (exit code: $up_exit)"
        return $up_exit
    fi

    for service in "${display_services[@]}"; do
        local url

        if url=$(get_service_url "$service" 2>/dev/null); then
            log_info " ${c_g}${service}${c_nc} - $url"
        else
            log_info " ${c_g}${service}${c_nc}"
        fi
    done

    if [ "$default_autoopen" = "true" ]; then
        run_open "$default_open"
    fi

    for service in "${default_tunnels[@]}"; do
        establish_tunnel "$service"
    done

    # Fixed SC2128: "$filtered_args" expanded only the array's first
    # element; pass the whole array to the follow-up commands.
    if $should_attach; then
        run_attach "${filtered_args[@]}"
        return
    fi

    if $should_tail; then
        run_logs "${filtered_args[@]}"
    fi

    if $should_open; then
        run_open "${filtered_args[@]}"
    fi
}
run_down() {
    # Stop and remove service containers.
    # With no args — stop everything ("*"); otherwise stop the named
    # services plus any active "<service>-*" subservices that match them.
    # Returns compose's exit code on failure.
    local services=$(get_active_services)
    local matched_services=()
    local compose_targets=("$@")

    log_debug "Active services: $services"
    # One service name per line so grep can anchor on "^<service>-".
    services=$(echo "$services" | tr ' ' '\n')

    for service in "$@"; do
        log_debug "Checking if service '$service' is in active services list..."
        matched_service=$(echo "$services" | grep "^$service-")

        if [ -n "$matched_service" ]; then
            matched_services+=("$matched_service")
        fi
    done

    log_debug "Matched: ${matched_services[*]}"

    if [ $# -eq 0 ]; then
        log_info "Stopping all services..."
        compose_targets=("*")
    else
        log_info "Stopping services: $*"
    fi

    # Intentionally unquoted below so each matched subservice becomes its
    # own word in the compose command.
    matched_services_str=$(printf " %s" "${matched_services[@]}")
    $(compose_with_options "${compose_targets[@]}") down --remove-orphans --timeout 10 "$@" $matched_services_str
    local down_exit=$?

    if [ $down_exit -eq 0 ]; then
        log_info "Services stopped."
    else
        log_error "Failed to stop services (exit code: $down_exit)"
        return $down_exit
    fi
}
run_restart() {
    # Restart services: bring down the requested handles, then bring up the
    # union of the currently-active services and the requested ones.
    # Leading-dash args are treated as flags and forwarded to 'up'.
    local active_services=$(get_active_services)

    if [ -z "$active_services" ] && [ $# -eq 0 ]; then
        log_error "No active services to restart."
        return 0
    fi

    # Split args into service handles and pass-through flags.
    local services=()
    local flags=()
    local arg
    for arg in "$@"; do
        case "$arg" in
        -*) flags+=("$arg") ;;
        *) services+=("$arg") ;;
        esac
    done

    # De-duplicate (active + requested) while preserving first-seen order.
    local unique_services=()
    local candidate known seen
    for candidate in $active_services "${services[@]}"; do
        seen=0
        for known in "${unique_services[@]}"; do
            if [[ "$candidate" == "$known" ]]; then
                seen=1
                break
            fi
        done
        if ((seen == 0)); then
            unique_services+=("$candidate")
        fi
    done

    run_down "${services[@]}"
    run_up "${unique_services[@]}" "${flags[@]}"
}
run_ps() {
    # List containers; with no args, build the compose command from every
    # service definition ("*").
    local targets=("$@")

    if [ ${#targets[@]} -eq 0 ]; then
        targets=("*")
    fi

    $(compose_with_options "${targets[@]}") ps "$@"
}
run_build() {
    # Build the image(s) for a service, including any "<service>-*"
    # subservices reported by get_services.
    # $1 - service handle; remaining args forwarded to 'compose build'.
    local service=$1
    shift

    if [ -z "$service" ]; then
        log_error "Usage: harbor build <service>"
        return 1
    fi

    local services=$(get_services --silent)
    # Initialized explicitly: previously this array leaked in from the
    # caller's scope and could accumulate stale entries across calls.
    local matched_services=()
    local matched_service

    log_debug "Checking if service '$service' has subservices..."
    matched_service=$(echo "$services" | grep "^$service-")

    if [ -n "$matched_service" ]; then
        log_debug "Matched service: $matched_service"
        matched_services+=("$matched_service")
    fi

    # Intentionally unquoted: each matched subservice becomes its own arg.
    matched_services_str=$(printf " %s" "${matched_services[@]}")
    log_debug "Building" "$service" "$@" $matched_services_str
    $(compose_with_options "*") build "$service" "$@" $matched_services_str
}
run_shell() {
    # Open an interactive shell (default: bash) in a one-off container for
    # the given service. Exits the script when no service is given.
    service=$1
    shift

    if [ -z "$service" ]; then
        log_error "Usage: harbor shell <service>"
        exit 1
    fi

    # Optional second arg overrides the shell binary.
    local shell="${1:-bash}"

    $(compose_with_options "*") run -it --entrypoint "$shell" "$service"
}
run_logs() {
    # Follow logs for the given service(s), starting from the last 20 lines.
    local compose_cmd
    compose_cmd=$(compose_with_options "*")
    $compose_cmd logs -n 20 -f "$@"
}
run_pull() {
    # Pull the given target(s). Each arg is either a known service (docker
    # image pull) or a model reference: an "org/repo[:tag]" that resolves
    # on huggingface.co goes to llama.cpp, anything else goes to Ollama.
    # NOTE(review): only the first non-service arg is pulled as a model
    # (the function returns inside the loop).
    available_services=$(get_services --silent)

    for service in "$@"; do
        if echo "$available_services" | grep -q "^$service$"; then
            log_info "Pulling service $service"
        else
            # Probe HF repo existence (200 OK) to determine if it's a Llama.cpp target
            # Strip tag if present for the check
            local repo="${service%:*}"

            # Must look like org/repo (at least one slash) to be a candidate
            if [[ "$service" == *"/"* ]] && curl --output /dev/null --silent --head --fail --connect-timeout 5 "https://huggingface.co/$repo"; then
                run_llamacpp_pull "$service"
                return 0
            fi

            run_ollama_command pull "$service"
            return 0
        fi
    done

    # All args were known services — pull their docker images.
    $(compose_with_options "$@") pull
}
run_llamacpp_pull() {
    # Download a Hugging Face model into the llama.cpp cache by starting an
    # ephemeral llama-server, watching its log for completion markers, and
    # shutting it down once the model is cached.
    local model="$1"

    log_info "Detected Llama.cpp target: $model"
    log_info "Starting ephemeral llama-server to pull model to cache..."

    # Sanitize the model spec so it is safe to embed in a file name.
    local safe_model_name=$(echo "$model" | sed 's/[^a-zA-Z0-9._-]/-/g')
    local c_log="/tmp/pull-${safe_model_name}.log"

    # Embed simple logger to match Harbor's CLI style inside the container
    # Using printf for better portability and avoiding echo -e issues
    local script_logger="
log_info() {
    printf \"\\033[90m%s\\033[0m [INFO] %s\\n\" \"\$(date +'%H:%M:%S')\" \"\$*\"
}
log_success() {
    printf \"\\033[90m%s\\033[0m [INFO] \\033[32m✔\\033[0m %s\\n\" \"\$(date +'%H:%M:%S')\" \"\$*\"
}
log_error() {
    printf \"\\033[90m%s\\033[0m [ERROR] \\033[31m✘\\033[0m %s\\n\" \"\$(date +'%H:%M:%S')\" \"\$*\"
}
"

    # The in-container script: start the server in the background, tail its
    # log, and poll for cache-hit / download-complete / server-ready markers.
    local cmd="
$script_logger
touch \"$c_log\"
tail -f \"$c_log\" &
TAIL_PID=\$!
log_info 'Starting download process for $model...' >> \"$c_log\"
/app/llama-server -hf '$model' --port 8080 --host 0.0.0.0 --n-gpu-layers 0 -c 128 >> \"$c_log\" 2>&1 &
SRV_PID=\$!
while true; do
    if grep -q 'using cached file' \"$c_log\"; then
        log_success 'Model is already cached.'
        kill \$SRV_PID 2>/dev/null
        kill \$TAIL_PID 2>/dev/null
        exit 0
    fi
    # 'loading model' indicates download finished
    if grep -q 'main: loading model' \"$c_log\" || grep -q 'load_model: loading model' \"$c_log\"; then
        log_success 'Download completed successfully.'
        kill \$SRV_PID 2>/dev/null
        kill \$TAIL_PID 2>/dev/null
        exit 0
    fi
    # Fallback success
    if grep -q 'HTTP server listening' \"$c_log\"; then
        log_success 'Server ready (Download success).'
        kill \$SRV_PID 2>/dev/null
        kill \$TAIL_PID 2>/dev/null
        exit 0
    fi
    if ! kill -0 \$SRV_PID 2>/dev/null; then
        log_error 'Download process exited prematurely. Check logs above.'
        kill \$TAIL_PID 2>/dev/null
        exit 1
    fi
    sleep 0.5
done
"

    # Run the script in a throwaway llamacpp container.
    $(compose_with_options "llamacpp") run \
        --rm \
        --entrypoint /bin/sh \
        llamacpp \
        -c "$cmd"
}
run_run() {
    # Run a configured alias (when $1 matches one) or a one-off command in
    # the given service's container.
    service=$1
    shift

    # Check if it is an alias first
    local maybe_cmd=$(env_manager_dict aliases --silent get "$service")

    if [ -n "$maybe_cmd" ]; then
        log_info "Running alias $service -> \"$maybe_cmd\""
        # NOTE(review): alias content is eval'd verbatim — aliases are
        # treated as trusted local configuration.
        eval "$maybe_cmd"
        return 0
    fi

    log_debug "'harbor run': no alias found for $service, running as service"

    local services=$(get_active_services)
    local tty_opt=""

    # Disable TTY allocation when stdin/stdout are not terminals (pipes/CI).
    if [ ! -t 0 ] || [ ! -t 1 ]; then
        tty_opt="-T"
    fi

    # $services intentionally unquoted: one word per active service handle.
    $(compose_with_options $services "$service") run $tty_opt --rm "$service" "$@"
}
run_stats() {
    # Show container resource usage; when stdout is not a terminal, print
    # a single snapshot instead of the live stream.
    if [ -t 1 ]; then
        $(compose_with_options "*") stats "$@"
    else
        $(compose_with_options "*") stats --no-stream "$@"
    fi
}
run_attach() {
    # Attach the current terminal to an already-running service container.
    # Returns 1 when no service is given or the container is not running.
    local service_name=$1

    if [ -z "$service_name" ]; then
        log_error "Usage: harbor attach <service>"
        return 1
    fi

    local container_name=$(get_container_name "$service_name")

    # Only attach when 'docker ps' reports a matching container.
    if docker ps --filter "name=$container_name" | grep -q "$container_name"; then
        log_info "Attaching to container $container_name..."
        docker attach "$container_name"
    else
        log_error "Container $container_name is not running."
        return 1
    fi
}
run_hf_open() {
local search_term="${*// /+}"
local hf_url="https://huggingface.co/models?sort=trending&search=${search_term}"
sys_open "$hf_url"
}