#!/usr/bin/env bash
# chatgpt.sh -- Shell Wrapper for ChatGPT/DALL-E/Whisper/TTS
# v0.58.8 may/2024 by mountaineerbr GPL+3
set -o pipefail; shopt -s extglob checkwinsize cmdhist lithist histappend;
export COLUMNS LINES; ((COLUMNS>2)) || COLUMNS=80; ((LINES>2)) || LINES=24;
# API keys
#OPENAI_API_KEY=
#GOOGLE_API_KEY=
#MISTRAL_API_KEY=
# DEFAULTS
# Text cmpls model
MOD="gpt-3.5-turbo-instruct"
# Chat cmpls model
MOD_CHAT="${MOD_CHAT:-gpt-4-turbo}" #"gpt-4o"
# Image model (generations)
MOD_IMAGE="${MOD_IMAGE:-dall-e-3}"
# Whisper model (STT)
MOD_AUDIO="${MOD_AUDIO:-whisper-1}"
# Speech model (TTS)
MOD_SPEECH="${MOD_SPEECH:-tts-1}" #"tts-1-hd"
# LocalAI model
MOD_LOCALAI="${MOD_LOCALAI:-phi-2}"
# Ollama model
MOD_OLLAMA="${MOD_OLLAMA:-llama2}" #"llama2-uncensored:latest"
# Google AI model
MOD_GOOGLE="${MOD_GOOGLE:-gemini-1.0-pro-latest}"
# Mistral AI model
MOD_MISTRAL="${MOD_MISTRAL:-mistral-large-latest}"
# Bash readline mode
READLINEOPT="emacs" #"vi"
# Stream response
STREAM=1
# Prompter flush with <CTRL-D>
#OPTCTRD=
# Temperature
#OPTT=
# Top_p probability mass (nucleus sampling)
#OPTP=1
# Maximum response tokens
OPTMAX=1024
# Model capacity (auto)
#MODMAX=
# Presence penalty
#OPTA=
# Frequency penalty
#OPTAA=
# N responses of Best_of
#OPTB=
# Number of responses
OPTN=1
# Keep Alive (seconds, Ollama)
#OPT_KEEPALIVE=
# Set python tiktoken
#OPTTIK=
# Image size
#OPTS=1024x1024
# Image out format
OPTI_FMT=b64_json #url
# TTS voice
OPTZ_VOICE=echo #alloy, echo, fable, onyx, nova, and shimmer
# TTS voice speed
#OPTZ_SPEED= #0.25 - 4.0
# TTS out file format
OPTZ_FMT=opus #mp3, opus, aac, flac
# Recorder command, e.g. "sox -d"
#REC_CMD=""
# Media player command, e.g. "cvlc"
#PLAY_CMD=""
# Clipboard set command, e.g. "xsel -b", "pbcopy"
#CLIP_CMD=""
# Markdown renderer, e.g. "pygmentize -s -lmd", "glow", "mdless", "mdcat"
#MD_CMD="bat"
# Fold response (wrap at white spaces)
OPTFOLD=1
# Inject restart text
#RESTART=""
# Inject start text
#START=""
# Chat mode of text cmpls sets "\nQ: " and "\nA:"
# Restart/Start seqs have priority
# INSTRUCTION
# Chat completions, chat mode only
# INSTRUCTION=""
INSTRUCTION_CHAT="${INSTRUCTION_CHAT-The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.}"
# Awesome-chatgpt-prompts URL
AWEURL="https://raw.githubusercontent.com/f/awesome-chatgpt-prompts/main/prompts.csv"
AWEURLZH="https://raw.githubusercontent.com/PlexPt/awesome-chatgpt-prompts-zh/main/prompts-zh.json" #prompts-zh-TW.json
# CACHE AND OUTPUT DIRECTORIES
CACHEDIR="${CACHEDIR:-${XDG_CACHE_HOME:-$HOME/.cache}}/chatgptsh"
OUTDIR="${OUTDIR:-${XDG_DOWNLOAD_DIR:-$HOME/Downloads}}"
# Colour palette
# Normal Colours # Bold # Background
Black='\e[0;30m' BBlack='\e[1;30m' On_Black='\e[40m' \
Red='\e[0;31m' BRed='\e[1;31m' On_Red='\e[41m' \
Green='\e[0;32m' BGreen='\e[1;32m' On_Green='\e[42m' \
Yellow='\e[0;33m' BYellow='\e[1;33m' On_Yellow='\e[43m' \
Blue='\e[0;34m' BBlue='\e[1;34m' On_Blue='\e[44m' \
Purple='\e[0;35m' BPurple='\e[1;35m' On_Purple='\e[45m' \
Cyan='\e[0;36m' BCyan='\e[1;36m' On_Cyan='\e[46m' \
White='\e[0;37m' BWhite='\e[1;37m' On_White='\e[47m' \
Inv='\e[0;7m' Nc='\e[m' Alert=$BWhite$On_Red \
Bold='\033[0;1m'
# Load user defaults
CONFFILE="${CHATGPTRC:-$HOME/.chatgpt.conf}"
[[ -f "${OPTF}${CONFFILE}" ]] && . "$CONFFILE"; OPTMM= #!#fix <=248c483-github
# Set file paths
FILE="${CACHEDIR%/}/chatgpt.json"
FILECHAT="${FILECHAT:-${CACHEDIR%/}/chatgpt.tsv}"
FILEWHISPER="${FILECHAT%/*}/whisper.json"
FILEWHISPERLOG="${OUTDIR%/*}/whisper_log.txt"
FILETXT="${CACHEDIR%/}/chatgpt.txt"
FILEOUT="${OUTDIR%/}/dalle_out.png"
FILEOUT_TTS="${OUTDIR%/}/tts.${OPTZ_FMT:=mp3}"
FILEIN="${CACHEDIR%/}/dalle_in.png"
FILEINW="${CACHEDIR%/}/whisper_in.mp3"
FILEAWE="${CACHEDIR%/}/awesome-prompts.csv"
FILEFIFO="${CACHEDIR%/}/fifo.buff"
USRLOG="${OUTDIR%/}/${FILETXT##*/}"
HISTFILE="${CACHEDIR%/}/history_bash"
HISTCONTROL=erasedups:ignoredups
HISTSIZE=512 SAVEHIST=512 HISTTIMEFORMAT='%F %T '
# API URL / endpoint
API_HOST="https://api.openai.com";
# Def hist, txt chat types
Q_TYPE="\\nQ: "
A_TYPE="\\nA:"
S_TYPE="\\n\\nSYSTEM: "
I_TYPE="[insert]"
# Globs
SPC="*([$IFS])"
SPC1="*(\\\\[ntrvf]|[$IFS])"
NL=$'\n' BS=$'\b'
UAG='user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36' #chrome on win10
PLACEHOLDER='sk-CbCCb0CC0bbbCbb0CCCbC0CbbbCC00bC00bbCbbCbbbCbb0C'
HELP="Name
${0##*/} -- Wrapper for ChatGPT / DALL-E / Whisper / TTS
Synopsis
${0##*/} [-cc|-d|-qq] [opt..] [PROMPT|TEXT_FILE]
${0##*/} -i [opt..] [X|L|P][hd] [PROMPT] #dall-e-3
${0##*/} -i [opt..] [S|M|L] [PROMPT]
${0##*/} -i [opt..] [S|M|L] [PNG_FILE]
${0##*/} -i [opt..] [S|M|L] [PNG_FILE] [MASK_FILE] [PROMPT]
${0##*/} -w [opt..] [AUDIO_FILE|.] [LANG] [PROMPT]
${0##*/} -W [opt..] [AUDIO_FILE|.] [PROMPT-EN]
${0##*/} -z [OUTFILE|FORMAT|-] [VOICE] [SPEED] [PROMPT]
${0##*/} -ccWwz [opt..] -- [whisper_arg..] -- [tts_arg..]
${0##*/} -l [MODEL]
${0##*/} -TTT [-v] [-m[MODEL|ENCODING]] [INPUT|TEXT_FILE]
${0##*/} -HHH [/HIST_FILE|.]
${0##*/} -HHw
Description
With no options set, complete INPUT in single-turn mode of
plain text completions.
Option -d starts a multi-turn session in plain text completions,
and does not set further options automatically.
Set option -c to start multi-turn chat mode via text completions
(davinci and lesser models) or -cc for native chat completions
(gpt-3.5+ models). In chat mode, some options are automatically
set to un-lobotomise the bot. Set -E to exit on response.
Option -C resumes (continues from) last history session.
Positional arguments are read as a single PROMPT. Optionally set
INSTRUCTION with option -S.
In multi-turn interactions, prompts starting with a colon \`:' are
appended as user messages to the request block, while double colons
\`::' append the prompt as instruction / system without initiating
a new API request.
With vision models, insert an image to the prompt with chat command
\`!img [url|filepath]'. Image urls and files can also be appended
by typing the operator pipe and a valid input at the end of the
text prompt, such as \`| [url|filepath]'.
If the first positional argument of the script starts with the
command operator \`/', the command \`/session [HIST_NAME]' to change
to or create a new history file is assumed (with options -ccCdHH).
Option -i generates or edits images. A text prompt is required for
generations. An image file is required for variations. Edits need
an image file, a mask (or the image must have a transparent layer),
and a text prompt to direct the editing.
Size of output image may be set as the first positional parameter,
options are: \`256x256' (S), \`512x512' (M), \`1024x1024' (L),
\`1792x1024' (X), and \`1024x1792' (P). The parameter \`hd' may also
be set for quality (Dall-E-3), such as \`Xhd', or \`1792x1024hd'.
Option -w transcribes audio to any language, and option -W translates
audio to English text. Set these options twice to have phrase-level
timestamps, e.g. -ww, and -WW.
Option -z synthesises voice from text (TTS models). Set a voice as
the first positional parameter (\`alloy', \`echo', \`fable', \`onyx',
\`nova', or \`shimmer'). Set the second positional parameter as the
speed (0.25 - 4.0), and finally the output file name or the format,
such as \`./new_audio.mp3' (\`mp3', \`opus', \`aac', and \`flac'),
or \`-' for stdout. Set options -vz to not play received output.
Option -y sets python tiktoken instead of the default script hack
to preview token count. Set this option for accurate history
context length (fast).
Input sequences \`\\n' and \`\\t' are only treated specially in
restart, start and stop sequences!
A personal OpenAI API key is required, set the environment variable or option --api-key.
See Also
Check the man page for extended description of interface and
settings. See the online man page and script usage examples at:
<https://gitlab.com/fenixdragao/shellchatgpt>.
Environment
BLOCK_USR
BLOCK_USR_TTS Extra options for the request JSON block
(e.g. \`\"seed\": 33, \"dimensions\": 1024').
CACHEDIR Script cache directory base.
CHATGPTRC Path to the user configuration file.
Defaults=\"${CHATGPTRC:-${CONFFILE:-"$HOME/.chatgpt.conf"}}\"
FILECHAT Path to a history / session TSV file.
INSTRUCTION Initial instruction, or system message.
INSTRUCTION_CHAT
Initial instruction, or system message (chat mode).
MOD_CHAT
MOD_IMAGE
MOD_AUDIO
MOD_SPEECH
MOD_LOCALAI
MOD_OLLAMA
MOD_MISTRAL
MOD_GOOGLE Set default models for each endpoint / integration.
OLLAMA_API_HOST Ollama host URL (with option -O).
OPENAI_API_HOST
OPENAI_API_HOST_STATIC
Custom host URL. The STATIC parameter disables
endpoint auto-selection.
OPENAI_KEY
OPENAI_API_KEY
GOOGLE_API_KEY
MISTRAL_API_KEY OpenAI, GoogleAI, and MistralAI API keys.
OUTDIR Output directory for received images and audio.
VISUAL
EDITOR Text editor for external prompt editing.
Defaults=\"${VISUAL:-${EDITOR:-vim}}\"
CLIP_CMD Clipboard set command, e.g. \`xsel -b', \`pbcopy'.
PLAY_CMD Audio player command, e.g. \`mpv --no-video --vo=null'.
REC_CMD Audio recorder command, e.g. \`sox -d'.
Chat Commands
In chat mode, commands are introduced with either \`!' or \`/' as
operators. These commands allow users to modify their interaction
parameters within the chat.
------ ---------- ---------------------------------------
--- Misc Commands ---------------------------------------------
-S :, :: [PROMPT] Append user/system prompt to request.
-S. -. [NAME] Load and edit custom prompt.
-S/ -S% [NAME] Load and edit awesome prompt (zh).
-Z !last Print last response JSON.
! !r, !regen Regenerate last response.
!! !rr Regenerate response, edit prompt first.
!i !info Info on model and session settings.
!img !media [FILE|URL] Append image/media/url to prompt.
!url - [URL] Dump URL text, optionally edit it.
!url: - [URL] Same as !url but append output as user.
!j !jump Jump to request, append response primer.
!!j !!jump Jump to request, no response priming.
!md !markdown [SOFTW] Toggle markdown support in response.
!!md !!markdown [SOFTW] Render last response in markdown.
!rep !replay Replay last TTS audio response.
!res !resubmit Resubmit last TTS recorded input.
!sh !shell [CMD] Run shell, or command, and edit output.
!sh: !shell: [CMD] Same as !sh but append output as user.
!!sh !!shell [CMD] Run interactive shell (w/ cmd) and exit.
--- Script Settings and UX ------------------------------------
!fold !wrap Toggle response wrapping.
-g !stream Toggle response streaming.
-l !models [NAME] List language models or model details.
-o !clip Copy responses to clipboard.
-u !multi Toggle multiline, ctrl-d flush.
-uu !!multi Multiline, one-shot, ctrl-d flush.
-U -UU Toggle cat prompter, or set one-shot.
!cat - [FILE] Cat prompter (once, ctrl-d), or cat file.
!cat: - [FILE] Same as !cat but append prompt as user.
-V !context Print context before request (see -HH).
-VV !debug Dump raw request block and confirm.
-v !ver Toggle verbose modes.
-x !ed Toggle text editor interface.
-xx !!ed Single-shot text editor.
-y !tik Toggle python tiktoken use.
!q !quit Exit. Bye.
!? !help Print this help snippet.
--- Model Settings --------------------------------------------
-Nill !Nill Toggle model max response (chat cmpls).
-M !NUM !max [NUM] Set max response tokens.
-N !modmax [NUM] Set model token capacity.
-a !pre [VAL] Set presence penalty.
-A !freq [VAL] Set frequency penalty.
-b !best [NUM] Set best-of n results.
-K !topk [NUM] Set top_k.
!ka !keep-alive [NUM] Set duration of model load in memory.
-m !mod [MOD] Set model by name, or pick from list.
-n !results [NUM] Set number of results.
-p !topp [VAL] Set top_p.
-r !restart [SEQ] Set restart sequence.
-R !start [SEQ] Set start sequence.
-s !stop [SEQ] Set one stop sequence.
-t !temp [VAL] Set temperature.
-w !rec [ARGS] Toggle voice chat mode (Whisper).
-z !tts [ARGS] Toggle TTS chat mode (speech out).
!blk !block [ARGS] Set and add options to JSON request.
- !multimodal Toggle model as multimodal.
--- Session Management ----------------------------------------
-H !hist Edit raw history file in editor.
-HH !req Print session history (see -V).
-L !log [FILEPATH] Save to log file (pretty-print).
!br !new, !break Start new session (session break).
!ls !list [GLOB] List History files with name glob,
Prompts \`pr', Awesome \`awe', or all \`.'.
!grep !sub [REGEX] Search sessions and copy to tail.
!c !copy [SRC_HIST] [DEST_HIST]
Copy session from source to destination.
!f !fork [DEST_HIST]
Fork current session to destination.
!k !kill [NUM] Comment out n last entries in hist file.
!!k !!kill [[0]NUM] Dry-run of command !kill.
!s !session [HIST_FILE]
Change to, search for, or create hist file.
!!s !!session [HIST_FILE]
Same as !session, break session.
------ ---------- ---------------------------------------
E.g.: \`/temp 0.7', \`!modgpt-4', \`-p 0.2', and \`/s hist_name'.
Change chat context at run time with the \`!hist' command to edit
the raw history file (delete or comment out entries).
To preview a prompt completion, append a forward slash \`/' to it.
Regenerate it again or flush / accept the prompt and response.
After a response has been written to the history file, regenerate
it with command \`!regen' or type in a single exclamation mark or
forward slash in the new empty prompt (twice for editing the
prompt before request).
Type in a backslash \`\\' as the last character of the input line
to append a literal newline, or press <CTRL-V> + <CTRL-J>.
Press <CTRL-\\> to terminate the script.
Options
Model Settings
-@, --alpha [[VAL%]COLOUR]
Set transparent colour of image mask. Def=black.
Fuzz intensity can be set with [VAL%]. Def=0%.
-Nill
Unset model max response (chat cmpls only).
-NUM
-M, --max [NUM[-NUM]]
Set maximum number of \`response tokens'. Def=$OPTMAX.
A second number in the argument sets model capacity.
-N, --modmax [NUM]
Set \`model capacity' tokens. Def=_auto_, Fallback=4000.
-a, --presence-penalty [VAL]
Set presence penalty (cmpls/chat, -2.0 - 2.0).
-A, --frequency-penalty [VAL]
Set frequency penalty (cmpls/chat, -2.0 - 2.0).
-b, --best-of [NUM]
Set best of, must be greater than opt -n (cmpls). Def=1.
-B, --logprobs [NUM]
Request log probabilities, see -Z (cmpls, 0 - 5).
-K, --top-k [NUM]
Set Top_k value (local-ai, ollama, google).
--keep-alive, --ka=[NUM]
Set how long the model will stay loaded into memory (ollama).
-m, --model [MOD]
Set language MODEL name, or set it as \`.' to pick
from the list. Def=$MOD, $MOD_CHAT.
--multimodal
Set model as multimodal.
-n, --results [NUM]
Set number of results. Def=$OPTN.
-p, --top-p [VAL]
Set Top_p value, nucleus sampling (cmpls/chat, 0.0 - 1.0).
-r, --restart [SEQ]
Set restart sequence string (cmpls).
-R, --start [SEQ]
Set start sequence string (cmpls).
-s, --stop [SEQ]
Set stop sequences, up to 4. Def=\"<|endoftext|>\".
-S, --instruction [INSTRUCTION|FILE]
Set an instruction prompt. It may be a text file.
-t, --temperature [VAL]
Set temperature value (cmpls/chat/whisper),
(0.0 - 2.0, whisper 0.0 - 1.0). Def=${OPTT:-0}.
Script Modes
-c, --chat
Chat mode in text completions, session break.
-cc Chat mode in chat completions, session break.
-C, --continue, --resume
Continue from (resume) last session (cmpls/chat).
-d, --text
Start new multi-turn session in plain text completions.
-e, --edit
Edit first input from stdin, or file read (cmpls/chat).
-E, --exit
Exit on first run (even with -cc).
-g, --stream (defaults)
Set response streaming.
-G, --no-stream
Unset response streaming.
-i, --image [PROMPT]
Generate images given a prompt.
-i [PNG]
Create variations of a given image.
-i [PNG] [MASK] [PROMPT]
Edit image with mask, and prompt (required).
-qq, --insert
Insert text mode. Use \`[insert]' tag within the prompt.
May be set twice for multi-turn.
-S .[PROMPT_NAME][.], -.[PROMPT_NAME][.]
-S ,[PROMPT_NAME], -,[PROMPT_NAME]
Load, search for, or create custom prompt.
Set \`..[prompt]' to silently load prompt.
Set \`.?' to list prompt template files.
Set \`,[prompt]' to edit the prompt file.
-S /[AWESOME_PROMPT_NAME]
-S %[AWESOME_PROMPT_NAME_ZH]
Set or search an awesome-chatgpt-prompt(-zh).
Set \`//' or \`%%' to refresh cache. Davinci+ models.
-TTT, --tiktoken
Count input tokens with Tiktoken. Set twice to print
tokens, thrice to list available encodings. Set the model
or encoding with option -m. It heeds options -ccm.
-w, --transcribe [AUD] [LANG] [PROMPT]
Transcribe audio file into text (whisper models).
LANG is optional. A prompt that matches the audio language
is optional. Set twice to get phrase-level timestamps.
-W, --translate [AUD] [PROMPT-EN]
Translate audio file into English text (whisper models).
Set twice to get phrase-level timestamps.
Script Settings
--api-key [KEY]
Set OpenAI API key.
-f, --no-conf
Ignore user configuration file.
-F Edit configuration file, if it exists.
\$CHATGPTRC="${CONFFILE/"$HOME"/"~"}".
-FF Dump template configuration file to stdout.
--fold (defaults), --no-fold
Set or unset response folding (wrap at white spaces).
--google
Set Google Gemini integration (cmpls/chat).
-h, --help
Print this help page.
-H, --hist [/HIST_FILE]
Edit history file with text editor or pipe to stdout.
A hist file name can be optionally set as argument.
-HH, -HHH [/HIST_FILE]
Pretty print last history session to stdout (set twice).
Set thrice to print commented out hist entries, too.
Heeds -ccdrR to print the specified (re-)start seqs.
-k, --no-colour
Disable colour output. Def=auto.
-l, --list-models [MOD]
List models or print details of MODEL.
-L, --log [FILEPATH]
Set log file. FILEPATH is required.
--localai
Set LocalAI integration (cmpls/chat).
--mistral
Set Mistral AI integration (chat).
--md, --markdown, --markdown=[SOFTWARE]
Enable markdown rendering in response. Software is optional:
\`bat', \`pygmentize', \`glow', \`mdcat', or \`mdless'.
--no-md, --no-markdown
Disable markdown rendering.
-o, --clipboard
Copy response to clipboard.
-O, --ollama
Set and request to Ollama server (cmpls/chat).
-u, --multi
Toggle multiline prompter, <CTRL-D> flush.
-U, --cat
Set cat prompter, <CTRL-D> flush.
-v, --verbose
Less verbose. Sleep after response in voice chat (-vvccw).
May be set multiple times.
-V Pretty-print context before request.
-VV Dump raw request block to stderr (debug).
-x, --editor
Edit prompt in text editor.
-y, --tik
Set tiktoken for token count (cmpls/chat).
-Y, --no-tik (defaults)
Unset tiktoken use (cmpls/chat).
-z, --tts [OUTFILE|FORMAT|-] [VOICE] [SPEED] [PROMPT]
Synthesise speech from text prompt, set -v to not play.
-Z, --last
Print last response JSON data."
ENDPOINTS=(
/v1/completions #0
/v1/moderations #1
/v1/edits #2 -> chat/completions
/v1/images/generations #3
/v1/images/variations #4
/v1/embeddings #5
/v1/chat/completions #6
/v1/audio/transcriptions #7
/v1/audio/translations #8
/v1/images/edits #9
/v1/audio/speech #10
/v1/models #11
)
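#the request path is selected by index EPN, e.g. EPN=0 -> /v1/completions,
#EPN=6 -> /v1/chat/completions, EPN=10 -> /v1/audio/speech (see set_model_epnf)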
#https://platform.openai.com/docs/{deprecations/,models/,model-index-for-researchers/}
#https://help.openai.com/en/articles/{6779149,6643408}
#set model endpoint based on its name
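#usage: set_model_epnf [MODEL_NAME]
#e.g. set_model_epnf "gpt-4-turbo"  #-> EPN=6 (chat completions)
#     set_model_epnf "whisper-1"    #-> EPN=7 (audio transcriptions)
#     set_model_epnf "tts-1"        #-> EPN=10 (speech)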
function set_model_epnf
{
unset OPTEMBED TKN_ADJ EPN6
((LOCALAI+OLLAMA+GOOGLEAI)) && is_visionf "$1" && set -- vision;
case "$1" in
*dall-e*|*stable*diffusion*)
# 3 generations 4 variations 9 edits
((OPTII)) && EPN=4 || EPN=3;
((OPTII_EDITS)) && EPN=9;;
tts-*|*-tts-*) EPN=10;;
*whisper*) ((OPTWW)) && EPN=8 || EPN=7;;
code-*) case "$1" in
*search*) EPN=5 OPTEMBED=1;;
*) EPN=0;;
esac;;
text-*|*turbo-instruct*|*davinci*|*babbage*|ada|text*moderation*|*embed*|*similarity*|*search*)
case "$1" in
*embed*|*similarity*|*search*) EPN=5 OPTEMBED=1;;
text*moderation*) EPN=1 OPTEMBED=1;;
*) EPN=0;;
esac;;
gpt-4*|gpt-3.5*|gpt-*|*turbo*|*vision*)
EPN=6 EPN6=6 OPTB= OPTBB=
((OPTC)) && OPTC=2
#set token adjustment per message
case "$MOD" in
gpt-3.5-turbo-0301) ((TKN_ADJ=4+1));;
gpt-3.5-turbo*|gpt-4*|*) ((TKN_ADJ=3+1));;
esac #https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
#also: <https://tiktokenizer.vercel.app/>
;;
*) #fallback
case "$1" in
*embed*|*similarity*|*search*)
EPN=5 OPTEMBED=1;;
*)
if ((OPTZ && !(MTURN+CHAT_ENV) ))
then OPTCMPL= OPTC= EPN=10;
elif ((OPTW && !(MTURN+CHAT_ENV) ))
then OPTCMPL= OPTC= EPN=7;
elif ((OPTI && !(MTURN+CHAT_ENV) ))
then # 3 generations 4 variations 9 edits
((OPTII)) && EPN=4 || EPN=3;
((OPTII_EDITS)) && EPN=9;
elif ((OPTEMBED))
then OPTCMPL= OPTC= EPN=1;
elif ((OPTCMPL))
then OPTC= EPN=0;
elif ((OPTC>1))
then OPTCMPL= EPN=6;
elif ((OPTC))
then OPTCMPL= EPN=0;
else EPN=0; #defaults
fi;;
esac
return 1;;
esac
}
#set ``model capacity''
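#usage: model_capf [MODEL_NAME]
#e.g. model_capf "gpt-4-turbo"  #-> MODMAX=128000
#     model_capf "davinci"      #-> MODMAX=2049 (unknown models fall back to 4000)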
function model_capf
{
case "${1##ft:}" in #set model max tokens, ft: fine-tune models
text*moderation*) MODMAX=150000;;
text-davinci-002-render-sha) MODMAX=8191;;
text-embedding-ada-002|*embedding*-002|*search*-002) MODMAX=8191;;
davinci-002|babbage-002) MODMAX=16384;;
davinci|curie|babbage|ada) MODMAX=2049;;
code-davinci-00[2-9]) MODMAX=8001;;
gpt-4[a-z]*|gpt-[5-9]*|gpt-4-1106*|gpt-4-*preview*|gpt-4-vision*|gpt-4-turbo|gpt-4-turbo-202[4-9]-[0-1][0-9]-[0-3][0-9]) MODMAX=128000;;
gpt-3.5-turbo-1106) MODMAX=16385;;
gpt-4*32k*|*32k) MODMAX=32768;;
gpt-3.5*16K*|*turbo*16k*|*16k) MODMAX=16384;;
gpt-4*|*-bison*|*-unicorn) MODMAX=8192;;
*turbo*|*davinci*) MODMAX=4096;;
gemini*-1.[5-9]*|gemini*-[2-9].[0-9]*) MODMAX=128000;;
gemini*-vision*) MODMAX=16384;;
gemini*-pro*) MODMAX=32760;;
*mi[sx]tral*) MODMAX=32000;;
*embedding-gecko*) MODMAX=3072;;
*embed*|*search*) MODMAX=2046;;
aqa) MODMAX=7168;;
*) MODMAX=4000;;
esac
}
#make cmpls request
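#the JSON payload is read from $BLOCK and posted to the endpoint at index EPN
#minimal illustrative call (assumes OPENAI_API_KEY is set):
#  BLOCK='{"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"Hi"}]}' EPN=6 __promptf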
function __promptf
{
curl "$@" --fail-with-body -L "${MISTRAL_API_HOST:-$API_HOST}${ENDPOINTS[EPN]}" \
-X POST \
-H "Content-Type: application/json" \
-H "Authorization: Bearer ${MISTRAL_API_KEY:-$OPENAI_API_KEY}" \
-d "$BLOCK" \
&& { [[ \ $*\ = *\ -s\ * ]] || __clr_lineupf ;}
}
function _promptf
{
typeset chunk_n chunk str n
json_minif
if ((STREAM))
then set -- -s "$@" -S --no-buffer; [[ -s $FILE ]] &&
mv -f -- "$FILE" "${FILE%.*}.2.${FILE##*.}"; : >"$FILE" #clear buffer asap
__promptf "$@" | while IFS= read -r chunk #|| [[ -n $chunk ]]
do
chunk=${chunk##*([$' \t'])[Dd][Aa][Tt][Aa]:*([$' \t'])}
[[ $chunk = *([$IFS]) ]] && continue
[[ $chunk = *([$IFS])\[+([A-Z])\] ]] && continue
if ((!n)) #first pass, del leading spaces
then ((OPTC)) && {
str='text":'; ((GOOGLEAI)) ||
if ((EPN==0)) && ((OLLAMA))
then str='response":';
elif ((EPN==6))
then str='content":';
fi
chunk_n="${chunk/${str}*(\ )\"+(\ |\\[ntr])/$str\"}"
[[ $chunk_n = *"${str}"\"\"* ]] && continue
}; ((++n));
printf '%s\n' "${chunk_n:-$chunk}"; chunk_n= ;
else printf '%s\n' "$chunk"
fi; printf '%s\n' "$chunk" >>"$FILE"
done
else
((OPTV>1)) && set -- -s "$@"
set -- -\# "$@" -o "$FILE"
__promptf "$@"
fi
}
function promptf
{
typeset pid
if ((OPTVV)) && ((!OPTII))
then block_printf || return
fi
if ((STREAM))
then if ((RETRY>1))
then cat -- "$FILE"
else ((OPTK)) || printf "${BYELLOW}%s\\b${NC}" "X" >&2;
_promptf;
fi | prompt_printf
else
printf "${BYELLOW}%*s\\r${YELLOW}" "$COLUMNS" "X" >&2;
((RETRY>1)) || COLUMNS=$((COLUMNS-3)) _promptf;
printf "${NC}" >&2;
if ((OPTI))
then prompt_imgprintf
else prompt_printf
fi
fi & pid=$! PIDS+=($!) #catch <CTRL-C>
trap "trap 'exit' INT; kill -- $pid 2>/dev/null; echo >&2;" INT;
wait $pid; echo >&2;
trap 'exit' INT;
if ((OPTCLIP)) || [[ ! -t 1 ]]
then typeset out; out=$(
((STREAM)) && set -- -j "$@"
prompt_pf -r "$@" "$FILE"
)
((!OPTCLIP)) || (${CLIP_CMD:-false} <<<"$out" &) #clipboard
[[ -t 1 ]] || printf '%s\n' "$out" >&2 #pipe + stderr
return 0
fi
}
#print tokens from response
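#e.g. response_tknf "$FILE"  #-> prompt tokens, completion tokens, and timestamp, one per line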
function response_tknf
{
jq -r '.usage.prompt_tokens//"0",
.usage.completion_tokens//"0",
(.created//empty|strflocaltime("%Y-%m-%dT%H:%M:%S%Z"))' "$@";
}
#https://community.openai.com/t/usage-stats-now-available-when-using-streaming-with-the-chat-completions-api-or-completions-api/738156
#clear impending stream (tty)
function __clr_ttystf
{
typeset REPLY n;
while IFS= read -r -n 1 -t 0.1;
do ((++n)); ((n<16384)) || break;
done </dev/tty;
}
#clear n lines up as needed (assumes one `new line').
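#usage: __clr_lineupf [CHAR_COUNT]
#e.g. __clr_lineupf 200  #with COLUMNS=80, clears 3 wrapped lines above the cursor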
function __clr_lineupf
{
typeset chars n
chars="${1:-1}" ;((COLUMNS))||COLUMNS=80
for ((n=0;n<((chars+(COLUMNS-1))/COLUMNS);++n))
do printf '\e[A\e[K' >&2
done
}
#https://www.zsh.org/mla/workers//1999/msg01550.html
#https://superchlorine.com/2013/08/kill-winch-to-fix-bash-prompt-wrapping-to-the-same-line/
# spin.bash -- provide a `spinning wheel' to show progress
# Copyright 1997 Chester Ramey (adapted)
#SPIN_CHARS=(⣟ ⣯ ⣷ ⣾ ⣽ ⣻ ⢿ ⡿)  #alternative spinners
#SPIN_CHARS=(⠏ ⠇ ⠧ ⠦ ⠴ ⠼ ⠸ ⠹ ⠙ ⠋)
SPIN_CHARS=(\| \\ - /)
function __spinf
{
((++SPIN_INDEX)); ((SPIN_INDEX%=${#SPIN_CHARS[@]}));
printf "%s\\b" "${SPIN_CHARS[SPIN_INDEX]}" >&2;
}
#avoid animations on pipelines
[[ -t 1 ]] || function __spinf { : ;}
#print input and backspaces for all chars
function __printbf { printf "%s${1//?/\\b}" "${1}" >&2; };
#trim leading spaces
#usage: trim_leadf [string] [glob]
function trim_leadf
{
typeset var ind sub
var="$1" ind=${INDEX:-160}
sub="${var:0:$ind}"
((SMALLEST)) && sub="${sub#$2}" || sub="${sub##$2}"
var="${sub}${var:$ind}"
printf '%s\n' "$var"
}
#trim trailing spaces
#usage: trim_trailf [string] [glob]
function trim_trailf
{
typeset var ind sub
var="$1" ind=${INDEX:-160}
if ((${#var}>ind))
then sub="${var:$((${#var}-${ind}))}"
((SMALLEST)) && sub="${sub%$2}" || sub="${sub%%$2}"
var="${var:0:$((${#var}-${ind}))}${sub}"
else ((SMALLEST)) && var="${var%$2}" || var="${var%%$2}"
fi; printf '%s\n' "$var"
}
#fast trim
#usage: trimf [string] [glob]
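#e.g. trimf $'\n  hello world\t ' "$SPC"  #-> "hello world" ($SPC matches runs of IFS whitespace)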
function trimf
{
trim_leadf "$(trim_trailf "$1" "$2")" "$2"
}
#pretty print request body or dump and exit
function block_printf
{
if ((OPTVV>1))
then [[ ${BLOCK:0:10} = @* ]] && cat -- "${BLOCK##@}" | less >&2
printf '\n%s\n%s\n' "${ENDPOINTS[EPN]}" "$BLOCK"; OPTAWE= SKIP=
printf '\n%s\n' '<Enter> continue, <Ctrl-D> redo, <Ctrl-C> exit'
typeset REPLY; __clr_ttystf; read </dev/tty || return 200;
else ((STREAM)) && set -- -j "$@"
jq -r "$@" '.instruction//empty, .input//empty,
.prompt//(.messages[]|.role+": "+.content)//empty' <<<"$BLOCK" | foldf
((!OPTC)) || printf ' '
fi >&2
}
#prompt confirmation prompter
function new_prompt_confirmf
{
typeset REPLY extra
((${#1})) && extra=", te[x]t editor, m[u]ltiline"
((${#2})) && ((OPTW)) && extra="${extra}, [w]hisper_off"
_sysmsgf 'Confirm?' "[Y]es, [n]o, [e]dit${extra}, [r]edo, or [a]bort " ''
REPLY=$(__read_charf); __clr_lineupf $((8+1+40+${#extra})) #!#
case "$REPLY" in
[AaQq]) return 201;; #break
[Rr]) return 200;; #redo
[Ee]|$'\e') return 199;; #edit
[VvXx]) return 198;; #text editor
[UuMm]) return 197;; #multiline
[Ww]) return 196;; #whisper off
[NnOo]) unset REC_OUT ;return 1;; #no
esac #yes
}
#read one char from user
function __read_charf
{
typeset REPLY ret
((NO_CLR)) || __clr_ttystf;
IFS=$'\n' read -r -n 1 "$@" </dev/tty; ret=$?;
printf '%.1s\n' "$REPLY";
[[ -n $REPLY ]] && echo >&2;
return $ret
}
#main user input read
#usage: read_mainf [read_opt].. VARIABLE_NAME
function read_mainf
{
IFS= read -r -e -d $'\r' ${OPTCTRD:+-d $'\04'} "$@"
}
#https://www.reddit.com/r/bash/comments/ppp6a2/is_there_a_way_to_paste_multiple_lines_where_read/
#print response
function prompt_printf
{
typeset stream ret
if ((STREAM))
then typeset OPTC OPTV; stream=$STREAM;
else set -- "$FILE"
((OPTBB)) && jq -r '(.choices[].logprobs)?' "$@" >&2
fi
if ((OPTEMBED))
then jq -r '(.data),
(.model//"'"$MOD"'"//"?")+" ("+(.object//"?")+") ["
+(.usage.prompt_tokens//"?"|tostring)+" / "
+(.usage.total_tokens//"?"|tostring)+" tkns]"' "$@" >&2
return
fi
if ((OPTMD)) && ((MD_CMD_UNBUFF))
then
JQCOL= JQCOL2= prompt_prettyf "$@" | mdf;
else
prompt_prettyf "$@" | foldf; ret=$?;
if ((OPTMD))
then printf "${NC}\\n" >&2;
prompt_pf -r ${stream:+-j --unbuffered} "$@" "$FILE" 2>/dev/null | mdf >&2 2>/dev/null;
fi
fi || prompt_pf -r ${stream:+-j --unbuffered} "$@" "$FILE" 2>/dev/null;
return $ret;
}
function prompt_prettyf
{
jq -r ${stream:+-j --unbuffered} "${JQCOLNULL} ${JQCOL} ${JQCOL2}
byellow
+ (.choices[1].index as \$sep | if .choices? != null then .choices[] else . end |
( (.text//.response//(.message.content)//(.delta.content)//\"\" ) |
if (${OPTC:-0}>0) then (gsub(\"^[\\\\n\\\\t ]\"; \"\") | gsub(\"[\\\\n\\\\t ]+$\"; \"\")) else . end)
+ if .finish_reason? != \"stop\" then (if (.finish_reason? + \"\") != \"\" then red+\"(\"+.finish_reason+\")\"+byellow else null end) else null end,
if \$sep then \"---\" else empty end) + reset" "$@" && _p_suffixf;
}
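#print raw response text; file arguments are detected, remaining args are passed to jq
#e.g. prompt_pf -r "$FILE"  #plain text of the last response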
function prompt_pf
{
typeset var
typeset -a opt
for var
do [[ -f $var ]] || { opt+=("$var"); shift ;}
done
set -- "(if .choices? != null then (.choices[$INDEX]) else . end |.text//.response//(.message.content)//(.delta.content))//(.data?)//empty" "$@"
((${#opt[@]})) && set -- "${opt[@]}" "$@"
{ jq "$@" && _p_suffixf ;} || ! cat -- "$@" >&2 2>/dev/null
}
#https://stackoverflow.com/questions/57298373/print-colored-raw-output-with-jq-on-terminal
#https://stackoverflow.com/questions/40321035/ #gsub(\"^[\\n\\t]\"; \"\")
function _p_suffixf { ((!${#SUFFIX} )) || printf '%s' "$(unescapef "$SUFFIX")" ;}
#open image with sys defaults
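#e.g. __openf "$FILEOUT"  #try xdg-open, open, feh, sxiv, then a web browser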
function __openf
{
if command -v xdg-open >/dev/null 2>&1
then xdg-open "$1"
elif command -v open >/dev/null 2>&1
then open "$1"
elif command -v feh >/dev/null 2>&1
then feh "$1"
elif command -v sxiv >/dev/null 2>&1
then sxiv "$1"
elif command -v firefox >/dev/null 2>&1
then firefox "$1"
elif command -v google-chrome-stable >/dev/null 2>&1
then google-chrome-stable "$1"
elif command -v google-chrome >/dev/null 2>&1
then google-chrome "$1"
else false
fi
}
#https://budts.be/weblog/2011/07/xdf-open-vs-exo-open/
#print image endpoint response
function prompt_imgprintf
{
typeset n m fname fout
if [[ $OPTI_FMT = b64_json ]]
then [[ -d "${FILEOUT%/*}" ]] || FILEOUT="${FILEIN}"
n=0 m=0
for fname in "${FILEOUT%.*}"*
do fname="${fname%.*}" fname="${fname##*[!0-9]}"
((m>fname)) || ((m=fname+1))
done
while jq -e ".data[${n}]" "$FILE" >/dev/null 2>&1
do fout="${FILEOUT%.*}${m}.png"
jq -r ".data[${n}].b64_json" "$FILE" | { base64 -d || base64 -D ;} > "$fout"
_sysmsgf 'File Out:' "${fout/"$HOME"/"~"}";
((OPTV)) || __openf "$fout" || function __openf { : ;}
((++n, ++m)); ((n<50)) || break;
done
((n)) || ! cat -- "$FILE" >&2;
else jq -r '.data[].url' "$FILE" || ! cat -- "$FILE" >&2;
fi &&
jq -r 'if .data[].revised_prompt then "\nREVISED PROMPT: "+.data[].revised_prompt else empty end' "$FILE" >&2
}
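#post an audio file to the transcription/translation endpoint (EPN=7 or 8)
#usage: prompt_audiof [AUDIO_FILE] [extra_curl_args..]
#e.g. (illustrative) prompt_audiof "$FILEINW" -F language="en"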
function prompt_audiof
{
((OPTVV)) && __warmsgf "Whisper:" "Model: ${MOD_AUDIO:-unset}, Temperature: ${OPTT:-unset}${*:+, }${*}" >&2
curl -\# ${OPTV:+-Ss} --fail-with-body -L "${API_HOST}${ENDPOINTS[EPN]}" \
-X POST \
-H "Authorization: Bearer $OPENAI_API_KEY" \
-H 'Content-Type: multipart/form-data' \
-F file="@$1" \
-F model="${MOD_AUDIO}" \
-F temperature="$OPTT" \
-o "$FILE" \
"${@:2}" && {
[[ -d $CACHEDIR ]] && printf '%s\n\n' "$(<"$FILE")" >> "$FILEWHISPER";
((OPTV)) || __clr_lineupf; ((CHAT_ENV)) || echo >&2;
}
}
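#list model ids, or print details of a single model
#e.g. list_modelsf           #list all available models
#     list_modelsf "gpt-4o"  #print details of one model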
function list_modelsf
{
((MISTRALAI)) && typeset OPENAI_API_KEY=$MISTRAL_API_KEY API_HOST=$MISTRAL_API_HOST
curl -\# --fail-with-body -L "${API_HOST}${ENDPOINTS[11]}${1:+/}${1}" \
-H "Authorization: Bearer $OPENAI_API_KEY" -o "$FILE" &&
if [[ -n $1 ]]
then jq . "$FILE" || ! cat -- "$FILE"
else jq -r '.data[].id' "$FILE" | sort \
&& { ((MISTRALAI)) || printf '%s\n' text-moderation-latest text-moderation-stable text-moderation-007 ;} \
|| ! cat -- "$FILE"
fi || ! __warmsgf 'Err:' 'Model list'
}
function pick_modelf
{