>>106158578
i9-10900K, dual-channel DDR4-3200, 2x RTX 3090, Windows, ik_llama.cpp; GPU core/memory clocks locked to 3D P0 via nvidia-smi -lgc / -lmc
200-300 t/s prompt processing (pp), 10-15 t/s token generation (tg)
@echo off
rem Launch script for llama-server.exe (ik_llama.cpp fork) serving GLM-4.5-Air
rem (IQ4_KSS quant, 2-file GGUF) across two GPUs plus CPU offload.
rem
rem Flag notes (comments cannot go between the ^-continued lines below):
rem   CUDA_VISIBLE_DEVICES=0,1   expose both GPUs to the process.
rem   --n-gpu-layers 999         request all layers on GPU; the -ot overrides
rem                              below then push most expert tensors around.
rem   -ts 23,19                  tensor split ratio between GPU0 and GPU1.
rem   --threads / --threads-batch 18   CPU threads for generation / batch
rem                              (prompt) processing.
rem   --ctx-size 32768           32k context window.
rem   --batch-size / --ubatch-size 2048   logical / physical batch sizes.
rem   --no-mmap                  read model into RAM instead of mmap-ing it.
rem   -fa                        flash attention.
rem   -fmoe                      ik_llama.cpp-specific; presumably fused
rem                              MoE kernels -- confirm against fork docs.
rem   -rtr                       ik_llama.cpp-specific; presumably run-time
rem                              tensor repacking -- confirm against fork docs.
rem   -ot <regex>=<backend>      tensor-override rules, first match wins:
rem                              expert tensors of blocks 0-14 -> GPU0,
rem                              blocks 15-27 -> GPU1, all remaining expert
rem                              tensors -> CPU (the final catch-all rule).
set CUDA_VISIBLE_DEVICES=0,1
llama-server.exe ^
-m "T:\models\GLM-4.5-Air-IQ4_KSS-00001-of-00002.gguf" ^
--n-gpu-layers 999 ^
-ts 23,19 ^
--threads 18 ^
--threads-batch 18 ^
--ctx-size 32768 ^
--batch-size 2048 ^
--ubatch-size 2048 ^
--no-mmap ^
-fa ^
-fmoe ^
-rtr ^
-ot "blk\.(0|1|2|3|4|5|6|7|8|9|10|11|12|13|14)\..*exps=CUDA0" ^
-ot "blk\.(15|16|17|18|19|20|21|22|23|24|25|26|27)\..*exps=CUDA1" ^
-ot "exps=CPU"