| @hf/thebloke/mistral-7b-instruct-v0.1-awq | mistral-7b-instruct-v0.1-awq | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 4096, Output Limit: 4096 |
| @cf/deepgram/aura-1 | aura-1 | 0.02 | 0.02 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @hf/mistral/mistral-7b-instruct-v0.2 | mistral-7b-instruct-v0.2 | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 3072, Output Limit: 4096 |
| @cf/tinyllama/tinyllama-1.1b-chat-v1.0 | tinyllama-1.1b-chat-v1.0 | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 2048, Output Limit: 2048 |
| @cf/qwen/qwen1.5-0.5b-chat | qwen1.5-0.5b-chat | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 32000, Output Limit: 32000 |
| @cf/meta/llama-3.2-11b-vision-instruct | llama-3.2-11b-vision-instruct | 0.05 | 0.68 | Provider: Cloudflare Workers AI, Context: 128000, Output Limit: 128000 |
| @hf/thebloke/llama-2-13b-chat-awq | llama-2-13b-chat-awq | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 4096, Output Limit: 4096 |
| @cf/meta/llama-3.1-8b-instruct-fp8 | llama-3.1-8b-instruct-fp8 | 0.15 | 0.29 | Provider: Cloudflare Workers AI, Context: 32000, Output Limit: 32000 |
| @cf/openai/whisper | whisper | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/stabilityai/stable-diffusion-xl-base-1.0 | stable-diffusion-xl-base-1.0 | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/meta/llama-2-7b-chat-fp16 | llama-2-7b-chat-fp16 | 0.56 | 6.67 | Provider: Cloudflare Workers AI, Context: 4096, Output Limit: 4096 |
| @cf/microsoft/resnet-50 | resnet-50 | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/runwayml/stable-diffusion-v1-5-inpainting | stable-diffusion-v1-5-inpainting | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/defog/sqlcoder-7b-2 | sqlcoder-7b-2 | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 10000, Output Limit: 10000 |
| @cf/meta/llama-3-8b-instruct | llama-3-8b-instruct | 0.28 | 0.83 | Provider: Cloudflare Workers AI, Context: 7968, Output Limit: 7968 |
| @cf/meta-llama/llama-2-7b-chat-hf-lora | llama-2-7b-chat-hf-lora | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 8192, Output Limit: 8192 |
| @cf/meta/llama-3.1-8b-instruct | llama-3.1-8b-instruct | 0.28 | 0.83 | Provider: Cloudflare Workers AI, Context: 7968, Output Limit: 7968 |
| @cf/openchat/openchat-3.5-0106 | openchat-3.5-0106 | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 8192, Output Limit: 8192 |
| @hf/thebloke/openhermes-2.5-mistral-7b-awq | openhermes-2.5-mistral-7b-awq | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 4096, Output Limit: 4096 |
| @cf/leonardo/lucid-origin | lucid-origin | 0.01 | 0.01 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/facebook/bart-large-cnn | bart-large-cnn | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/black-forest-labs/flux-1-schnell | flux-1-schnell | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 2048, Output Limit: N/A |
| @cf/deepseek-ai/deepseek-r1-distill-qwen-32b | deepseek-r1-distill-qwen-32b | 0.50 | 4.88 | Provider: Cloudflare Workers AI, Context: 80000, Output Limit: 80000 |
| @cf/google/gemma-2b-it-lora | gemma-2b-it-lora | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 8192, Output Limit: 8192 |
| @cf/fblgit/una-cybertron-7b-v2-bf16 | una-cybertron-7b-v2-bf16 | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 15000, Output Limit: 15000 |
| @cf/aisingapore/gemma-sea-lion-v4-27b-it | gemma-sea-lion-v4-27b-it | 0.35 | 0.56 | Provider: Cloudflare Workers AI, Context: 128000, Output Limit: N/A |
| @cf/meta/m2m100-1.2b | m2m100-1.2b | 0.34 | 0.34 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/meta/llama-3.2-3b-instruct | llama-3.2-3b-instruct | 0.05 | 0.34 | Provider: Cloudflare Workers AI, Context: 128000, Output Limit: 128000 |
| @cf/qwen/qwen2.5-coder-32b-instruct | qwen2.5-coder-32b-instruct | 0.66 | 1.00 | Provider: Cloudflare Workers AI, Context: 32768, Output Limit: 32768 |
| @cf/runwayml/stable-diffusion-v1-5-img2img | stable-diffusion-v1-5-img2img | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/google/gemma-7b-it-lora | gemma-7b-it-lora | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 3500, Output Limit: 3500 |
| @cf/qwen/qwen1.5-14b-chat-awq | qwen1.5-14b-chat-awq | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 7500, Output Limit: 7500 |
| @cf/qwen/qwen1.5-1.8b-chat | qwen1.5-1.8b-chat | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 32000, Output Limit: 32000 |
| @cf/mistralai/mistral-small-3.1-24b-instruct | mistral-small-3.1-24b-instruct | 0.35 | 0.56 | Provider: Cloudflare Workers AI, Context: 128000, Output Limit: 128000 |
| @hf/google/gemma-7b-it | gemma-7b-it | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 8192, Output Limit: 8192 |
| @cf/qwen/qwen3-30b-a3b-fp8 | qwen3-30b-a3b-fp8 | 0.05 | 0.34 | Provider: Cloudflare Workers AI, Context: 32768, Output Limit: N/A |
| @hf/thebloke/llamaguard-7b-awq | llamaguard-7b-awq | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 4096, Output Limit: 4096 |
| @hf/nousresearch/hermes-2-pro-mistral-7b | hermes-2-pro-mistral-7b | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 24000, Output Limit: 24000 |
| @cf/ibm-granite/granite-4.0-h-micro | granite-4.0-h-micro | 0.02 | 0.11 | Provider: Cloudflare Workers AI, Context: 131000, Output Limit: N/A |
| @cf/tiiuae/falcon-7b-instruct | falcon-7b-instruct | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 4096, Output Limit: 4096 |
| @cf/meta/llama-3.3-70b-instruct-fp8-fast | llama-3.3-70b-instruct-fp8-fast | 0.29 | 2.25 | Provider: Cloudflare Workers AI, Context: 24000, Output Limit: 24000 |
| @cf/meta/llama-3-8b-instruct-awq | llama-3-8b-instruct-awq | 0.12 | 0.27 | Provider: Cloudflare Workers AI, Context: 8192, Output Limit: 8192 |
| @cf/leonardo/phoenix-1.0 | phoenix-1.0 | 0.01 | 0.01 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/microsoft/phi-2 | phi-2 | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 2048, Output Limit: 2048 |
| @cf/lykon/dreamshaper-8-lcm | dreamshaper-8-lcm | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/thebloke/discolm-german-7b-v1-awq | discolm-german-7b-v1-awq | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 4096, Output Limit: 4096 |
| @cf/meta/llama-2-7b-chat-int8 | llama-2-7b-chat-int8 | 0.56 | 6.67 | Provider: Cloudflare Workers AI, Context: 8192, Output Limit: 8192 |
| @cf/meta/llama-3.2-1b-instruct | llama-3.2-1b-instruct | 0.03 | 0.20 | Provider: Cloudflare Workers AI, Context: 60000, Output Limit: 60000 |
| @cf/openai/whisper-large-v3-turbo | whisper-large-v3-turbo | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/meta/llama-4-scout-17b-16e-instruct | llama-4-scout-17b-16e-instruct | 0.27 | 0.85 | Provider: Cloudflare Workers AI, Context: 131000, Output Limit: 131000 |
| @hf/nexusflow/starling-lm-7b-beta | starling-lm-7b-beta | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 4096, Output Limit: 4096 |
| @hf/thebloke/deepseek-coder-6.7b-base-awq | deepseek-coder-6.7b-base-awq | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 4096, Output Limit: 4096 |
| @cf/google/gemma-3-12b-it | gemma-3-12b-it | 0.35 | 0.56 | Provider: Cloudflare Workers AI, Context: 80000, Output Limit: 80000 |
| @cf/meta/llama-guard-3-8b | llama-guard-3-8b | 0.48 | 0.03 | Provider: Cloudflare Workers AI, Context: 131072, Output Limit: N/A |
| @hf/thebloke/neural-chat-7b-v3-1-awq | neural-chat-7b-v3-1-awq | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 4096, Output Limit: 4096 |
| @cf/openai/whisper-tiny-en | whisper-tiny-en | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/bytedance/stable-diffusion-xl-lightning | stable-diffusion-xl-lightning | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/mistral/mistral-7b-instruct-v0.1 | mistral-7b-instruct-v0.1 | 0.11 | 0.19 | Provider: Cloudflare Workers AI, Context: 2824, Output Limit: 2824 |
| @cf/llava-hf/llava-1.5-7b-hf | llava-1.5-7b-hf | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/openai/gpt-oss-20b | gpt-oss-20b | 0.20 | 0.30 | Provider: Cloudflare Workers AI, Context: 128000, Output Limit: 128000 |
| @cf/deepseek-ai/deepseek-math-7b-instruct | deepseek-math-7b-instruct | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 4096, Output Limit: 4096 |
| @cf/openai/gpt-oss-120b | gpt-oss-120b | 0.35 | 0.75 | Provider: Cloudflare Workers AI, Context: 128000, Output Limit: 128000 |
| @cf/myshell-ai/melotts | melotts | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/qwen/qwen1.5-7b-chat-awq | qwen1.5-7b-chat-awq | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 20000, Output Limit: 20000 |
| @cf/meta/llama-3.1-8b-instruct-fast | llama-3.1-8b-instruct-fast | 0.05 | 0.38 | Provider: Cloudflare Workers AI, Context: 128000, Output Limit: 128000 |
| @cf/deepgram/nova-3 | nova-3 | 0.01 | 0.01 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
| @cf/meta/llama-3.1-70b-instruct | llama-3.1-70b-instruct | 0.29 | 2.25 | Provider: Cloudflare Workers AI, Context: 24000, Output Limit: 24000 |
| @cf/qwen/qwq-32b | qwq-32b | 0.66 | 1.00 | Provider: Cloudflare Workers AI, Context: 24000, Output Limit: 24000 |
| @hf/thebloke/zephyr-7b-beta-awq | zephyr-7b-beta-awq | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 4096, Output Limit: 4096 |
| @hf/thebloke/deepseek-coder-6.7b-instruct-awq | deepseek-coder-6.7b-instruct-awq | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 4096, Output Limit: 4096 |
| @cf/meta/llama-3.1-8b-instruct-awq | llama-3.1-8b-instruct-awq | 0.12 | 0.27 | Provider: Cloudflare Workers AI, Context: 8192, Output Limit: 8192 |
| @cf/mistral/mistral-7b-instruct-v0.2-lora | mistral-7b-instruct-v0.2-lora | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: 15000, Output Limit: 15000 |
| @cf/unum/uform-gen2-qwen-500m | uform-gen2-qwen-500m | 0.00 | 0.00 | Provider: Cloudflare Workers AI, Context: N/A, Output Limit: N/A |
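The model IDs in the first column are the values passed to the Workers AI `run()` call. Below is a minimal sketch, assuming a Cloudflare Worker with an AI binding named `AI` configured in `wrangler.toml`; the `messages` input shape applies to the text-generation models in the table, while the audio and image models (rows with Context: N/A, such as whisper or stable-diffusion-xl-base-1.0) take different inputs. The model choice and prompt here are illustrative only.

```typescript
// Minimal Cloudflare Worker sketch (assumed setup): an `AI` binding declared in
// wrangler.toml and types from @cloudflare/workers-types. The model ID is taken
// directly from the table above; swap in any other text-generation model ID.
export interface Env {
  AI: Ai;
}

export default {
  async fetch(_request: Request, env: Env): Promise<Response> {
    // Run a chat-style inference against one of the listed text models.
    const result = await env.AI.run("@cf/meta/llama-3.1-8b-instruct", {
      messages: [
        { role: "system", content: "You are a concise assistant." },
        { role: "user", content: "Summarize what Workers AI is in one sentence." },
      ],
    });

    // Return the raw inference result as JSON.
    return Response.json(result);
  },
};
```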