---
license: apache-2.0
datasets:
- hackaprompt/hackaprompt-dataset
- hackercupai/hackercup
- OpenPipe/hacker-news
- nixiesearch/hackernews-stories
- ebowwa/cognitive-hacking
- Aratako/Synthetic-JP-EN-Coding-Dataset-801k
- kanhatakeyama/wizardlm8x22b-logical-math-coding-sft
- kanhatakeyama/wizardlm8x22b-logical-math-coding-sft_additional-ja
- jtatman/stable-diffusion-prompts-stats-full-uncensored
- Locutusque/sharegpt_v3_uncensored_cleaned
- cognitivecomputations/dolphin
language:
- en
- fr
- ar
- zh
- hi
metrics:
- code_eval
base_model:
- Qwen/Qwen2.5-Coder-32B-Instruct
- Qwen/QwQ-32B-Preview
- Sovenok-Hacker/gemma-2-2b-it-Q4_0-GGUF
- cognitivecomputations/dolphin-2.9.2-qwen2-72b
- cognitivecomputations/dolphin-2.9.4-llama3.1-8b-gguf
- cognitivecomputations/dolphin-2.9.4-llama3.1-8b
new_version: cognitivecomputations/dolphin-2.9.2-qwen2-72b
library_name: adapter-transformers
tags:
- code
---
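
As a quick-start sketch (assuming the listed base models load with the standard `transformers` API; the repo id, prompt, and generation settings below are illustrative assumptions, not part of this card):

```python
# Minimal loading sketch. Assumption: the weights are compatible with the
# standard transformers AutoModel API; the repo id is one of the base_model
# entries above, chosen only for illustration.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Qwen/Qwen2.5-Coder-32B-Instruct"  # illustrative choice from base_model

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

prompt = "Write a Python function that checks whether a string is a palindrome."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```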