"""
Fine-tune swiss-ai/Apertus-8B-Instruct-2509 on three agentic coding / reasoning datasets:
- Roman1111111/claude-opus-4.6-10000x (9.6K — Opus 4.6 reasoning distillation)
- togethercomputer/CoderForge-Preview (15K sample — agentic coding trajectories)
- Crownelius/Opus-4.6-Reasoning-3300x (2.2K — reasoning with thinking traces)

All data is formatted with the native Apertus chat format via the apertus-format library.
Thinking/reasoning traces are preserved as THOUGHTS blocks.
CoderForge tool calls are mapped to Apertus TOOL_CALLS/TOOL_OUTPUTS blocks within a
single merged assistant turn.
"""
|
|
import json
import subprocess
import sys
from pathlib import Path
|
|
| |
# Vendor the apertus-format library (not on PyPI) and make `src` importable.
# Skip the clone when the checkout already exists: `git clone` into an existing
# non-empty directory fails, and check=True would turn that into a crash on
# every re-run of this script.
print("Cloning apertus-format...")
if not Path("/tmp/apertus-format").exists():
    subprocess.run(
        ["git", "clone", "--depth=1", "https://github.com/swiss-ai/apertus-format.git", "/tmp/apertus-format"],
        check=True,
    )
sys.path.insert(0, "/tmp/apertus-format")
|
|
| from datasets import load_dataset, concatenate_datasets |
| from peft import LoraConfig |
| from src import ( |
| Message, Conversation, ApertusFormatter, |
| AssistantBlock, ToolCall, ToolOutput, BlockType, |
| ) |
| from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig |
| from trl import SFTConfig, SFTTrainer |
|
|
MODEL_ID = "swiss-ai/Apertus-8B-Instruct-2509"  # base model to fine-tune
OUTPUT_REPO = "Colby/apertus-8b-coding"  # Hub repo the LoRA adapter is pushed to
CODERFORGE_SAMPLE = 15_000  # number of CoderForge trajectories to sample


# Shared formatter instance used by all three format_* functions below;
# enable_thinking=True renders THOUGHTS blocks into the training text.
formatter = ApertusFormatter(enable_thinking=True)
|
|
|
|
def format_roman(example):
    """Opus 4.6 reasoning dataset: messages list with optional reasoning field."""
    converted = []
    for turn in example["messages"]:
        role = turn.get("role", "")
        text = turn.get("content", "") or ""
        if role == "system":
            converted.append(Message.system(text))
        elif role == "user":
            converted.append(Message.user(text))
        elif role == "assistant":
            # Assistant turns may carry a reasoning trace -> THOUGHTS block
            # ahead of the visible RESPONSE block.
            trace = turn.get("reasoning", "") or ""
            parts = []
            if trace.strip():
                parts.append(AssistantBlock(type=BlockType.THOUGHTS, text=trace))
            parts.append(AssistantBlock(type=BlockType.RESPONSE, text=text))
            converted.append(Message.assistant_with_blocks(parts))
    try:
        return {"text": formatter.format_conversation(Conversation(messages=converted))}
    except Exception:
        # Best-effort: rows the formatter rejects are dropped by a later filter.
        return {"text": None}
|
|
|
|
def format_coderforge(example):
    """
    CoderForge agentic trajectories: ``messages`` is a JSON string in the
    OpenHands format.  All assistant and tool turns are merged into one
    Apertus assistant message.

    Block mapping:
        assistant with tool_calls  -> THOUGHTS (explanation) + TOOL_CALLS (action)
        tool result                -> TOOL_OUTPUTS
        final assistant w/o calls  -> RESPONSE
    This ordering is valid in the Apertus format (RESPONSE may not precede
    TOOL_OUTPUTS).
    """

    def _as_text(value):
        # OpenHands content is either a plain string or a list of part dicts
        # with a "text" key; flatten either shape to a stripped string.
        if isinstance(value, list):
            value = " ".join(
                part.get("text", "") for part in value if isinstance(part, dict)
            )
        return str(value).strip()

    try:
        turns = json.loads(example["messages"])
    except (json.JSONDecodeError, TypeError):
        return {"text": None}

    preamble_system = []
    preamble_user = []
    blocks = []
    in_agentic_phase = False  # flips once the first assistant/tool turn is seen

    for turn in turns:
        role = turn.get("role", "")
        text = _as_text(turn.get("content") or "")

        if role == "system":
            preamble_system.append(Message.system(text))
        elif role == "user" and not in_agentic_phase:
            # Only user turns before the agentic phase are kept as context.
            preamble_user.append(Message.user(text))
        elif role == "assistant":
            in_agentic_phase = True
            raw_calls = turn.get("tool_calls") or []
            if raw_calls:
                if text:
                    blocks.append(AssistantBlock(type=BlockType.THOUGHTS, text=text))
                parsed = []
                for call in raw_calls:
                    if "function" not in call:
                        continue
                    fn = call["function"]
                    parsed.append(
                        ToolCall(name=fn["name"], arguments=fn.get("arguments", "{}"))
                    )
                if parsed:
                    blocks.append(AssistantBlock(type=BlockType.TOOL_CALLS, calls=parsed))
            elif text:
                # Assistant turn with no calls: the final answer.
                blocks.append(AssistantBlock(type=BlockType.RESPONSE, text=text))
        elif role == "tool":
            in_agentic_phase = True
            if text:
                blocks.append(
                    AssistantBlock(
                        type=BlockType.TOOL_OUTPUTS,
                        outputs=[ToolOutput(output=text)],
                    )
                )

    if not blocks:
        return {"text": None}

    merged = preamble_system + preamble_user + [Message.assistant_with_blocks(blocks)]
    try:
        return {"text": formatter.format_conversation(Conversation(messages=merged))}
    except Exception:
        return {"text": None}
|
|
|
|
def format_crownelius(example):
    """Opus 4.6 reasoning dataset: flat problem/thinking/solution columns."""

    def _clean(column):
        # Columns may be missing or None; normalize to a stripped string.
        return (example.get(column) or "").strip()

    question = _clean("problem")
    trace = _clean("thinking")
    answer = _clean("solution")
    if not (question and answer):
        return {"text": None}

    assistant_blocks = []
    if trace:
        assistant_blocks.append(AssistantBlock(type=BlockType.THOUGHTS, text=trace))
    assistant_blocks.append(AssistantBlock(type=BlockType.RESPONSE, text=answer))

    conversation = Conversation(
        messages=[
            Message.user(question),
            Message.assistant_with_blocks(assistant_blocks),
        ]
    )
    try:
        return {"text": formatter.format_conversation(conversation)}
    except Exception:
        return {"text": None}
|
|
|
|
print("Loading datasets...")
ds_roman = load_dataset("Roman1111111/claude-opus-4.6-10000x", split="train")
# Shuffle before sampling so the subset is representative, and cap the sample
# at the split's actual size: select(range(n)) raises IndexError when the
# split has fewer than n rows (e.g. a smaller dataset release).
ds_coderforge = load_dataset(
    "togethercomputer/CoderForge-Preview",
    name="trajectories",
    split="filtered_reward1",
).shuffle(seed=42)
ds_coderforge = ds_coderforge.select(
    range(min(CODERFORGE_SAMPLE, len(ds_coderforge)))
)
ds_crownelius = load_dataset("Crownelius/Opus-4.6-Reasoning-3300x", split="train")
|
|
print("Mapping to Apertus format...")


def _render(dataset, fn):
    # Replace all original columns with the single rendered "text" column.
    return dataset.map(fn, remove_columns=dataset.column_names)


def _keep_valid(dataset):
    # Drop rows the formatter rejected (format_* returned text=None).
    return dataset.filter(lambda row: row["text"] is not None)


ds_roman = _render(ds_roman, format_roman)
ds_coderforge = _render(ds_coderforge, format_coderforge)
ds_crownelius = _render(ds_crownelius, format_crownelius)

ds_roman = _keep_valid(ds_roman)
ds_coderforge = _keep_valid(ds_coderforge)
ds_crownelius = _keep_valid(ds_crownelius)

print(f" Roman: {len(ds_roman)}")
print(f" CoderForge: {len(ds_coderforge)}")
print(f" Crownelius: {len(ds_crownelius)}")
|
|
# Interleave the three sources, then carve out a 5% held-out eval split.
combined = concatenate_datasets([ds_roman, ds_coderforge, ds_crownelius]).shuffle(seed=42)
split = combined.train_test_split(test_size=0.05, seed=42)
train_dataset, eval_dataset = split["train"], split["test"]
print(f"Total — Train: {len(train_dataset)} Eval: {len(eval_dataset)}")
|
|
# LoRA adapter: rank 16, alpha 32 (2x scaling), light dropout, applied to
# every linear layer of the causal-LM.
peft_config = LoraConfig(
    task_type="CAUSAL_LM",
    target_modules="all-linear",
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
)
|
|
# SFT hyperparameters. Effective batch size is 16 per device (1 x 16 grad
# accumulation); checkpoints are evaluated and pushed to the Hub every 100 steps.
config = SFTConfig(
    output_dir="apertus-8b-coding",
    push_to_hub=True,
    hub_model_id=OUTPUT_REPO,
    hub_strategy="every_save",  # upload each saved checkpoint, not only the final one

    # Data: train on the pre-rendered Apertus chat text column.
    dataset_text_field="text",
    max_length=2048,  # token truncation length

    # Optimization.
    num_train_epochs=2,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    gradient_accumulation_steps=16,
    learning_rate=2e-4,
    lr_scheduler_type="cosine",
    warmup_steps=100,
    bf16=True,
    gradient_checkpointing=True,  # trade recompute for activation memory

    # Logging / checkpointing cadence.
    logging_steps=10,
    save_strategy="steps",
    save_steps=100,
    save_total_limit=2,  # keep only the two most recent checkpoints
    eval_strategy="steps",
    eval_steps=100,

    report_to="none",  # disable wandb/tensorboard reporting
)
|
|
# QLoRA quantization: 4-bit NF4 weights with double quantization and
# bfloat16 compute.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype="bfloat16",
)

print("Loading model and tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    device_map="auto",
    quantization_config=bnb_config,
)
|
|
# Build the trainer (SFTTrainer wraps the quantized base model with the LoRA
# adapter via peft_config), run training, then publish the result.
print("Initializing trainer...")
trainer = SFTTrainer(
    args=config,
    model=model,
    processing_class=tokenizer,
    peft_config=peft_config,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
)

print("Starting training...")
trainer.train()

print("Pushing to Hub...")
trainer.push_to_hub()

print(f"Done! Model at: https://huggingface.co/{OUTPUT_REPO}")
|
|