#!/usr/bin/env python3
"""satwork autonomous worker — earn Bitcoin by improving optimization targets.

Download and run. No dependencies beyond Python 3.10+ stdlib.
For described/signature targets, set XAI_API_KEY env var ($0.002/proposal).

Supports blind (free) and described (Grok LLM, $0.002/proposal) targets.

Quick start:
    python3 worker.py                                    # Blind grinding ($0)
    XAI_API_KEY=xai-... python3 worker.py --mode hybrid  # + Grok for described
    python3 worker.py --tier-filter described --kg       # + KG purchases
    python3 worker.py --max-proposals 100                # Limit run length

Source: https://satwork.ai | Docs: https://satwork.ai/llms.txt
"""

import argparse
import json
import math
import os
import random
import secrets
import sys
import time
import urllib.error
import urllib.request
from dataclasses import dataclass, field
from datetime import datetime


# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------

@dataclass
class WorkerConfig:
    """Runtime settings for one worker process.

    Populated by main() from CLI flags and environment variables; read-only
    during the run except for the internal KG purchase cache that
    generate_described_proposal attaches dynamically.
    """
    coordinator: str = "https://satwork.ai"  # coordinator base URL (no trailing slash)
    agent_key: str = ""            # identifies/authenticates this agent with the coordinator
    mode: str = "blind"            # blind | hybrid
    xai_key: str = ""              # xAI API key ($0.002/proposal for described)
    xai_model: str = "grok-3-mini" # chat model used for described/signature tiers
    max_proposals: int = 500       # stop after this many submissions
    max_spend: int = 1000          # max sats to spend
    abandon_threshold: int = 80    # proposals before abandoning a target
    min_hit_rate: float = 0.03     # abandon if hit rate drops below this
    tier_filter: str = ""          # restrict to: blind, described, signature
    target_ids: list = field(default_factory=list)  # only work these target ids (empty = all)
    kg_enabled: bool = False       # enable KG purchases for warm-start
    log_file: str = ""             # JSONL log file


# ---------------------------------------------------------------------------
# HTTP helpers
# ---------------------------------------------------------------------------

def api_get(base, path, timeout=30):
    """GET ``{base}/api{path}`` from the coordinator.

    Args:
        base: Coordinator base URL, no trailing slash.
        path: API path starting with "/".
        timeout: Socket timeout in seconds.

    Returns:
        Parsed JSON on success, or an error dict ({"error": <HTTP status>,
        "detail": ...} for HTTP errors, {"error": <message>} otherwise).
        Never raises.
    """
    try:
        # Context manager closes the HTTP response even if json.loads
        # raises; without it the connection leaks until GC.
        with urllib.request.urlopen(f"{base}/api{path}", timeout=timeout) as resp:
            return json.loads(resp.read())
    except urllib.error.HTTPError as e:
        return {"error": e.code, "detail": e.read().decode()[:200]}
    except Exception as e:
        return {"error": str(e)}


def api_post(base, path, body, timeout=150):
    """POST *body* as JSON to ``{base}/api{path}`` on the coordinator.

    Args:
        base: Coordinator base URL, no trailing slash.
        path: API path starting with "/".
        body: JSON-serializable request payload.
        timeout: Socket timeout in seconds (long default — proposal
            evaluation can be slow).

    Returns:
        Parsed JSON on success, or an error dict ({"error": <HTTP status>,
        "detail": ...} for HTTP errors, {"error": <message>} otherwise).
        Never raises.
    """
    data = json.dumps(body).encode()
    req = urllib.request.Request(
        f"{base}/api{path}", data=data, method="POST",
        headers={"Content-Type": "application/json"},
    )
    try:
        # Context manager closes the HTTP response even if json.loads
        # raises; without it the connection leaks until GC.
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            return json.loads(resp.read())
    except urllib.error.HTTPError as e:
        return {"error": e.code, "detail": e.read().decode()[:200]}
    except Exception as e:
        return {"error": str(e)}


# ---------------------------------------------------------------------------
# Target selection — EV scoring with virgin bonus
# ---------------------------------------------------------------------------

@dataclass
class TargetState:
    """Per-target bookkeeping accumulated locally during one worker run."""
    target_id: str                 # coordinator target id
    tier: str                      # privacy tier: blind | described | signature
    proposals: int = 0             # proposals submitted to this target so far
    improvements: int = 0          # proposals that improved the score ("hits")
    best_score: float = 0          # best score observed locally (0 = none yet)
    last_gap: float = 1.0          # gap reported by the most recent submission
    consecutive_failures: int = 0  # misses since the last hit (drives abandon logic)
    abandoned: bool = False        # permanently skipped for the rest of this run
    history: list = field(default_factory=list)  # per-proposal dicts (score/gap/params/eval_detail)


def select_target(targets, states, config):
    """Pick the most promising target via expected-value (EV) scoring.

    EV = hit_rate * effective_reward - cost_per_proposal, boosted 5x for
    untouched ("virgin") targets and 2x for targets with fewer than 10
    proposals network-wide. To avoid every worker herding on the same #1
    target, the final pick is a weighted random draw from the top 3.

    Candidates are filtered out when they: fail the mode/tier/target-id
    filters; are stale; are saturated (baseline >= 0.99 on a maximize
    metric); were locally abandoned; reached config.abandon_threshold
    local proposals; or (after 20+ local proposals) show a local hit
    rate below config.min_hit_rate.

    Args:
        targets: Target dicts from /propose/targets.
        states: Mapping of target_id -> TargetState (local bookkeeping).
        config: WorkerConfig with filters and abandon thresholds.

    Returns:
        The chosen target dict, or None when nothing is viable.
    """
    candidates = []
    for t in targets:
        tid = t["id"]
        tier = t["privacy_tier"]

        # Mode filter: blind mode can only do blind targets
        if config.mode == "blind" and tier != "blind":
            continue

        # Tier filter
        if config.tier_filter and tier != config.tier_filter:
            continue

        # Target ID filter
        if config.target_ids and tid not in config.target_ids:
            continue

        # Skip stale targets
        if t.get("stale"):
            continue

        # Skip saturated targets (baseline already near-perfect)
        baseline = t.get("baseline", t.get("baseline_score", 0))
        if baseline >= 0.99 and t.get("metric_direction") == "maximize":
            continue

        state = states.get(tid)
        if state and state.abandoned:
            continue

        # EV = hit_rate * reward - cost
        hit_rate = t.get("hit_rate", 0.1)
        if hit_rate == 0:
            hit_rate = 0.5  # virgin target — assume good odds
        eff_reward = t.get("effective_reward", t.get("reward_per_improvement", 100))
        cost = t["cost_per_proposal"]
        ev = hit_rate * eff_reward - cost

        # Abandon checks based on local state
        if state:
            if state.proposals >= config.abandon_threshold:
                continue
            if state.proposals > 20:
                # Bug fix: this check previously also required
                # improvements > 0, which exempted the worst case — a
                # target with 20+ proposals and ZERO improvements (local
                # hit rate 0.0, well below min_hit_rate) was never skipped.
                recent_rate = state.improvements / state.proposals
                if recent_rate < config.min_hit_rate:
                    continue

        # Virgin bonus: first-mover advantage is massive
        total_props = t.get("total_proposals", 0)
        if total_props == 0:
            ev *= 5.0
        elif total_props < 10:
            ev *= 2.0

        candidates.append((ev, t))

    if not candidates:
        return None

    candidates.sort(key=lambda x: -x[0])

    # Weighted random draw from the top 3 (avoid herding on #1)
    top = candidates[:min(3, len(candidates))]
    weights = [max(ev, 0.01) for ev, _ in top]  # floor keeps negative EVs drawable
    total = sum(weights)
    r = random.random() * total
    cumulative = 0
    for weight, (_, tgt) in zip(weights, top):
        cumulative += weight
        if r <= cumulative:
            return tgt
    return top[0][1]  # float-rounding fallback


# ---------------------------------------------------------------------------
# Blind proposal generation (no LLM, $0)
# ---------------------------------------------------------------------------

def random_proposal(spec):
    """Draw one uniformly random parameter vector within the spec bounds.

    Each entry of *spec* is a dict with "min"/"max" bounds and an optional
    "type" key; "int" entries get integer samples, all others floats.
    """
    return [
        random.randint(int(p["min"]), int(p["max"]))
        if p.get("type") == "int"
        else random.uniform(p["min"], p["max"])
        for p in spec
    ]


def evolutionary_proposal(spec, history, mutation_rate=0.2):
    """Produce a child parameter vector by gaussian-perturbing a good parent.

    A parent is drawn at random from the three highest-scoring history
    entries. Each gene mutates with probability *mutation_rate* via a
    gaussian step (sigma = 15% of the parameter range), is clamped to
    bounds, and int-typed genes are rounded. Falls back to a pure random
    draw when history is too short or the parent's params don't line up
    with the spec.
    """
    if len(history) < 3:
        return random_proposal(spec)

    ranked = sorted(history, key=lambda h: h.get("score") or 0, reverse=True)
    parent_genes = random.choice(ranked[:3]).get("params", [])
    if not parent_genes or len(parent_genes) != len(spec):
        return random_proposal(spec)

    child = []
    for gene, p in zip(parent_genes, spec):
        low, high = p["min"], p["max"]
        if random.random() < mutation_rate:
            gene = gene + random.gauss(0, (high - low) * 0.15)
        gene = min(high, max(low, gene))
        if p.get("type") == "int":
            gene = int(round(gene))
        child.append(gene)
    return child


def coordinate_descent_proposal(spec, history):
    """Tweak exactly one parameter of the best-known vector.

    The parameter index cycles with the history length, so successive
    calls sweep different coordinates. The step is ~1/10 of the range,
    scaled by a random factor in [0.5, 2.0], in a random direction, then
    clamped to bounds. Falls back to random sampling when history is too
    short or the best entry's params don't match the spec.
    """
    if len(history) < 3:
        return random_proposal(spec)

    best_entry = max(history, key=lambda h: h.get("score") or 0)
    incumbent = best_entry.get("params", [])
    if not incumbent or len(incumbent) != len(spec):
        return random_proposal(spec)

    idx = len(history) % len(spec)
    p = spec[idx]
    low, high = p["min"], p["max"]

    candidate = list(incumbent)
    sign = 1 if random.random() > 0.5 else -1
    tweaked = candidate[idx] + sign * ((high - low) / 10) * random.uniform(0.5, 2.0)
    tweaked = min(high, max(low, tweaked))
    if p.get("type") == "int":
        tweaked = int(round(tweaked))
    candidate[idx] = tweaked
    return candidate


def generate_blind_proposal(spec, history):
    """Choose a search strategy by phase: explore, exploit, then mix.

    Fewer than 5 attempts: pure random exploration. 5-19 attempts:
    evolutionary mutation of the best known params. 20+: every third
    attempt runs a coordinate-descent sweep; the rest continue
    evolutionary search with a slightly lower mutation rate.
    """
    attempts = len(history)
    if attempts < 5:
        return random_proposal(spec)
    if attempts < 20:
        return evolutionary_proposal(spec, history)
    if attempts % 3 == 0:
        return coordinate_descent_proposal(spec, history)
    return evolutionary_proposal(spec, history, mutation_rate=0.15)


# ---------------------------------------------------------------------------
# LLM integration (xAI/Grok only — $0.002/proposal)
# ---------------------------------------------------------------------------

def llm_xai(api_key, prompt, model="grok-3-mini", system=None):
    """Call the xAI chat-completions API (OpenAI-compatible).

    Args:
        api_key: xAI API key, sent as a Bearer token.
        prompt: User message content.
        model: Model name (default "grok-3-mini").
        system: Optional system message prepended to the conversation.

    Returns:
        The assistant's reply text, or None on any failure (network,
        auth, malformed response). Callers treat None as "LLM failed".
    """
    messages = []
    if system:
        messages.append({"role": "system", "content": system})
    messages.append({"role": "user", "content": prompt})

    body = {
        "model": model,
        "messages": messages,
        "temperature": 0.7,
        "max_tokens": 2000,
    }
    data = json.dumps(body).encode()
    req = urllib.request.Request(
        "https://api.x.ai/v1/chat/completions", data=data, method="POST",
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
            "User-Agent": "satwork-worker/1.0",
        },
    )
    try:
        # Close the HTTP response deterministically; the original left it
        # to the GC, leaking the connection when parsing raised.
        with urllib.request.urlopen(req, timeout=60) as resp:
            result = json.loads(resp.read())
        return result["choices"][0]["message"]["content"]
    except Exception:
        # Deliberate best-effort: any failure is reported as None so the
        # worker can retry with a fresh proposal.
        return None


def build_system_prompt():
    """Return the fixed system message sent with every LLM proposal call."""
    directives = (
        "You are an optimization agent.",
        "You modify configuration files to maximize a scoring metric.",
        "Output only valid JSON in the exact format requested.",
        "No explanations, no markdown fences, no commentary.",
    )
    return " ".join(directives)


def build_proposal_prompt(target, spec_data, history, context=None, kg_solution=None):
    """Build a structured prompt: identity, rubric, baseline, history, strategy, format.

    Sections are appended in a fixed order (identity/metric/baseline,
    description, file contents, signatures, recent attempts, purchased
    prior art, strategy hint, output format) and joined with newlines.

    Args:
        target: Target dict from the coordinator; reads name, metric_name,
            metric_direction, baseline/baseline_score, mutable_files.
        spec_data: Spec dict; reads description, file_contents, signatures,
            mutable_files.
        history: Local per-proposal history; the last 5 entries and the
            latest eval_detail are summarized.
        context: Optional /propose/<id>/context response; reads best_score
            and note.
        kg_solution: Optional purchased KG solution; its files are quoted
            (truncated to 500 chars each) as prior art.

    Returns:
        The full prompt string.
    """
    description = spec_data.get("description", "")
    file_contents = spec_data.get("file_contents", {})
    signatures = spec_data.get("signatures", [])
    mutable_files = spec_data.get("mutable_files") or target.get("mutable_files") or []
    metric = target.get("metric_name", "score")
    direction = target.get("metric_direction", "maximize")
    baseline = target.get("baseline", target.get("baseline_score", 0))

    parts = []

    # 1. Target identity + scoring rubric
    parts.append(f"TARGET: {target['name']}")
    parts.append(f"METRIC: {metric} (goal: {direction})")
    parts.append(f"BASELINE TO BEAT: {baseline:.4f}")
    if context:
        best = context.get("best_score")
        if best is not None:
            parts.append(f"CURRENT BEST ACROSS ALL WORKERS: {best:.4f}")
        note = context.get("note")
        if note:
            parts.append(f"WARNING: {note}")
    parts.append("")

    # 2. Description
    if description:
        parts.append(f"DESCRIPTION:\n{description}")
        parts.append("")

    # 3. File contents
    if file_contents:
        parts.append("CURRENT FILE CONTENTS:")
        parts.append(json.dumps(file_contents, indent=2))
        parts.append("")

    # 4. Signatures (for signature tier)
    if signatures:
        parts.append("FUNCTION SIGNATURES:")
        for sig in signatures:
            parts.append(f"  {sig.get('signature', '')}: {sig.get('description', '')}")
        parts.append("")

    # 5. History summary (last 5 attempts)
    if history:
        parts.append("RECENT ATTEMPTS:")
        for h in history[-5:]:
            score = h.get("score")
            status = "HIT" if h.get("improved") else "miss"
            score_str = f"{score:.4f}" if score is not None else "err"
            parts.append(f"  score={score_str} ({status})")
        # Only the latest attempt's feedback is forwarded — older detail
        # is dropped to keep the prompt small.
        last_detail = history[-1].get("eval_detail", "")
        if last_detail:
            parts.append(f"\nLATEST EVAL FEEDBACK:\n{last_detail}")
        parts.append("")
    else:
        parts.append("This is your first attempt on this target.")
        parts.append("")

    # 6. KG prior art (if purchased)
    if kg_solution:
        files = kg_solution.get("files", {})
        if files:
            parts.append("PRIOR ART (purchased from knowledge graph):")
            for fname_kg, content in files.items():
                parts.append(f"  {fname_kg}: {content[:500]}")
            parts.append("Use this as inspiration. Build on what worked, fix what didn't.")
            parts.append("")

    # 7. Strategy hint
    parts.append(
        "STRATEGY: Adjust values incrementally from the current config. "
        "Focus on the weakest component identified in the eval feedback. "
        "Do not rewrite everything — targeted changes are more likely to improve the score."
    )
    parts.append("")

    # 8. Output format with concrete example
    fname = mutable_files[0] if mutable_files else "config.json"
    parts.append(
        f'OUTPUT FORMAT: Respond with ONLY valid JSON:\n'
        f'{{"{ fname }": "{{ ... file contents as JSON string ... }}"}}\n\n'
        f'Example:\n'
        f'{{"{ fname }": "{{\\\"key\\\": \\\"value\\\"}}"}}'
    )

    return "\n".join(parts)


def parse_files_from_response(response, mutable_files=None):
    """Extract a {filename: content-string} dict from an LLM reply.

    Accepts three reply shapes:
      1. {"<filename>": "<stringified content>"}   — the requested wrapper
      2. {"<filename>": {...nested object...}}     — values get stringified
      3. raw JSON content with no wrapper          — wrapped under the
         single known mutable filename

    Also tolerates markdown code fences around the JSON and a
    {"filename": ..., "contents": ...} envelope. Returns None when no
    usable JSON object can be recovered.
    """
    if not response:
        return None

    # Strip markdown code fences before locating the JSON object.
    text = response.strip()
    if text.startswith("```"):
        kept = [ln for ln in text.split("\n") if not ln.strip().startswith("```")]
        text = "\n".join(kept).strip()

    start, end = text.find("{"), text.rfind("}") + 1
    if start < 0 or end <= start:
        return None
    try:
        parsed = json.loads(text[start:end])
    except json.JSONDecodeError:
        return None
    if not isinstance(parsed, dict):
        return None

    def _as_text(value):
        # File contents must be strings; stringify nested JSON objects.
        return value if isinstance(value, str) else json.dumps(value, indent=2)

    # Envelope form: {"filename": ..., "contents": ...}
    if "filename" in parsed and "contents" in parsed:
        return {parsed["filename"]: _as_text(parsed["contents"])}

    if mutable_files and len(mutable_files) == 1:
        target_name = mutable_files[0]
        if target_name in parsed:
            return {target_name: _as_text(parsed[target_name])}
        if not any(key.endswith(('.json', '.py', '.txt', '.yaml'))
                   for key in parsed):
            # No filename-ish keys: treat as raw content and wrap it.
            return {target_name: json.dumps(parsed, indent=2)}

    # Multi-file or auto-detect: stringify non-string values.
    files = {key: _as_text(value) for key, value in parsed.items()}
    return files or None


# ---------------------------------------------------------------------------
# Knowledge graph — buy prior art for warm-start
# ---------------------------------------------------------------------------

def kg_search_and_buy(config, target_id):
    """Search the KG for prior art on *target_id* and obtain the best node.

    Picks the highest-scoring "active" node, tries a free authenticated
    lookup first, and falls back to a paid purchase when the lookup
    returns an HTTP error.

    Args:
        config: WorkerConfig providing coordinator URL and agent_key.
        target_id: Coordinator target id to find prior art for.

    Returns:
        The solution_data (JSON-decoded to a dict when the API returned a
        string), or None when there are no active nodes or every
        acquisition path fails.
    """
    nodes_resp = api_get(config.coordinator, f"/kg/nodes?target_id={target_id}")
    if "error" in nodes_resp:
        return None

    nodes = nodes_resp.get("nodes", [])
    active = [n for n in nodes if n.get("status") == "active"]
    if not active:
        return None

    best = max(active, key=lambda n: n.get("score", 0))
    node_id = best.get("id")
    if not node_id:
        return None

    # Try free lookup first
    req = urllib.request.Request(
        f"{config.coordinator}/api/kg/nodes/{node_id}/solution",
        headers={"X-Agent-Key": config.agent_key},
    )
    result = {}
    try:
        # Context manager closes the HTTP response even if json.loads
        # raises; the original leaked the connection here.
        with urllib.request.urlopen(req, timeout=30) as resp:
            result = json.loads(resp.read())
        if result.get("solution_data"):
            print(f"  KG: free lookup {node_id[:12]} (score={best.get('score', '?')})")
    except urllib.error.HTTPError as e:
        print(f"  KG: free lookup failed ({e.code}), trying purchase...")
        purchase_body = {"agent_key": config.agent_key}
        result = api_post(config.coordinator, f"/kg/nodes/{node_id}/purchase",
                          purchase_body)
        if "error" in result:
            detail = result.get("detail", result.get("error", ""))
            print(f"  KG: purchase failed: {str(detail)[:80]}")
            return None
    except Exception as e:
        print(f"  KG: lookup error: {e}")
        return None

    solution = result.get("solution_data")
    if solution and isinstance(solution, str):
        try:
            solution = json.loads(solution)
        except json.JSONDecodeError:
            # Keep the raw string — it is only quoted into the LLM prompt.
            pass
    if solution:
        price = result.get("price_sats", 0)
        print(f"  KG: bought {node_id[:12]} for {price} sats "
              f"(score={best.get('score', '?')})")
        return solution
    return None


# ---------------------------------------------------------------------------
# Described/signature proposal generation (LLM-powered)
# ---------------------------------------------------------------------------

def generate_described_proposal(target, spec_data, history, config):
    """Build a file-based proposal for a described/signature target via Grok.

    Fetches scoring context, optionally warm-starts from a purchased KG
    solution (at most once per target per run), prompts the LLM, and
    parses the reply into a {filename: content} dict. Returns None when
    the LLM call or reply parsing fails.
    """
    tid = target.get("id", "")

    # Scoring-awareness context (best score so far, warnings); optional.
    ctx_resp = api_get(config.coordinator, f"/propose/{tid}/context")
    context = None if "error" in ctx_resp else ctx_resp

    # KG warm-start: buy prior art the first time we touch each target.
    kg_solution = None
    if config.kg_enabled:
        purchased = getattr(config, "_kg_purchased", None)
        if purchased is None:
            purchased = set()
            config._kg_purchased = purchased
        if tid not in purchased:
            purchased.add(tid)
            kg_solution = kg_search_and_buy(config, tid)

    prompt = build_proposal_prompt(
        target, spec_data, history, context=context, kg_solution=kg_solution
    )
    reply = llm_xai(config.xai_key, prompt, config.xai_model, build_system_prompt())
    allowed = spec_data.get("mutable_files") or target.get("mutable_files")
    return parse_files_from_response(reply, mutable_files=allowed)


# ---------------------------------------------------------------------------
# Logging
# ---------------------------------------------------------------------------

def log_proposal(log_file, entry):
    """Append one proposal result to the JSONL log.

    Args:
        log_file: Path to the JSONL file; falsy disables logging (no-op).
        entry: JSON-serializable dict describing one proposal outcome.

    Opens with an explicit UTF-8 encoding so the log's byte content does
    not depend on the platform's locale (the original used the default
    encoding, which varies on Windows).
    """
    if not log_file:
        return
    with open(log_file, "a", encoding="utf-8") as f:
        f.write(json.dumps(entry) + "\n")


# ---------------------------------------------------------------------------
# Main worker loop
# ---------------------------------------------------------------------------

def run_worker(config):
    """Main worker loop with smart 429 handling and abandon logic.

    Repeats until config.max_proposals submissions have been made or
    config.max_spend sats have been spent:
      1. fetch the live target list from the coordinator
      2. pick one target via EV scoring (select_target)
      3. generate a proposal — evolutionary search for blind targets,
         Grok (generate_described_proposal) for described/signature
      4. submit it, account cost/reward, and append per-target history
      5. abandon targets that hit the proposal limit or show
         diminishing returns

    Prints a heartbeat line every 60s and a per-target summary on exit.
    Coordinator errors are handled inline (retry / backoff / abandon);
    this function does not raise on API failures.
    """
    print(f"satwork worker starting")
    print(f"  coordinator: {config.coordinator}")
    print(f"  agent_key: {config.agent_key[:20]}...")
    print(f"  mode: {config.mode}")
    print(f"  max_proposals: {config.max_proposals}")
    if config.xai_key:
        print(f"  LLM: xAI ({config.xai_model})")
    else:
        print(f"  LLM: none (blind mode only)")
    print()

    states = {}           # target_id -> TargetState
    total_proposals = 0   # submissions attempted (including errored ones)
    total_earned = 0      # sats earned from improvements
    total_spent = 0       # sats spent on proposal costs
    rate_limit_backoff = 0  # exponent for 429 exponential backoff
    last_heartbeat = time.time()

    while total_proposals < config.max_proposals and total_spent < config.max_spend:
        # Heartbeat every 60s
        now = time.time()
        if now - last_heartbeat > 60:
            active = len([s for s in states.values() if not s.abandoned])
            print(f"  [heartbeat] alive p={total_proposals} "
                  f"earned={total_earned} spent={total_spent} "
                  f"targets={active}")
            last_heartbeat = now

        # Fetch targets (re-fetched every iteration so hit rates and
        # budgets reported by the coordinator stay fresh)
        targets_resp = api_get(config.coordinator, "/propose/targets")
        if "error" in targets_resp:
            print(f"  Error fetching targets: {targets_resp}")
            time.sleep(10)
            continue

        targets = targets_resp.get("targets", [])
        if not targets:
            print("  No targets available. Waiting...")
            time.sleep(30)
            continue

        # Select target
        target = select_target(targets, states, config)
        if not target:
            print("  No viable targets remaining. Stopping.")
            break

        tid = target["id"]
        tier = target["privacy_tier"]

        # Initialize state
        if tid not in states:
            states[tid] = TargetState(target_id=tid, tier=tier)
        state = states[tid]

        # Get spec
        spec_data = api_get(config.coordinator, f"/propose/{tid}/spec")
        if "error" in spec_data:
            print(f"  Error getting spec for {tid}: {spec_data}")
            state.abandoned = True  # spec unavailable — drop for this run
            continue

        # Generate proposal
        body = {"agent_key": config.agent_key}

        if tier == "blind":
            params_spec = spec_data.get("parameters", [])
            if not params_spec:
                state.abandoned = True
                continue
            params = generate_blind_proposal(params_spec, state.history)
            body["type"] = "params"
            body["params"] = params
        elif tier in ("described", "signature") and config.xai_key:
            files = generate_described_proposal(target, spec_data,
                                                state.history, config)
            if not files:
                # LLM call or parse failed; abandon after 8 in a row
                state.consecutive_failures += 1
                if state.consecutive_failures >= 8:
                    state.abandoned = True
                    print(f"  Abandoning {tid} (LLM failed 8 times)")
                else:
                    print(f"  LLM failed for {tid} "
                          f"({state.consecutive_failures}/8)")
                continue
            body["type"] = "file"
            body["files"] = files
        else:
            # Can't handle this tier without LLM
            state.abandoned = True
            continue

        # Submit proposal
        result = api_post(config.coordinator, f"/propose/{tid}", body)
        total_proposals += 1
        state.proposals += 1

        if "error" in result:
            err_code = result.get("error")
            detail = str(result.get("detail", "")).lower()
            print(f"  [{total_proposals}] {tid}: ERROR {err_code} "
                  f"{detail[:60]}")

            # Smart 429 handling: the detail text distinguishes permanent
            # (budget) from transient (pacing / rate limit) throttling
            if err_code == 429:
                if "budget" in detail or "exhausted" in detail:
                    state.abandoned = True
                    print(f"  {tid}: budget exhausted, moving on")
                elif "pacing" in detail:
                    time.sleep(5)
                elif "rate limit" in detail:
                    wait = min(30, 2 ** rate_limit_backoff)
                    rate_limit_backoff += 1
                    print(f"  Rate limited, backoff {wait}s")
                    time.sleep(wait)
                else:
                    time.sleep(10)
            continue

        rate_limit_backoff = 0  # reset on success

        score = result.get("score")
        improved = result.get("improvement", False)
        cost = result.get("cost_sats", 0)
        reward = result.get("reward_sats", 0)
        gap = result.get("gap", 0) or 0
        balance = result.get("balance_sats", "?")

        total_spent += cost
        if improved:
            total_earned += reward
            state.improvements += 1
            state.consecutive_failures = 0
        else:
            state.consecutive_failures += 1

        # Track best score seen locally (0 means "none recorded yet")
        if score is not None and (state.best_score == 0 or score > state.best_score):
            state.best_score = score
        state.last_gap = gap

        # Record history for evolutionary search
        history_entry = {"score": score, "gap": gap, "improved": improved}
        if tier == "blind" and "params" in body:
            history_entry["params"] = body["params"]
        if result.get("eval_detail"):
            history_entry["eval_detail"] = result["eval_detail"]
        state.history.append(history_entry)

        # Print status
        status = "WIN!" if improved else "miss"
        score_str = f"{score:.4f}" if score is not None else "err"
        print(f"  [{total_proposals}] {tid[:25]:25s} {score_str:>8s} "
              f"gap={gap:+.4f} {status:4s} cost={cost} earned={reward} "
              f"bal={balance}")

        # Log to JSONL
        log_proposal(config.log_file, {
            "timestamp": datetime.now().isoformat(),
            "target_id": tid,
            "tier": tier,
            "score": score,
            "gap": gap,
            "improved": improved,
            "cost": cost,
            "reward": reward,
            "balance": balance,
            "proposal_num": total_proposals,
        })

        # Check abandon conditions
        if state.proposals >= config.abandon_threshold:
            state.abandoned = True
            print(f"  Abandoning {tid} (hit proposal limit "
                  f"{config.abandon_threshold})")
        elif state.consecutive_failures >= 20 and abs(gap) < 0.01:
            state.abandoned = True
            print(f"  Abandoning {tid} (diminishing returns)")

        # Brief pause to avoid hammering
        time.sleep(0.5)

    # Summary
    print()
    print(f"Worker finished")
    print(f"  Total proposals: {total_proposals}")
    print(f"  Total earned: {total_earned} sats")
    print(f"  Total spent: {total_spent} sats")
    print(f"  Net: {total_earned - total_spent} sats")
    hits = sum(s.improvements for s in states.values())
    print(f"  Hit rate: {hits}/{total_proposals}")
    print()
    print(f"  Per-target breakdown:")
    for tid, state in sorted(states.items(), key=lambda x: -x[1].improvements):
        if state.proposals > 0:
            rate = state.improvements / state.proposals * 100
            print(f"    {tid[:30]:30s} props={state.proposals:>3d} "
                  f"imps={state.improvements:>2d} rate={rate:.0f}% "
                  f"best={state.best_score:.4f}")


# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------

def main():
    """Parse CLI flags plus environment variables and launch the worker."""
    cli = argparse.ArgumentParser(
        description="satwork autonomous worker — earn Bitcoin by improving "
                    "optimization targets",
        epilog="""\
Modes:
  blind   No LLM needed. Evolutionary search on blind targets ($0).
  hybrid  Blind + Grok for described/signature targets ($0.002/proposal).

Environment variables:
  XAI_API_KEY        xAI API key (for Grok models)
  SATWORK_AGENT_KEY  Agent key (or use --agent-key)
""",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    cli.add_argument("--coordinator", default="https://satwork.ai",
                     help="Coordinator URL (default: https://satwork.ai)")
    cli.add_argument("--agent-key",
                     help="Agent key (default: generate new)")
    cli.add_argument("--mode", default="blind",
                     choices=["blind", "hybrid"],
                     help="blind=free evolutionary, hybrid=+Grok LLM")
    cli.add_argument("--xai-key",
                     help="xAI API key (or XAI_API_KEY env)")
    cli.add_argument("--xai-model", default="grok-3-mini",
                     help="xAI model (default: grok-3-mini)")
    cli.add_argument("--max-proposals", type=int, default=500,
                     help="Max proposals before stopping (default: 500)")
    cli.add_argument("--max-spend", type=int, default=1000,
                     help="Max sats to spend (default: 1000)")
    cli.add_argument("--abandon", type=int, default=80,
                     help="Proposals before abandoning a target")
    cli.add_argument("--tier-filter", default="",
                     choices=["", "blind", "described", "signature"],
                     help="Only work on targets of this privacy tier")
    cli.add_argument("--targets", default="",
                     help="Comma-separated target IDs to work on")
    cli.add_argument("--kg", action="store_true",
                     help="Enable KG purchases for warm-start")
    cli.add_argument("--log",
                     help="JSONL log file (default: worker-<key>.jsonl)")
    opts = cli.parse_args()

    # Agent key precedence: flag > env > freshly generated (printed so the
    # user can reuse it across runs).
    key = opts.agent_key or os.environ.get("SATWORK_AGENT_KEY")
    if not key:
        key = f"sk-{secrets.token_hex(32)}"
        print(f"Generated agent key: {key}")

    wanted = []
    if opts.targets:
        wanted = [t.strip() for t in opts.targets.split(",") if t.strip()]

    run_worker(WorkerConfig(
        coordinator=opts.coordinator,
        agent_key=key,
        mode=opts.mode,
        xai_key=opts.xai_key or os.environ.get("XAI_API_KEY", ""),
        xai_model=opts.xai_model,
        max_proposals=opts.max_proposals,
        max_spend=opts.max_spend,
        abandon_threshold=opts.abandon,
        tier_filter=opts.tier_filter or "",
        target_ids=wanted,
        kg_enabled=opts.kg,
        log_file=opts.log or f"worker-{key[3:11]}.jsonl",
    ))


if __name__ == "__main__":
    # CLI entry point — nothing runs on import.
    main()
