"""
Scan all GitHub Actions jobs in a GitHub org that ran between
0800-1244 UTC today and identify any that installed litellm 1.82.7 or 1.82.8.
Adjust WINDOW_START / WINDOW_END to cover March 24, 2026 if running later.
"""
import io
import os
import re
import sys
import zipfile
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime, timezone
import requests
GITHUB_URL = "https://api.github.com"
ORG = "your-org"  # organization login to scan; replace before running
TOKEN = os.environ.get("GITHUB_TOKEN", "")  # needs Actions read access

# Scan window: 08:00-12:44 UTC of the current day (adjust per module docstring).
TODAY = datetime.now(timezone.utc).date()
WINDOW_START = datetime(TODAY.year, TODAY.month, TODAY.day, 8, 0, 0, tzinfo=timezone.utc)
WINDOW_END = datetime(TODAY.year, TODAY.month, TODAY.day, 12, 44, 0, tzinfo=timezone.utc)

# litellm versions we are hunting for in job logs.
TARGET_VERSIONS = {"1.82.7", "1.82.8"}
# Match "litellm==1.82.7" (pip requirement lines, e.g. "Collecting litellm==1.82.7"),
# "litellm=1.82.7", and "litellm-1.82.7" (wheel names / "Successfully installed").
# NOTE: the previous single-char class [=\-] could never match the common
# double-equals pip form, so those log lines were silently missed.
VERSION_PATTERN = re.compile(r"litellm[=\-]{1,2}(\d+\.\d+\.\d+)", re.IGNORECASE)
# Shared HTTP session: every request carries the bearer token plus the
# Accept / API-version headers GitHub's REST docs recommend.
SESSION = requests.Session()
SESSION.headers.update({
    "Authorization": f"Bearer {TOKEN}",
    "Accept": "application/vnd.github+json",
    "X-GitHub-Api-Version": "2022-11-28",
})
def get_paginated(url, params=None):
    """Yield every item from a page-numbered GitHub API endpoint.

    Walks ``page`` upward until an empty or short page is returned.
    A 404 response ends the stream quietly (treated as "nothing there").
    """
    query = dict(params or {})
    query.setdefault("per_page", 100)
    per_page = query["per_page"]
    page_no = 1
    while True:
        query["page"] = page_no
        response = SESSION.get(url, params=query, timeout=30)
        if response.status_code == 404:
            return
        response.raise_for_status()
        payload = response.json()
        # Some endpoints wrap the list in an envelope dict, e.g.
        # {"total_count": N, "workflow_runs": [...]}; take the first list value.
        if isinstance(payload, dict):
            batch = next((v for v in payload.values() if isinstance(v, list)), [])
        else:
            batch = payload
        if not batch:
            break
        yield from batch
        if len(batch) < per_page:
            break
        page_no += 1
def parse_ts(ts_str):
    """Convert a GitHub ISO-8601 timestamp string to an aware datetime.

    Returns None for empty or missing input.
    """
    if not ts_str:
        return None
    # fromisoformat() on older Pythons rejects the trailing "Z", so replace
    # it with an explicit UTC offset first.
    normalized = ts_str.replace("Z", "+00:00")
    return datetime.fromisoformat(normalized)
def get_repos():
    """Return minimal metadata (id / name / full_name) for every repo in ORG."""
    return [
        {"id": repo["id"], "name": repo["name"], "full_name": repo["full_name"]}
        for repo in get_paginated(f"{GITHUB_URL}/orgs/{ORG}/repos", {"type": "all"})
    ]
def get_runs_in_window(repo_full_name):
    """List workflow runs for one repo whose start (or creation) timestamp
    falls inside [WINDOW_START, WINDOW_END].

    The API-side ``created`` range filter narrows the query; each run is
    still re-checked locally against the window before it is kept.
    """
    created_range = (
        f"{WINDOW_START.strftime('%Y-%m-%dT%H:%M:%SZ')}"
        f"..{WINDOW_END.strftime('%Y-%m-%dT%H:%M:%SZ')}"
    )
    endpoint = f"{GITHUB_URL}/repos/{repo_full_name}/actions/runs"
    matching = []
    for run in get_paginated(endpoint, {"created": created_range, "per_page": 100}):
        started = parse_ts(run.get("run_started_at") or run.get("created_at"))
        if started is not None and WINDOW_START <= started <= WINDOW_END:
            matching.append(run)
    return matching
def get_jobs_for_run(repo_full_name, run_id):
    """Return the jobs of one workflow run that started inside the scan window."""
    endpoint = f"{GITHUB_URL}/repos/{repo_full_name}/actions/runs/{run_id}/jobs"
    in_window = []
    for job in get_paginated(endpoint, {"filter": "all"}):
        begun = parse_ts(job.get("started_at"))
        if begun is not None and WINDOW_START <= begun <= WINDOW_END:
            in_window.append(job)
    return in_window
def fetch_job_log(repo_full_name, job_id):
    """Download the log text for a single workflow job.

    Returns "" when the log is unavailable (403 / 404 / 410). The endpoint
    may serve either plain text or a zip archive; archive members are
    decoded and concatenated in sorted name order.
    """
    endpoint = f"{GITHUB_URL}/repos/{repo_full_name}/actions/jobs/{job_id}/logs"
    resp = SESSION.get(endpoint, timeout=60, allow_redirects=True)
    if resp.status_code in (403, 404, 410):
        return ""
    resp.raise_for_status()
    looks_zipped = (
        "zip" in resp.headers.get("Content-Type", "")
        or resp.content[:2] == b"PK"  # zip magic bytes
    )
    if looks_zipped:
        try:
            with zipfile.ZipFile(io.BytesIO(resp.content)) as archive:
                texts = [
                    archive.read(member).decode("utf-8", errors="replace")
                    for member in sorted(archive.namelist())
                ]
            return "\n".join(texts)
        except zipfile.BadZipFile:
            # Not actually a zip after all — fall back to raw text below.
            pass
    return resp.text
def check_job(repo_full_name, job):
    """Scan one job's log for the targeted litellm versions.

    Returns a summary dict when a target version was installed, else None
    (also when the log could not be fetched).
    """
    job_id = job["id"]
    run_id = job["run_id"]
    text = fetch_job_log(repo_full_name, job_id)
    if not text:
        return None
    versions_seen = set()
    matching_lines = []
    for raw_line in text.splitlines():
        match = VERSION_PATTERN.search(raw_line)
        if not match:
            continue
        version = match.group(1)
        if version in TARGET_VERSIONS:
            versions_seen.add(version)
            matching_lines.append(raw_line.strip())
    if not versions_seen:
        return None
    fallback_url = f"https://github.com/{repo_full_name}/actions/runs/{run_id}"
    return {
        "repo": repo_full_name,
        "run_id": run_id,
        "job_id": job_id,
        "job_name": job["name"],
        "started_at": job.get("started_at", ""),
        "versions": sorted(versions_seen),
        "context": matching_lines[:10],  # cap evidence lines per job
        "job_url": job.get("html_url", fallback_url),
    }
def main():
    """Entry point: enumerate in-window runs/jobs, scan their logs, report hits."""
    if not TOKEN:
        print("ERROR: Set GITHUB_TOKEN environment variable.", file=sys.stderr)
        sys.exit(1)
    print(f"Time window : {WINDOW_START.isoformat()} -> {WINDOW_END.isoformat()}")
    print(f"Hunting for : litellm {', '.join(sorted(TARGET_VERSIONS))}")
    print()
    print(f"Fetching repositories for org '{ORG}'...")
    repos = get_repos()
    print(f" Found {len(repos)} repositories")
    print()

    # Phase 1: collect (repo, job) pairs whose timestamps fall in the window.
    jobs_to_check = []
    print("Scanning workflow runs for time window...")
    for repo in repos:
        full_name = repo["full_name"]
        try:
            window_runs = get_runs_in_window(full_name)
        except requests.HTTPError as e:
            print(f" WARN: {full_name} - {e}", file=sys.stderr)
            continue
        if not window_runs:
            continue
        print(f" {full_name}: {len(window_runs)} run(s) in window")
        for run in window_runs:
            try:
                run_jobs = get_jobs_for_run(full_name, run["id"])
            except requests.HTTPError as e:
                print(f" WARN: run {run['id']} - {e}", file=sys.stderr)
                continue
            jobs_to_check.extend((full_name, job) for job in run_jobs)

    total = len(jobs_to_check)
    print(f"\nFetching logs for {total} job(s)...")
    print()

    # Phase 2: fetch and scan logs concurrently (log download is I/O-bound).
    hits = []
    with ThreadPoolExecutor(max_workers=8) as pool:
        futures = {
            pool.submit(check_job, name, job): (name, job["id"])
            for name, job in jobs_to_check
        }
        for done, future in enumerate(as_completed(futures), start=1):
            full_name, jid = futures[future]
            try:
                result = future.result()
            except Exception as e:
                # A single failed log fetch should not abort the whole scan.
                print(f" ERROR {full_name} job {jid}: {e}", file=sys.stderr)
                continue
            if result:
                hits.append(result)
            suffix = f" *** HIT: litellm {result['versions']} ***" if result else ""
            print(f" [{done}/{total}] {full_name} job {jid}" + suffix, flush=True)

    # Phase 3: summary report, ordered by job start time.
    print()
    print("=" * 72)
    print(f"RESULTS: {len(hits)} job(s) installed litellm {' or '.join(sorted(TARGET_VERSIONS))}")
    print("=" * 72)
    if not hits:
        print("No matches found.")
        return
    for h in sorted(hits, key=lambda hit: hit["started_at"]):
        print()
        print(f" Repo : {h['repo']}")
        print(f" Job : {h['job_name']} (#{h['job_id']})")
        print(f" Run ID : {h['run_id']}")
        print(f" Started : {h['started_at']}")
        print(f" Versions : litellm {', '.join(h['versions'])}")
        print(f" URL : {h['job_url']}")
        print(" Log lines :")
        for line in h["context"]:
            print(f" {line}")
# Script entry point: only run the scan when executed directly, not on import.
if __name__ == "__main__":
    main()