Remote Work Tools

AI Project Status Generator for Remote Teams Pulling Data from Multiple Tools

Remote teams juggle dozens of tools—Slack for communication, Jira for tracking, GitHub for code, Notion for documentation, and Google Calendar for meetings. Generating a coherent project status update means manually checking each platform, copying data, and synthesizing it into something useful. This process wastes hours every week.

An AI project status generator automates this workflow by pulling data from multiple tools and using large language models to synthesize the information into a readable status report. This guide shows you how to build one from scratch.

Architecture Overview

The system consists of three main components:

  1. Data Connectors – API clients that fetch data from each tool
  2. AI Processing Layer – Normalizes and synthesizes the data
  3. Output Generator – Formats the final status report
┌─────────────┐   ┌─────────────┐   ┌─────────────┐
│   Slack     │   │    Jira     │   │  GitHub     │
└──────┬──────┘   └──────┬──────┘   └──────┬──────┘
       │                 │                 │
       └────────┬────────┴────────┬────────┘
                ▼                  ▼
        ┌─────────────────┐  ┌───────────────┐
        │ Data Normalizer │──│ LLM Processor │
        └─────────────────┘  └───────────────┘
                                   │
                                   ▼
                           ┌──────────────┐
                           │Status Report │
                           └──────────────┘

Building Data Connectors

Each tool requires a dedicated connector. Here’s a Python implementation for three common platforms:

import os
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, List

import requests

@dataclass
class ProjectData:
    """Uniform envelope for raw records fetched from one external tool."""
    # Which tool produced the records, e.g. "slack", "jira", "github".
    source: str
    # Raw API payload items, passed through unmodified (messages, issues, PRs, ...).
    items: List[Dict[str, Any]]
    # When the fetch happened (naive local time via datetime.now()).
    timestamp: datetime

class SlackConnector:
    """Fetches recent channel history from the Slack Web API."""

    def __init__(self, token: str):
        self.token = token
        self.base_url = "https://slack.com/api"

    def get_channel_messages(self, channel_id: str, days: int = 7) -> ProjectData:
        """Return messages posted to *channel_id* within the last *days* days.

        Raises requests.HTTPError on a non-2xx response (the original
        silently parsed error bodies and could hang with no timeout).
        """
        headers = {"Authorization": f"Bearer {self.token}"}
        # Slack expects `oldest` as a Unix timestamp.
        oldest = (datetime.now() - timedelta(days=days)).timestamp()

        response = requests.get(
            f"{self.base_url}/conversations.history",
            headers=headers,
            params={"channel": channel_id, "oldest": oldest},
            timeout=30,  # don't let a stalled connection hang the whole report run
        )
        response.raise_for_status()

        messages = response.json().get("messages", [])
        return ProjectData(source="slack", items=messages, timestamp=datetime.now())

class JiraConnector:
    """Fetches the current sprint's issues from the Jira Agile REST API."""

    def __init__(self, domain: str, email: str, api_token: str):
        self.domain = domain
        self.auth = (email, api_token)

    def get_sprint_issues(self, board_id: str) -> ProjectData:
        """Return up to 50 issues from the board's active sprint.

        Asks Jira for active sprints explicitly (``state=active``) instead of
        assuming the last element of the unfiltered sprint list is current —
        the original ``values[-1]`` could pick a closed or future sprint, and
        raised IndexError when the board had no sprints at all. Returns an
        empty result when no sprint is active.

        Raises requests.HTTPError on a non-2xx response.
        """
        response = requests.get(
            f"https://{self.domain}/rest/agile/1.0/board/{board_id}/sprint",
            auth=self.auth,
            params={"state": "active"},
            timeout=30,
        )
        response.raise_for_status()

        sprints = response.json().get("values", [])
        if not sprints:
            # No active sprint: report nothing rather than crashing.
            return ProjectData(source="jira", items=[], timestamp=datetime.now())
        sprint = sprints[-1]

        issues_response = requests.get(
            f"https://{self.domain}/rest/agile/1.0/sprint/{sprint['id']}/issue",
            auth=self.auth,
            params={"maxResults": 50},
            timeout=30,
        )
        issues_response.raise_for_status()

        issues = issues_response.json().get("issues", [])
        return ProjectData(source="jira", items=issues, timestamp=datetime.now())

class GitHubConnector:
    """Fetches recently-updated pull requests from the GitHub REST API."""

    def __init__(self, token: str):
        self.token = token
        self.headers = {"Authorization": f"token {token}"}

    def get_recent_prs(self, owner: str, repo: str, days: int = 7) -> ProjectData:
        """Return PRs (any state) updated within the last *days* days.

        GitHub returns ``updated_at`` as UTC in the form
        ``2024-01-18T10:30:00Z``. The original compared it against a naive
        local-time ``isoformat()`` string, which is both timezone-shifted and
        lexicographically incompatible (no trailing 'Z'); build the cutoff in
        the same UTC 'Z' format so the string comparison is correct.

        Raises requests.HTTPError on a non-2xx response.
        """
        since = (datetime.now(timezone.utc) - timedelta(days=days)).strftime(
            "%Y-%m-%dT%H:%M:%SZ"
        )

        response = requests.get(
            f"https://api.github.com/repos/{owner}/{repo}/pulls",
            headers=self.headers,
            params={"state": "all", "sort": "updated", "direction": "desc"},
            timeout=30,
        )
        response.raise_for_status()

        prs = [pr for pr in response.json() if pr["updated_at"] >= since]
        return ProjectData(source="github", items=prs, timestamp=datetime.now())

Normalizing and Aggregating Data

Each tool returns data in a different format. Create a normalization layer to standardize the structure:

from typing import Protocol

class DataNormalizer(Protocol):
    """Structural interface every tool-specific normalizer satisfies:
    turn one raw ProjectData into a flat, report-ready summary dict."""
    def normalize(self, data: ProjectData) -> Dict[str, Any]:
        ...

class SlackNormalizer:
    """Condenses raw Slack channel history into a communication summary."""

    def normalize(self, data: ProjectData) -> Dict[str, Any]:
        """Summarize message volume and surface the last few message texts."""
        count = len(data.items)
        # Keep only the tail of the history and clip each text to 200 chars.
        snippets = [m.get("text", "")[:200] for m in data.items[-5:]]
        level = "normal" if count <= 50 else "high"
        return {
            "type": "communication",
            "summary": f"{count} messages in the last 7 days",
            "key_updates": snippets,
            "activity_level": level,
        }

class JiraNormalizer:
    """Condenses sprint issues into a per-status count summary."""

    def normalize(self, data: ProjectData) -> Dict[str, Any]:
        """Tally issues by workflow status and compute a completion percent."""
        breakdown: Dict[str, int] = {}
        for issue in data.items:
            name = issue["fields"]["status"]["name"]
            try:
                breakdown[name] += 1
            except KeyError:
                breakdown[name] = 1

        return {
            "type": "project_tracking",
            "summary": f"{len(data.items)} issues in current sprint",
            "status_breakdown": breakdown,
            "completion_percentage": self._calculate_completion(breakdown),
        }

    def _calculate_completion(self, counts: Dict[str, int]) -> float:
        """Percent of issues whose status is Done or Closed (0 when empty)."""
        total = sum(counts.values())
        if not total:
            return 0
        finished = counts.get("Done", 0) + counts.get("Closed", 0)
        return round((finished / total) * 100, 1)

class GitHubNormalizer:
    """Condenses recent pull-request activity into a development summary."""

    def normalize(self, data: ProjectData) -> Dict[str, Any]:
        """Count open/merged PRs and surface the first three as highlights."""
        open_count = 0
        merged_count = 0
        for pr in data.items:
            if pr["state"] == "open":
                open_count += 1
            if pr.get("merged_at"):
                merged_count += 1

        highlights = [
            {"title": pr["title"], "url": pr["html_url"]}
            for pr in data.items[:3]
        ]
        return {
            "type": "code_development",
            "summary": f"{len(data.items)} PRs updated, {merged_count} merged",
            "open_count": open_count,
            "merged_count": merged_count,
            "key_prs": highlights,
        }

AI-Powered Synthesis

Now comes the core value: using an LLM to synthesize all this data into a coherent status report:

import openai

class StatusReportGenerator:
    """Turns normalized tool summaries into a prose status report via an LLM."""

    def __init__(self, api_key: str):
        self.client = openai.OpenAI(api_key=api_key)

    def generate_report(self, normalized_data: List[Dict[str, Any]],
                       team_name: str, project_name: str) -> str:
        """Ask the model to synthesize the weekly report from tool data."""
        system_prompt = """You are a project manager generating a weekly
                    status update. Summarize the team's progress in a concise,
                    actionable format. Highlight blockers, completed work,
                    and upcoming priorities. Use a professional but friendly tone."""
        user_prompt = self._build_context(normalized_data, team_name, project_name)

        completion = self.client.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            temperature=0.7,
            max_tokens=800,
        )
        return completion.choices[0].message.content

    def _build_context(self, data: List[Dict[str, Any]],
                      team: str, project: str) -> str:
        """Render the normalized summaries as a markdown-ish prompt context."""
        lines = [f"## {project} Status Report - {team}\n"]

        for entry in data:
            heading = entry["type"].replace("_", " ").title()
            lines.append(f"\n### {heading}")
            lines.append(entry["summary"])

            if "status_breakdown" in entry:
                lines.append("Status breakdown:")
                lines.extend(
                    f"  - {status}: {count}"
                    for status, count in entry["status_breakdown"].items()
                )

            if "key_prs" in entry:
                lines.append("Recent PRs:")
                lines.extend(f"  - {pr['title']}" for pr in entry["key_prs"])

        return "\n".join(lines)

Complete Integration

Tie everything together with a main orchestrator:

def generate_weekly_status():
    """Fetch data from Slack, Jira, and GitHub, then print and return an
    AI-generated weekly status report.

    Reads all credentials and target identifiers from environment variables
    (SLACK_TOKEN, SLACK_CHANNEL, JIRA_DOMAIN, JIRA_EMAIL, JIRA_TOKEN,
    JIRA_BOARD, GITHUB_TOKEN, REPO_OWNER, REPO_NAME, OPENAI_API_KEY);
    raises KeyError if any is missing.
    """
    # Initialize connectors from environment-held credentials.
    slack = SlackConnector(os.environ["SLACK_TOKEN"])
    jira = JiraConnector(
        os.environ["JIRA_DOMAIN"],
        os.environ["JIRA_EMAIL"],
        os.environ["JIRA_TOKEN"]
    )
    github = GitHubConnector(os.environ["GITHUB_TOKEN"])

    # Fetch data from all sources.
    data_sources = [
        slack.get_channel_messages(os.environ["SLACK_CHANNEL"]),
        jira.get_sprint_issues(os.environ["JIRA_BOARD"]),
        github.get_recent_prs(os.environ["REPO_OWNER"], os.environ["REPO_NAME"])
    ]

    # Pair each source with its normalizer positionally; zip replaces the
    # original range(len(...)) double-indexing anti-pattern.
    normalizers = [SlackNormalizer(), JiraNormalizer(), GitHubNormalizer()]
    normalized = [
        normalizer.normalize(source)
        for normalizer, source in zip(normalizers, data_sources)
    ]

    # Generate the report with the LLM.
    generator = StatusReportGenerator(os.environ["OPENAI_API_KEY"])
    report = generator.generate_report(
        normalized,
        team_name="Engineering Team",
        project_name="Platform Redesign"
    )

    # Output the report.
    print(report)
    return report

Deployment Considerations

For production use, add these essential features:

Rate Limiting: Most APIs impose rate limits. Implement exponential backoff and cache responses where possible.

Authentication Security: Store API tokens in environment variables or a secrets manager. Never commit credentials to version control.

Scheduling: Use a cron job or GitHub Actions workflow to run the generator weekly:

name: Weekly Status Report
on:
  schedule:
    # GitHub Actions cron accepts numeric day-of-week (0-6) or three-letter
    # abbreviations (MON); the full name "Monday" is rejected as invalid.
    - cron: '0 9 * * 1'
  workflow_dispatch:

jobs:
  generate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - run: pip install -r requirements.txt
      - run: python generate_status.py
        env:
          # generate_weekly_status() reads ALL of these via os.environ[...];
          # the original workflow omitted the Jira/repo/channel variables,
          # so the job would fail with KeyError at runtime.
          SLACK_TOKEN: ${{ secrets.SLACK_TOKEN }}
          SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
          JIRA_DOMAIN: ${{ secrets.JIRA_DOMAIN }}
          JIRA_EMAIL: ${{ secrets.JIRA_EMAIL }}
          JIRA_TOKEN: ${{ secrets.JIRA_TOKEN }}
          JIRA_BOARD: ${{ secrets.JIRA_BOARD }}
          REPO_OWNER: ${{ secrets.REPO_OWNER }}
          REPO_NAME: ${{ secrets.REPO_NAME }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

Customization: The prompt in StatusReportGenerator can be modified to match your team’s specific format requirements. Some teams prefer bullet points, others prefer paragraphs.

Adding Notion and Linear Connectors

The same data connector pattern extends to other tools. Notion pages make excellent sources for documentation status and pending decisions. Linear provides detailed engineering metrics including cycle time and scope creep.

class NotionConnector:
    """Fetches recently-edited pages from a Notion database."""

    def __init__(self, api_key: str):
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Notion-Version": "2022-06-28",
            "Content-Type": "application/json"
        }
        self.base_url = "https://api.notion.com/v1"

    def get_database_items(self, database_id: str, days: int = 7) -> ProjectData:
        """Return up to 50 database pages edited within the last *days* days.

        Raises requests.HTTPError on a non-2xx response.
        """
        # datetime/timedelta come from the module-level imports; the original
        # re-imported them inside the function, which was redundant.
        cutoff = (datetime.now() - timedelta(days=days)).isoformat()

        payload = {
            "filter": {
                "property": "Last edited time",
                "last_edited_time": {"after": cutoff}
            },
            "sorts": [{"timestamp": "last_edited_time", "direction": "descending"}],
            "page_size": 50
        }

        response = requests.post(
            f"{self.base_url}/databases/{database_id}/query",
            headers=self.headers,
            json=payload,
            timeout=30,
        )
        response.raise_for_status()
        pages = response.json().get("results", [])
        return ProjectData(source="notion", items=pages, timestamp=datetime.now())


class LinearConnector:
    """Fetches cycle (sprint) data from Linear's GraphQL API."""

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.endpoint = "https://api.linear.app/graphql"

    def get_cycle_data(self, team_id: str) -> ProjectData:
        """Return one cycle's issues (title, state, completion, estimate)
        wrapped as a single-item ProjectData.

        Raises requests.HTTPError on a non-2xx response.
        """
        query = """
        query TeamIssues($teamId: String!) {
          team(id: $teamId) {
            cycles(first: 1, orderBy: startedAt) {
              nodes {
                startsAt
                endsAt
                completedAt
                issues {
                  nodes {
                    title
                    state { name }
                    completedAt
                    estimate
                  }
                }
              }
            }
          }
        }
        """
        response = requests.post(
            self.endpoint,
            headers={"Authorization": self.api_key},
            json={"query": query, "variables": {"teamId": team_id}},
            timeout=30,
        )
        response.raise_for_status()
        # GraphQL errors come back as HTTP 200 with "data": null; the original
        # .get("data", {}) would pass that None through and wrap it as an item.
        cycle_data = response.json().get("data") or {}
        return ProjectData(source="linear", items=[cycle_data], timestamp=datetime.now())

Caching API Responses to Stay Within Rate Limits

Running the generator multiple times per day (or on-demand) against live APIs burns through rate limits quickly. Cache connector responses with a short TTL to allow re-runs without hitting limits:

import hashlib
import json
import os
from datetime import datetime, timedelta
from pathlib import Path

class CachedConnector:
    """Wraps a connector and caches its ProjectData results on disk.

    Fixes two defects in the naive version: a cache hit returned a plain
    dict while a miss returned a ProjectData (inconsistent types for
    callers), and json.dumps crashed on ProjectData.timestamp because
    datetime is not JSON-serializable. The payload is now serialized
    field-by-field and a ProjectData is rebuilt on every hit.
    """

    def __init__(self, connector, cache_dir: str = ".status-cache", ttl_minutes: int = 30):
        self.connector = connector
        self.cache_dir = Path(cache_dir)
        self.cache_dir.mkdir(exist_ok=True)
        self.ttl = timedelta(minutes=ttl_minutes)

    def _cache_key(self, method_name: str, *args) -> str:
        # md5 is fine here: the digest is only a cache filename, not a
        # security token.
        key = f"{type(self.connector).__name__}:{method_name}:{':'.join(str(a) for a in args)}"
        return hashlib.md5(key.encode()).hexdigest()

    def _cached(self, method_name: str, *args):
        """Return the connector method's result, served from disk when a
        fresh (within-TTL) cache entry exists."""
        key = self._cache_key(method_name, *args)
        cache_file = self.cache_dir / f"{key}.json"

        if cache_file.exists():
            try:
                cached = json.loads(cache_file.read_text())
                cached_at = datetime.fromisoformat(cached["cached_at"])
                if datetime.now() - cached_at < self.ttl:
                    d = cached["data"]
                    # Rebuild the same type the connector returns so callers
                    # see one consistent return type on hit and miss.
                    return ProjectData(
                        source=d["source"],
                        items=d["items"],
                        timestamp=datetime.fromisoformat(d["timestamp"]),
                    )
            except (json.JSONDecodeError, KeyError, ValueError):
                pass  # corrupt or stale entry: fall through and refetch

        # Cache miss — call the real connector and persist its fields.
        result = getattr(self.connector, method_name)(*args)
        cache_file.write_text(json.dumps({
            "cached_at": datetime.now().isoformat(),
            "data": {
                "source": result.source,
                "items": result.items,
                "timestamp": result.timestamp.isoformat(),
            },
        }))
        return result

Delivering Reports to Multiple Channels

A status report that only prints to stdout isn’t useful for distributed teams. Add output adapters for Slack, email, and Confluence:

class ReportDelivery:
    """Output adapters that push a finished report to team tools."""

    def send_to_slack(self, report: str, webhook_url: str, channel: str):
        """Post report to a Slack channel via webhook.

        Raises requests.HTTPError if the webhook rejects the post (the
        original dropped the response, silently losing delivery failures).
        """
        payload = {
            "channel": channel,
            "text": f"*Weekly Status Report*\n{report}",
            "mrkdwn": True
        }
        response = requests.post(webhook_url, json=payload, timeout=30)
        response.raise_for_status()

    def save_to_notion(self, report: str, page_id: str, notion_token: str):
        """Append report as a new child page in Notion.

        Raises requests.HTTPError on a non-2xx response.

        NOTE(review): Notion's API limits a single rich_text content item
        to ~2000 characters — very long reports may need to be split into
        multiple paragraph blocks; confirm against current API limits.
        """
        headers = {
            "Authorization": f"Bearer {notion_token}",
            "Notion-Version": "2022-06-28",
            "Content-Type": "application/json"
        }
        date_str = datetime.now().strftime("%Y-%m-%d")
        response = requests.post(
            "https://api.notion.com/v1/pages",
            headers=headers,
            json={
                "parent": {"page_id": page_id},
                "properties": {
                    "title": {"title": [{"text": {"content": f"Status Report {date_str}"}}]}
                },
                "children": [
                    {"object": "block", "type": "paragraph",
                     "paragraph": {"rich_text": [{"text": {"content": report}}]}}
                ]
            },
            timeout=30,
        )
        response.raise_for_status()

Building an AI project status generator eliminates the manual drudgery of synthesizing updates across disparate tools. Your team gets consistent, data-driven status reports without anyone spending hours gathering information.

Built by theluckystrike — More at zovo.one