Integrations
Examples and guides for integrating gitlab-summary with other tools and workflows.
REST API Integration
gitlab-summary exposes a REST API when running in serve mode. This allows integration with monitoring, alerting, and visualization tools.
Starting the API Server
gitlab-summary serve --group your-group --port 5100
API available at: http://localhost:5100/api/
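Once the server is up, a quick request against the summary endpoint used throughout this page confirms it is responding:
curl -s http://localhost:5100/api/pipelines/summary | jq '.summary'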
See API Reference for complete endpoint documentation.
Slack Integration
Failure Notifications
Send Slack notifications when pipelines fail.
Python Script:
#!/usr/bin/env python3
import requests
import time

SLACK_WEBHOOK = "https://hooks.slack.com/services/YOUR/WEBHOOK/URL"
GITLAB_SUMMARY_API = "http://localhost:5100"

def check_failures():
    """Check for failed pipelines and notify Slack."""
    response = requests.get(f"{GITLAB_SUMMARY_API}/api/pipelines/summary")
    data = response.json()

    failed_count = data['summary']['failed']
    if failed_count > 0:
        # Get details of failures
        projects_with_failures = [
            p for p in data['projects'] if p['failed'] > 0
        ]

        # Format message
        message = {
            "text": f"⚠️ *{failed_count} Pipeline Failures*",
            "blocks": [
                {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": f"⚠️ *{failed_count} Pipeline Failures in the last 24 hours*"
                    }
                },
                {
                    "type": "divider"
                }
            ]
        }

        # Add project details
        for project in projects_with_failures:
            message["blocks"].append({
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"*{project['project_name']}*\n{project['failed']} failed, {project['successful']} successful"
                }
            })

        # Send to Slack
        requests.post(SLACK_WEBHOOK, json=message)

if __name__ == "__main__":
    while True:
        check_failures()
        time.sleep(300)  # Check every 5 minutes
📸 Screenshot placeholder: slack-notification.png
Description: Example Slack notification message showing pipeline failure alert with project names, failure counts, and success statistics
Run as Service:
# Install as systemd service
sudo nano /etc/systemd/system/gitlab-slack-monitor.service
Service File:
[Unit]
Description=GitLab Pipeline Slack Monitor
After=network.target
[Service]
Type=simple
User=gitlab-monitor
WorkingDirectory=/opt/gitlab-monitor
ExecStart=/usr/bin/python3 /opt/gitlab-monitor/slack_notifier.py
Restart=always
[Install]
WantedBy=multi-user.target
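Then reload systemd and start the monitor:
sudo systemctl daemon-reload
sudo systemctl enable --now gitlab-slack-monitor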
Prometheus Metrics
Export pipeline metrics to Prometheus for monitoring and alerting.
Python Exporter:
#!/usr/bin/env python3
from prometheus_client import start_http_server, Gauge
import requests
import time
import logging

# Define metrics
pipeline_success_rate = Gauge(
    'gitlab_pipeline_success_rate',
    'Overall pipeline success rate',
    ['group']
)
pipeline_count = Gauge(
    'gitlab_pipeline_count',
    'Total pipeline count',
    ['group', 'status']
)
pipeline_duration = Gauge(
    'gitlab_pipeline_avg_duration_seconds',
    'Average pipeline duration',
    ['group']
)

def collect_metrics(api_url, group):
    """Collect metrics from gitlab-summary API."""
    try:
        response = requests.get(f"{api_url}/api/pipelines/summary")
        response.raise_for_status()
        data = response.json()
        summary = data['summary']

        # Update metrics
        pipeline_success_rate.labels(group=group).set(
            summary['overall_success_rate']
        )
        pipeline_count.labels(group=group, status='total').set(
            summary['total_pipelines']
        )
        pipeline_count.labels(group=group, status='successful').set(
            summary['successful']
        )
        pipeline_count.labels(group=group, status='failed').set(
            summary['failed']
        )
        pipeline_count.labels(group=group, status='running').set(
            summary['running']
        )
        pipeline_duration.labels(group=group).set(
            summary['average_duration']
        )

        logging.info(f"Metrics updated: {summary['total_pipelines']} pipelines")
    except Exception as e:
        logging.error(f"Error collecting metrics: {e}")

def main():
    logging.basicConfig(level=logging.INFO)

    # Configuration
    API_URL = "http://localhost:5100"
    GROUP = "my-org"
    PORT = 9090  # exporter port; Prometheus itself defaults to 9090 too, so change this if both run on one host
    INTERVAL = 30

    # Start Prometheus HTTP server
    start_http_server(PORT)
    logging.info(f"Prometheus exporter started on port {PORT}")

    # Collect metrics periodically
    while True:
        collect_metrics(API_URL, GROUP)
        time.sleep(INTERVAL)

if __name__ == '__main__':
    main()
Prometheus Configuration (prometheus.yml):
scrape_configs:
  - job_name: 'gitlab-pipelines'
    scrape_interval: 30s
    static_configs:
      - targets: ['localhost:9090']
📸 Screenshot placeholder: prometheus-metrics.png
Description: Prometheus metrics dashboard showing gitlab_pipeline_success_rate, gitlab_pipeline_count gauges, and time series data
Grafana Dashboard:
- Import metrics from Prometheus
- Create panels for success rate, failure count, duration trends
- Set alerts for low success rates (see the rule sketch below)
📸 Screenshot placeholder: grafana-dashboard.png
Description: Grafana dashboard displaying pipeline success rate trends, failure count graphs, and average duration charts with time series visualizations
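For the alerting item above, a minimal Prometheus alerting rule against the exporter's gitlab_pipeline_success_rate gauge could look like this; the 80% threshold and 10-minute window are assumptions to adjust for your team:

groups:
  - name: gitlab-pipelines
    rules:
      - alert: LowPipelineSuccessRate
        expr: gitlab_pipeline_success_rate < 80
        for: 10m
        labels:
          severity: warning
        annotations:
          summary: "Pipeline success rate for {{ $labels.group }} is below 80%"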
Grafana Integration
Direct API Data Source
Use Grafana’s JSON API plugin to query gitlab-summary directly.
Install Plugin:
grafana-cli plugins install simpod-json-datasource
Configure Data Source:
- Configuration → Data Sources → Add data source
- Select “JSON API”
- URL: http://localhost:5100
- Save & Test
Create Dashboard Queries:
{
  "target": "pipeline-summary",
  "endpoint": "/api/pipelines/summary"
}
PagerDuty Integration
Trigger PagerDuty incidents for critical failures.
Python Script:
#!/usr/bin/env python3
import requests
import time

PAGERDUTY_ROUTING_KEY = "YOUR_ROUTING_KEY"
GITLAB_SUMMARY_API = "http://localhost:5100"

def trigger_incident(failed_count):
    """Trigger PagerDuty incident."""
    payload = {
        "routing_key": PAGERDUTY_ROUTING_KEY,
        "event_action": "trigger",
        # Reuse one dedup_key so repeated checks update the same incident
        # instead of opening a new one every five minutes
        "dedup_key": "gitlab-pipeline-failures",
        "payload": {
            "summary": f"{failed_count} GitLab pipelines failed",
            "severity": "error" if failed_count > 5 else "warning",
            "source": "gitlab-summary",
            "custom_details": {
                "failed_pipelines": failed_count
            }
        }
    }
    response = requests.post(
        "https://events.pagerduty.com/v2/enqueue",
        json=payload
    )
    print(f"PagerDuty response: {response.status_code}")

def check_failures():
    """Check for failures and trigger incident if needed."""
    response = requests.get(f"{GITLAB_SUMMARY_API}/api/pipelines/summary")
    data = response.json()

    failed = data['summary']['failed']
    # Trigger if more than 3 failures
    if failed > 3:
        trigger_incident(failed)

if __name__ == "__main__":
    while True:
        check_failures()
        time.sleep(300)  # Check every 5 minutes
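When the failure count drops back to zero, the same Events API can close the incident: Events API v2 resolves by dedup_key, so this sketch simply reuses the key from the trigger payload above.

def resolve_incident():
    """Resolve the open incident once failures clear."""
    payload = {
        "routing_key": PAGERDUTY_ROUTING_KEY,
        "event_action": "resolve",
        # Must match the dedup_key sent with the trigger event
        "dedup_key": "gitlab-pipeline-failures",
    }
    requests.post("https://events.pagerduty.com/v2/enqueue", json=payload)

Call it from check_failures() when failed == 0.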
Email Notifications
Send email summaries of pipeline activity.
Python Script (using SMTP):
#!/usr/bin/env python3
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import requests
from datetime import datetime

SMTP_SERVER = "smtp.gmail.com"
SMTP_PORT = 587
SMTP_USER = "your-email@gmail.com"
SMTP_PASSWORD = "your-app-password"
TO_EMAIL = "team@company.com"
GITLAB_SUMMARY_API = "http://localhost:5100"

def send_summary():
    """Send daily pipeline summary email."""
    # Get data
    response = requests.get(f"{GITLAB_SUMMARY_API}/api/pipelines/summary")
    data = response.json()
    summary = data['summary']

    # Create email
    msg = MIMEMultipart()
    msg['From'] = SMTP_USER
    msg['To'] = TO_EMAIL
    msg['Subject'] = f"GitLab Pipeline Summary - {datetime.now().strftime('%Y-%m-%d')}"

    # Email body
    body = f"""
GitLab Pipeline Summary (Last 24 hours)

Total Pipelines: {summary['total_pipelines']}
Successful: {summary['successful']} ({summary['overall_success_rate']:.1f}%)
Failed: {summary['failed']}
Running: {summary['running']}
Average Duration: {summary['average_duration']} seconds

---
Project Breakdown:
"""

    for project in data['projects'][:10]:  # Top 10
        body += f"\n{project['project_name']}: {project['success_rate']:.1f}% success rate"

    msg.attach(MIMEText(body, 'plain'))

    # Send email
    server = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
    server.starttls()
    server.login(SMTP_USER, SMTP_PASSWORD)
    server.send_message(msg)
    server.quit()

    print(f"Summary email sent to {TO_EMAIL}")

if __name__ == "__main__":
    send_summary()
Schedule with cron:
# Send daily at 9 AM
0 9 * * * /usr/bin/python3 /opt/scripts/gitlab-summary-email.py
CI/CD Integration
GitLab CI/CD
Monitor your own GitLab pipelines from within GitLab CI.
.gitlab-ci.yml:
stages:
  - monitor

pipeline_health_check:
  stage: monitor
  image: python:3.9
  script:
    - pip install requests
    - |
      python3 << 'EOF'
      import requests
      import sys

      response = requests.get('http://gitlab-summary-server:5100/api/pipelines/summary')
      data = response.json()
      success_rate = data['summary']['overall_success_rate']

      if success_rate < 80:
          print(f"WARNING: Success rate is {success_rate}%")
          sys.exit(1)
      else:
          print(f"OK: Success rate is {success_rate}%")
      EOF
  only:
    - schedules
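Because of the only: schedules rule, this job runs only from pipeline schedules; create one (for example, daily) under the project's CI/CD → Schedules page in GitLab.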
Custom Dashboards
React Dashboard
import React, { useEffect, useState } from 'react';
import axios from 'axios';

function PipelineDashboard() {
  const [summary, setSummary] = useState(null);

  useEffect(() => {
    const fetchData = async () => {
      const response = await axios.get('http://localhost:5100/api/pipelines/summary');
      setSummary(response.data.summary);
    };

    // Fetch immediately, then refresh every 30 seconds
    fetchData();
    const interval = setInterval(fetchData, 30000);
    return () => clearInterval(interval);
  }, []);

  if (!summary) return <div>Loading...</div>;

  return (
    <div className="dashboard">
      <h1>Pipeline Status</h1>
      <div className="metrics">
        <div className="metric">
          <h2>{summary.total_pipelines}</h2>
          <p>Total Pipelines</p>
        </div>
        <div className="metric">
          <h2>{summary.overall_success_rate.toFixed(1)}%</h2>
          <p>Success Rate</p>
        </div>
        <div className="metric">
          <h2>{summary.failed}</h2>
          <p>Failed</p>
        </div>
      </div>
    </div>
  );
}

export default PipelineDashboard;
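Note that a browser app served from a different origin can only call the API if gitlab-summary's serve mode sends CORS headers; if it does not, proxy the /api/ requests through your frontend's dev server or reverse proxy.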
Data Export
CSV Export
#!/usr/bin/env python3
import requests
import csv
from datetime import datetime

def export_to_csv():
    """Export pipeline data to CSV."""
    response = requests.get('http://localhost:5100/api/pipelines/summary')
    data = response.json()

    filename = f"pipeline-report-{datetime.now().strftime('%Y%m%d')}.csv"

    with open(filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['Project', 'Total', 'Successful', 'Failed', 'Success Rate'])
        for project in data['projects']:
            writer.writerow([
                project['project_name'],
                project['total_pipelines'],
                project['successful'],
                project['failed'],
                f"{project['success_rate']:.1f}%"
            ])

    print(f"Exported to {filename}")

if __name__ == "__main__":
    export_to_csv()
Automation Examples
Auto-restart Failed Pipelines
#!/bin/bash
# Step 1: detect failures via gitlab-summary and alert Slack (the retry step follows below)
GITLAB_SUMMARY="http://localhost:5100"
SLACK_WEBHOOK="https://hooks.slack.com/services/YOUR/WEBHOOK"

# Get failed pipeline count
FAILED=$(curl -s "$GITLAB_SUMMARY/api/pipelines/summary" | jq '.summary.failed')

if [ "$FAILED" -gt 0 ]; then
  MESSAGE="⚠️ $FAILED pipelines failed. Check dashboard at http://dashboard.company.com"
  curl -X POST "$SLACK_WEBHOOK" -H 'Content-Type: application/json' -d "{\"text\":\"$MESSAGE\"}"
fi
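To go beyond alerting and actually restart the failed pipelines, call GitLab's own REST API: listing pipelines by status and POSTing to the retry endpoint are standard GitLab API routes, but the PROJECT_ID and token below are placeholders you must supply, and this sketch covers one project per run.

#!/bin/bash
# Retry every recent failed pipeline in one project via the GitLab REST API
GITLAB_API="https://gitlab.com/api/v4"
GITLAB_TOKEN="YOUR_API_TOKEN"   # personal access token with 'api' scope
PROJECT_ID="12345"              # placeholder project ID

# List recent failed pipelines, then POST to the retry endpoint for each
for PIPELINE_ID in $(curl -s --header "PRIVATE-TOKEN: $GITLAB_TOKEN" \
    "$GITLAB_API/projects/$PROJECT_ID/pipelines?status=failed&per_page=20" | jq '.[].id'); do
  echo "Retrying pipeline $PIPELINE_ID"
  curl -s -X POST --header "PRIVATE-TOKEN: $GITLAB_TOKEN" \
    "$GITLAB_API/projects/$PROJECT_ID/pipelines/$PIPELINE_ID/retry" > /dev/null
done

Use this with care: a pipeline that fails deterministically will simply fail again, so pair retries with the failure-threshold logic shown earlier.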
See Also
- API Reference → Complete API documentation
- Dashboard Guide → Dashboard features
- CLI Reference → Command-line options