Spaces:
Running
Running
added upstash
Browse files- aiven_keep_alive_service.py +84 -31
- keep_projects_alive_service.py +39 -9
- main.py +4 -8
- mongodb_keep_alive_service.py +52 -14
- neondb_postgres_keep_alive_service.py +60 -47
- redis_keep_alive_service.py +75 -70
- supabase_keep_alive_service.py +58 -46
aiven_keep_alive_service.py
CHANGED
|
@@ -3,6 +3,9 @@ import logging
|
|
| 3 |
from dotenv import load_dotenv
|
| 4 |
import psycopg2 # For PostgreSQL
|
| 5 |
import pymysql # For MySQL
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
# Load environment variables
|
| 8 |
load_dotenv()
|
|
@@ -10,36 +13,39 @@ load_dotenv()
|
|
| 10 |
sql_uri = os.getenv("AIVEN_MYSQL_URI")
|
| 11 |
postgres_uri = os.getenv("AIVEN_POSTGRES_URI")
|
| 12 |
|
| 13 |
-
# Configure logging
|
| 14 |
logging.basicConfig(
|
| 15 |
level=logging.INFO,
|
| 16 |
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
| 17 |
-
handlers=[
|
| 18 |
-
logging.StreamHandler(),
|
| 19 |
-
logging.FileHandler('keepalive.log')
|
| 20 |
-
]
|
| 21 |
)
|
| 22 |
logger = logging.getLogger('KeepAlive')
|
| 23 |
|
| 24 |
-
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
try:
|
| 27 |
-
from urllib.parse import urlparse, parse_qs
|
| 28 |
-
|
| 29 |
-
# Parse the URI
|
| 30 |
parsed_uri = urlparse(sql_uri)
|
| 31 |
-
|
| 32 |
-
# Extract connection parameters
|
| 33 |
host = parsed_uri.hostname
|
| 34 |
-
port = parsed_uri.port or 3306
|
| 35 |
user = parsed_uri.username
|
| 36 |
password = parsed_uri.password
|
| 37 |
database = parsed_uri.path.lstrip('/') or 'defaultdb'
|
| 38 |
-
|
| 39 |
-
# Parse query parameters for SSL mode
|
| 40 |
query_params = parse_qs(parsed_uri.query)
|
| 41 |
ssl_mode = query_params.get('ssl-mode', ['REQUIRED'])[0]
|
| 42 |
-
|
| 43 |
connection = pymysql.connect(
|
| 44 |
host=host,
|
| 45 |
port=port,
|
|
@@ -50,31 +56,78 @@ def ping_mysql():
|
|
| 50 |
)
|
| 51 |
with connection.cursor() as cursor:
|
| 52 |
cursor.execute("SELECT 1")
|
| 53 |
-
|
| 54 |
-
logger.info(f"MySQL ping successful: {result}")
|
| 55 |
connection.close()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 56 |
except Exception as e:
|
| 57 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
|
| 59 |
-
|
| 60 |
-
|
|
|
|
| 61 |
try:
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
connection = psycopg2.connect(postgres_uri)
|
| 63 |
cursor = connection.cursor()
|
| 64 |
cursor.execute("SELECT 1")
|
| 65 |
-
|
| 66 |
-
logger.info(f"PostgreSQL ping successful: {result}")
|
| 67 |
cursor.close()
|
| 68 |
connection.close()
|
| 69 |
-
except Exception as e:
|
| 70 |
-
logger.error(f"PostgreSQL ping failed: {str(e)}")
|
| 71 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
|
| 73 |
|
| 74 |
-
|
| 75 |
-
|
| 76 |
logger.info("Starting Aiven service health check")
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
from dotenv import load_dotenv
|
| 4 |
import psycopg2 # For PostgreSQL
|
| 5 |
import pymysql # For MySQL
|
| 6 |
+
from urllib.parse import urlparse, parse_qs
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from pydantic import BaseModel
|
| 9 |
|
| 10 |
# Load environment variables
|
| 11 |
load_dotenv()
|
|
|
|
| 13 |
sql_uri = os.getenv("AIVEN_MYSQL_URI")
|
| 14 |
postgres_uri = os.getenv("AIVEN_POSTGRES_URI")
|
| 15 |
|
| 16 |
+
# Configure logging (console only)
|
| 17 |
logging.basicConfig(
|
| 18 |
level=logging.INFO,
|
| 19 |
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
| 20 |
+
handlers=[logging.StreamHandler()]
|
|
|
|
|
|
|
|
|
|
| 21 |
)
|
| 22 |
logger = logging.getLogger('KeepAlive')
|
| 23 |
|
| 24 |
+
|
| 25 |
+
# ---------- Pydantic Response Model ----------
|
| 26 |
+
class AivenPingResponse(BaseModel):
|
| 27 |
+
service_name: str
|
| 28 |
+
success: bool
|
| 29 |
+
error: str | None = None
|
| 30 |
+
time: str
|
| 31 |
+
host: str | None = None
|
| 32 |
+
db_type: str | None = None
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
# ---------- MySQL ----------
|
| 36 |
+
def ping_mysql() -> AivenPingResponse:
|
| 37 |
+
now = datetime.utcnow().isoformat()
|
| 38 |
try:
|
|
|
|
|
|
|
|
|
|
| 39 |
parsed_uri = urlparse(sql_uri)
|
|
|
|
|
|
|
| 40 |
host = parsed_uri.hostname
|
| 41 |
+
port = parsed_uri.port or 3306
|
| 42 |
user = parsed_uri.username
|
| 43 |
password = parsed_uri.password
|
| 44 |
database = parsed_uri.path.lstrip('/') or 'defaultdb'
|
| 45 |
+
|
|
|
|
| 46 |
query_params = parse_qs(parsed_uri.query)
|
| 47 |
ssl_mode = query_params.get('ssl-mode', ['REQUIRED'])[0]
|
| 48 |
+
|
| 49 |
connection = pymysql.connect(
|
| 50 |
host=host,
|
| 51 |
port=port,
|
|
|
|
| 56 |
)
|
| 57 |
with connection.cursor() as cursor:
|
| 58 |
cursor.execute("SELECT 1")
|
| 59 |
+
cursor.fetchone()
|
|
|
|
| 60 |
connection.close()
|
| 61 |
+
|
| 62 |
+
logger.info(f"MySQL ping successful: {host}")
|
| 63 |
+
return AivenPingResponse(
|
| 64 |
+
service_name=database,
|
| 65 |
+
success=True,
|
| 66 |
+
error=None,
|
| 67 |
+
time=now,
|
| 68 |
+
host=host,
|
| 69 |
+
db_type="mysql"
|
| 70 |
+
)
|
| 71 |
except Exception as e:
|
| 72 |
+
error_msg = str(e)
|
| 73 |
+
logger.error(f"MySQL ping failed: {error_msg}")
|
| 74 |
+
return AivenPingResponse(
|
| 75 |
+
service_name="mysql_service",
|
| 76 |
+
success=False,
|
| 77 |
+
error=error_msg,
|
| 78 |
+
time=now,
|
| 79 |
+
host=None,
|
| 80 |
+
db_type="mysql"
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
|
| 84 |
+
# ---------- PostgreSQL ----------
|
| 85 |
+
def ping_postgres() -> AivenPingResponse:
|
| 86 |
+
now = datetime.utcnow().isoformat()
|
| 87 |
try:
|
| 88 |
+
parsed_uri = urlparse(postgres_uri)
|
| 89 |
+
host = parsed_uri.hostname
|
| 90 |
+
db_name = parsed_uri.path.lstrip('/')
|
| 91 |
+
|
| 92 |
connection = psycopg2.connect(postgres_uri)
|
| 93 |
cursor = connection.cursor()
|
| 94 |
cursor.execute("SELECT 1")
|
| 95 |
+
cursor.fetchone()
|
|
|
|
| 96 |
cursor.close()
|
| 97 |
connection.close()
|
|
|
|
|
|
|
| 98 |
|
| 99 |
+
logger.info(f"PostgreSQL ping successful: {db_name}@{host}")
|
| 100 |
+
return AivenPingResponse(
|
| 101 |
+
service_name=db_name,
|
| 102 |
+
success=True,
|
| 103 |
+
error=None,
|
| 104 |
+
time=now,
|
| 105 |
+
host=host,
|
| 106 |
+
db_type="postgres"
|
| 107 |
+
)
|
| 108 |
+
except Exception as e:
|
| 109 |
+
error_msg = str(e)
|
| 110 |
+
logger.error(f"PostgreSQL ping failed: {error_msg}")
|
| 111 |
+
return AivenPingResponse(
|
| 112 |
+
service_name="postgres_service",
|
| 113 |
+
success=False,
|
| 114 |
+
error=error_msg,
|
| 115 |
+
time=now,
|
| 116 |
+
host=None,
|
| 117 |
+
db_type="postgres"
|
| 118 |
+
)
|
| 119 |
|
| 120 |
|
| 121 |
+
# ---------- Run Aiven Pings ----------
|
| 122 |
+
def ping_aiven_projects() -> list[AivenPingResponse]:
|
| 123 |
logger.info("Starting Aiven service health check")
|
| 124 |
+
results = []
|
| 125 |
+
if sql_uri:
|
| 126 |
+
results.append(ping_mysql())
|
| 127 |
+
if postgres_uri:
|
| 128 |
+
results.append(ping_postgres())
|
| 129 |
+
logger.info("Completed Aiven service health check")
|
| 130 |
+
return results
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
|
keep_projects_alive_service.py
CHANGED
|
@@ -1,5 +1,3 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
from aiven_keep_alive_service import ping_aiven_projects
|
| 4 |
from mongodb_keep_alive_service import ping_mongodb_projects
|
| 5 |
from neondb_postgres_keep_alive_service import ping_neondb_projects
|
|
@@ -9,10 +7,42 @@ from supabase_keep_alive_service import ping_all_supabase_projects
|
|
| 9 |
|
| 10 |
|
| 11 |
def ping_all_projects():
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
from aiven_keep_alive_service import ping_aiven_projects
|
| 2 |
from mongodb_keep_alive_service import ping_mongodb_projects
|
| 3 |
from neondb_postgres_keep_alive_service import ping_neondb_projects
|
|
|
|
| 7 |
|
| 8 |
|
| 9 |
def ping_all_projects():
|
| 10 |
+
all_results = []
|
| 11 |
+
|
| 12 |
+
# Each function should return either a list of Pydantic models or dicts
|
| 13 |
+
services = [
|
| 14 |
+
ping_all_supabase_projects,
|
| 15 |
+
ping_neondb_projects,
|
| 16 |
+
ping_all_pinecone_indexes,
|
| 17 |
+
ping_mongodb_projects,
|
| 18 |
+
ping_aiven_projects,
|
| 19 |
+
ping_all_redis_projects,
|
| 20 |
+
]
|
| 21 |
+
|
| 22 |
+
for service_func in services:
|
| 23 |
+
try:
|
| 24 |
+
result = service_func()
|
| 25 |
+
if isinstance(result, list):
|
| 26 |
+
# flatten list of Pydantic models
|
| 27 |
+
for r in result:
|
| 28 |
+
if hasattr(r, "model_dump"):
|
| 29 |
+
all_results.append(r.model_dump())
|
| 30 |
+
else:
|
| 31 |
+
all_results.append(r)
|
| 32 |
+
else:
|
| 33 |
+
# single Pydantic model or dict
|
| 34 |
+
if hasattr(result, "model_dump"):
|
| 35 |
+
all_results.append(result.model_dump())
|
| 36 |
+
else:
|
| 37 |
+
all_results.append(result)
|
| 38 |
+
except Exception as e:
|
| 39 |
+
all_results.append({
|
| 40 |
+
"service_name": service_func.__name__,
|
| 41 |
+
"success": False,
|
| 42 |
+
"error": str(e)
|
| 43 |
+
})
|
| 44 |
+
|
| 45 |
+
return all_results
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
|
main.py
CHANGED
|
@@ -6,6 +6,7 @@ import pandas as pd
|
|
| 6 |
from aiven_keep_alive_service import ping_aiven_projects
|
| 7 |
from cronjob_service import init_scheduler
|
| 8 |
from dotenv import load_dotenv
|
|
|
|
| 9 |
from mongodb_keep_alive_service import ping_mongodb_projects
|
| 10 |
from neondb_postgres_keep_alive_service import ping_neondb_projects
|
| 11 |
from pinecone_keep_alive_service import ping_all_pinecone_indexes
|
|
@@ -54,14 +55,9 @@ def health_check():
|
|
| 54 |
|
| 55 |
|
| 56 |
@app.get("/ping_all_projects")
|
| 57 |
-
def
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
ping_all_pinecone_indexes()
|
| 61 |
-
ping_mongodb_projects()
|
| 62 |
-
ping_aiven_projects()
|
| 63 |
-
ping_all_redis_projects()
|
| 64 |
-
return {"status": "success", "message": "All projects pinged successfully"}
|
| 65 |
|
| 66 |
|
| 67 |
|
|
|
|
| 6 |
from aiven_keep_alive_service import ping_aiven_projects
|
| 7 |
from cronjob_service import init_scheduler
|
| 8 |
from dotenv import load_dotenv
|
| 9 |
+
from keep_projects_alive_service import ping_all_projects
|
| 10 |
from mongodb_keep_alive_service import ping_mongodb_projects
|
| 11 |
from neondb_postgres_keep_alive_service import ping_neondb_projects
|
| 12 |
from pinecone_keep_alive_service import ping_all_pinecone_indexes
|
|
|
|
| 55 |
|
| 56 |
|
| 57 |
@app.get("/ping_all_projects")
|
| 58 |
+
def ping_all_projects_endpoint():
|
| 59 |
+
result = ping_all_projects()
|
| 60 |
+
return result
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
|
| 62 |
|
| 63 |
|
mongodb_keep_alive_service.py
CHANGED
|
@@ -3,14 +3,14 @@ import logging
|
|
| 3 |
from urllib.parse import parse_qs, urlparse
|
| 4 |
from dotenv import load_dotenv
|
| 5 |
from pymongo import MongoClient
|
|
|
|
|
|
|
| 6 |
|
| 7 |
# Configure logging
|
| 8 |
logging.basicConfig(
|
| 9 |
level=logging.INFO,
|
| 10 |
format='%(asctime)s - %(levelname)s - %(message)s',
|
| 11 |
-
handlers=[
|
| 12 |
-
logging.StreamHandler()
|
| 13 |
-
]
|
| 14 |
)
|
| 15 |
logger = logging.getLogger(__name__)
|
| 16 |
|
|
@@ -18,7 +18,17 @@ load_dotenv()
|
|
| 18 |
|
| 19 |
mongodb_uris = os.getenv('MONGODB_URIS')
|
| 20 |
|
| 21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
"""Extracts the appName from MongoDB URI if present."""
|
| 23 |
try:
|
| 24 |
parsed_uri = urlparse(uri)
|
|
@@ -29,27 +39,55 @@ def extract_app_name_from_uri(uri):
|
|
| 29 |
logger.debug(f"Failed to extract appName from URI: {e}")
|
| 30 |
return "MongoDB Ping Service" # Fallback if parsing fails
|
| 31 |
|
| 32 |
-
|
| 33 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
if not mongodb_uris:
|
| 35 |
logger.warning("No MongoDB URIs found in environment variables")
|
| 36 |
-
return
|
| 37 |
-
|
| 38 |
for uri in mongodb_uris.split(','):
|
| 39 |
-
uri = uri.strip()
|
| 40 |
if not uri:
|
| 41 |
continue
|
| 42 |
-
|
| 43 |
service_name = extract_app_name_from_uri(uri)
|
|
|
|
|
|
|
| 44 |
try:
|
| 45 |
-
client = MongoClient(uri)
|
| 46 |
client.admin.command('ping')
|
| 47 |
-
logger.info(f"{service_name}: Successfully pinged MongoDB at {uri[:50]}...")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 48 |
client.close()
|
| 49 |
except Exception as e:
|
| 50 |
logger.error(f"{service_name}: Error pinging MongoDB at {uri[:50]}... - {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 51 |
|
| 52 |
def ping_mongodb_projects():
|
| 53 |
logger.info("Starting MongoDB ping checks...")
|
| 54 |
-
ping_mongodb()
|
| 55 |
-
logger.info("Completed MongoDB ping checks.")
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
from urllib.parse import parse_qs, urlparse
|
| 4 |
from dotenv import load_dotenv
|
| 5 |
from pymongo import MongoClient
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
from pydantic import BaseModel
|
| 8 |
|
| 9 |
# Configure logging
|
| 10 |
logging.basicConfig(
|
| 11 |
level=logging.INFO,
|
| 12 |
format='%(asctime)s - %(levelname)s - %(message)s',
|
| 13 |
+
handlers=[logging.StreamHandler()]
|
|
|
|
|
|
|
| 14 |
)
|
| 15 |
logger = logging.getLogger(__name__)
|
| 16 |
|
|
|
|
| 18 |
|
| 19 |
mongodb_uris = os.getenv('MONGODB_URIS')
|
| 20 |
|
| 21 |
+
|
| 22 |
+
# ---------- Pydantic Response Model ----------
|
| 23 |
+
class MongoPingResponse(BaseModel):
|
| 24 |
+
service_name: str
|
| 25 |
+
success: bool
|
| 26 |
+
error: str | None = None
|
| 27 |
+
time: str
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# ---------- Helpers ----------
|
| 31 |
+
def extract_app_name_from_uri(uri: str) -> str:
|
| 32 |
"""Extracts the appName from MongoDB URI if present."""
|
| 33 |
try:
|
| 34 |
parsed_uri = urlparse(uri)
|
|
|
|
| 39 |
logger.debug(f"Failed to extract appName from URI: {e}")
|
| 40 |
return "MongoDB Ping Service" # Fallback if parsing fails
|
| 41 |
|
| 42 |
+
|
| 43 |
+
# ---------- MongoDB Ping ----------
|
| 44 |
+
def ping_mongodb() -> list[MongoPingResponse]:
|
| 45 |
+
"""Ping MongoDB and return structured responses."""
|
| 46 |
+
responses: list[MongoPingResponse] = []
|
| 47 |
+
|
| 48 |
if not mongodb_uris:
|
| 49 |
logger.warning("No MongoDB URIs found in environment variables")
|
| 50 |
+
return responses
|
| 51 |
+
|
| 52 |
for uri in mongodb_uris.split(','):
|
| 53 |
+
uri = uri.strip()
|
| 54 |
if not uri:
|
| 55 |
continue
|
| 56 |
+
|
| 57 |
service_name = extract_app_name_from_uri(uri)
|
| 58 |
+
now = datetime.utcnow().isoformat()
|
| 59 |
+
|
| 60 |
try:
|
| 61 |
+
client = MongoClient(uri, serverSelectionTimeoutMS=3000) # 3s timeout
|
| 62 |
client.admin.command('ping')
|
| 63 |
+
logger.info(f"{service_name}: Successfully pinged MongoDB at {uri[:50]}...")
|
| 64 |
+
responses.append(
|
| 65 |
+
MongoPingResponse(
|
| 66 |
+
service_name=service_name,
|
| 67 |
+
success=True,
|
| 68 |
+
error=None,
|
| 69 |
+
time=now
|
| 70 |
+
)
|
| 71 |
+
)
|
| 72 |
client.close()
|
| 73 |
except Exception as e:
|
| 74 |
logger.error(f"{service_name}: Error pinging MongoDB at {uri[:50]}... - {str(e)}")
|
| 75 |
+
responses.append(
|
| 76 |
+
MongoPingResponse(
|
| 77 |
+
service_name=service_name,
|
| 78 |
+
success=False,
|
| 79 |
+
error=str(e),
|
| 80 |
+
time=now
|
| 81 |
+
)
|
| 82 |
+
)
|
| 83 |
+
|
| 84 |
+
return responses
|
| 85 |
+
|
| 86 |
|
| 87 |
def ping_mongodb_projects():
|
| 88 |
logger.info("Starting MongoDB ping checks...")
|
| 89 |
+
results = ping_mongodb()
|
| 90 |
+
logger.info("Completed MongoDB ping checks.")
|
| 91 |
+
return results
|
| 92 |
+
|
| 93 |
+
|
neondb_postgres_keep_alive_service.py
CHANGED
|
@@ -4,8 +4,10 @@ from dotenv import load_dotenv
|
|
| 4 |
import psycopg2
|
| 5 |
from psycopg2 import OperationalError
|
| 6 |
from urllib.parse import urlparse
|
|
|
|
|
|
|
| 7 |
|
| 8 |
-
# Configure logging
|
| 9 |
logging.basicConfig(
|
| 10 |
level=logging.INFO,
|
| 11 |
format='%(asctime)s - %(levelname)s - %(message)s'
|
|
@@ -16,68 +18,79 @@ load_dotenv()
|
|
| 16 |
|
| 17 |
NEONDB_POSTGRES_URIS = os.getenv('NEONDB_POSTGRES_URIS', '').split(',')
|
| 18 |
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
for uri in filter(None, (u.strip() for u in NEONDB_POSTGRES_URIS)):
|
|
|
|
| 27 |
try:
|
| 28 |
-
# Parse the URI to extract database name
|
| 29 |
parsed_uri = urlparse(uri)
|
| 30 |
db_name = parsed_uri.path[1:] # Remove the leading '/'
|
| 31 |
host = parsed_uri.hostname
|
| 32 |
-
|
| 33 |
-
# Connect to the database
|
| 34 |
conn = psycopg2.connect(uri)
|
| 35 |
cursor = conn.cursor()
|
| 36 |
-
|
| 37 |
-
# Execute a simple query
|
| 38 |
cursor.execute("SELECT 1")
|
| 39 |
cursor.fetchone()
|
| 40 |
-
|
| 41 |
-
# Close the connection
|
| 42 |
cursor.close()
|
| 43 |
conn.close()
|
| 44 |
-
|
| 45 |
-
results[db_name] = {
|
| 46 |
-
'host': host,
|
| 47 |
-
'status': 'Ping successful',
|
| 48 |
-
'details': f"Successfully connected to database '{db_name}' on {host}"
|
| 49 |
-
}
|
| 50 |
logger.info(f"Successfully connected to database '{db_name}' on {host}")
|
| 51 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
except OperationalError as e:
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
except Exception as e:
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 68 |
|
| 69 |
|
| 70 |
-
|
| 71 |
-
def ping_neondb_projects():
|
| 72 |
logger.info("Starting NeonDB ping checks...")
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
for db_name, result in ping_results.items():
|
| 76 |
-
logger.info(f"Database: {db_name}")
|
| 77 |
-
logger.info(f"Host: {result['host']}")
|
| 78 |
-
logger.info(f"Status: {result['status']}")
|
| 79 |
-
logger.info(f"Details: {result['details']}")
|
| 80 |
-
logger.info("-" * 40)
|
| 81 |
-
|
| 82 |
logger.info("Completed NeonDB ping checks.")
|
|
|
|
|
|
|
|
|
|
| 83 |
|
|
|
|
| 4 |
import psycopg2
|
| 5 |
from psycopg2 import OperationalError
|
| 6 |
from urllib.parse import urlparse
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from pydantic import BaseModel
|
| 9 |
|
| 10 |
+
# Configure logging (console only)
|
| 11 |
logging.basicConfig(
|
| 12 |
level=logging.INFO,
|
| 13 |
format='%(asctime)s - %(levelname)s - %(message)s'
|
|
|
|
| 18 |
|
| 19 |
NEONDB_POSTGRES_URIS = os.getenv('NEONDB_POSTGRES_URIS', '').split(',')
|
| 20 |
|
| 21 |
+
|
| 22 |
+
# ---------- Pydantic Response Model ----------
|
| 23 |
+
class NeonDBPingResponse(BaseModel):
|
| 24 |
+
service_name: str
|
| 25 |
+
success: bool
|
| 26 |
+
error: str | None = None
|
| 27 |
+
time: str
|
| 28 |
+
host: str | None = None
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# ---------- NeonDB Ping ----------
|
| 32 |
+
def ping_neondb() -> list[NeonDBPingResponse]:
|
| 33 |
+
"""Ping all NeonDB instances and return structured responses."""
|
| 34 |
+
responses: list[NeonDBPingResponse] = []
|
| 35 |
+
|
| 36 |
for uri in filter(None, (u.strip() for u in NEONDB_POSTGRES_URIS)):
|
| 37 |
+
now = datetime.utcnow().isoformat()
|
| 38 |
try:
|
|
|
|
| 39 |
parsed_uri = urlparse(uri)
|
| 40 |
db_name = parsed_uri.path[1:] # Remove the leading '/'
|
| 41 |
host = parsed_uri.hostname
|
| 42 |
+
|
|
|
|
| 43 |
conn = psycopg2.connect(uri)
|
| 44 |
cursor = conn.cursor()
|
|
|
|
|
|
|
| 45 |
cursor.execute("SELECT 1")
|
| 46 |
cursor.fetchone()
|
|
|
|
|
|
|
| 47 |
cursor.close()
|
| 48 |
conn.close()
|
| 49 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 50 |
logger.info(f"Successfully connected to database '{db_name}' on {host}")
|
| 51 |
+
responses.append(
|
| 52 |
+
NeonDBPingResponse(
|
| 53 |
+
service_name=db_name,
|
| 54 |
+
success=True,
|
| 55 |
+
error=None,
|
| 56 |
+
time=now,
|
| 57 |
+
host=host
|
| 58 |
+
)
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
except OperationalError as e:
|
| 62 |
+
error_msg = f"Connection failed: {str(e)}"
|
| 63 |
+
logger.error(f"Connection failed to database '{db_name}' on {host}: {error_msg}")
|
| 64 |
+
responses.append(
|
| 65 |
+
NeonDBPingResponse(
|
| 66 |
+
service_name=db_name,
|
| 67 |
+
success=False,
|
| 68 |
+
error=error_msg,
|
| 69 |
+
time=now,
|
| 70 |
+
host=host
|
| 71 |
+
)
|
| 72 |
+
)
|
| 73 |
except Exception as e:
|
| 74 |
+
error_msg = str(e)
|
| 75 |
+
logger.error(f"Error connecting to database '{db_name}' on {host}: {error_msg}")
|
| 76 |
+
responses.append(
|
| 77 |
+
NeonDBPingResponse(
|
| 78 |
+
service_name=db_name,
|
| 79 |
+
success=False,
|
| 80 |
+
error=error_msg,
|
| 81 |
+
time=now,
|
| 82 |
+
host=host
|
| 83 |
+
)
|
| 84 |
+
)
|
| 85 |
+
|
| 86 |
+
return responses
|
| 87 |
|
| 88 |
|
| 89 |
+
def ping_neondb_projects() -> list[NeonDBPingResponse]:
|
|
|
|
| 90 |
logger.info("Starting NeonDB ping checks...")
|
| 91 |
+
results = ping_neondb()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 92 |
logger.info("Completed NeonDB ping checks.")
|
| 93 |
+
return results
|
| 94 |
+
|
| 95 |
+
|
| 96 |
|
redis_keep_alive_service.py
CHANGED
|
@@ -5,12 +5,13 @@ import redis
|
|
| 5 |
import logging
|
| 6 |
from datetime import datetime
|
| 7 |
import uuid
|
|
|
|
| 8 |
|
| 9 |
-
# Logging
|
| 10 |
logging.basicConfig(
|
| 11 |
level=logging.INFO,
|
| 12 |
format='%(asctime)s - %(levelname)s - %(message)s',
|
| 13 |
-
handlers=[logging.
|
| 14 |
)
|
| 15 |
logger = logging.getLogger(__name__)
|
| 16 |
|
|
@@ -41,8 +42,19 @@ REDIS_SERVICES = {
|
|
| 41 |
}
|
| 42 |
|
| 43 |
|
| 44 |
-
|
| 45 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 46 |
try:
|
| 47 |
start_time = datetime.now()
|
| 48 |
r = redis.Redis(
|
|
@@ -51,99 +63,92 @@ def ping_redis(redis_config):
|
|
| 51 |
password=redis_config["password"],
|
| 52 |
ssl=True if redis_config.get("ssl") else False,
|
| 53 |
)
|
|
|
|
| 54 |
# PING
|
| 55 |
-
|
| 56 |
latency = (datetime.now() - start_time).total_seconds() * 1000
|
| 57 |
-
logger.info(
|
| 58 |
-
f"PING {redis_config['host']}:{redis_config['port']} "
|
| 59 |
-
f"- Response: {response}, Latency: {latency:.2f}ms"
|
| 60 |
-
)
|
| 61 |
|
| 62 |
-
#
|
| 63 |
key = f"healthcheck:{uuid.uuid4()}"
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
set_start = datetime.now()
|
| 67 |
-
r.set(key, value)
|
| 68 |
-
set_latency = (datetime.now() - set_start).total_seconds() * 1000
|
| 69 |
-
|
| 70 |
-
get_start = datetime.now()
|
| 71 |
got = r.get(key)
|
| 72 |
-
get_latency = (datetime.now() - get_start).total_seconds() * 1000
|
| 73 |
|
| 74 |
-
logger.info(
|
| 75 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 76 |
)
|
| 77 |
-
|
| 78 |
-
return True
|
| 79 |
except Exception as e:
|
| 80 |
-
|
| 81 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
|
| 83 |
|
| 84 |
-
|
| 85 |
-
|
|
|
|
| 86 |
try:
|
| 87 |
headers = {"Authorization": f"Bearer {redis_config['rest_token']}"}
|
| 88 |
|
| 89 |
# PING
|
| 90 |
start_time = datetime.now()
|
| 91 |
-
|
| 92 |
-
resp = requests.get(url, headers=headers)
|
| 93 |
latency = (datetime.now() - start_time).total_seconds() * 1000
|
| 94 |
|
| 95 |
-
if resp.status_code =
|
| 96 |
-
|
| 97 |
-
f"PING {redis_config['rest_url']} "
|
| 98 |
-
f"- Response: {resp.text.strip()}, Latency: {latency:.2f}ms"
|
| 99 |
-
)
|
| 100 |
-
else:
|
| 101 |
-
logger.error(f"Unexpected Upstash PING response: {resp.status_code} - {resp.text}")
|
| 102 |
-
return False
|
| 103 |
|
| 104 |
-
#
|
| 105 |
key = f"healthcheck:{uuid.uuid4()}"
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
# SET
|
| 109 |
-
set_start = datetime.now()
|
| 110 |
-
set_resp = requests.post(
|
| 111 |
-
f"{redis_config['rest_url']}/set/{key}/{value}",
|
| 112 |
-
headers=headers,
|
| 113 |
-
)
|
| 114 |
-
set_latency = (datetime.now() - set_start).total_seconds() * 1000
|
| 115 |
-
|
| 116 |
-
# GET
|
| 117 |
-
get_start = datetime.now()
|
| 118 |
get_resp = requests.get(f"{redis_config['rest_url']}/get/{key}", headers=headers)
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
return True
|
| 131 |
except Exception as e:
|
| 132 |
-
|
| 133 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 134 |
|
| 135 |
|
| 136 |
-
|
|
|
|
| 137 |
logger.info("Starting Redis service health check")
|
| 138 |
-
results =
|
| 139 |
|
| 140 |
for service_name, config in REDIS_SERVICES.items():
|
|
|
|
|
|
|
| 141 |
if "rest_url" in config: # Upstash
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
logger.info(f"Testing {service_name} at {config['host']}:{config['port']}")
|
| 146 |
-
results[service_name] = ping_redis(config)
|
| 147 |
|
| 148 |
logger.info("Completed all Redis service checks")
|
| 149 |
return results
|
|
|
|
| 5 |
import logging
|
| 6 |
from datetime import datetime
|
| 7 |
import uuid
|
| 8 |
+
from pydantic import BaseModel
|
| 9 |
|
| 10 |
+
# Logging (console only)
|
| 11 |
logging.basicConfig(
|
| 12 |
level=logging.INFO,
|
| 13 |
format='%(asctime)s - %(levelname)s - %(message)s',
|
| 14 |
+
handlers=[logging.StreamHandler()]
|
| 15 |
)
|
| 16 |
logger = logging.getLogger(__name__)
|
| 17 |
|
|
|
|
| 42 |
}
|
| 43 |
|
| 44 |
|
| 45 |
+
# ---------- Pydantic Response ----------
|
| 46 |
+
class RedisPingResponse(BaseModel):
|
| 47 |
+
service_name: str
|
| 48 |
+
success: bool
|
| 49 |
+
error: str | None = None
|
| 50 |
+
time: str
|
| 51 |
+
latency_ms: float | None = None
|
| 52 |
+
type: str # "self-managed" or "upstash"
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# ---------- Self-managed Redis ----------
|
| 56 |
+
def ping_redis(service_name: str, redis_config: dict) -> RedisPingResponse:
|
| 57 |
+
now = datetime.utcnow().isoformat()
|
| 58 |
try:
|
| 59 |
start_time = datetime.now()
|
| 60 |
r = redis.Redis(
|
|
|
|
| 63 |
password=redis_config["password"],
|
| 64 |
ssl=True if redis_config.get("ssl") else False,
|
| 65 |
)
|
| 66 |
+
|
| 67 |
# PING
|
| 68 |
+
r.ping()
|
| 69 |
latency = (datetime.now() - start_time).total_seconds() * 1000
|
|
|
|
|
|
|
|
|
|
|
|
|
| 70 |
|
| 71 |
+
# Simple SET/GET check
|
| 72 |
key = f"healthcheck:{uuid.uuid4()}"
|
| 73 |
+
r.set(key, "ok")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 74 |
got = r.get(key)
|
|
|
|
| 75 |
|
| 76 |
+
logger.info(f"{service_name} PING OK, latency {latency:.2f}ms, value={got.decode()}")
|
| 77 |
+
return RedisPingResponse(
|
| 78 |
+
service_name=service_name,
|
| 79 |
+
success=True,
|
| 80 |
+
error=None,
|
| 81 |
+
time=now,
|
| 82 |
+
latency_ms=latency,
|
| 83 |
+
type="self-managed"
|
| 84 |
)
|
|
|
|
|
|
|
| 85 |
except Exception as e:
|
| 86 |
+
error_msg = str(e)
|
| 87 |
+
logger.error(f"{service_name} failed: {error_msg}")
|
| 88 |
+
return RedisPingResponse(
|
| 89 |
+
service_name=service_name,
|
| 90 |
+
success=False,
|
| 91 |
+
error=error_msg,
|
| 92 |
+
time=now,
|
| 93 |
+
latency_ms=None,
|
| 94 |
+
type="self-managed"
|
| 95 |
+
)
|
| 96 |
|
| 97 |
|
| 98 |
+
# ---------- Upstash Redis ----------
|
| 99 |
+
def ping_upstash(service_name: str, redis_config: dict) -> RedisPingResponse:
|
| 100 |
+
now = datetime.utcnow().isoformat()
|
| 101 |
try:
|
| 102 |
headers = {"Authorization": f"Bearer {redis_config['rest_token']}"}
|
| 103 |
|
| 104 |
# PING
|
| 105 |
start_time = datetime.now()
|
| 106 |
+
resp = requests.get(f"{redis_config['rest_url']}/ping", headers=headers)
|
|
|
|
| 107 |
latency = (datetime.now() - start_time).total_seconds() * 1000
|
| 108 |
|
| 109 |
+
if resp.status_code != 200 or resp.text.strip().lower() != "pong":
|
| 110 |
+
raise Exception(f"Unexpected response: {resp.status_code} - {resp.text}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 111 |
|
| 112 |
+
# SET/GET
|
| 113 |
key = f"healthcheck:{uuid.uuid4()}"
|
| 114 |
+
requests.post(f"{redis_config['rest_url']}/set/{key}/ok", headers=headers)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 115 |
get_resp = requests.get(f"{redis_config['rest_url']}/get/{key}", headers=headers)
|
| 116 |
+
got_value = get_resp.json().get("result") if get_resp.status_code == 200 else None
|
| 117 |
+
|
| 118 |
+
logger.info(f"{service_name} PING OK, latency {latency:.2f}ms, value={got_value}")
|
| 119 |
+
return RedisPingResponse(
|
| 120 |
+
service_name=service_name,
|
| 121 |
+
success=True,
|
| 122 |
+
error=None,
|
| 123 |
+
time=now,
|
| 124 |
+
latency_ms=latency,
|
| 125 |
+
type="upstash"
|
| 126 |
+
)
|
|
|
|
| 127 |
except Exception as e:
|
| 128 |
+
error_msg = str(e)
|
| 129 |
+
logger.error(f"{service_name} failed: {error_msg}")
|
| 130 |
+
return RedisPingResponse(
|
| 131 |
+
service_name=service_name,
|
| 132 |
+
success=False,
|
| 133 |
+
error=error_msg,
|
| 134 |
+
time=now,
|
| 135 |
+
latency_ms=None,
|
| 136 |
+
type="upstash"
|
| 137 |
+
)
|
| 138 |
|
| 139 |
|
| 140 |
+
# ---------- Run All ----------
|
| 141 |
+
def ping_all_redis_projects() -> list[RedisPingResponse]:
|
| 142 |
logger.info("Starting Redis service health check")
|
| 143 |
+
results: list[RedisPingResponse] = []
|
| 144 |
|
| 145 |
for service_name, config in REDIS_SERVICES.items():
|
| 146 |
+
if not config or not any(config.values()):
|
| 147 |
+
continue # skip empty configs
|
| 148 |
if "rest_url" in config: # Upstash
|
| 149 |
+
results.append(ping_upstash(service_name, config))
|
| 150 |
+
else: # Self-managed
|
| 151 |
+
results.append(ping_redis(service_name, config))
|
|
|
|
|
|
|
| 152 |
|
| 153 |
logger.info("Completed all Redis service checks")
|
| 154 |
return results
|
supabase_keep_alive_service.py
CHANGED
|
@@ -4,6 +4,7 @@ import os
|
|
| 4 |
from dotenv import load_dotenv
|
| 5 |
from uuid import uuid4
|
| 6 |
from datetime import datetime
|
|
|
|
| 7 |
|
| 8 |
# Configure logging
|
| 9 |
logging.basicConfig(
|
|
@@ -11,10 +12,11 @@ logging.basicConfig(
|
|
| 11 |
format='%(asctime)s - %(levelname)s - %(message)s',
|
| 12 |
handlers=[
|
| 13 |
logging.StreamHandler(), # Output to console
|
| 14 |
-
logging.FileHandler('supabase_ping.log') # Output to file
|
| 15 |
]
|
| 16 |
)
|
| 17 |
|
|
|
|
|
|
|
| 18 |
load_dotenv()
|
| 19 |
|
| 20 |
SUPABASE_PROJECTS = {
|
|
@@ -50,89 +52,99 @@ SUPABASE_PROJECTS = {
|
|
| 50 |
}
|
| 51 |
}
|
| 52 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 53 |
def initiate_crud_operation(client: Client, table_name: str, service_name: str) -> bool:
|
| 54 |
"""
|
| 55 |
Perform full CRUD operations on the ping_test table.
|
| 56 |
-
|
| 57 |
-
Args:
|
| 58 |
-
client: Supabase client instance
|
| 59 |
-
table_name: Name of the table to perform operations on
|
| 60 |
-
service_name: Name of the service for logging purposes
|
| 61 |
-
|
| 62 |
-
Returns:
|
| 63 |
-
bool: True if all operations succeeded, False otherwise
|
| 64 |
"""
|
| 65 |
try:
|
| 66 |
-
# Generate a unique ID for this test record
|
| 67 |
test_id = str(uuid4())
|
| 68 |
test_service_name = f"{service_name}_crud_test"
|
| 69 |
-
|
| 70 |
-
# CREATE
|
| 71 |
-
create_data = {
|
| 72 |
-
"id": test_id,
|
| 73 |
-
"service_name": test_service_name
|
| 74 |
-
}
|
| 75 |
create_response = client.table(table_name).insert(create_data).execute()
|
| 76 |
if not create_response.data:
|
| 77 |
-
|
| 78 |
return False
|
| 79 |
-
|
| 80 |
-
# READ
|
| 81 |
read_response = client.table(table_name).select("*").eq("id", test_id).execute()
|
| 82 |
if not read_response.data or read_response.data[0]["id"] != test_id:
|
| 83 |
-
|
| 84 |
return False
|
| 85 |
-
|
| 86 |
-
# UPDATE
|
| 87 |
new_service_name = f"{test_service_name}_updated"
|
| 88 |
update_response = client.table(table_name).update({"service_name": new_service_name}).eq("id", test_id).execute()
|
| 89 |
if not update_response.data or update_response.data[0]["service_name"] != new_service_name:
|
| 90 |
-
|
| 91 |
return False
|
| 92 |
-
|
| 93 |
-
# DELETE
|
| 94 |
delete_response = client.table(table_name).delete().eq("id", test_id).execute()
|
| 95 |
if not delete_response.data:
|
| 96 |
-
|
| 97 |
return False
|
| 98 |
-
|
| 99 |
# Verify deletion
|
| 100 |
verify_response = client.table(table_name).select("*").eq("id", test_id).execute()
|
| 101 |
if verify_response.data:
|
| 102 |
-
|
| 103 |
return False
|
| 104 |
-
|
| 105 |
logging.info(f"CRUD operations completed successfully for {service_name}")
|
| 106 |
return True
|
| 107 |
-
|
| 108 |
except Exception as e:
|
| 109 |
-
|
| 110 |
return False
|
| 111 |
|
| 112 |
-
|
| 113 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 114 |
if service_name not in SUPABASE_PROJECTS:
|
| 115 |
-
|
| 116 |
-
|
|
|
|
| 117 |
|
| 118 |
config = SUPABASE_PROJECTS[service_name]
|
| 119 |
supabase: Client = create_client(config["url"], config["anon_key"])
|
| 120 |
|
| 121 |
try:
|
| 122 |
-
# Perform full CRUD operations instead of simple query
|
| 123 |
success = initiate_crud_operation(supabase, "ping_test", service_name)
|
| 124 |
if not success:
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
|
|
|
| 128 |
logging.info(f"Pinged {service_name} successfully with full CRUD operations.")
|
| 129 |
-
return True
|
|
|
|
| 130 |
except Exception as e:
|
| 131 |
-
|
| 132 |
-
|
|
|
|
|
|
|
| 133 |
|
| 134 |
-
def ping_all_supabase_projects():
    """Iterate over every configured Supabase project and ping each one."""
    for name in SUPABASE_PROJECTS:
        ping_supabase(name)
|
| 138 |
-
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
from dotenv import load_dotenv
|
| 5 |
from uuid import uuid4
|
| 6 |
from datetime import datetime
|
| 7 |
+
from pydantic import BaseModel
|
| 8 |
|
| 9 |
# Configure logging
|
| 10 |
logging.basicConfig(
|
|
|
|
| 12 |
format='%(asctime)s - %(levelname)s - %(message)s',
|
| 13 |
handlers=[
|
| 14 |
logging.StreamHandler(), # Output to console
|
|
|
|
| 15 |
]
|
| 16 |
)
|
| 17 |
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
load_dotenv()
|
| 21 |
|
| 22 |
SUPABASE_PROJECTS = {
|
|
|
|
| 52 |
}
|
| 53 |
}
|
| 54 |
|
| 55 |
+
|
| 56 |
+
# ---------- Pydantic Response Model ----------
|
| 57 |
+
class SupabasePingResponse(BaseModel):
    """Structured result of a single Supabase ping attempt."""

    # Name of the pinged project (a key in SUPABASE_PROJECTS).
    service_name: str
    # True when the full CRUD cycle completed without error.
    success: bool
    # Human-readable failure description; None on success.
    error: str | None = None
    # ISO-8601 timestamp (UTC) recorded when the ping was made.
    time: str
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# ---------- CRUD Operations ----------
|
| 65 |
def initiate_crud_operation(client: Client, table_name: str, service_name: str) -> bool:
    """
    Perform a full CRUD cycle (create, read, update, delete, verify) on the
    ping_test table to keep the Supabase project active.

    Args:
        client: Connected Supabase client for the target project.
        table_name: Table to exercise (expected to be "ping_test").
        service_name: Project name, used in test data and log messages.

    Returns:
        True when every step succeeded, False on the first failure.
        Never raises: unexpected exceptions are logged and reported as False.
    """
    try:
        # Unique row id so concurrent pings never collide.
        test_id = str(uuid4())
        test_service_name = f"{service_name}_crud_test"

        # CREATE
        create_data = {"id": test_id, "service_name": test_service_name}
        create_response = client.table(table_name).insert(create_data).execute()
        if not create_response.data:
            logger.error(f"CREATE failed for {service_name}")
            return False

        # READ
        read_response = client.table(table_name).select("*").eq("id", test_id).execute()
        if not read_response.data or read_response.data[0]["id"] != test_id:
            logger.error(f"READ failed for {service_name}")
            return False

        # UPDATE
        new_service_name = f"{test_service_name}_updated"
        update_response = client.table(table_name).update({"service_name": new_service_name}).eq("id", test_id).execute()
        if not update_response.data or update_response.data[0]["service_name"] != new_service_name:
            logger.error(f"UPDATE failed for {service_name}")
            return False

        # DELETE
        delete_response = client.table(table_name).delete().eq("id", test_id).execute()
        if not delete_response.data:
            logger.error(f"DELETE failed for {service_name}")
            return False

        # Verify deletion: a follow-up SELECT must return no rows.
        verify_response = client.table(table_name).select("*").eq("id", test_id).execute()
        if verify_response.data:
            logger.error(f"DELETE verification failed for {service_name}")
            return False

        # Use the module-level logger (not the root logger) so this message is
        # handled consistently with the error paths above.
        logger.info(f"CRUD operations completed successfully for {service_name}")
        return True

    except Exception as e:
        logger.error(f"CRUD operation failed for {service_name}: {str(e)}", exc_info=True)
        return False
|
| 111 |
|
| 112 |
+
|
| 113 |
+
# ---------- Ping Supabase ----------
|
| 114 |
+
def ping_supabase(service_name: str) -> SupabasePingResponse:
    """Ping a Supabase project with a full CRUD cycle and return a structured response.

    Args:
        service_name: Key into SUPABASE_PROJECTS identifying the project.

    Returns:
        SupabasePingResponse with success flag, optional error message, and a
        UTC ISO timestamp. Never raises: all failures are captured in the
        response object.
    """
    now = datetime.utcnow().isoformat()

    if service_name not in SUPABASE_PROJECTS:
        error_msg = f"Service '{service_name}' not found in config."
        logger.error(error_msg)
        return SupabasePingResponse(service_name=service_name, success=False, error=error_msg, time=now)

    config = SUPABASE_PROJECTS[service_name]

    try:
        # Client creation is inside the try so connection/configuration errors
        # are reported as a structured failure instead of propagating.
        supabase: Client = create_client(config["url"], config["anon_key"])

        success = initiate_crud_operation(supabase, "ping_test", service_name)
        if not success:
            error_msg = f"CRUD operations failed for {service_name}"
            logger.error(error_msg)
            return SupabasePingResponse(service_name=service_name, success=False, error=error_msg, time=now)

        # Use the module-level logger for consistency with the error paths above.
        logger.info(f"Pinged {service_name} successfully with full CRUD operations.")
        return SupabasePingResponse(service_name=service_name, success=True, error=None, time=now)

    except Exception as e:
        error_msg = f"Failed to ping {service_name}: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return SupabasePingResponse(service_name=service_name, success=False, error=error_msg, time=now)
|
| 140 |
+
|
| 141 |
|
| 142 |
+
def ping_all_supabase_projects() -> list[SupabasePingResponse]:
    """Ping every configured Supabase project and collect the structured results."""
    return [ping_supabase(name) for name in SUPABASE_PROJECTS]
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
|