coolify/app/Jobs/PushServerUpdateJob.php

546 lines
22 KiB
PHP
Raw Normal View History

2024-10-14 10:07:37 +00:00
<?php
namespace App\Jobs;
2024-10-14 15:54:29 +00:00
use App\Actions\Database\StartDatabaseProxy;
use App\Actions\Database\StopDatabaseProxy;
2024-10-15 11:39:19 +00:00
use App\Actions\Proxy\CheckProxy;
2024-10-14 11:32:36 +00:00
use App\Actions\Proxy\StartProxy;
2024-10-30 13:54:27 +00:00
use App\Actions\Server\StartLogDrain;
2024-10-14 15:54:29 +00:00
use App\Actions\Shared\ComplexStatusCheck;
2024-10-14 11:32:36 +00:00
use App\Models\Application;
2024-10-14 10:07:37 +00:00
use App\Models\Server;
2024-10-14 15:54:29 +00:00
use App\Models\ServiceApplication;
use App\Models\ServiceDatabase;
use App\Notifications\Container\ContainerRestarted;
2024-10-14 10:07:37 +00:00
use Illuminate\Bus\Queueable;
use Illuminate\Contracts\Queue\ShouldBeEncrypted;
2024-10-14 10:07:37 +00:00
use Illuminate\Contracts\Queue\ShouldQueue;
use Illuminate\Foundation\Bus\Dispatchable;
use Illuminate\Queue\InteractsWithQueue;
use Illuminate\Queue\Middleware\WithoutOverlapping;
2024-10-14 10:07:37 +00:00
use Illuminate\Queue\SerializesModels;
2024-10-14 11:32:36 +00:00
use Illuminate\Support\Collection;
debug: add comprehensive status change logging Added detailed debug logging to all status update paths to help diagnose why "unhealthy" status appears in the UI. ## Logging Added ### 1. PushServerUpdateJob (Sentinel updates) **Location**: Lines 303-315 **Logs**: Status changes from Sentinel push updates **Data tracked**: - Old vs new status - Container statuses that led to aggregation - Status flags (hasRunning, hasUnhealthy, hasUnknown) ### 2. GetContainersStatus (SSH updates) **Location**: Lines 441-449, 346-354, 358-365 **Logs**: Status changes from SSH-based checks **Scenarios**: - Normal status aggregation - Recently restarted containers (kept as degraded) - Applications not running (set to exited) **Data tracked**: - Old vs new status - Container statuses - Restart count and timing - Whether containers exist ### 3. Application Model Status Accessor **Location**: Lines 706-712, 726-732 **Logs**: When status is set without explicit health information **Issue**: Highlights cases where health defaults to "unhealthy" **Data tracked**: - Raw value passed to setter - Final result after default applied ## How to Use ### Enable Debug Logging Edit `.env` or `config/logging.php` to set log level to debug: ``` LOG_LEVEL=debug ``` ### Monitor Logs ```bash tail -f storage/logs/laravel.log | grep STATUS-DEBUG ``` ### Log Format All logs use `[STATUS-DEBUG]` prefix for easy filtering: ``` [2025-11-19 13:00:00] local.DEBUG: [STATUS-DEBUG] Sentinel status change { "source": "PushServerUpdateJob", "app_id": 123, "app_name": "my-app", "old_status": "running:unknown", "new_status": "running:healthy", "container_statuses": [...], "flags": {...} } ``` ## What to Look For 1. **Default to unhealthy**: Check Application model accessor logs 2. **Status flipping**: Compare timestamps between Sentinel and SSH updates 3. **Incorrect aggregation**: Check flags and container_statuses 4. 
**Stale database values**: Check if old_status persists across multiple logs ## Next Steps After gathering logs, we can: 1. Identify the exact source of "unhealthy" status 2. Determine if it's a default issue, aggregation bug, or timing problem 3. Apply targeted fix based on evidence 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-19 12:52:08 +00:00
use Illuminate\Support\Facades\Log;
2025-07-12 12:44:32 +00:00
use Laravel\Horizon\Contracts\Silenced;
2024-10-14 10:07:37 +00:00
2025-07-12 12:44:32 +00:00
class PushServerUpdateJob implements ShouldBeEncrypted, ShouldQueue, Silenced
{
    use Dispatchable, InteractsWithQueue, Queueable, SerializesModels;

    // Never retried: the next Sentinel push supersedes a failed run anyway.
    public $tries = 1;

    public $timeout = 30;

    // Raw container list received from Sentinel for this server.
    public Collection $containers;

    // Resources Coolify expects on this server (loaded in handle()).
    public Collection $applications;

    public Collection $previews;

    public Collection $databases;

    public Collection $services;

    // "Expected" id/uuid sets, derived from the loaded resources.
    public Collection $allApplicationIds;

    public Collection $allDatabaseUuids;

    // Public databases whose TCP proxy container must also be running.
    public Collection $allTcpProxyUuids;

    public Collection $allServiceApplicationIds;

    // Keys have the shape "application_id:pull_request_id".
    public Collection $allApplicationPreviewsIds;

    public Collection $allServiceDatabaseIds;

    // Multi-server applications; checked via ComplexStatusCheck instead.
    public Collection $allApplicationsWithAdditionalServers;

    // "Seen running in this push" sets, diffed against the expected sets
    // to detect resources that disappeared.
    public Collection $foundApplicationIds;

    public Collection $foundDatabaseUuids;

    public Collection $foundServiceApplicationIds;

    public Collection $foundServiceDatabaseIds;

    public Collection $foundApplicationPreviewsIds;

    // applicationId => (containerName => "state (health)"); used to
    // aggregate one status for multi-container applications.
    public Collection $applicationContainerStatuses;

    public bool $foundProxy = false;

    public bool $foundLogDrainContainer = false;
2024-10-14 10:07:37 +00:00
public function middleware(): array
{
    // Serialize pushes per server: overlapping jobs for the same server would
    // race on status writes. The lock expires after 30s so a crashed job
    // cannot wedge the queue; dontRelease() drops the duplicate instead of
    // requeueing it (the next Sentinel push carries fresher data anyway).
    return [(new WithoutOverlapping('push-server-update-'.$this->server->uuid))->expireAfter(30)->dontRelease()];
}
2024-10-14 10:07:37 +00:00
public function backoff(): int
{
    // Retry delay in seconds: near-instant while developing, a short
    // pause in production.
    if (isDev()) {
        return 1;
    }

    return 3;
}
2024-10-14 11:32:36 +00:00
public function __construct(public Server $server, public $data)
{
    // Every bookkeeping property starts as an empty collection; handle()
    // populates them from the Sentinel payload and the database.
    $emptyCollectionProperties = [
        'containers',
        'foundApplicationIds',
        'foundDatabaseUuids',
        'foundServiceApplicationIds',
        'foundApplicationPreviewsIds',
        'foundServiceDatabaseIds',
        'applicationContainerStatuses',
        'allApplicationIds',
        'allDatabaseUuids',
        'allTcpProxyUuids',
        'allServiceApplicationIds',
        'allServiceDatabaseIds',
    ];

    foreach ($emptyCollectionProperties as $property) {
        $this->{$property} = collect();
    }
}
2024-10-14 10:07:37 +00:00
public function handle()
{
    // Entry point for a status push from Sentinel: record the heartbeat,
    // classify every reported container, then reconcile database state
    // (applications/previews/databases/services) against what was seen.
    // TODO: Swarm is not supported yet
    if (! $this->data) {
        throw new \Exception('No data provided');
    }
    $data = collect($this->data);

    // Mark Sentinel as alive for this server.
    $this->server->sentinelHeartbeat();

    $this->containers = collect(data_get($data, 'containers'));

    // Disk-usage check is dispatched even when no containers were reported.
    $filesystemUsageRoot = data_get($data, 'filesystem_usage_root.used_percentage');
    ServerStorageCheckJob::dispatch($this->server, $filesystemUsageRoot);

    // An empty container list is treated as a broken/partial payload:
    // bail out rather than marking everything as exited.
    if ($this->containers->isEmpty()) {
        return;
    }

    // Snapshot everything Coolify expects to run on this server.
    $this->applications = $this->server->applications();
    $this->databases = $this->server->databases();
    $this->previews = $this->server->previews();
    $this->services = $this->server->services()->get();
    // Single-server applications are reconciled here; multi-server ones
    // go through ComplexStatusCheck (see updateAdditionalServersStatus()).
    $this->allApplicationIds = $this->applications->filter(function ($application) {
        return $application->additional_servers->count() === 0;
    })->pluck('id');
    $this->allApplicationsWithAdditionalServers = $this->applications->filter(function ($application) {
        return $application->additional_servers->count() > 0;
    });
    // Preview keys have the shape "application_id:pull_request_id".
    $this->allApplicationPreviewsIds = $this->previews->map(function ($preview) {
        return $preview->application_id.':'.$preview->pull_request_id;
    });

    $this->allDatabaseUuids = $this->databases->pluck('uuid');
    $this->allTcpProxyUuids = $this->databases->where('is_public', true)->pluck('uuid');
    $this->services->each(function ($service) {
        $service->applications()->pluck('id')->each(function ($applicationId) {
            $this->allServiceApplicationIds->push($applicationId);
        });
        $service->databases()->pluck('id')->each(function ($databaseId) {
            $this->allServiceDatabaseIds->push($databaseId);
        });
    });

    // Classify every reported container by its coolify.* labels.
    foreach ($this->containers as $container) {
        $containerStatus = data_get($container, 'state', 'exited');
        // NOTE(review): health defaults to 'unhealthy' when Sentinel sends no
        // health_status — confirm this is intended for containers that have
        // no healthcheck defined (they may end up "running (unhealthy)").
        $containerHealth = data_get($container, 'health_status', 'unhealthy');
        // Statuses are stored as "state (health)", e.g. "running (healthy)".
        $containerStatus = "$containerStatus ($containerHealth)";
        $labels = collect(data_get($container, 'labels'));
        $coolify_managed = $labels->has('coolify.managed');
        if ($coolify_managed) {
            $name = data_get($container, 'name');
            if ($name === 'coolify-log-drain' && $this->isRunning($containerStatus)) {
                $this->foundLogDrainContainer = true;
            }
            if ($labels->has('coolify.applicationId')) {
                $applicationId = $labels->get('coolify.applicationId');
                // pullRequestId '0' means the main deployment, not a preview.
                $pullRequestId = $labels->get('coolify.pullRequestId', '0');
                try {
                    if ($pullRequestId === '0') {
                        if ($this->allApplicationIds->contains($applicationId) && $this->isRunning($containerStatus)) {
                            $this->foundApplicationIds->push($applicationId);
                        }
                        // Store container status for aggregation
                        if (! $this->applicationContainerStatuses->has($applicationId)) {
                            $this->applicationContainerStatuses->put($applicationId, collect());
                        }
                        $containerName = $labels->get('com.docker.compose.service');
                        if ($containerName) {
                            $this->applicationContainerStatuses->get($applicationId)->put($containerName, $containerStatus);
                        }
                    } else {
                        $previewKey = $applicationId.':'.$pullRequestId;
                        if ($this->allApplicationPreviewsIds->contains($previewKey) && $this->isRunning($containerStatus)) {
                            $this->foundApplicationPreviewsIds->push($previewKey);
                        }
                        $this->updateApplicationPreviewStatus($applicationId, $pullRequestId, $containerStatus);
                    }
                } catch (\Exception $e) {
                    // NOTE(review): exceptions are swallowed so one malformed
                    // container cannot abort the whole payload — consider
                    // logging $e for diagnosability.
                }
            } elseif ($labels->has('coolify.serviceId')) {
                $serviceId = $labels->get('coolify.serviceId');
                $subType = $labels->get('coolify.service.subType');
                $subId = $labels->get('coolify.service.subId');
                if ($subType === 'application' && $this->isRunning($containerStatus)) {
                    $this->foundServiceApplicationIds->push($subId);
                    $this->updateServiceSubStatus($serviceId, $subType, $subId, $containerStatus);
                } elseif ($subType === 'database' && $this->isRunning($containerStatus)) {
                    $this->foundServiceDatabaseIds->push($subId);
                    $this->updateServiceSubStatus($serviceId, $subType, $subId, $containerStatus);
                }
            } else {
                // No application/service id label: proxy, service helper,
                // or a standalone database container.
                $uuid = $labels->get('com.docker.compose.service');
                $type = $labels->get('coolify.type');
                if ($name === 'coolify-proxy' && $this->isRunning($containerStatus)) {
                    $this->foundProxy = true;
                } elseif ($type === 'service' && $this->isRunning($containerStatus)) {
                    // Intentionally empty: service containers are tracked via
                    // the coolify.serviceId branch above.
                } else {
                    if ($this->allDatabaseUuids->contains($uuid) && $this->isRunning($containerStatus)) {
                        $this->foundDatabaseUuids->push($uuid);
                        if ($this->allTcpProxyUuids->contains($uuid) && $this->isRunning($containerStatus)) {
                            $this->updateDatabaseStatus($uuid, $containerStatus, tcpProxy: true);
                        } else {
                            $this->updateDatabaseStatus($uuid, $containerStatus, tcpProxy: false);
                        }
                    }
                }
            }
        }
    }

    // Reconcile: restart the proxy if needed, mark unseen resources exited.
    $this->updateProxyStatus();

    $this->updateNotFoundApplicationStatus();
    $this->updateNotFoundApplicationPreviewStatus();
    $this->updateNotFoundDatabaseStatus();
    $this->updateNotFoundServiceStatus();

    $this->updateAdditionalServersStatus();
    // Aggregate multi-container application statuses
    $this->aggregateMultiContainerStatuses();
    $this->checkLogDrainContainer();
}
/**
 * Collapse the per-container statuses gathered in handle() into a single
 * status per application and persist it when it changed.
 *
 * Priority mirrors GetContainersStatus (SSH-based checks) so both update
 * paths agree: unhealthy > unknown > healthy; no running container at all
 * means "exited (unhealthy)".
 */
private function aggregateMultiContainerStatuses()
{
    if ($this->applicationContainerStatuses->isEmpty()) {
        return;
    }
    foreach ($this->applicationContainerStatuses as $applicationId => $containerStatuses) {
        $application = $this->applications->where('id', $applicationId)->first();
        if (! $application) {
            continue;
        }

        $excludedContainers = $this->getHealthCheckExcludedContainers($application);

        // Drop containers the user opted out of health aggregation.
        $relevantStatuses = $containerStatuses->filter(function ($status, $containerName) use ($excludedContainers) {
            return ! $excludedContainers->contains($containerName);
        });
        // If every container is excluded there is nothing meaningful to aggregate.
        if ($relevantStatuses->isEmpty()) {
            continue;
        }

        // Scan the remaining statuses ("state (health)" strings).
        $hasRunning = false;
        $hasUnhealthy = false;
        $hasUnknown = false;
        foreach ($relevantStatuses as $status) {
            if (str($status)->contains('running')) {
                $hasRunning = true;
                if (str($status)->contains('unhealthy')) {
                    $hasUnhealthy = true;
                }
                if (str($status)->contains('unknown')) {
                    $hasUnknown = true;
                }
            }
        }

        if ($hasRunning) {
            if ($hasUnhealthy) {
                $aggregatedStatus = 'running (unhealthy)';
            } elseif ($hasUnknown) {
                $aggregatedStatus = 'running (unknown)';
            } else {
                $aggregatedStatus = 'running (healthy)';
            }
        } else {
            // All containers are exited
            $aggregatedStatus = 'exited (unhealthy)';
        }

        // Persist only on change to avoid needless writes/model events.
        if ($application->status !== $aggregatedStatus) {
            // NOTE(review): temporary [STATUS-DEBUG] diagnostic logging —
            // remove once the unhealthy-status investigation is closed.
            Log::debug('[STATUS-DEBUG] Sentinel status change', [
                'source' => 'PushServerUpdateJob',
                'app_id' => $application->id,
                'app_name' => $application->name,
                'old_status' => $application->status,
                'new_status' => $aggregatedStatus,
                'container_statuses' => $relevantStatuses->toArray(),
                'flags' => [
                    'hasRunning' => $hasRunning,
                    'hasUnhealthy' => $hasUnhealthy,
                    'hasUnknown' => $hasUnknown,
                ],
            ]);
            $application->status = $aggregatedStatus;
            $application->save();
        }
    }
}

// Parse the application's raw docker-compose and return the service names
// excluded from health aggregation (exclude_from_hc: true or restart: "no").
private function getHealthCheckExcludedContainers($application): Collection
{
    $excludedContainers = collect();
    $dockerComposeRaw = data_get($application, 'docker_compose_raw');
    if (! $dockerComposeRaw) {
        return $excludedContainers;
    }
    try {
        $dockerCompose = \Symfony\Component\Yaml\Yaml::parse($dockerComposeRaw);
        foreach (data_get($dockerCompose, 'services', []) as $serviceName => $serviceConfig) {
            $excludeFromHc = data_get($serviceConfig, 'exclude_from_hc', false);
            $restartPolicy = data_get($serviceConfig, 'restart', 'always');
            if ($excludeFromHc || $restartPolicy === 'no') {
                $excludedContainers->push($serviceName);
            }
        }
    } catch (\Exception $e) {
        // Best effort: an unparsable compose file means no exclusions,
        // i.e. every container participates in the health aggregation.
    }

    return $excludedContainers;
}
2024-10-14 15:54:29 +00:00
// Persist a new status for a standalone application, skipping the write
// when nothing changed or the application is unknown on this server.
private function updateApplicationStatus(string $applicationId, string $containerStatus)
{
    $application = $this->applications->firstWhere('id', $applicationId);
    if ($application === null || $application->status === $containerStatus) {
        return;
    }

    $application->status = $containerStatus;
    $application->save();
}
// Persist a new status for a preview deployment (application + PR pair),
// writing only when the status actually changed.
private function updateApplicationPreviewStatus(string $applicationId, string $pullRequestId, string $containerStatus)
{
    $preview = $this->previews
        ->where('application_id', $applicationId)
        ->where('pull_request_id', $pullRequestId)
        ->first();
    if ($preview === null || $preview->status === $containerStatus) {
        return;
    }

    $preview->status = $containerStatus;
    $preview->save();
}
2024-10-14 11:32:36 +00:00
2024-10-14 15:54:29 +00:00
/**
 * Mark applications as exited when they were expected on this server but no
 * running container showed up in the Sentinel payload.
 *
 * Safety net: if the payload contained no containers at all, Sentinel most
 * likely failed, so nothing is marked exited. (This guard is loop-invariant
 * and therefore hoisted out of the per-application loop.)
 */
private function updateNotFoundApplicationStatus()
{
    if ($this->containers->isEmpty()) {
        return;
    }

    $notFoundApplicationIds = $this->allApplicationIds->diff($this->foundApplicationIds);
    $notFoundApplicationIds->each(function ($applicationId) {
        $application = Application::find($applicationId);
        if (! $application) {
            return;
        }
        // Already exited (possibly with a health suffix, e.g. "exited
        // (unhealthy)"): leave the more specific status in place.
        if (str($application->status)->startsWith('exited')) {
            return;
        }
        $application->status = 'exited';
        $application->save();
    });
}
/**
 * Mark preview deployments as exited when they were expected on this server
 * but no running container showed up in the Sentinel payload.
 *
 * Safety net: an empty container list means the payload is suspect, so no
 * preview is touched. (Guard hoisted out of the loop — it is loop-invariant.)
 */
private function updateNotFoundApplicationPreviewStatus()
{
    if ($this->containers->isEmpty()) {
        return;
    }

    $notFoundApplicationPreviewsIds = $this->allApplicationPreviewsIds->diff($this->foundApplicationPreviewsIds);
    $notFoundApplicationPreviewsIds->each(function ($previewKey) {
        // Keys have the shape "application_id:pull_request_id".
        $parts = explode(':', $previewKey);
        if (count($parts) !== 2) {
            return;
        }
        [$applicationId, $pullRequestId] = $parts;
        $applicationPreview = $this->previews
            ->where('application_id', $applicationId)
            ->where('pull_request_id', $pullRequestId)
            ->first();
        if (! $applicationPreview) {
            return;
        }
        // Keep a more specific "exited (…)" status if one is already set.
        if (str($applicationPreview->status)->startsWith('exited')) {
            return;
        }
        $applicationPreview->status = 'exited';
        $applicationPreview->save();
    });
}
// Ensure the coolify-proxy container is in the right state: start it when it
// should be running but was not seen in the payload; otherwise make sure the
// running proxy is attached to every relevant docker network.
private function updateProxyStatus()
{
    // If proxy is not found, start it
    if ($this->server->isProxyShouldRun()) {
        if ($this->foundProxy === false) {
            try {
                if (CheckProxy::run($this->server)) {
                    // Synchronous start, then notify the team about the restart.
                    StartProxy::run($this->server, async: false);
                    $this->server->team?->notify(new ContainerRestarted('coolify-proxy', $this->server));
                }
            } catch (\Throwable $e) {
                // NOTE(review): proxy (re)start failures are swallowed silently —
                // consider at least logging $e so operators can diagnose them.
            }
        } else {
            // Proxy is running: (re)connect it to the docker networks of the
            // resources on this server.
            $connectProxyToDockerNetworks = connectProxyToNetworks($this->server);
            instant_remote_process($connectProxyToDockerNetworks, $this->server, false);
        }
    }
}
2024-10-14 15:54:29 +00:00
/**
 * Persist a standalone database's status and, for public databases, make
 * sure the companion "<uuid>-proxy" TCP proxy container is running.
 *
 * @param  string  $databaseUuid  uuid of the database on this server
 * @param  string  $containerStatus  "state (health)" string from the payload
 * @param  bool  $tcpProxy  true when the database is public and needs a TCP proxy
 */
private function updateDatabaseStatus(string $databaseUuid, string $containerStatus, bool $tcpProxy = false)
{
    $database = $this->databases->where('uuid', $databaseUuid)->first();
    if (! $database) {
        return;
    }
    // Write only on change to avoid needless saves/model events.
    if ($database->status !== $containerStatus) {
        $database->status = $containerStatus;
        $database->save();
    }
    if ($this->isRunning($containerStatus) && $tcpProxy) {
        // first(callback) replaces filter()->first(): stops at the first match.
        $tcpProxyContainerFound = $this->containers->first(function ($value) use ($databaseUuid) {
            return data_get($value, 'name') === "$databaseUuid-proxy" && data_get($value, 'state') === 'running';
        });
        if (! $tcpProxyContainerFound) {
            // Proxy container vanished: restart it and tell the team.
            StartDatabaseProxy::dispatch($database);
            $this->server->team?->notify(new ContainerRestarted("TCP Proxy for {$database->name}", $this->server));
        }
    }
}
// Databases we expected on this server but did not see in the payload are
// marked exited; public ones also get their TCP proxy stopped.
private function updateNotFoundDatabaseStatus()
{
    foreach ($this->allDatabaseUuids->diff($this->foundDatabaseUuids) as $databaseUuid) {
        $database = $this->databases->firstWhere('uuid', $databaseUuid);
        if ($database === null) {
            continue;
        }
        if ($database->status !== 'exited') {
            $database->status = 'exited';
            $database->save();
        }
        // A public database that is down no longer needs its TCP proxy.
        if ($database->is_public) {
            StopDatabaseProxy::dispatch($database);
        }
    }
}
// Persist the status of one sub-resource (application or database) of a
// composed service, writing only when the status actually changed.
private function updateServiceSubStatus(string $serviceId, string $subType, string $subId, string $containerStatus)
{
    $service = $this->services->firstWhere('id', $serviceId);
    if ($service === null) {
        return;
    }

    // Resolve the sub-resource this container belongs to; unknown sub-types
    // are ignored (same as the original branch structure).
    $subResource = match ($subType) {
        'application' => $service->applications()->where('id', $subId)->first(),
        'database' => $service->databases()->where('id', $subId)->first(),
        default => null,
    };

    if ($subResource !== null && $subResource->status !== $containerStatus) {
        $subResource->status = $containerStatus;
        $subResource->save();
    }
}
2024-10-14 10:07:37 +00:00
2024-10-14 15:54:29 +00:00
// Service sub-resources we expected but did not see running in the payload
// are marked exited (applications and databases handled symmetrically).
private function updateNotFoundServiceStatus()
{
    $this->allServiceApplicationIds
        ->diff($this->foundServiceApplicationIds)
        ->each(function ($serviceApplicationId) {
            $application = ServiceApplication::find($serviceApplicationId);
            if ($application !== null && $application->status !== 'exited') {
                $application->status = 'exited';
                $application->save();
            }
        });

    $this->allServiceDatabaseIds
        ->diff($this->foundServiceDatabaseIds)
        ->each(function ($serviceDatabaseId) {
            $database = ServiceDatabase::find($serviceDatabaseId);
            if ($database !== null && $database->status !== 'exited') {
                $database->status = 'exited';
                $database->save();
            }
        });
}
// Applications spanning multiple servers cannot be judged from this server's
// payload alone; delegate each one to the cross-server status check.
private function updateAdditionalServersStatus()
{
    foreach ($this->allApplicationsWithAdditionalServers as $application) {
        ComplexStatusCheck::run($application);
    }
}
// A status such as "running (healthy)" counts as running regardless of the
// health suffix; anything else ("exited (...)", etc.) does not.
private function isRunning(string $containerStatus)
{
    return str_contains($containerStatus, 'running');
}
2024-10-15 11:39:19 +00:00
// Restart the log drain container when it is enabled for this server but was
// not seen running in the Sentinel payload.
private function checkLogDrainContainer()
{
    if (! $this->server->isLogDrainEnabled()) {
        return;
    }

    if (! $this->foundLogDrainContainer) {
        StartLogDrain::dispatch($this->server);
    }
}
2024-10-14 10:07:37 +00:00
}