From e1955871b2a2b5edd4260fb520209589e455e96d Mon Sep 17 00:00:00 2001
From: cangui
Date: Mon, 18 Aug 2025 22:01:17 +0200
Subject: [PATCH] up

---
 internal/job/runner.go | 91 +++++++++++++++++++++++++++---------------
 1 file changed, 58 insertions(+), 33 deletions(-)

diff --git a/internal/job/runner.go b/internal/job/runner.go
index fdd6a96..d5c01e6 100644
--- a/internal/job/runner.go
+++ b/internal/job/runner.go
@@ -184,12 +184,22 @@ func ListJobs(db *gorm.DB) []*DownloadJob {
 func StartDownload(job *DownloadJob, downloadURL string, client *client.Client, db *gorm.DB) {
 	UpdateJobStatus(job.ID, "downloading", db)
 
-	var path models.PathDownload
-	if err := db.First(&path, job.PathID).Error; err != nil {
+	// Look up the entry (Path="Film" / "Série" / ...)
+	var p models.PathDownload
+	if err := db.First(&p, job.PathID).Error; err != nil {
 		UpdateJobStatus(job.ID, "failed", db)
 		return
 	}
 
+	// ★ Physical path = upload/<Path>
+	diskBase := filepath.Join("upload", p.Path)
+	if err := os.MkdirAll(diskBase, 0o755); err != nil {
+		log.Printf("[ERROR] Failed to create directory '%s': %v", diskBase, err)
+		UpdateJobStatus(job.ID, "failed", db)
+		return
+	}
+
+	// HEAD request for size + Accept-Ranges
 	resp, err := http.Head(downloadURL)
 	if err != nil || resp.StatusCode != http.StatusOK {
 		UpdateJobStatus(job.ID, "failed", db)
@@ -201,10 +211,11 @@ func StartDownload(job *DownloadJob, downloadURL string, client *client.Client,
 		return
 	}
 
-	acceptRanges := resp.Header.Get("Accept-Ranges")
+	acceptRanges := strings.ToLower(resp.Header.Get("Accept-Ranges"))
 	if acceptRanges != "bytes" {
 		log.Println("[INFO] Server does not support Range, falling back to single thread")
-		StartDownloadSingleThread(job, downloadURL, db, path.Path)
+		// ★ pass the physical path
+		StartDownloadSingleThread(job, downloadURL, db, diskBase)
 		return
 	}
 
@@ -220,7 +231,6 @@ func StartDownload(job *DownloadJob, downloadURL string, client *client.Client,
 	go func() {
 		var lastTotal int64 = 0
 		lastUpdate := time.Now()
-
 		ticker := time.NewTicker(1 * time.Second)
 		defer ticker.Stop()
 
@@ -231,12 +241,11 @@ func StartDownload(job *DownloadJob, downloadURL string, client *client.Client,
 			case <-ticker.C:
 				elapsed := time.Since(lastUpdate).Seconds()
 				if elapsed > 0 {
-					speed := int(float64(downloaded-lastTotal) / elapsed / 1024) // in KB/s
+					speed := int(float64(downloaded-lastTotal) / elapsed / 1024) // KB/s
 					lastTotal = downloaded
 					lastUpdate = time.Now()
 					progress := int((downloaded * 100) / size)
 
-					// Update in database
 					db.Model(&DownloadJob{}).Where("id = ?", job.ID).Updates(map[string]interface{}{
 						"progress": progress,
 						"speed":    speed,
@@ -257,14 +266,13 @@ func StartDownload(job *DownloadJob, downloadURL string, client *client.Client,
 			end = size - 1
 		}
 
-		tmpPath := filepath.Join(os.TempDir(), fmt.Sprintf("%s.part%d", job.ID, i))
+		tmpPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v.part%d", job.ID, i))
 		tmpFiles[i] = tmpPath
 
 		wg.Add(1)
 		go func(start, end int64, tmpPath string) {
 			defer wg.Done()
-			err := downloadSegment(downloadURL, start, end, tmpPath, progressChan)
-			if err != nil {
+			if err := downloadSegment(downloadURL, start, end, tmpPath, progressChan); err != nil {
 				log.Printf("[ERROR] Segment %d-%d failed: %v\n", start, end, err)
 			}
 		}(start, end, tmpPath)
@@ -273,9 +281,10 @@ func StartDownload(job *DownloadJob, downloadURL string, client *client.Client,
 	wg.Wait()
 	close(done)
 
-	// Merge
+	// Merge into the final destination
 	safeName := SanitizeFileName(job.Name)
-	finalPath := generateUniqueFilePath(path.Path, safeName)
+	// ★ use the physical directory
+	finalPath := generateUniqueFilePath(diskBase, safeName)
 
 	out, err := os.Create(finalPath)
 	if err != nil {
@@ -290,24 +299,26 @@ func StartDownload(job *DownloadJob, downloadURL string, client *client.Client,
 			UpdateJobStatus(job.ID, "failed", db)
 			return
 		}
-		io.Copy(out, part)
-		part.Close()
-		os.Remove(tmpPath)
+		_, _ = io.Copy(out, part)
+		_ = part.Close()
+		_ = os.Remove(tmpPath)
 	}
-
+
+	// Post-processing based on extension
 	ext := strings.ToLower(filepath.Ext(finalPath))
 	videoExts := map[string]bool{".mkv": true, ".avi": true, ".mp4": true}
 
 	if !videoExts[ext] {
 		switch ext {
 		case ".zip":
-			if err := unzip(finalPath, path.Path); err != nil {
+			// ★ extract into the physical directory
+			if err := unzip(finalPath, diskBase); err != nil {
 				log.Printf("[ERROR] ZIP extraction failed: %v\n", err)
 				UpdateJobStatus(job.ID, "failed", db)
 				return
 			}
 		case ".rar":
-			if err := unrarExtract(finalPath, path.Path); err != nil {
+			if err := unrarExtract(finalPath, diskBase); err != nil {
 				log.Printf("[ERROR] RAR extraction failed: %v\n", err)
 				UpdateJobStatus(job.ID, "failed", db)
 				return
@@ -322,6 +333,7 @@ func StartDownload(job *DownloadJob, downloadURL string, client *client.Client,
 	log.Printf("[OK] File downloaded: %s\n", finalPath)
 }
 
+// generateUniqueFilePath appends a suffix if the file already exists
 func generateUniqueFilePath(basePath, fileName string) string {
 	finalPath := filepath.Join(basePath, fileName)
 
@@ -346,6 +358,25 @@ func generateUniqueFilePath(basePath, fileName string) string {
 func StartDownloadSingleThread(job *DownloadJob, downloadURL string, db *gorm.DB, basePath string) {
 	UpdateJobStatus(job.ID, "running", db)
 
+	// --- Normalize the on-disk path ---
+	// If basePath is "Film" (the DB-level value), prefix it with "upload/".
+	// If we are already given "upload/Film" or an absolute path, keep it as-is.
+	diskBase := basePath
+	clean := filepath.Clean(basePath)
+	if !filepath.IsAbs(clean) &&
+		clean != "upload" &&
+		!strings.HasPrefix(clean, "upload"+string(os.PathSeparator)) {
+		diskBase = filepath.Join("upload", clean)
+	}
+
+	// Create the directory if needed
+	if err := os.MkdirAll(diskBase, 0o755); err != nil {
+		log.Printf("[ERROR] Failed to create directory %s: %v\n", diskBase, err)
+		UpdateJobStatus(job.ID, "failed", db)
+		return
+	}
+
+	// Start the download
 	resp, err := http.Get(downloadURL)
 	if err != nil {
 		log.Printf("[ERROR] Download failed: %v\n", err)
@@ -360,14 +391,7 @@ func StartDownloadSingleThread(job *DownloadJob, downloadURL string, db *gorm.DB
 		return
 	}
 
-	// Create the directory if needed
-	if err := os.MkdirAll(basePath, os.ModePerm); err != nil {
-		log.Printf("[ERROR] Failed to create directory %s: %v\n", basePath, err)
-		UpdateJobStatus(job.ID, "failed", db)
-		return
-	}
-
-	destPath := filepath.Join(basePath, SanitizeFileName(job.Name))
+	destPath := filepath.Join(diskBase, SanitizeFileName(job.Name))
 	outFile, err := os.Create(destPath)
 	if err != nil {
 		log.Printf("[ERROR] Could not create the file: %v\n", err)
@@ -376,7 +400,7 @@ func StartDownloadSingleThread(job *DownloadJob, downloadURL string, db *gorm.DB
 	}
 	defer outFile.Close()
 
-	// Compute total size
+	// Total size for progress tracking
 	totalSize := resp.ContentLength
 	if totalSize <= 0 && job.Size > 0 {
 		totalSize = job.Size
@@ -387,7 +411,7 @@ func StartDownloadSingleThread(job *DownloadJob, downloadURL string, db *gorm.DB
 	lastUpdate := time.Now()
 
 	for {
-		n, err := resp.Body.Read(buf)
+		n, readErr := resp.Body.Read(buf)
 		if n > 0 {
 			if _, writeErr := outFile.Write(buf[:n]); writeErr != nil {
 				log.Printf("[ERROR] Write failed: %v\n", writeErr)
@@ -396,17 +420,17 @@ func StartDownloadSingleThread(job *DownloadJob, downloadURL string, db *gorm.DB
 			}
 			downloaded += int64(n)
 		}
-		if err != nil {
-			if err == io.EOF {
+		if readErr != nil {
+			if readErr == io.EOF {
 				break
 			}
-			log.Printf("[ERROR] Read error: %v\n", err)
+			log.Printf("[ERROR] Read error: %v\n", readErr)
 			UpdateJobStatus(job.ID, "failed", db)
 			return
 		}
 
-		// Progress update
-		if time.Since(lastUpdate) > 500*time.Millisecond && totalSize > 0 {
+		// Progress update (every 500ms)
+		if totalSize > 0 && time.Since(lastUpdate) > 500*time.Millisecond {
 			progress := int((downloaded * 100) / totalSize)
 			UpdateJobProgress(job.ID, progress, db)
 			lastUpdate = time.Now()
@@ -418,6 +442,7 @@ func StartDownloadSingleThread(job *DownloadJob, downloadURL string, db *gorm.DB
 	log.Printf("[OK] File downloaded (single): %s\n", destPath)
 }
 
+
 func downloadSegment(url string, start, end int64, dest string, progressChan chan<- int64) error {
 	req, _ := http.NewRequest("GET", url, nil)
 	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))