shelfy/internal/download/jobs.go

549 lines
14 KiB
Go
Raw Normal View History

2025-06-12 08:57:10 +00:00
package download
import (
	"archive/zip"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strings"
	"sync"
	"time"

	"gorm.io/gorm"

	"app/shelfly/internal/debridlink"
	"app/shelfly/internal/models"
)
// DownloadJob represents a single download task. It is persisted via GORM
// and mirrored in the in-memory jobs map for fast lookup and SSE updates.
type DownloadJob struct {
	ID        string    `gorm:"primaryKey;column:id"`
	Link      string    `gorm:"column:link"`
	Name      string    `gorm:"column:name"`
	Status    string    `gorm:"column:status"`
	PathID    int       `gorm:"column:path_id"` // int, not uint
	Size      int64     `gorm:"column:size"`
	Host      string    `gorm:"column:host"`
	Progress  int       `gorm:"column:progress"` // percentage 0-100
	StreamURL string    `gorm:"column:stream_url"`
	Speed     int       `gorm:"column:speed;default:0"` // download speed in KB/s
	CreatedAt time.Time `gorm:"autoCreateTime"`
	UpdatedAt time.Time `gorm:"autoUpdateTime"`
}
var (
	// jobs is the in-memory cache of download jobs, keyed by job ID.
	// All access must be guarded by jobsMu.
	jobs   = make(map[string]*DownloadJob)
	jobsMu sync.Mutex
)
2025-06-12 15:31:12 +00:00
// Enregistre un job en mémoire et en base
func RegisterJobWithDB(job *DownloadJob, db *gorm.DB) error {
2025-06-15 15:21:11 +00:00
var existing DownloadJob
// On cherche le job existant SANS les soft deletes si jamais ils sont activés par erreur
err := db.Unscoped().First(&existing, "id = ?", job.ID).Error
if err == nil {
// Le job existe déjà, on le met à jour
log.Printf("[INFO] Mise à jour du job existant : %s\n", job.ID)
err = db.Model(&existing).Updates(map[string]interface{}{
"link": job.Link,
"name": job.Name,
"status": job.Status,
"path_id": job.PathID,
"size": job.Size,
"host": job.Host,
"progress": job.Progress,
"stream_url": job.StreamURL,
"updated_at": time.Now(),
}).Error
if err != nil {
log.Printf("[ERROR] Échec de la mise à jour : %v\n", err)
return err
}
} else if errors.Is(err, gorm.ErrRecordNotFound) {
// Le job n'existe pas, on le crée
if err := db.Create(job).Error; err != nil {
log.Printf("[ERROR] Insertion échouée : %v\n", err)
return err
}
log.Printf("[INFO] Nouveau job enregistré : %s\n", job.ID)
} else {
// Une erreur inattendue
log.Printf("[ERROR] Erreur inattendue lors de la recherche du job : %v\n", err)
return err
}
// Mise à jour en mémoire
2025-06-12 08:57:10 +00:00
jobsMu.Lock()
2025-06-12 15:31:12 +00:00
jobs[job.ID] = job
2025-06-12 08:57:10 +00:00
jobsMu.Unlock()
2025-06-15 15:21:11 +00:00
return nil
2025-06-12 08:57:10 +00:00
}
2025-06-12 15:31:12 +00:00
// Charge tous les jobs depuis la base en mémoire (au démarrage)
func InitJobsFromDB(db *gorm.DB) error {
var jobList []DownloadJob
if err := db.Find(&jobList).Error; err != nil {
return err
2025-06-12 08:57:10 +00:00
}
jobsMu.Lock()
defer jobsMu.Unlock()
2025-06-12 15:31:12 +00:00
for _, j := range jobList {
jobCopy := j
jobs[j.ID] = &jobCopy
2025-06-12 08:57:10 +00:00
}
2025-06-12 15:31:12 +00:00
log.Printf("[JOB] %d jobs rechargés depuis la base\n", len(jobs))
return nil
2025-06-12 08:57:10 +00:00
}
2025-06-12 15:31:12 +00:00
// Met à jour le status dun job et le persiste
func UpdateJobStatus(id string, status string, db *gorm.DB) {
2025-06-12 08:57:10 +00:00
jobsMu.Lock()
defer jobsMu.Unlock()
2025-06-12 15:31:12 +00:00
if job, ok := jobs[id]; ok {
job.Status = status
job.UpdatedAt = time.Now()
if db != nil {
_ = db.Save(job)
}
2025-06-12 08:57:10 +00:00
}
2025-06-15 15:21:11 +00:00
Broadcast()
2025-06-12 08:57:10 +00:00
}
2025-06-12 15:31:12 +00:00
// Met à jour la progression dun job et le persiste
func UpdateJobProgress(id string, progress int, db *gorm.DB) {
2025-06-12 08:57:10 +00:00
jobsMu.Lock()
defer jobsMu.Unlock()
2025-06-12 15:31:12 +00:00
if job, ok := jobs[id]; ok {
job.Progress = progress
job.UpdatedAt = time.Now()
if db != nil {
_ = db.Save(job)
}
2025-06-12 08:57:10 +00:00
}
}
2025-06-12 15:31:12 +00:00
// Supprime un job (mémoire uniquement)
2025-06-15 15:21:11 +00:00
func DeleteJob(id string, db *gorm.DB) error {
// Supprime en mémoire
2025-06-12 08:57:10 +00:00
jobsMu.Lock()
2025-06-12 15:31:12 +00:00
delete(jobs, id)
2025-06-15 15:21:11 +00:00
jobsMu.Unlock()
// Supprime en base
if err := db.Delete(&DownloadJob{}, "id = ?", id).Error; err != nil {
log.Printf("[ERROR] Échec de suppression du job en base : %v\n", err)
return err
}
log.Printf("[JOB] Supprimé : %s\n", id)
return nil
2025-06-12 08:57:10 +00:00
}
2025-06-12 15:31:12 +00:00
// Liste tous les jobs
2025-06-15 15:21:11 +00:00
func ListJobs(db *gorm.DB) []*DownloadJob {
var jobsFromDB []*DownloadJob
if err := db.Order("created_at desc").Find(&jobsFromDB).Error; err != nil {
log.Printf("[ERROR] Impossible de charger les jobs depuis la base : %v\n", err)
return []*DownloadJob{}
2025-06-12 15:31:12 +00:00
}
2025-06-15 15:21:11 +00:00
return jobsFromDB
2025-06-12 08:57:10 +00:00
}
2025-06-15 15:21:11 +00:00
2025-06-12 08:57:10 +00:00
2025-06-12 15:31:12 +00:00
func StartDownload(job *DownloadJob, downloadURL string, client *debridlink.Client, db *gorm.DB) {
2025-06-15 15:21:11 +00:00
UpdateJobStatus(job.ID, "downloading", db)
var path models.PathDownload
if err := db.First(&path, job.PathID).Error; err != nil {
UpdateJobStatus(job.ID, "failed", db)
return
}
resp, err := http.Head(downloadURL)
if err != nil || resp.StatusCode != http.StatusOK {
UpdateJobStatus(job.ID, "failed", db)
return
}
size := resp.ContentLength
if size <= 0 {
UpdateJobStatus(job.ID, "failed", db)
return
}
acceptRanges := resp.Header.Get("Accept-Ranges")
if acceptRanges != "bytes" {
log.Println("[INFO] Serveur ne supporte pas Range, fallback single thread")
StartDownloadSingleThread(job, downloadURL, db, path.Path)
return
}
const numSegments = 4
segmentSize := size / numSegments
tmpFiles := make([]string, numSegments)
wg := sync.WaitGroup{}
progressChan := make(chan int64, 100)
done := make(chan bool)
// Progression + Vitesse
var downloaded int64
go func() {
var lastTotal int64 = 0
lastUpdate := time.Now()
ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
for {
select {
case n := <-progressChan:
downloaded += n
case <-ticker.C:
elapsed := time.Since(lastUpdate).Seconds()
if elapsed > 0 {
speed := int(float64(downloaded-lastTotal) / elapsed / 1024) // en Ko/s
lastTotal = downloaded
lastUpdate = time.Now()
progress := int((downloaded * 100) / size)
// Update en base
db.Model(&DownloadJob{}).Where("id = ?", job.ID).Updates(map[string]interface{}{
"progress": progress,
"speed": speed,
})
Broadcast()
}
case <-done:
return
}
}
}()
// Téléchargement parallèle
for i := 0; i < numSegments; i++ {
start := int64(i) * segmentSize
end := start + segmentSize - 1
if i == numSegments-1 {
end = size - 1
}
tmpPath := filepath.Join(os.TempDir(), fmt.Sprintf("%s.part%d", job.ID, i))
tmpFiles[i] = tmpPath
wg.Add(1)
go func(start, end int64, tmpPath string) {
defer wg.Done()
err := downloadSegment(downloadURL, start, end, tmpPath, progressChan)
if err != nil {
log.Printf("[ERROR] Segment %d-%d échoué : %v\n", start, end, err)
}
}(start, end, tmpPath)
}
wg.Wait()
close(done)
// Fusion
2025-06-20 18:17:24 +00:00
safeName := SanitizeFileName(job.Name)
2025-06-15 15:21:11 +00:00
finalPath := generateUniqueFilePath(path.Path, safeName)
out, err := os.Create(finalPath)
if err != nil {
UpdateJobStatus(job.ID, "failed", db)
return
}
defer out.Close()
for _, tmpPath := range tmpFiles {
part, err := os.Open(tmpPath)
if err != nil {
UpdateJobStatus(job.ID, "failed", db)
return
}
io.Copy(out, part)
part.Close()
os.Remove(tmpPath)
}
2025-06-26 10:28:34 +00:00
2025-06-26 09:11:21 +00:00
ext := strings.ToLower(filepath.Ext(finalPath))
videoExts := map[string]bool{".mkv": true, ".avi": true, ".mp4": true}
if !videoExts[ext] {
switch ext {
case ".zip":
if err := unzip(finalPath, path.Path); err != nil {
log.Printf("[ERROR] Décompression ZIP échouée : %v\n", err)
UpdateJobStatus(job.ID, "failed", db)
return
}
case ".rar":
if err := unrarExtract(finalPath, path.Path); err != nil {
log.Printf("[ERROR] Décompression RAR échouée : %v\n", err)
UpdateJobStatus(job.ID, "failed", db)
return
}
default:
log.Printf("[INFO] Extension non gérée : %s\n", ext)
}
}
2025-06-26 10:28:34 +00:00
UpdateJobProgress(job.ID, 100, db)
2025-06-15 15:21:11 +00:00
UpdateJobStatus(job.ID, "done", db)
log.Printf("[OK] Fichier téléchargé : %s\n", finalPath)
}
// generateUniqueFilePath returns basePath/fileName, appending " (n)" before
// the extension until the resulting path does not already exist.
func generateUniqueFilePath(basePath, fileName string) string {
	candidate := filepath.Join(basePath, fileName)
	if _, err := os.Stat(candidate); os.IsNotExist(err) {
		return candidate
	}
	ext := filepath.Ext(fileName)
	stem := strings.TrimSuffix(fileName, ext)
	for n := 1; ; n++ {
		candidate = filepath.Join(basePath, fmt.Sprintf("%s (%d)%s", stem, n, ext))
		if _, err := os.Stat(candidate); os.IsNotExist(err) {
			return candidate
		}
	}
}
func StartDownloadSingleThread(job *DownloadJob, downloadURL string, db *gorm.DB, basePath string) {
2025-06-12 15:31:12 +00:00
UpdateJobStatus(job.ID, "running", db)
2025-06-12 08:57:10 +00:00
2025-06-12 15:31:12 +00:00
resp, err := http.Get(downloadURL)
if err != nil {
log.Printf("[ERROR] Téléchargement échoué : %v\n", err)
UpdateJobStatus(job.ID, "failed", db)
return
2025-06-12 08:57:10 +00:00
}
2025-06-12 15:31:12 +00:00
defer resp.Body.Close()
2025-06-12 08:57:10 +00:00
2025-06-12 15:31:12 +00:00
if resp.StatusCode != http.StatusOK {
log.Printf("[ERROR] Erreur HTTP : %s\n", resp.Status)
UpdateJobStatus(job.ID, "failed", db)
return
}
2025-06-12 08:57:10 +00:00
2025-06-15 15:21:11 +00:00
// Créer le répertoire si nécessaire
if err := os.MkdirAll(basePath, os.ModePerm); err != nil {
log.Printf("[ERROR] Création du dossier %s échouée : %v\n", basePath, err)
2025-06-12 15:31:12 +00:00
UpdateJobStatus(job.ID, "failed", db)
return
}
2025-06-15 15:21:11 +00:00
2025-06-20 18:17:24 +00:00
destPath := filepath.Join(basePath, SanitizeFileName(job.Name))
2025-06-12 15:31:12 +00:00
outFile, err := os.Create(destPath)
2025-06-12 08:57:10 +00:00
if err != nil {
2025-06-12 15:31:12 +00:00
log.Printf("[ERROR] Impossible de créer le fichier : %v\n", err)
UpdateJobStatus(job.ID, "failed", db)
return
2025-06-12 08:57:10 +00:00
}
2025-06-12 15:31:12 +00:00
defer outFile.Close()
2025-06-15 15:21:11 +00:00
// Calcul taille totale
2025-06-12 15:31:12 +00:00
totalSize := resp.ContentLength
if totalSize <= 0 && job.Size > 0 {
totalSize = job.Size
2025-06-12 08:57:10 +00:00
}
2025-06-12 15:31:12 +00:00
buf := make([]byte, 32*1024) // 32KB
var downloaded int64
lastUpdate := time.Now()
2025-06-12 08:57:10 +00:00
2025-06-12 15:31:12 +00:00
for {
n, err := resp.Body.Read(buf)
if n > 0 {
if _, writeErr := outFile.Write(buf[:n]); writeErr != nil {
log.Printf("[ERROR] Écriture échouée : %v\n", writeErr)
UpdateJobStatus(job.ID, "failed", db)
return
}
downloaded += int64(n)
2025-06-12 08:57:10 +00:00
}
2025-06-12 15:31:12 +00:00
if err != nil {
if err == io.EOF {
break
}
log.Printf("[ERROR] Erreur de lecture : %v\n", err)
UpdateJobStatus(job.ID, "failed", db)
2025-06-12 08:57:10 +00:00
return
}
2025-06-15 15:21:11 +00:00
// Mise à jour de la progression
2025-06-12 15:31:12 +00:00
if time.Since(lastUpdate) > 500*time.Millisecond && totalSize > 0 {
progress := int((downloaded * 100) / totalSize)
UpdateJobProgress(job.ID, progress, db)
lastUpdate = time.Now()
}
2025-06-12 08:57:10 +00:00
}
2025-06-12 15:31:12 +00:00
UpdateJobProgress(job.ID, 100, db)
UpdateJobStatus(job.ID, "done", db)
2025-06-15 15:21:11 +00:00
log.Printf("[OK] Fichier téléchargé (single) : %s\n", destPath)
2025-06-12 15:31:12 +00:00
}
2025-06-15 15:21:11 +00:00
// downloadSegment fetches bytes [start, end] of url into dest, reporting each
// chunk's size on progressChan. It returns an error when the request cannot
// be built, the server does not honour the Range request, or any I/O fails.
func downloadSegment(url string, start, end int64, dest string, progressChan chan<- int64) error {
	// Previously the error was discarded (req, _ := ...), which nil-derefs
	// inside http.DefaultClient.Do on a malformed URL.
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// A 200 means the server ignored the Range header and is sending the
	// whole file; writing that as a segment would corrupt the merged result.
	if resp.StatusCode != http.StatusPartialContent {
		return fmt.Errorf("réponse inattendue pour le segment %d-%d : %s", start, end, resp.Status)
	}
	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()
	buf := make([]byte, 32*1024)
	for {
		n, readErr := resp.Body.Read(buf)
		if n > 0 {
			if _, writeErr := out.Write(buf[:n]); writeErr != nil {
				return writeErr
			}
			progressChan <- int64(n) // report downloaded bytes
		}
		if readErr != nil {
			if readErr == io.EOF {
				return nil
			}
			return readErr
		}
	}
}
2025-06-20 18:17:24 +00:00
// fileNameSanitizer matches any character that is not a word character,
// hyphen, or dot. Compiled once at package scope instead of on every call.
var fileNameSanitizer = regexp.MustCompile(`[^\w\-.]`)

// SanitizeFileName replaces every filesystem-unfriendly character in name
// with an underscore.
func SanitizeFileName(name string) string {
	return fileNameSanitizer.ReplaceAllString(name, "_")
}
2025-06-15 15:21:11 +00:00
var (
	// Active SSE subscriber channels; guarded by subscribersMu.
	subscribers   = make(map[chan struct{}]struct{})
	subscribersMu sync.Mutex
)

// Subscribe registers and returns a signaling channel. The SSE client must
// release it via Unsubscribe when done.
func Subscribe() chan struct{} {
	ch := make(chan struct{}, 1)
	subscribersMu.Lock()
	defer subscribersMu.Unlock()
	subscribers[ch] = struct{}{}
	return ch
}

// Unsubscribe removes and closes a channel obtained from Subscribe.
func Unsubscribe(ch chan struct{}) {
	subscribersMu.Lock()
	delete(subscribers, ch)
	subscribersMu.Unlock()
	close(ch)
}

// Broadcast delivers a non-blocking notification to every subscriber;
// clients whose buffer is already full simply miss this tick.
func Broadcast() {
	subscribersMu.Lock()
	defer subscribersMu.Unlock()
	for ch := range subscribers {
		select {
		case ch <- struct{}{}:
			log.Println("Broadcast envoyé à un client")
		default:
			log.Println("Client bloqué, message ignoré")
		}
	}
}
2025-06-26 09:11:21 +00:00
func unzip(srcZip, destDir string) error {
r, err := zip.OpenReader(srcZip)
if err != nil {
return err
}
defer r.Close()
for _, f := range r.File {
fpath := filepath.Join(destDir, f.Name)
if f.FileInfo().IsDir() {
os.MkdirAll(fpath, os.ModePerm)
continue
}
if err := os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil {
return err
}
in, err := f.Open()
if err != nil {
return err
}
defer in.Close()
out, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
}
defer out.Close()
if _, err := io.Copy(out, in); err != nil {
return err
}
}
return nil
}
// unrarExtract extracts a RAR archive into destDir, first with the `unrar`
// binary and, if that fails, with `7z`. When both tools fail, it returns a
// single error combining both outputs.
func unrarExtract(srcRar, destDir string) error {
	log.Printf("[DEBUG] Début de lextraction RAR src: %q, dest: %q", srcRar, destDir)

	// 1) First attempt: unrar.
	unrarCmd := exec.Command("unrar", "x", "-y", srcRar, destDir)
	log.Printf("[DEBUG] Exécution de la commande unrar : %s", strings.Join(unrarCmd.Args, " "))
	unrarOut, unrarErr := unrarCmd.CombinedOutput()
	log.Printf("[DEBUG] Résultat unrar err: %v, output:\n%s", unrarErr, string(unrarOut))
	if unrarErr == nil {
		log.Printf("[INFO] Extraction réussie avec unrar.")
		return nil
	}
	log.Printf("[WARN] Échec de unrar, passage à 7z.")

	// 2) Fallback: 7z.
	sevenZipCmd := exec.Command("7z", "x", srcRar, "-y", "-o"+destDir)
	log.Printf("[DEBUG] Exécution de la commande 7z : %s", strings.Join(sevenZipCmd.Args, " "))
	sevenZipOut, sevenZipErr := sevenZipCmd.CombinedOutput()
	log.Printf("[DEBUG] Résultat 7z err: %v, output:\n%s", sevenZipErr, string(sevenZipOut))
	if sevenZipErr == nil {
		log.Printf("[INFO] Extraction réussie avec 7z.")
		return nil
	}

	// 3) Both tools failed: report everything in one error.
	combined := fmt.Errorf(
		"unrar a échoué : %v\nSortie unrar : %s\n\n7z a échoué : %v\nSortie 7z : %s",
		unrarErr, string(unrarOut),
		sevenZipErr, string(sevenZipOut),
	)
	log.Printf("[ERROR] %v", combined)
	return combined
}
2025-06-26 09:13:25 +00:00