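// Package download manages download jobs: it persists them with GORM, keeps an
// in-memory registry, runs segmented or single-threaded HTTP downloads,
// extracts .zip/.rar archives, and notifies SSE subscribers of state changes.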
package download
import (
	"archive/zip"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strings"
	"sync"
	"time"

	"app/shelfly/internal/debridlink"
	"app/shelfly/internal/models"

	"gorm.io/gorm"
)

// DownloadJob represents a single download tracked in memory and persisted with GORM.
type DownloadJob struct {
	ID        string    `gorm:"primaryKey;column:id"`
	Link      string    `gorm:"column:link"`
	Name      string    `gorm:"column:name"`
	Status    string    `gorm:"column:status"`
	PathID    int       `gorm:"column:path_id"` // int, not uint
	Size      int64     `gorm:"column:size"`
	Host      string    `gorm:"column:host"`
	Progress  int       `gorm:"column:progress"`
	StreamURL string    `gorm:"column:stream_url"`
	Speed     int       `gorm:"column:speed;default:0"` // speed in KB/s
	CreatedAt time.Time `gorm:"autoCreateTime"`
	UpdatedAt time.Time `gorm:"autoUpdateTime"`
}

// In-memory job registry, guarded by jobsMu.
var (
	jobs   = make(map[string]*DownloadJob)
	jobsMu sync.Mutex
)

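// Job lifecycle, as implemented below: RegisterJobWithDB persists a job,
// StartDownload (or StartDownloadSingleThread) performs the transfer and
// updates progress, and Broadcast pushes each change to SSE subscribers.
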
// RegisterJobWithDB stores a job both in memory and in the database.
func RegisterJobWithDB(job *DownloadJob, db *gorm.DB) error {
	var existing DownloadJob

	// Look up the existing job, ignoring soft deletes in case they were enabled by mistake.
	err := db.Unscoped().First(&existing, "id = ?", job.ID).Error

	if err == nil {
		// The job already exists: update it.
		log.Printf("[INFO] Updating existing job: %s\n", job.ID)

		err = db.Model(&existing).Updates(map[string]interface{}{
			"link":       job.Link,
			"name":       job.Name,
			"status":     job.Status,
			"path_id":    job.PathID,
			"size":       job.Size,
			"host":       job.Host,
			"progress":   job.Progress,
			"stream_url": job.StreamURL,
			"updated_at": time.Now(),
		}).Error

		if err != nil {
			log.Printf("[ERROR] Update failed: %v\n", err)
			return err
		}
	} else if errors.Is(err, gorm.ErrRecordNotFound) {
		// The job does not exist yet: create it.
		if err := db.Create(job).Error; err != nil {
			log.Printf("[ERROR] Insert failed: %v\n", err)
			return err
		}
		log.Printf("[INFO] New job registered: %s\n", job.ID)
	} else {
		// Unexpected lookup error.
		log.Printf("[ERROR] Unexpected error while looking up job: %v\n", err)
		return err
	}

	// Refresh the in-memory map.
	jobsMu.Lock()
	jobs[job.ID] = job
	jobsMu.Unlock()

	return nil
}

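// Example registration (a sketch only; the dlID, link, pathID, client and db
// variables below are assumed to come from the caller and are not defined in
// this package):
//
//	job := &DownloadJob{ID: dlID, Link: link, Name: "file.mkv", Status: "pending", PathID: pathID}
//	if err := RegisterJobWithDB(job, db); err != nil {
//		log.Printf("[ERROR] could not register job: %v", err)
//	} else {
//		go StartDownload(job, link, client, db)
//	}
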
// InitJobsFromDB loads all jobs from the database into memory (at startup).
func InitJobsFromDB(db *gorm.DB) error {
	var jobList []DownloadJob
	if err := db.Find(&jobList).Error; err != nil {
		return err
	}

	jobsMu.Lock()
	defer jobsMu.Unlock()

	for _, j := range jobList {
		jobCopy := j
		jobs[j.ID] = &jobCopy
	}
	log.Printf("[JOB] %d jobs reloaded from the database\n", len(jobs))
	return nil
}

// UpdateJobStatus updates a job's status and persists it.
func UpdateJobStatus(id string, status string, db *gorm.DB) {
	jobsMu.Lock()
	defer jobsMu.Unlock()

	if job, ok := jobs[id]; ok {
		job.Status = status
		job.UpdatedAt = time.Now()
		if db != nil {
			if err := db.Save(job).Error; err != nil {
				log.Printf("[ERROR] Failed to persist status for %s: %v\n", id, err)
			}
		}
	}
	Broadcast()
}

// UpdateJobProgress updates a job's progress and persists it.
func UpdateJobProgress(id string, progress int, db *gorm.DB) {
	jobsMu.Lock()
	defer jobsMu.Unlock()

	if job, ok := jobs[id]; ok {
		job.Progress = progress
		job.UpdatedAt = time.Now()
		if db != nil {
			if err := db.Save(job).Error; err != nil {
				log.Printf("[ERROR] Failed to persist progress for %s: %v\n", id, err)
			}
		}
	}
}

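// Note: UpdateJobStatus broadcasts immediately so SSE clients see status
// changes, while progress updates rely on the once-per-second ticker in
// StartDownload (the single-threaded path only persists progress).
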
// DeleteJob removes a job from memory and from the database.
func DeleteJob(id string, db *gorm.DB) error {
	// Remove from memory.
	jobsMu.Lock()
	delete(jobs, id)
	jobsMu.Unlock()

	// Remove from the database.
	if err := db.Delete(&DownloadJob{}, "id = ?", id).Error; err != nil {
		log.Printf("[ERROR] Failed to delete job from the database: %v\n", err)
		return err
	}

	log.Printf("[JOB] Deleted: %s\n", id)
	return nil
}

// ListJobs returns all jobs, most recent first.
func ListJobs(db *gorm.DB) []*DownloadJob {
	var jobsFromDB []*DownloadJob
	if err := db.Order("created_at desc").Find(&jobsFromDB).Error; err != nil {
		log.Printf("[ERROR] Could not load jobs from the database: %v\n", err)
		return []*DownloadJob{}
	}
	return jobsFromDB
}

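// StartDownload fetches downloadURL into the directory referenced by the job's
// PathID. It probes the server with a HEAD request; when byte ranges are
// supported it downloads four segments in parallel, merges them, and extracts
// .zip/.rar archives, otherwise it falls back to StartDownloadSingleThread.
// Progress and speed are persisted once per second and broadcast to subscribers.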
func StartDownload(job *DownloadJob, downloadURL string, client *debridlink.Client, db *gorm.DB) {
	UpdateJobStatus(job.ID, "downloading", db)

	var path models.PathDownload
	if err := db.First(&path, job.PathID).Error; err != nil {
		UpdateJobStatus(job.ID, "failed", db)
		return
	}

	// Probe the server to learn the file size and Range support.
	resp, err := http.Head(downloadURL)
	if err != nil {
		UpdateJobStatus(job.ID, "failed", db)
		return
	}
	resp.Body.Close() // HEAD responses carry no body but must still be closed
	if resp.StatusCode != http.StatusOK {
		UpdateJobStatus(job.ID, "failed", db)
		return
	}
	size := resp.ContentLength
	if size <= 0 {
		UpdateJobStatus(job.ID, "failed", db)
		return
	}

	acceptRanges := resp.Header.Get("Accept-Ranges")
	if acceptRanges != "bytes" {
		log.Println("[INFO] Server does not support Range requests, falling back to single thread")
		StartDownloadSingleThread(job, downloadURL, db, path.Path)
		return
	}

	const numSegments = 4
	segmentSize := size / numSegments
	tmpFiles := make([]string, numSegments)
	wg := sync.WaitGroup{}
	progressChan := make(chan int64, 100)
	done := make(chan bool)

	// First error reported by any segment worker (nil when all succeed).
	var segMu sync.Mutex
	var segErr error

	// Progress + speed reporting.
	var downloaded int64
	go func() {
		var lastTotal int64 = 0
		lastUpdate := time.Now()

		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()

		for {
			select {
			case n := <-progressChan:
				downloaded += n
			case <-ticker.C:
				elapsed := time.Since(lastUpdate).Seconds()
				if elapsed > 0 {
					speed := int(float64(downloaded-lastTotal) / elapsed / 1024) // KB/s
					lastTotal = downloaded
					lastUpdate = time.Now()
					progress := int((downloaded * 100) / size)

					// Persist progress and speed.
					db.Model(&DownloadJob{}).Where("id = ?", job.ID).Updates(map[string]interface{}{
						"progress": progress,
						"speed":    speed,
					})
					Broadcast()
				}
			case <-done:
				return
			}
		}
	}()

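	// Segment layout: worker i requests the inclusive byte range
	// [i*segmentSize, (i+1)*segmentSize-1], and the last worker is stretched
	// to size-1 so the remainder of the integer division is not lost
	// (e.g. 10 bytes over 4 segments gives parts of 2, 2, 2 and 4 bytes).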
	// Parallel download: one goroutine per segment.
	for i := 0; i < numSegments; i++ {
		start := int64(i) * segmentSize
		end := start + segmentSize - 1
		if i == numSegments-1 {
			end = size - 1
		}

		tmpPath := filepath.Join(os.TempDir(), fmt.Sprintf("%s.part%d", job.ID, i))
		tmpFiles[i] = tmpPath

		wg.Add(1)
		go func(start, end int64, tmpPath string) {
			defer wg.Done()
			if err := downloadSegment(downloadURL, start, end, tmpPath, progressChan); err != nil {
				log.Printf("[ERROR] Segment %d-%d failed: %v\n", start, end, err)
				segMu.Lock()
				if segErr == nil {
					segErr = err
				}
				segMu.Unlock()
			}
		}(start, end, tmpPath)
	}

	wg.Wait()
	close(done)

	if segErr != nil {
		UpdateJobStatus(job.ID, "failed", db)
		return
	}

	// Merge the segments into the final file.
	if err := os.MkdirAll(path.Path, os.ModePerm); err != nil {
		log.Printf("[ERROR] Could not create directory %s: %v\n", path.Path, err)
		UpdateJobStatus(job.ID, "failed", db)
		return
	}
	safeName := SanitizeFileName(job.Name)
	finalPath := generateUniqueFilePath(path.Path, safeName)

	out, err := os.Create(finalPath)
	if err != nil {
		UpdateJobStatus(job.ID, "failed", db)
		return
	}
	defer out.Close()

	for _, tmpPath := range tmpFiles {
		part, err := os.Open(tmpPath)
		if err != nil {
			UpdateJobStatus(job.ID, "failed", db)
			return
		}
		if _, err := io.Copy(out, part); err != nil {
			part.Close()
			UpdateJobStatus(job.ID, "failed", db)
			return
		}
		part.Close()
		os.Remove(tmpPath)
	}
	UpdateJobProgress(job.ID, 100, db)

	// Extract archives; known video extensions are left as-is.
	ext := strings.ToLower(filepath.Ext(finalPath))
	videoExts := map[string]bool{".mkv": true, ".avi": true, ".mp4": true}

	if !videoExts[ext] {
		switch ext {
		case ".zip":
			if err := unzip(finalPath, path.Path); err != nil {
				log.Printf("[ERROR] ZIP extraction failed: %v\n", err)
				UpdateJobStatus(job.ID, "failed", db)
				return
			}
		case ".rar":
			if err := unrarExtract(finalPath, path.Path); err != nil {
				log.Printf("[ERROR] RAR extraction failed: %v\n", err)
				UpdateJobStatus(job.ID, "failed", db)
				return
			}
		default:
			log.Printf("[INFO] Unhandled extension: %s\n", ext)
		}
	}

	UpdateJobStatus(job.ID, "done", db)
	log.Printf("[OK] File downloaded: %s\n", finalPath)
}

// generateUniqueFilePath appends a numeric suffix if the file already exists.
func generateUniqueFilePath(basePath, fileName string) string {
	finalPath := filepath.Join(basePath, fileName)
	if _, err := os.Stat(finalPath); os.IsNotExist(err) {
		return finalPath
	}

	base := strings.TrimSuffix(fileName, filepath.Ext(fileName))
	ext := filepath.Ext(fileName)
	counter := 1

	for {
		newName := fmt.Sprintf("%s (%d)%s", base, counter, ext)
		newPath := filepath.Join(basePath, newName)
		if _, err := os.Stat(newPath); os.IsNotExist(err) {
			return newPath
		}
		counter++
	}
}

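// StartDownloadSingleThread streams downloadURL to disk in one request, used
// when the server does not advertise byte-range support. The file is written
// under basePath using the sanitized job name, and progress is persisted at
// most every 500ms.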
func StartDownloadSingleThread(job *DownloadJob, downloadURL string, db *gorm.DB, basePath string) {
	UpdateJobStatus(job.ID, "running", db)

	resp, err := http.Get(downloadURL)
	if err != nil {
		log.Printf("[ERROR] Download failed: %v\n", err)
		UpdateJobStatus(job.ID, "failed", db)
		return
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		log.Printf("[ERROR] HTTP error: %s\n", resp.Status)
		UpdateJobStatus(job.ID, "failed", db)
		return
	}

	// Create the destination directory if needed.
	if err := os.MkdirAll(basePath, os.ModePerm); err != nil {
		log.Printf("[ERROR] Could not create directory %s: %v\n", basePath, err)
		UpdateJobStatus(job.ID, "failed", db)
		return
	}

	destPath := filepath.Join(basePath, SanitizeFileName(job.Name))
	outFile, err := os.Create(destPath)
	if err != nil {
		log.Printf("[ERROR] Could not create file: %v\n", err)
		UpdateJobStatus(job.ID, "failed", db)
		return
	}
	defer outFile.Close()

	// Determine the total size.
	totalSize := resp.ContentLength
	if totalSize <= 0 && job.Size > 0 {
		totalSize = job.Size
	}

	buf := make([]byte, 32*1024) // 32 KB
	var downloaded int64
	lastUpdate := time.Now()

	for {
		n, err := resp.Body.Read(buf)
		if n > 0 {
			if _, writeErr := outFile.Write(buf[:n]); writeErr != nil {
				log.Printf("[ERROR] Write failed: %v\n", writeErr)
				UpdateJobStatus(job.ID, "failed", db)
				return
			}
			downloaded += int64(n)
		}
		if err != nil {
			if err == io.EOF {
				break
			}
			log.Printf("[ERROR] Read error: %v\n", err)
			UpdateJobStatus(job.ID, "failed", db)
			return
		}

		// Throttled progress update.
		if time.Since(lastUpdate) > 500*time.Millisecond && totalSize > 0 {
			progress := int((downloaded * 100) / totalSize)
			UpdateJobProgress(job.ID, progress, db)
			lastUpdate = time.Now()
		}
	}

	UpdateJobProgress(job.ID, 100, db)
	UpdateJobStatus(job.ID, "done", db)
	log.Printf("[OK] File downloaded (single thread): %s\n", destPath)
}

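// downloadSegment fetches the inclusive byte range [start, end] of url into
// dest, reporting every chunk written on progressChan.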
func downloadSegment(url string, start, end int64, dest string, progressChan chan<- int64) error {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// A server that ignores the Range header would corrupt the merged file.
	if resp.StatusCode != http.StatusPartialContent {
		return fmt.Errorf("expected 206 Partial Content, got %s", resp.Status)
	}

	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()

	buf := make([]byte, 32*1024)
	for {
		n, err := resp.Body.Read(buf)
		if n > 0 {
			if _, err := out.Write(buf[:n]); err != nil {
				return err
			}
			progressChan <- int64(n) // report progress
		}
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
	}

	return nil
}

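// SanitizeFileName replaces every character outside [A-Za-z0-9_.-] with an
// underscore so the job name is safe to use as a file name.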
func SanitizeFileName(name string) string {
	re := regexp.MustCompile(`[^\w\-.]`)
	return re.ReplaceAllString(name, "_")
}

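// SSE fan-out: subscribers holds one buffered channel per connected client.
// A handler is expected to pair Subscribe with Unsubscribe, roughly like the
// hypothetical sketch below (the http.ResponseWriter/Request wiring is not
// part of this package):
//
//	ch := Subscribe()
//	defer Unsubscribe(ch)
//	for {
//		select {
//		case <-ch:
//			// re-send the current job list to the SSE client
//		case <-r.Context().Done():
//			return
//		}
//	}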
var (
	subscribers   = make(map[chan struct{}]struct{})
	subscribersMu sync.Mutex
)

// Subscribe returns a notification channel for an SSE client; release it with Unsubscribe.
func Subscribe() chan struct{} {
	ch := make(chan struct{}, 1)
	subscribersMu.Lock()
	subscribers[ch] = struct{}{}
	subscribersMu.Unlock()
	return ch
}

// Unsubscribe removes the channel and closes it.
func Unsubscribe(ch chan struct{}) {
	subscribersMu.Lock()
	delete(subscribers, ch)
	subscribersMu.Unlock()
	close(ch)
}

// Broadcast notifies all subscribers.
func Broadcast() {
	subscribersMu.Lock()
	defer subscribersMu.Unlock()

	for ch := range subscribers {
		select {
		case ch <- struct{}{}:
			log.Println("Broadcast sent to a client")
		default:
			log.Println("Client not ready, notification skipped")
		}
	}
}

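// unzip extracts srcZip into destDir, recreating directories and preserving
// file modes.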
func unzip(srcZip, destDir string) error {
	r, err := zip.OpenReader(srcZip)
	if err != nil {
		return err
	}
	defer r.Close()

	for _, f := range r.File {
		fpath := filepath.Join(destDir, f.Name)

		// Guard against zip-slip: the target must stay inside destDir.
		if !strings.HasPrefix(fpath, filepath.Clean(destDir)+string(os.PathSeparator)) {
			return fmt.Errorf("illegal file path in archive: %s", f.Name)
		}

		if f.FileInfo().IsDir() {
			if err := os.MkdirAll(fpath, os.ModePerm); err != nil {
				return err
			}
			continue
		}
		if err := os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil {
			return err
		}

		in, err := f.Open()
		if err != nil {
			return err
		}

		out, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
		if err != nil {
			in.Close()
			return err
		}

		// Close both files inside the loop so handles are not held until the
		// whole archive is done.
		_, copyErr := io.Copy(out, in)
		in.Close()
		out.Close()
		if copyErr != nil {
			return copyErr
		}
	}
	return nil
}

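// unrarExtract extracts srcRar into destDir with the external unrar binary and
// falls back to 7z when unrar fails or is not installed.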
func unrarExtract(srcRar, destDir string) error {
	cmd := exec.Command("unrar", "x", "-y", srcRar, destDir)
	output, err := cmd.CombinedOutput()
	if err == nil {
		return nil
	}

	// Fallback: try 7z when unrar is unavailable or fails.
	cmd7z := exec.Command("7z", "x", srcRar, "-y", "-o"+destDir)
	output7z, err7z := cmd7z.CombinedOutput()
	if err7z == nil {
		return nil
	}

	return fmt.Errorf(
		"unrar failed: %v, output: %s\n7z fallback failed: %v, output: %s",
		err, string(output),
		err7z, string(output7z),
	)
}