Compare commits

..

2 Commits

Author SHA1 Message Date
1940fde7cc up 2025-08-18 21:45:27 +02:00
773ea9dc13 up 2025-08-18 21:41:40 +02:00

View File

@ -6,76 +6,126 @@ import (
"log" "log"
"os" "os"
"path/filepath" "path/filepath"
"strings"
"gorm.io/gorm" "gorm.io/gorm"
"canguidev/shelfy/internal/models" "canguidev/shelfy/internal/models"
) )
// Synchronise le disque avec la base : Film, Série, Manga, Magazine.
// - Disque : crée "upload/<Nom>" si manquant
// - DB : 1 ligne unique par nom (Path="<Nom>", PathName="<Nom>")
func CreateDefaultFolder(db *gorm.DB) { func CreateDefaultFolder(db *gorm.DB) {
type item struct { targets := []string{"Film", "Série", "Manga", "Magazine"}
Name string
} // 0) Normalise les entrées existantes (upload/X -> X) et dédoublonne
defs := []item{ if err := normalizeExistingPaths(db); err != nil {
{Name: "Film"}, log.Printf("[DB] Avertissement: normalisation partielle: %v", err)
{Name: "Série"},
{Name: "Manga"},
{Name: "Magazine"},
} }
for _, it := range defs { for _, name := range targets {
path := filepath.Join("upload", it.Name) // 1) Dossier physique
diskPath := filepath.Join("upload", name)
// 1) Dossier : on s'assure qu'il existe if err := os.MkdirAll(diskPath, 0o755); err != nil {
if err := os.MkdirAll(path, 0o755); err != nil { log.Printf("[FOLDER] Erreur création '%s' : %v", diskPath, err)
log.Printf("[FOLDER] Erreur création '%s' : %v", path, err)
continue continue
} }
// 2) Base : chercher une ligne existante par path OU path_name // 2) DB : cherche toute ligne liée à ce nom (avec/ sans 'upload/')
var existing models.PathDownload var rows []models.PathDownload
err := db.Where("path = ? OR path_name = ?", path, it.Name).First(&existing).Error if err := db.Where("path = ? OR path = ? OR path_name = ? OR path_name = ?",
name, "upload/"+name, name, "upload/"+name,
switch { ).Find(&rows).Error; err != nil {
case errors.Is(err, gorm.ErrRecordNotFound): log.Printf("[DB] Lookup '%s' échouée : %v", name, err)
// 2a) Pas trouvé -> on crée continue
row := models.PathDownload{
Path: path,
PathName: it.Name,
} }
switch len(rows) {
case 0:
// crée proprement
row := models.PathDownload{Path: name, PathName: name}
if err := db.Create(&row).Error; err != nil { if err := db.Create(&row).Error; err != nil {
log.Printf("[DB] Échec création PathDownload(%s, %s) : %v", path, it.Name, err) log.Printf("[DB] Création PathDownload('%s') KO : %v", name, err)
} else { } else {
log.Printf("[DB] Ligne créée PathDownload id=%v (%s | %s)", row.ID, row.PathName, row.Path) log.Printf("[DB] Créé PathDownload id=%v (%s)", row.ID, name)
} }
case err != nil:
// 2b) Erreur DB
log.Printf("[DB] Erreur recherche PathDownload(%s | %s) : %v", path, it.Name, err)
default: default:
// 2c) Trouvé -> on normalise si besoin // garde la première, normalise, supprime doublons
updates := map[string]interface{}{} main := rows[0]
if existing.Path != path { if main.Path != name || main.PathName != name {
updates["path"] = path if err := db.Model(&main).Updates(map[string]any{
} "path": name,
if existing.PathName != it.Name { "path_name": name,
updates["path_name"] = it.Name }).Error; err != nil {
} log.Printf("[DB] Update id=%v -> (%s|%s) KO : %v", main.ID, name, name, err)
if len(updates) > 0 {
if err := db.Model(&existing).Updates(updates).Error; err != nil {
log.Printf("[DB] Échec mise à jour PathDownload id=%v : %v", existing.ID, err)
} else { } else {
log.Printf("[DB] Mis à jour PathDownload id=%v -> %#v", existing.ID, updates) log.Printf("[DB] Normalisé id=%v -> (%s|%s)", main.ID, name, name)
} }
}
if len(rows) > 1 {
var dupIDs []int64
for _, r := range rows[1:] {
dupIDs = append(dupIDs, r.ID) // ID en int64
}
if err := db.Where("id IN ?", dupIDs).Delete(&models.PathDownload{}).Error; err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
log.Printf("[DB] Suppression doublons (%v) KO : %v", dupIDs, err)
} else { } else {
log.Printf("[DB] OK PathDownload id=%v déjà synchro (%s | %s)", existing.ID, existing.PathName, existing.Path) log.Printf("[DB] Doublons supprimés pour '%s' : %v", name, dupIDs)
}
} }
} }
// 3) (Facultatif) log côté fichiers log.Printf("[FOLDER] OK : %s (DB='%s')", diskPath, name)
if fi, err := os.Stat(path); err == nil && fi.IsDir() {
fmt.Printf("[FOLDER] OK : %s\n", path)
}
} }
} }
// normalizeExistingPaths rewrites every row whose path starts with "upload/"
// to the bare name ("upload/X" -> "X"), aligns path_name when it is empty or
// carries the same legacy prefix, then removes duplicate rows sharing a path
// (the row with the smallest id is kept). Returns the first error met.
func normalizeExistingPaths(db *gorm.DB) error {
	// 1) Strip the legacy "upload/" prefix from path (and path_name).
	var rows []models.PathDownload
	if err := db.Where("path LIKE ?", "upload/%").Find(&rows).Error; err != nil {
		return err
	}
	for _, r := range rows {
		trimmed := strings.TrimPrefix(r.Path, "upload/")
		updates := map[string]any{"path": trimmed}
		switch {
		case r.PathName == "":
			// Empty display name: fall back to the normalised path.
			updates["path_name"] = trimmed
		case strings.HasPrefix(r.PathName, "upload/"):
			// BUG FIX: trim the prefix from path_name itself; the old code
			// overwrote it with the trimmed *path*, silently renaming rows
			// whose path and path_name differed.
			updates["path_name"] = strings.TrimPrefix(r.PathName, "upload/")
		}
		if err := db.Model(&r).Updates(updates).Error; err != nil {
			return fmt.Errorf("update id=%v (%s -> %s) : %w", r.ID, r.Path, trimmed, err)
		}
	}

	// 2) Deduplicate by path: ordered by (path, id), the first occurrence of
	// each path is the smallest id and is kept.
	type rec struct {
		ID   int64 // matches models.PathDownload primary key type
		Path string
	}
	var all []rec
	if err := db.Model(&models.PathDownload{}).
		Select("id, path").
		Order("path, id").
		Scan(&all).Error; err != nil {
		return err
	}
	seen := make(map[string]int64, len(all))
	var dupIDs []int64
	for _, r := range all {
		if _, ok := seen[r.Path]; ok {
			dupIDs = append(dupIDs, r.ID)
			continue
		}
		seen[r.Path] = r.ID
	}
	if len(dupIDs) > 0 {
		if err := db.Where("id IN ?", dupIDs).Delete(&models.PathDownload{}).Error; err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
			return fmt.Errorf("delete dups %v : %w", dupIDs, err)
		}
		log.Printf("[DB] Doublons supprimés après normalisation: %v", dupIDs)
	}
	return nil
}