Initial commit
Proof-of-concept implementation. Bugs will occur.
56
internal/domains/cacher/cacher.go
Normal file
@@ -0,0 +1,56 @@
package cacher

import (
	"fmt"
	"path/filepath"
	"sync"

	"source.hodakov.me/hdkv/faketunes/internal/application"
	"source.hodakov.me/hdkv/faketunes/internal/domains"
	"source.hodakov.me/hdkv/faketunes/internal/domains/cacher/models"
)

var (
	_ domains.Cacher = new(Cacher)
	_ domains.Domain = new(Cacher)
)

type Cacher struct {
	app *application.App

	transcoder domains.Transcoder

	cacheDir    string
	cacheMutex  sync.RWMutex
	currentSize int64
	maxSize     int64
	items       map[string]*models.CacheItem
	stat        map[string]*models.CacherStat
}

func New(app *application.App) *Cacher {
	return &Cacher{
		app:      app,
		cacheDir: filepath.Join(app.Config().Paths.Destination, ".cache"),
		maxSize:  app.Config().FakeTunes.CacheSize * 1024 * 1024,
		items:    make(map[string]*models.CacheItem),
		stat:     make(map[string]*models.CacherStat),
	}
}

func (c *Cacher) ConnectDependencies() error {
	transcoder, ok := c.app.RetrieveDomain(domains.TranscoderName).(domains.Transcoder)
	if !ok {
		return fmt.Errorf(
			"%w: %w (%s)", ErrCacher, ErrConnectDependencies,
			"transcoder domain interface conversion failed",
		)
	}

	c.transcoder = transcoder

	return nil
}

func (c *Cacher) Start() error {
	return nil
}
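The two blank-identifier assignments at the top of cacher.go are compile-time interface checks: the build fails right there if Cacher stops satisfying domains.Cacher or domains.Domain. A minimal, self-contained sketch of the same pattern (the Greeter/English names are illustrative, not from this repository):

package main

import "fmt"

type Greeter interface {
	Greet() string
}

type English struct{}

func (English) Greet() string { return "hello" }

// Compile-time assertion: removing the Greet method above breaks the build
// here, not at some distant call site.
var _ Greeter = English{}

func main() {
	fmt.Println(English{}.Greet())
}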
37
internal/domains/cacher/cleanup.go
Normal file
@@ -0,0 +1,37 @@
package cacher

import (
	"fmt"
	"os"
	"time"
)

// cleanup evicts the least recently updated items until the cache fits into
// maxSize. The caller must hold cacheMutex.
func (c *Cacher) cleanup() error {
	for c.currentSize > c.maxSize && len(c.items) > 0 {
		var (
			itemKey    string
			itemSize   int64
			oldestTime time.Time
		)

		for key, item := range c.items {
			if itemKey == "" || item.Updated.Before(oldestTime) {
				itemKey = key
				oldestTime = item.Updated
				itemSize = item.Size
			}
		}

		if itemKey != "" {
			err := os.Remove(c.items[itemKey].Path)
			if err != nil {
				return fmt.Errorf("%w: %w (%w)", ErrCacher, ErrFailedToDeleteCachedFile, err)
			}

			delete(c.items, itemKey)
			c.currentSize -= itemSize
		}
	}

	return nil
}
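The eviction loop removes the oldest entry first and repeats until the total size fits under the budget. A self-contained sketch of the same oldest-first selection on plain data (the sizes and timestamps are made up):

package main

import (
	"fmt"
	"time"
)

type entry struct {
	size    int64
	updated time.Time
}

func main() {
	now := time.Now()
	items := map[string]entry{
		"a": {size: 40, updated: now.Add(-3 * time.Hour)},
		"b": {size: 30, updated: now.Add(-1 * time.Hour)},
		"c": {size: 50, updated: now.Add(-2 * time.Hour)},
	}
	current, budget := int64(120), int64(60)

	// Evict the oldest entry until the total size fits the budget.
	for current > budget && len(items) > 0 {
		var oldestKey string
		var oldestTime time.Time
		for k, it := range items {
			if oldestKey == "" || it.updated.Before(oldestTime) {
				oldestKey, oldestTime = k, it.updated
			}
		}
		current -= items[oldestKey].size
		delete(items, oldestKey)
		fmt.Println("evicted", oldestKey, "remaining size", current)
	}
}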
9
internal/domains/cacher/dto/cache_item.go
Normal file
@@ -0,0 +1,9 @@
package dto

import "time"

type CacheItem struct {
	Path    string
	Size    int64
	Updated time.Time
}
11
internal/domains/cacher/errors.go
Normal file
@@ -0,0 +1,11 @@
package cacher

import "errors"

var (
	ErrCacher                   = errors.New("cacher")
	ErrConnectDependencies      = errors.New("failed to connect dependencies")
	ErrFailedToDeleteCachedFile = errors.New("failed to delete cached file")
	ErrFailedToGetSourceFile    = errors.New("failed to get source file")
	ErrFailedToTranscodeFile    = errors.New("failed to transcode file")
)
98
internal/domains/cacher/files.go
Normal file
@@ -0,0 +1,98 @@
package cacher

import (
	"crypto/md5"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"source.hodakov.me/hdkv/faketunes/internal/domains/cacher/dto"
	"source.hodakov.me/hdkv/faketunes/internal/domains/cacher/models"
)

// GetFileDTO returns the cached ALAC file, transcoding the source with the
// transcoder domain if needed.
func (c *Cacher) GetFileDTO(sourcePath string) (*dto.CacheItem, error) {
	item, err := c.getFile(sourcePath)
	if err != nil {
		return nil, fmt.Errorf("%w: %w (%w)", ErrCacher, ErrFailedToGetSourceFile, err)
	}

	return models.CacheItemModelToDTO(item), nil
}

func (c *Cacher) getFile(sourcePath string) (*models.CacheItem, error) {
	sourceFileInfo, err := os.Stat(sourcePath)
	if err != nil {
		return nil, fmt.Errorf("%w: %w (%w)", ErrCacher, ErrFailedToGetSourceFile, err)
	}

	keyData := fmt.Sprintf("%s:%d", sourcePath, sourceFileInfo.ModTime().UnixNano())
	hash := md5.Sum([]byte(keyData))
	cacheKey := fmt.Sprintf("%x", hash)
	cacheFilePath := filepath.Join(c.cacheDir, cacheKey+".m4a")

	c.cacheMutex.Lock()
	defer c.cacheMutex.Unlock()

	// Check if file information exists in cache
	if item, ok := c.items[cacheKey]; ok {
		if _, err := os.Stat(item.Path); err == nil {
			// File exists in cache and on disk
			item.Updated = time.Now().UTC()

			c.updateCachedStatLocked(sourcePath, item.Size)

			return item, nil
		}

		// Stale entry: the cached file disappeared from disk.
		delete(c.items, cacheKey)
		c.currentSize -= item.Size
	}

	// Check if file exists on disk but information about it doesn't exist in
	// the memory (for example, after application restart).
	if cachedFileInfo, err := os.Stat(cacheFilePath); err == nil {
		// Verify that the file on disk is newer than the source file and has content.
		// If that's the case, return the item information and store it in memory.
		if cachedFileInfo.ModTime().After(sourceFileInfo.ModTime()) &&
			cachedFileInfo.Size() > 1024 {
			item := &models.CacheItem{
				Path:    cacheFilePath,
				Size:    cachedFileInfo.Size(),
				Updated: time.Now().UTC(),
			}
			c.items[cacheKey] = item
			c.currentSize += cachedFileInfo.Size()

			c.updateCachedStatLocked(sourcePath, item.Size)

			return item, nil
		}
	}

	// File does not exist on disk, need to transcode.
	// Register in the queue
	c.transcoder.QueueChannel() <- struct{}{}
	defer func() {
		<-c.transcoder.QueueChannel()
	}()

	// Convert file
	size, err := c.transcoder.Convert(sourcePath, cacheFilePath)
	if err != nil {
		return nil, fmt.Errorf("%w: %w (%w)", ErrCacher, ErrFailedToTranscodeFile, err)
	}

	// Add converted file information to cache
	item := &models.CacheItem{
		Path:    cacheFilePath,
		Size:    size,
		Updated: time.Now().UTC(),
	}
	c.items[cacheKey] = item
	c.currentSize += size

	c.updateCachedStatLocked(sourcePath, size)

	// TODO: run cleanup on inotify events.
	// Best-effort eviction; a failed cleanup should not fail the request itself.
	_ = c.cleanup()

	return item, nil
}
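getFile throttles transcodes by sending a token into the transcoder's queue channel before converting and draining one token afterwards. The transcoder domain itself is not part of this commit; assuming QueueChannel is backed by a buffered channel, the pattern is an ordinary counting semaphore, sketched here in isolation:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	// A buffered channel with capacity 2 allows at most two concurrent "transcodes".
	queue := make(chan struct{}, 2)

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()

			queue <- struct{}{}        // acquire a slot; blocks while 2 jobs are running
			defer func() { <-queue }() // release the slot when done

			fmt.Println("transcoding", n)
			time.Sleep(100 * time.Millisecond) // stand-in for the actual conversion
		}(i)
	}
	wg.Wait()
}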
21
internal/domains/cacher/models/cache_item.go
Normal file
@@ -0,0 +1,21 @@
package models

import (
	"time"

	"source.hodakov.me/hdkv/faketunes/internal/domains/cacher/dto"
)

type CacheItem struct {
	Path    string
	Size    int64
	Updated time.Time
}

func CacheItemModelToDTO(item *CacheItem) *dto.CacheItem {
	return &dto.CacheItem{
		Path:    item.Path,
		Size:    item.Size,
		Updated: item.Updated,
	}
}
9
internal/domains/cacher/models/cacher_stat.go
Normal file
@@ -0,0 +1,9 @@
package models

import "time"

// CacherStat represents the size of a single cached object.
type CacherStat struct {
	Size    int64
	Created time.Time
}
64
internal/domains/cacher/stats.go
Normal file
@@ -0,0 +1,64 @@
package cacher

import (
	"crypto/md5"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"source.hodakov.me/hdkv/faketunes/internal/domains/cacher/models"
)

// GetStat returns the file size without triggering conversion (for ls/stat).
func (c *Cacher) GetStat(sourcePath string) (int64, error) {
	// First check cache
	if size, ok := c.getCachedStat(sourcePath); ok {
		return size, nil
	}

	// Check if we have a cached converted file
	info, err := os.Stat(sourcePath)
	if err != nil {
		return 0, err
	}

	keyData := fmt.Sprintf("%s:%d", sourcePath, info.ModTime().UnixNano())
	hash := md5.Sum([]byte(keyData))
	key := fmt.Sprintf("%x", hash)
	cachePath := filepath.Join(c.cacheDir, key+".m4a")

	// Check if converted file exists and is valid
	if cacheInfo, err := os.Stat(cachePath); err == nil {
		if cacheInfo.ModTime().After(info.ModTime()) && cacheInfo.Size() > 1024 {
			c.updateCachedStat(sourcePath, cacheInfo.Size())

			return cacheInfo.Size(), nil
		}
	}

	// Return estimated size (FLAC file size as placeholder)
	return info.Size(), nil
}

// updateCachedStat updates the stat cache. It takes cacheMutex itself and must
// not be called while the mutex is already held; use updateCachedStatLocked there.
func (c *Cacher) updateCachedStat(sourcePath string, size int64) {
	c.cacheMutex.Lock()
	defer c.cacheMutex.Unlock()

	c.updateCachedStatLocked(sourcePath, size)
}

// updateCachedStatLocked updates the stat cache; the caller must hold cacheMutex.
// getFile calls this variant because sync.RWMutex is not reentrant and taking the
// lock again would deadlock.
func (c *Cacher) updateCachedStatLocked(sourcePath string, size int64) {
	c.stat[sourcePath] = &models.CacherStat{
		Size:    size,
		Created: time.Now().UTC(),
	}
}

// getCachedStat returns cached file stats
func (c *Cacher) getCachedStat(sourcePath string) (int64, bool) {
	c.cacheMutex.RLock()
	defer c.cacheMutex.RUnlock()

	if stat, ok := c.stat[sourcePath]; ok {
		return stat.Size, true
	}

	return 0, false
}
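Both files.go and stats.go derive the cache key the same way: an MD5 digest of the source path plus its modification time in nanoseconds, so a touched source file maps to a fresh cache entry. A self-contained sketch of the key derivation (the path and timestamp below are made up):

package main

import (
	"crypto/md5"
	"fmt"
	"path/filepath"
	"time"
)

// cacheKey mirrors the key scheme used in getFile and GetStat.
func cacheKey(sourcePath string, modTime time.Time) string {
	keyData := fmt.Sprintf("%s:%d", sourcePath, modTime.UnixNano())
	return fmt.Sprintf("%x", md5.Sum([]byte(keyData)))
}

func main() {
	modTime := time.Date(2024, 1, 2, 3, 4, 5, 0, time.UTC)
	key := cacheKey("/music/album/track.flac", modTime)
	fmt.Println(filepath.Join("/music/.cache", key+".m4a"))
}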