ntool/nlog/rotatelogger.go

package nlog
import (
"compress/gzip"
"errors"
"fmt"
"git.noahlan.cn/noahlan/ntool/nfs"
"log"
"os"
"path"
"path/filepath"
"sort"
"strings"
"sync"
"time"
)
const (
dateFormat = "2006-01-02"
fileTimeFormat = time.RFC3339
hoursPerDay = 24
bufferSize = 100
defaultDirMode = 0o755
defaultFileMode = 0o600
gzipExt = ".gz"
megaBytes = 1 << 20
)
// ErrLogFileClosed is an error that indicates the log file is already closed.
var ErrLogFileClosed = errors.New("error: log file closed")
type (
// A RotateRule interface is used to define the log rotating rules.
RotateRule interface {
BackupFileName() string
MarkRotated()
OutdatedFiles() []string
ShallRotate(size int64) bool
}
// A RotateLogger is a Logger that can rotate log files with given rules.
RotateLogger struct {
filename string
backup string
fp *os.File
channel chan []byte
done chan struct{}
rule RotateRule
compress bool
// can't use threading.RoutineGroup because of import cycle
waitGroup sync.WaitGroup
closeOnce sync.Once
currentSize int64
}
// A DailyRotateRule is a rule to rotate the log files daily.
DailyRotateRule struct {
rotatedTime string
filename string
delimiter string
days int
gzip bool
}
// SizeLimitRotateRule is a rotation rule that rotates the log file based on its size.
SizeLimitRotateRule struct {
DailyRotateRule
maxSize int64
maxBackups int
}
)
// DefaultRotateRule is a default log rotating rule, currently DailyRotateRule.
func DefaultRotateRule(filename, delimiter string, days int, gzip bool) RotateRule {
return &DailyRotateRule{
rotatedTime: getNowDate(),
filename: filename,
delimiter: delimiter,
days: days,
gzip: gzip,
}
}
// BackupFileName returns the backup filename on rotating.
func (r *DailyRotateRule) BackupFileName() string {
return fmt.Sprintf("%s%s%s", r.filename, r.delimiter, getNowDate())
}
// MarkRotated marks the rotated time of r to be the current time.
func (r *DailyRotateRule) MarkRotated() {
r.rotatedTime = getNowDate()
}
// OutdatedFiles returns the files that have exceeded the configured keeping days.
func (r *DailyRotateRule) OutdatedFiles() []string {
if r.days <= 0 {
return nil
}
var pattern string
if r.gzip {
pattern = fmt.Sprintf("%s%s*%s", r.filename, r.delimiter, gzipExt)
} else {
pattern = fmt.Sprintf("%s%s*", r.filename, r.delimiter)
}
files, err := filepath.Glob(pattern)
if err != nil {
Errorf("failed to delete outdated log files, error: %s", err)
return nil
}
var buf strings.Builder
boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay*r.days)).Format(dateFormat)
buf.WriteString(r.filename)
buf.WriteString(r.delimiter)
buf.WriteString(boundary)
if r.gzip {
buf.WriteString(gzipExt)
}
boundaryFile := buf.String()
var outdates []string
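	// backup names embed the date in the sortable "2006-01-02" layout, so comparing
	// names lexicographically against boundaryFile amounts to comparing dates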
for _, file := range files {
if file < boundaryFile {
outdates = append(outdates, file)
}
}
return outdates
}
// ShallRotate checks if the file should be rotated.
func (r *DailyRotateRule) ShallRotate(_ int64) bool {
return len(r.rotatedTime) > 0 && getNowDate() != r.rotatedTime
}
// NewSizeLimitRotateRule returns a rotation rule with a size limit; maxSize is in megabytes.
func NewSizeLimitRotateRule(filename, delimiter string, days, maxSize, maxBackups int, gzip bool) RotateRule {
return &SizeLimitRotateRule{
DailyRotateRule: DailyRotateRule{
rotatedTime: getNowDateInRFC3339Format(),
filename: filename,
delimiter: delimiter,
days: days,
gzip: gzip,
},
maxSize: int64(maxSize) * megaBytes,
maxBackups: maxBackups,
}
}
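// BackupFileName returns the backup filename on rotating.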
func (r *SizeLimitRotateRule) BackupFileName() string {
dir := filepath.Dir(r.filename)
prefix, ext := r.parseFilename()
timestamp := getNowDateInRFC3339Format()
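	// e.g. "logs/app.log" with delimiter "-" becomes "logs/app-<RFC3339 timestamp>.log"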
return filepath.Join(dir, fmt.Sprintf("%s%s%s%s", prefix, r.delimiter, timestamp, ext))
}
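// MarkRotated marks the rotated time of r to be the current time.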
func (r *SizeLimitRotateRule) MarkRotated() {
r.rotatedTime = getNowDateInRFC3339Format()
}
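// OutdatedFiles returns the backup files that exceed maxBackups or the keeping days.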
func (r *SizeLimitRotateRule) OutdatedFiles() []string {
dir := filepath.Dir(r.filename)
prefix, ext := r.parseFilename()
var pattern string
if r.gzip {
pattern = fmt.Sprintf("%s%s%s%s*%s%s", dir, string(filepath.Separator),
prefix, r.delimiter, ext, gzipExt)
} else {
pattern = fmt.Sprintf("%s%s%s%s*%s", dir, string(filepath.Separator),
prefix, r.delimiter, ext)
}
files, err := filepath.Glob(pattern)
if err != nil {
Errorf("failed to delete outdated log files, error: %s", err)
return nil
}
sort.Strings(files)
outdated := make(map[string]struct{})
// test if too many backups
if r.maxBackups > 0 && len(files) > r.maxBackups {
for _, f := range files[:len(files)-r.maxBackups] {
outdated[f] = struct{}{}
}
files = files[len(files)-r.maxBackups:]
}
// test if any backups are too old
if r.days > 0 {
boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay*r.days)).Format(fileTimeFormat)
boundaryFile := filepath.Join(dir, fmt.Sprintf("%s%s%s%s", prefix, r.delimiter, boundary, ext))
if r.gzip {
boundaryFile += gzipExt
}
for _, f := range files {
if f >= boundaryFile {
break
}
outdated[f] = struct{}{}
}
}
var result []string
for k := range outdated {
result = append(result, k)
}
return result
}
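// ShallRotate checks if the file exceeds maxSize and should be rotated.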
func (r *SizeLimitRotateRule) ShallRotate(size int64) bool {
return r.maxSize > 0 && r.maxSize < size
}
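// parseFilename splits the base name of r.filename into prefix and extension,
// e.g. "logs/access.log" yields prefix "access" and ext ".log".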
func (r *SizeLimitRotateRule) parseFilename() (prefix, ext string) {
logName := filepath.Base(r.filename)
ext = filepath.Ext(r.filename)
prefix = logName[:len(logName)-len(ext)]
return
}
// NewLogger returns a RotateLogger with the given filename, rotating rule, and compression flag.
func NewLogger(filename string, rule RotateRule, compress bool) (*RotateLogger, error) {
l := &RotateLogger{
filename: filename,
channel: make(chan []byte, bufferSize),
done: make(chan struct{}),
rule: rule,
compress: compress,
}
if err := l.initialize(); err != nil {
return nil, err
}
l.startWorker()
return l, nil
}
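// The sketch below is illustrative only and not part of the original file: it
// shows one way to wire NewLogger with a size-limited rule; the path, delimiter
// and limits are arbitrary placeholder values.
func exampleNewSizeLimitLogger() (*RotateLogger, error) {
	// rotate when the file exceeds 100 MiB, keep at most 7 backups for 30 days,
	// and gzip rotated files
	rule := NewSizeLimitRotateRule("logs/app.log", "-", 30, 100, 7, true)
	return NewLogger("logs/app.log", rule, true)
}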
// Close closes l.
func (l *RotateLogger) Close() error {
var err error
l.closeOnce.Do(func() {
close(l.done)
l.waitGroup.Wait()
if err = l.fp.Sync(); err != nil {
return
}
err = l.fp.Close()
})
return err
}
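// Write queues data to be written to the log file; it returns ErrLogFileClosed
// if l has already been closed.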
func (l *RotateLogger) Write(data []byte) (int, error) {
select {
case l.channel <- data:
return len(data), nil
case <-l.done:
log.Println(string(data))
return 0, ErrLogFileClosed
}
}
func (l *RotateLogger) getBackupFilename() string {
if len(l.backup) == 0 {
return l.rule.BackupFileName()
}
return l.backup
}
func (l *RotateLogger) initialize() error {
l.backup = l.rule.BackupFileName()
if fileInfo, err := os.Stat(l.filename); err != nil {
basePath := path.Dir(l.filename)
if _, err = os.Stat(basePath); err != nil {
if err = os.MkdirAll(basePath, defaultDirMode); err != nil {
return err
}
}
if l.fp, err = os.Create(l.filename); err != nil {
return err
}
} else {
if l.fp, err = os.OpenFile(l.filename, os.O_APPEND|os.O_WRONLY, defaultFileMode); err != nil {
return err
}
l.currentSize = fileInfo.Size()
}
nfs.CloseOnExec(l.fp)
return nil
}
func (l *RotateLogger) maybeCompressFile(file string) {
if !l.compress {
return
}
defer func() {
if r := recover(); r != nil {
ErrorStack(r)
}
}()
if _, err := os.Stat(file); err != nil {
// the file doesn't exist or can't be accessed; skip compression
return
}
compressLogFile(file)
}
func (l *RotateLogger) maybeDeleteOutdatedFiles() {
files := l.rule.OutdatedFiles()
for _, file := range files {
if err := os.Remove(file); err != nil {
Errorf("failed to remove outdated file: %s", file)
}
}
}
func (l *RotateLogger) postRotate(file string) {
go func() {
// we cannot use threading.GoSafe here, because of import cycle.
l.maybeCompressFile(file)
l.maybeDeleteOutdatedFiles()
}()
}
func (l *RotateLogger) rotate() error {
if l.fp != nil {
err := l.fp.Close()
l.fp = nil
if err != nil {
return err
}
}
_, err := os.Stat(l.filename)
if err == nil && len(l.backup) > 0 {
backupFilename := l.getBackupFilename()
err = os.Rename(l.filename, backupFilename)
if err != nil {
return err
}
l.postRotate(backupFilename)
}
l.backup = l.rule.BackupFileName()
if l.fp, err = os.Create(l.filename); err == nil {
nfs.CloseOnExec(l.fp)
}
return err
}
func (l *RotateLogger) startWorker() {
l.waitGroup.Add(1)
go func() {
defer l.waitGroup.Done()
for {
select {
case event := <-l.channel:
l.write(event)
case <-l.done:
return
}
}
}()
}
func (l *RotateLogger) write(v []byte) {
if l.rule.ShallRotate(l.currentSize + int64(len(v))) {
if err := l.rotate(); err != nil {
log.Println(err)
} else {
l.rule.MarkRotated()
l.currentSize = 0
}
}
if l.fp != nil {
l.fp.Write(v)
l.currentSize += int64(len(v))
}
}
func compressLogFile(file string) {
start := time.Now()
Infof("compressing log file: %s", file)
if err := gzipFile(file, fileSys); err != nil {
Errorf("compress error: %s", err)
} else {
Infof("compressed log file: %s, took %s", file, time.Since(start))
}
}
func getNowDate() string {
return time.Now().Format(dateFormat)
}
func getNowDateInRFC3339Format() string {
return time.Now().Format(fileTimeFormat)
}
func gzipFile(file string, fsys fileSystem) (err error) {
	in, err := fsys.Open(file)
	if err != nil {
		return err
	}
	defer func() {
		if e := fsys.Close(in); e != nil {
			Errorf("failed to close file: %s, error: %v", file, e)
		}
		if err == nil {
			// only remove the original file when compression is successful
			err = fsys.Remove(file)
		}
	}()
	out, err := fsys.Create(fmt.Sprintf("%s%s", file, gzipExt))
	if err != nil {
		return err
	}
	defer func() {
		e := fsys.Close(out)
		if err == nil {
			err = e
		}
	}()
	w := gzip.NewWriter(out)
	if _, err = fsys.Copy(w, in); err != nil {
		// failed to copy, no need to close w
		return err
	}
	// close the gzip writer to flush buffered data and write the gzip footer;
	// with the named return, the deferred cleanup above removes the original
	// file only when everything succeeded
	return w.Close()
}
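
// Illustrative sketch, not part of the original file: RotateLogger implements
// io.Writer through its Write method, so it can back the standard library
// logger; the filename and rule values below are placeholders.
func exampleStdLogOutput() error {
	rl, err := NewLogger("logs/app.log", DefaultRotateRule("logs/app.log", "-", 7, false), false)
	if err != nil {
		return err
	}
	defer rl.Close()

	std := log.New(rl, "", log.LstdFlags)
	std.Println("hello from the rotating logger")
	return nil
}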