Documentation ¶
Overview ¶
Package absnfs implements an NFS server adapter for the absfs filesystem interface.
This package allows any filesystem that implements the absfs.FileSystem interface to be exported as an NFSv3 share over a network. It provides a complete NFS server implementation with support for standard file operations, security features, and performance optimizations.
Key Features:
- NFSv3 protocol implementation
- TLS/SSL encryption support for secure connections
- Symlink support (SYMLINK and READLINK operations)
- Rate limiting and DoS protection
- Attribute caching for improved performance
- Batch operation processing
- Worker pool for concurrent request handling
- Comprehensive metrics and monitoring
Basic Usage:
fs, _ := memfs.NewFS()
server, _ := absnfs.New(fs, absnfs.ExportOptions{})
server.Export("/export/test", 2049)
Security Features:
- IP-based access control
- Read-only export mode
- User ID mapping (squash options)
- Rate limiting to prevent DoS attacks
- TLS/SSL encryption
For detailed documentation, see the docs/ directory in the repository.
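A slightly fuller sketch combining the basic usage above with a few of the security-related ExportOptions fields documented below (ReadOnly, AllowedIPs, Squash). The mount path, subnet, and port are illustrative, and the snippet assumes the memfs and log packages are imported:

fs, _ := memfs.NewFS()
options := absnfs.ExportOptions{
    ReadOnly:   true,                       // export the filesystem read-only
    AllowedIPs: []string{"192.168.1.0/24"}, // restrict access to a local subnet
    Squash:     "root",                     // map root to an unprivileged user
}
server, err := absnfs.New(fs, options)
if err != nil {
    log.Fatal(err)
}
if err := server.Export("/export/test", 2049); err != nil {
    log.Fatal(err)
}
defer server.Unexport()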
Index ¶
- Constants
- Variables
- func EncodeRPCReply(w io.Writer, reply *RPCReply) error
- func ExtractCertificateIdentity(cert *x509.Certificate) string
- func GetCertificateInfo(cert *x509.Certificate) string
- func NewUint64MinHeap() *uint64MinHeap
- func ParseClientAuthType(s string) (tls.ClientAuthType, error)
- func ParseTLSVersion(s string) (uint16, error)
- func TLSVersionString(version uint16) string
- type AbsfsNFS
- func (n *AbsfsNFS) Close() error
- func (s *AbsfsNFS) Create(dir *NFSNode, name string, attrs *NFSAttrs) (*NFSNode, error)
- func (s *AbsfsNFS) CreateWithContext(ctx context.Context, dir *NFSNode, name string, attrs *NFSAttrs) (*NFSNode, error)
- func (n *AbsfsNFS) ExecuteWithWorker(task func() interface{}) interface{}
- func (s *AbsfsNFS) Export(mountPath string, port int) error
- func (s *AbsfsNFS) GetAttr(node *NFSNode) (*NFSAttrs, error)
- func (n *AbsfsNFS) GetAttrCacheSize() int
- func (n *AbsfsNFS) GetExportOptions() ExportOptions
- func (n *AbsfsNFS) GetMetrics() NFSMetrics
- func (n *AbsfsNFS) IsHealthy() bool
- func (s *AbsfsNFS) Lookup(path string) (*NFSNode, error)
- func (s *AbsfsNFS) LookupWithContext(ctx context.Context, path string) (*NFSNode, error)
- func (s *AbsfsNFS) Read(node *NFSNode, offset int64, count int64) ([]byte, error)
- func (s *AbsfsNFS) ReadDir(dir *NFSNode) ([]*NFSNode, error)
- func (s *AbsfsNFS) ReadDirPlus(dir *NFSNode) ([]*NFSNode, error)
- func (s *AbsfsNFS) ReadDirWithContext(ctx context.Context, dir *NFSNode) ([]*NFSNode, error)
- func (s *AbsfsNFS) ReadWithContext(ctx context.Context, node *NFSNode, offset int64, count int64) ([]byte, error)
- func (s *AbsfsNFS) Readlink(node *NFSNode) (string, error)
- func (n *AbsfsNFS) RecordAttrCacheHit()
- func (n *AbsfsNFS) RecordAttrCacheMiss()
- func (n *AbsfsNFS) RecordDirCacheHit()
- func (n *AbsfsNFS) RecordDirCacheMiss()
- func (n *AbsfsNFS) RecordNegativeCacheHit()
- func (n *AbsfsNFS) RecordNegativeCacheMiss()
- func (n *AbsfsNFS) RecordOperationStart(opType string) func(err error)
- func (n *AbsfsNFS) RecordReadAheadHit()
- func (n *AbsfsNFS) RecordReadAheadMiss()
- func (s *AbsfsNFS) Remove(dir *NFSNode, name string) error
- func (s *AbsfsNFS) RemoveWithContext(ctx context.Context, dir *NFSNode, name string) error
- func (s *AbsfsNFS) Rename(oldDir *NFSNode, oldName string, newDir *NFSNode, newName string) error
- func (s *AbsfsNFS) RenameWithContext(ctx context.Context, oldDir *NFSNode, oldName string, newDir *NFSNode, ...) error
- func (s *AbsfsNFS) SetAttr(node *NFSNode, attrs *NFSAttrs) error
- func (n *AbsfsNFS) SetLogger(logger Logger) error
- func (s *AbsfsNFS) Symlink(dir *NFSNode, name string, target string, attrs *NFSAttrs) (*NFSNode, error)
- func (s *AbsfsNFS) Unexport() error
- func (n *AbsfsNFS) UpdateExportOptions(newOptions ExportOptions) error
- func (s *AbsfsNFS) Write(node *NFSNode, offset int64, data []byte) (int64, error)
- func (s *AbsfsNFS) WriteWithContext(ctx context.Context, node *NFSNode, offset int64, data []byte) (int64, error)
- type AttrCache
- func (c *AttrCache) Clear()
- func (c *AttrCache) ConfigureNegativeCaching(enable bool, ttl time.Duration)
- func (c *AttrCache) Get(path string, server ...*AbsfsNFS) *NFSAttrs
- func (c *AttrCache) Invalidate(path string)
- func (c *AttrCache) InvalidateNegativeInDir(dirPath string)
- func (c *AttrCache) MaxSize() int
- func (c *AttrCache) NegativeStats() int
- func (c *AttrCache) Put(path string, attrs *NFSAttrs)
- func (c *AttrCache) PutNegative(path string)
- func (c *AttrCache) Resize(newSize int)
- func (c *AttrCache) Size() int
- func (c *AttrCache) Stats() (int, int)
- func (c *AttrCache) UpdateTTL(newTTL time.Duration)
- type AuthContext
- type AuthResult
- type AuthSysCredential
- type Batch
- type BatchProcessor
- func (bp *BatchProcessor) AddRequest(req *BatchRequest) (added bool, triggered bool)
- func (bp *BatchProcessor) BatchGetAttr(ctx context.Context, fileHandle uint64) ([]byte, uint32, error)
- func (bp *BatchProcessor) BatchRead(ctx context.Context, fileHandle uint64, offset int64, length int) ([]byte, uint32, error)
- func (bp *BatchProcessor) BatchWrite(ctx context.Context, fileHandle uint64, offset int64, data []byte) (uint32, error)
- func (bp *BatchProcessor) GetStats() (enabled bool, batchesByType map[BatchType]int)
- func (bp *BatchProcessor) Stop()
- type BatchRequest
- type BatchResult
- type BatchType
- type CachedAttrs
- type CachedDirEntry
- type CreateArgs
- type DirCache
- func (c *DirCache) Clear()
- func (c *DirCache) Get(path string) ([]os.FileInfo, bool)
- func (c *DirCache) Invalidate(path string)
- func (c *DirCache) Put(path string, entries []os.FileInfo)
- func (c *DirCache) Resize(newMaxEntries int)
- func (c *DirCache) Size() int
- func (c *DirCache) Stats() (int, int64, int64)
- func (c *DirCache) UpdateTTL(newTimeout time.Duration)
- type DirOpArg
- type Entry
- type ExportOptions
- type FSInfo
- type FSStats
- type FileAttribute
- type FileBuffer
- type FileHandle
- type FileHandleMap
- func (fm *FileHandleMap) Allocate(f absfs.File) uint64
- func (fm *FileHandleMap) Count() int
- func (fm *FileHandleMap) Get(handle uint64) (absfs.File, bool)
- func (fm *FileHandleMap) GetOrError(handle uint64) (absfs.File, error)
- func (fm *FileHandleMap) Release(handle uint64)
- func (fm *FileHandleMap) ReleaseAll()
- type InvalidFileHandleError
- type LogConfig
- type LogField
- type Logger
- type MemoryMonitor
- type MetricsCollector
- func (m *MetricsCollector) GetMetrics() NFSMetrics
- func (m *MetricsCollector) IncrementOperationCount(opType string)
- func (m *MetricsCollector) IsHealthy() bool
- func (m *MetricsCollector) RecordAttrCacheHit()
- func (m *MetricsCollector) RecordAttrCacheMiss()
- func (m *MetricsCollector) RecordConnection()
- func (m *MetricsCollector) RecordConnectionClosed()
- func (m *MetricsCollector) RecordDirCacheHit()
- func (m *MetricsCollector) RecordDirCacheMiss()
- func (m *MetricsCollector) RecordError(errorType string)
- func (m *MetricsCollector) RecordLatency(opType string, duration time.Duration)
- func (m *MetricsCollector) RecordNegativeCacheHit()
- func (m *MetricsCollector) RecordNegativeCacheMiss()
- func (m *MetricsCollector) RecordRateLimitExceeded()
- func (m *MetricsCollector) RecordReadAheadHit()
- func (m *MetricsCollector) RecordReadAheadMiss()
- func (m *MetricsCollector) RecordRejectedConnection()
- func (m *MetricsCollector) RecordTLSClientCert(validated bool)
- func (m *MetricsCollector) RecordTLSHandshake()
- func (m *MetricsCollector) RecordTLSHandshakeFailure()
- func (m *MetricsCollector) RecordTLSSessionReused()
- func (m *MetricsCollector) RecordTLSVersion(version uint16)
- func (m *MetricsCollector) RecordTimeout(opType string)
- type NFSAttrs
- type NFSMetrics
- type NFSNode
- func (n *NFSNode) Chdir() error
- func (n *NFSNode) Chmod(mode os.FileMode) error
- func (n *NFSNode) Chown(uid, gid int) error
- func (n *NFSNode) Chtimes(atime time.Time, mtime time.Time) error
- func (n *NFSNode) Close() error
- func (n *NFSNode) Name() string
- func (n *NFSNode) Read(p []byte) (int, error)
- func (n *NFSNode) ReadAt(p []byte, off int64) (int, error)
- func (n *NFSNode) ReadDir(count int) ([]fs.DirEntry, error)
- func (n *NFSNode) Readdir(count int) ([]os.FileInfo, error)
- func (n *NFSNode) Readdirnames(count int) ([]string, error)
- func (n *NFSNode) Seek(offset int64, whence int) (int64, error)
- func (n *NFSNode) Stat() (os.FileInfo, error)
- func (n *NFSNode) Sync() error
- func (n *NFSNode) Truncate(size int64) error
- func (n *NFSNode) Write(p []byte) (int, error)
- func (n *NFSNode) WriteAt(p []byte, off int64) (int, error)
- func (n *NFSNode) WriteString(s string) (int, error)
- type NFSProcedureHandler
- type NotSupportedError
- type OperationType
- type PerIPLimiter
- type PerOperationLimiter
- type PortMapping
- type Portmapper
- func (pm *Portmapper) GetMappings() []PortMapping
- func (pm *Portmapper) GetPort(prog, vers, prot uint32) uint32
- func (pm *Portmapper) RegisterService(prog, vers, prot, port uint32)
- func (pm *Portmapper) SetDebug(debug bool)
- func (pm *Portmapper) Start() error
- func (pm *Portmapper) StartOnPort(port int) error
- func (pm *Portmapper) Stop() error
- func (pm *Portmapper) UnregisterService(prog, vers, prot uint32)
- type RPCCall
- type RPCCredential
- type RPCError
- type RPCMsgHeader
- type RPCReply
- type RPCVerifier
- type RateLimiter
- func (rl *RateLimiter) AllocateFileHandle(ip string) bool
- func (rl *RateLimiter) AllowOperation(ip string, opType OperationType) bool
- func (rl *RateLimiter) AllowRequest(ip string, connID string) bool
- func (rl *RateLimiter) CleanupConnection(connID string)
- func (rl *RateLimiter) GetStats() map[string]interface{}
- func (rl *RateLimiter) ReleaseFileHandle(ip string)
- type RateLimiterConfig
- type ReadAheadBuffer
- func (b *ReadAheadBuffer) Clear()
- func (b *ReadAheadBuffer) ClearPath(path string)
- func (b *ReadAheadBuffer) Configure(maxFiles int, maxMemory int64)
- func (b *ReadAheadBuffer) Fill(path string, data []byte, offset int64)
- func (b *ReadAheadBuffer) Read(path string, offset int64, count int, server ...*AbsfsNFS) ([]byte, bool)
- func (b *ReadAheadBuffer) Resize(maxFiles int, maxMemory int64)
- func (b *ReadAheadBuffer) Size() int64
- func (b *ReadAheadBuffer) Stats() (int, int64)
- type ReadArgs
- type ReadDirArgs
- type ReadDirRes
- type RecordMarkingConn
- type RecordMarkingReader
- type RecordMarkingWriter
- type RenameArgs
- type Server
- type ServerOptions
- type SlidingWindow
- type SlogLogger
- type SymlinkFileSystem
- type TLSConfig
- type Task
- type TimeoutConfig
- type TokenBucket
- type WorkerPool
- func (p *WorkerPool) Resize(maxWorkers int)
- func (p *WorkerPool) Start()
- func (p *WorkerPool) Stats() (maxWorkers int, activeWorkers int, queuedTasks int)
- func (p *WorkerPool) Stop()
- func (p *WorkerPool) Submit(execute func() interface{}) chan interface{}
- func (p *WorkerPool) SubmitWait(execute func() interface{}) (interface{}, bool)
- type WriteArgs
Examples ¶
Constants ¶
const (
    NF3REG  = 1 // Regular file
    NF3DIR  = 2 // Directory
    NF3BLK  = 3 // Block device
    NF3CHR  = 4 // Character device
    NF3LNK  = 5 // Symbolic link
    NF3SOCK = 6 // Socket
    NF3FIFO = 7 // Named pipe (FIFO)
)
NFSv3 file types (ftype3)
const (
    NFS_OK             = 0
    NFSERR_PERM        = 1
    NFSERR_NOENT       = 2
    NFSERR_IO          = 5
    NFSERR_NXIO        = 6
    NFSERR_ACCES       = 13
    NFSERR_EXIST       = 17
    NFSERR_NODEV       = 19
    NFSERR_NOTDIR      = 20
    NFSERR_ISDIR       = 21
    NFSERR_INVAL       = 22
    NFSERR_FBIG        = 27
    NFSERR_NOSPC       = 28
    NFSERR_ROFS        = 30
    NFSERR_NAMETOOLONG = 63
    NFSERR_NOTEMPTY    = 66
    NFSERR_DQUOT       = 69
    NFSERR_STALE       = 70
    NFSERR_WFLUSH      = 99
    NFSERR_BADHANDLE   = 10001 // Invalid file handle
    NFSERR_NOTSUPP     = 10004 // Operation not supported
    NFSERR_DELAY       = 10013 // Server is temporarily busy (rate limit exceeded)

    // Alias for backward compatibility - use NFSERR_ACCES for NFS3 access denied errors
    ACCESS_DENIED = NFSERR_ACCES
)
NFS status codes as defined in the NFS protocol
const (
    PortmapperPort    = 111
    PortmapperProgram = 100000
    PortmapperVersion = 2

    // Portmapper procedures
    PMAPPROC_NULL    = 0
    PMAPPROC_SET     = 1
    PMAPPROC_UNSET   = 2
    PMAPPROC_GETPORT = 3
    PMAPPROC_DUMP    = 4
    PMAPPROC_CALLIT  = 5

    // Transport protocols
    IPPROTO_TCP = 6
    IPPROTO_UDP = 17
)
Portmapper constants (RFC 1833)
const (
    // LastFragmentFlag is set in the fragment header to indicate the last fragment
    LastFragmentFlag = 0x80000000

    // MaxFragmentSize is the maximum size of a single RPC fragment
    MaxFragmentSize = 0x7FFFFFFF // 2GB - 1, limited by 31-bit length field

    // DefaultMaxFragmentSize is the default maximum fragment size for writes
    DefaultMaxFragmentSize = 1 << 20 // 1MB
)
Record Marking constants (RFC 1831 Section 10)
const (
    RPC_CALL  = 0
    RPC_REPLY = 1
)
RPC message types
const (
    AUTH_NONE  = 0 // No authentication
    AUTH_SYS   = 1 // UNIX-style authentication (formerly AUTH_UNIX)
    AUTH_SHORT = 2 // Short hand UNIX-style
    AUTH_DH    = 3 // Diffie-Hellman authentication
)
RPC authentication flavors (RFC 1831)
const (
    // MAX_XDR_STRING_LENGTH is the maximum allowed length for XDR strings (8KB)
    // This prevents memory exhaustion attacks where malicious clients send
    // extremely large length values
    MAX_XDR_STRING_LENGTH = 8192

    // MAX_RPC_AUTH_LENGTH is the maximum allowed length for RPC authentication
    // credentials and verifiers (400 bytes as per RFC 1831)
    MAX_RPC_AUTH_LENGTH = 400
)
Maximum sizes for XDR data structures to prevent DoS attacks
const (
    MSG_ACCEPTED = 0
    MSG_DENIED   = 1
)
RPC reply status (reply_stat in RFC 1831)
const (
    SUCCESS       = 0 // RPC executed successfully
    PROG_UNAVAIL  = 1 // remote hasn't exported program
    PROG_MISMATCH = 2 // remote can't support version #
    PROC_UNAVAIL  = 3 // program can't support procedure
    GARBAGE_ARGS  = 4 // procedure can't decode params
    SYSTEM_ERR    = 5 // system error (e.g., memory allocation failure)
)
Accept status values (accept_stat in RFC 1831)
const (
    RPC_MISMATCH = 0 // RPC version wrong
    AUTH_ERROR   = 1 // remote can't authenticate caller
)
Reject status values for MSG_DENIED
const (
    MOUNT_PROGRAM = 100005
    NFS_PROGRAM   = 100003
)
RPC program numbers
const (
    MOUNT_V3 = 3
    NFS_V3   = 3
)
RPC versions
const (
    NFSPROC3_NULL        = 0
    NFSPROC3_GETATTR     = 1
    NFSPROC3_SETATTR     = 2
    NFSPROC3_LOOKUP      = 3
    NFSPROC3_ACCESS      = 4
    NFSPROC3_READLINK    = 5
    NFSPROC3_READ        = 6
    NFSPROC3_WRITE       = 7
    NFSPROC3_CREATE      = 8
    NFSPROC3_MKDIR       = 9
    NFSPROC3_SYMLINK     = 10
    NFSPROC3_MKNOD       = 11
    NFSPROC3_REMOVE      = 12
    NFSPROC3_RMDIR       = 13
    NFSPROC3_RENAME      = 14
    NFSPROC3_LINK        = 15
    NFSPROC3_READDIR     = 16
    NFSPROC3_READDIRPLUS = 17
    NFSPROC3_FSSTAT      = 18
    NFSPROC3_FSINFO      = 19
    NFSPROC3_PATHCONF    = 20
    NFSPROC3_COMMIT      = 21
)
RPC procedures for NFS v3
const Version = "1.0.3"
Version represents the current version of the absnfs package
Variables ¶
var ErrTimeout = errors.New("operation timed out")
ErrTimeout is returned when an operation times out
Functions ¶
func EncodeRPCReply ¶
EncodeRPCReply encodes an RPC reply to a writer. RFC 1831 reply format:
XID (4 bytes)
msg_type = REPLY (4 bytes)
reply_stat (4 bytes) - MSG_ACCEPTED or MSG_DENIED
[if MSG_ACCEPTED:]
    verf (opaque auth) - flavor (4) + length (4) + body
    accept_stat (4 bytes) - SUCCESS, PROG_UNAVAIL, etc.
    [procedure-specific results]
[if MSG_DENIED:]
    reject_stat + auth error info
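A minimal sketch of encoding an accepted reply into an in-memory buffer. The XID is illustrative and any procedure-specific Data is omitted; only fields documented for RPCReply and RPCMsgHeader are used, and the snippet assumes the bytes, fmt, and log packages are imported:

reply := &absnfs.RPCReply{
    Header: absnfs.RPCMsgHeader{
        Xid:     42,
        MsgType: absnfs.RPC_REPLY,
    },
    Status:       absnfs.MSG_ACCEPTED,
    AcceptStatus: absnfs.SUCCESS,
}
var buf bytes.Buffer
if err := absnfs.EncodeRPCReply(&buf, reply); err != nil {
    log.Fatal(err)
}
fmt.Printf("encoded %d bytes\n", buf.Len())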
func ExtractCertificateIdentity ¶
func ExtractCertificateIdentity(cert *x509.Certificate) string
ExtractCertificateIdentity extracts the user identity from a client certificate. It returns the Common Name (CN) from the certificate subject and can be extended to support other fields or custom mappings.
func GetCertificateInfo ¶
func GetCertificateInfo(cert *x509.Certificate) string
GetCertificateInfo returns a human-readable string with certificate details
func NewUint64MinHeap ¶
func NewUint64MinHeap() *uint64MinHeap
NewUint64MinHeap creates and initializes a new min-heap
func ParseClientAuthType ¶
func ParseClientAuthType(s string) (tls.ClientAuthType, error)
ParseClientAuthType parses a string into a tls.ClientAuthType
func ParseTLSVersion ¶
ParseTLSVersion parses a string into a TLS version constant
func TLSVersionString ¶
TLSVersionString returns the string representation of a TLS version
Types ¶
type AbsfsNFS ¶
type AbsfsNFS struct {
// contains filtered or unexported fields
}
AbsfsNFS represents an NFS server that exports an absfs filesystem
func New ¶
func New(fs absfs.SymlinkFileSystem, options ExportOptions) (*AbsfsNFS, error)
New creates a new AbsfsNFS server instance
func (*AbsfsNFS) CreateWithContext ¶
func (s *AbsfsNFS) CreateWithContext(ctx context.Context, dir *NFSNode, name string, attrs *NFSAttrs) (*NFSNode, error)
CreateWithContext implements the CREATE operation with timeout support
func (*AbsfsNFS) ExecuteWithWorker ¶
func (n *AbsfsNFS) ExecuteWithWorker(task func() interface{}) interface{}
ExecuteWithWorker runs a task in the worker pool. If the worker pool is not available (disabled or full), it executes the task directly.
func (*AbsfsNFS) GetAttrCacheSize ¶
GetAttrCacheSize returns the current attribute cache size in a thread-safe manner
func (*AbsfsNFS) GetExportOptions ¶
func (n *AbsfsNFS) GetExportOptions() ExportOptions
GetExportOptions returns a copy of the current export options. This is thread-safe and returns a snapshot of the current configuration.
func (*AbsfsNFS) GetMetrics ¶
func (n *AbsfsNFS) GetMetrics() NFSMetrics
GetMetrics returns a snapshot of the current NFS server metrics
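A small sketch of polling metrics and health, assuming server is an *AbsfsNFS created with New as in the overview and that fmt and log are imported:

m := server.GetMetrics()
fmt.Printf("ops=%d active=%d cache_hit=%.2f errors=%d uptime=%ds\n",
    m.TotalOperations, m.ActiveConnections, m.CacheHitRate, m.ErrorCount, m.UptimeSeconds)
if !server.IsHealthy() {
    log.Println("NFS server reports an unhealthy state")
}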
func (*AbsfsNFS) LookupWithContext ¶
LookupWithContext implements the LOOKUP operation with timeout support
func (*AbsfsNFS) ReadDirPlus ¶
ReadDirPlus implements the READDIRPLUS operation
func (*AbsfsNFS) ReadDirWithContext ¶
ReadDirWithContext implements the READDIR operation with timeout support
func (*AbsfsNFS) ReadWithContext ¶
func (s *AbsfsNFS) ReadWithContext(ctx context.Context, node *NFSNode, offset int64, count int64) ([]byte, error)
ReadWithContext implements the READ operation with timeout support
func (*AbsfsNFS) RecordAttrCacheHit ¶
func (n *AbsfsNFS) RecordAttrCacheHit()
RecordAttrCacheHit records a hit in the attribute cache
func (*AbsfsNFS) RecordAttrCacheMiss ¶
func (n *AbsfsNFS) RecordAttrCacheMiss()
RecordAttrCacheMiss records a miss in the attribute cache
func (*AbsfsNFS) RecordDirCacheHit ¶
func (n *AbsfsNFS) RecordDirCacheHit()
RecordDirCacheHit records a hit in the directory cache
func (*AbsfsNFS) RecordDirCacheMiss ¶
func (n *AbsfsNFS) RecordDirCacheMiss()
RecordDirCacheMiss records a miss in the directory cache
func (*AbsfsNFS) RecordNegativeCacheHit ¶
func (n *AbsfsNFS) RecordNegativeCacheHit()
RecordNegativeCacheHit records a hit in the negative cache
func (*AbsfsNFS) RecordNegativeCacheMiss ¶
func (n *AbsfsNFS) RecordNegativeCacheMiss()
RecordNegativeCacheMiss records a miss in the negative cache
func (*AbsfsNFS) RecordOperationStart ¶
RecordOperationStart records the start of an NFS operation for metrics tracking. Returns a function that should be called when the operation completes.
func (*AbsfsNFS) RecordReadAheadHit ¶
func (n *AbsfsNFS) RecordReadAheadHit()
RecordReadAheadHit records a hit in the read-ahead buffer
func (*AbsfsNFS) RecordReadAheadMiss ¶
func (n *AbsfsNFS) RecordReadAheadMiss()
RecordReadAheadMiss records a miss in the read-ahead buffer
func (*AbsfsNFS) RemoveWithContext ¶
RemoveWithContext implements the REMOVE operation with timeout support
func (*AbsfsNFS) RenameWithContext ¶
func (s *AbsfsNFS) RenameWithContext(ctx context.Context, oldDir *NFSNode, oldName string, newDir *NFSNode, newName string) error
RenameWithContext implements the RENAME operation with timeout support
func (*AbsfsNFS) SetLogger ¶
SetLogger sets or updates the structured logger for the NFS server. This allows changing the logger after the server has been created. Pass nil to disable logging (uses no-op logger).
func (*AbsfsNFS) Symlink ¶
func (s *AbsfsNFS) Symlink(dir *NFSNode, name string, target string, attrs *NFSAttrs) (*NFSNode, error)
Symlink implements the SYMLINK operation
func (*AbsfsNFS) UpdateExportOptions ¶
func (n *AbsfsNFS) UpdateExportOptions(newOptions ExportOptions) error
UpdateExportOptions updates the server's export options at runtime. Some fields require a server restart and will return an error if changed. Returns an error if the update contains invalid values or changes to immutable fields.
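A sketch of adjusting options on a running server; which fields are immutable is determined by the implementation, so the error path is shown. Assumes server is an *AbsfsNFS and that time and log are imported:

opts := server.GetExportOptions()
opts.ReadOnly = true                     // switch the export to read-only
opts.AttrCacheTimeout = 10 * time.Second // cache attributes a little longer
if err := server.UpdateExportOptions(opts); err != nil {
    log.Printf("options not applied: %v", err) // e.g. an immutable field was changed
}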
type AttrCache ¶
type AttrCache struct {
// contains filtered or unexported fields
}
AttrCache provides caching for file attributes and negative lookups
func NewAttrCache ¶
NewAttrCache creates a new attribute cache with the specified TTL and maximum size
func (*AttrCache) ConfigureNegativeCaching ¶
ConfigureNegativeCaching configures negative lookup caching
func (*AttrCache) Get ¶
Get retrieves cached attributes if they exist and are not expired. Returns nil if the entry is not found or expired. For negative cache entries (file not found), it returns a special marker.
func (*AttrCache) Invalidate ¶
Invalidate removes an entry from the cache
func (*AttrCache) InvalidateNegativeInDir ¶
InvalidateNegativeInDir invalidates all negative cache entries in a directory. This is called when a file is created in the directory.
func (*AttrCache) NegativeStats ¶
NegativeStats returns the count of negative cache entries
func (*AttrCache) PutNegative ¶
PutNegative adds a negative cache entry (file not found)
func (*AttrCache) Resize ¶
Resize changes the maximum size of the attribute cache. If the new size is smaller than current entries, LRU entries will be evicted.
type AuthContext ¶
type AuthContext struct {
ClientIP string // Client IP address
ClientPort int // Client port number
Credential *RPCCredential // RPC credential
AuthSys *AuthSysCredential // Parsed AUTH_SYS credential (if applicable)
ClientCert *x509.Certificate // Client certificate (if TLS with client auth)
TLSEnabled bool // Whether this connection is using TLS
}
AuthContext contains information about the client making a request
type AuthResult ¶
type AuthResult struct {
Allowed bool // Whether the request is allowed
UID uint32 // Effective UID after squashing
GID uint32 // Effective GID after squashing
Reason string // Reason for denial (if not allowed)
}
AuthResult contains the result of authentication validation
func ValidateAuthentication ¶
func ValidateAuthentication(ctx *AuthContext, options ExportOptions) *AuthResult
ValidateAuthentication validates a client request against export options
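A sketch of validating a request against export options. The addresses and IDs are illustrative, and whether the Credential field must also be populated is implementation-dependent:

authCtx := &absnfs.AuthContext{
    ClientIP:   "192.168.1.50",
    ClientPort: 1021,
    AuthSys:    &absnfs.AuthSysCredential{UID: 0, GID: 0}, // root on the client
}
result := absnfs.ValidateAuthentication(authCtx, absnfs.ExportOptions{
    AllowedIPs: []string{"192.168.1.0/24"},
    Squash:     "root",
})
if !result.Allowed {
    fmt.Println("denied:", result.Reason)
} else {
    fmt.Printf("effective uid=%d gid=%d\n", result.UID, result.GID)
}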
type AuthSysCredential ¶
type AuthSysCredential struct {
Stamp uint32 // Arbitrary ID which the client may generate
MachineName string // Name of the client machine (or empty string)
UID uint32 // Caller's effective user ID
GID uint32 // Caller's effective group ID
AuxGIDs []uint32 // Auxiliary group IDs
}
AuthSysCredential represents AUTH_SYS credentials (RFC 1831)
func ParseAuthSysCredential ¶
func ParseAuthSysCredential(body []byte) (*AuthSysCredential, error)
ParseAuthSysCredential parses AUTH_SYS credential data from raw bytes
type Batch ¶
type Batch struct {
Type BatchType // Type of operations in this batch
Requests []*BatchRequest // Requests in this batch
MaxSize int // Maximum number of requests in this batch
ReadyTime time.Time // Time when this batch should be processed
// contains filtered or unexported fields
}
Batch represents a group of similar operations that can be processed together
type BatchProcessor ¶
type BatchProcessor struct {
// contains filtered or unexported fields
}
BatchProcessor manages the batching of operations
func NewBatchProcessor ¶
func NewBatchProcessor(nfs *AbsfsNFS, maxSize int) *BatchProcessor
NewBatchProcessor creates a new batch processor
func (*BatchProcessor) AddRequest ¶
func (bp *BatchProcessor) AddRequest(req *BatchRequest) (added bool, triggered bool)
AddRequest adds a request to a batch. Returns (added, triggered):
- added: true if the request was added to a batch, false if caller should handle individually
- triggered: true if batch processing was triggered (only meaningful when added=true)
func (*BatchProcessor) BatchGetAttr ¶
func (bp *BatchProcessor) BatchGetAttr(ctx context.Context, fileHandle uint64) ([]byte, uint32, error)
BatchGetAttr submits a getattr request to be batched. Returns the attributes, NFS status code, and error (error is last per Go convention).
func (*BatchProcessor) BatchRead ¶
func (bp *BatchProcessor) BatchRead(ctx context.Context, fileHandle uint64, offset int64, length int) ([]byte, uint32, error)
BatchRead submits a read request to be batched. Returns the read data, NFS status code, and error (error is last per Go convention).
func (*BatchProcessor) BatchWrite ¶
func (bp *BatchProcessor) BatchWrite(ctx context.Context, fileHandle uint64, offset int64, data []byte) (uint32, error)
BatchWrite submits a write request to be batched. Returns NFS status code and error (error is last per Go convention).
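A sketch of routing a read through the batch processor. It assumes server is an *AbsfsNFS, handle is a uint64 file handle previously allocated by the server, and that context, fmt, and log are imported:

bp := absnfs.NewBatchProcessor(server, 10) // group up to 10 similar operations
defer bp.Stop()
data, status, err := bp.BatchRead(context.Background(), handle, 0, 4096)
if err != nil || status != absnfs.NFS_OK {
    log.Printf("batched read failed: status=%d err=%v", status, err)
} else {
    fmt.Printf("read %d bytes\n", len(data))
}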
type BatchRequest ¶
type BatchRequest struct {
Type BatchType // Type of operation
FileHandle uint64 // File handle for the operation
Offset int64 // Offset for read/write operations
Length int // Length for read/write operations
Data []byte // Data for write operations
Time time.Time // Time the request was added to the batch
ResultChan chan *BatchResult // Channel to send results back to caller
Context context.Context // Context for cancellation
}
BatchRequest represents a single operation in a batch
type BatchResult ¶
type BatchResult struct {
Data []byte // Data for read operations
Error error // Error if any occurred
Status uint32 // NFS status code
}
BatchResult represents the result of a batched operation
type CachedAttrs ¶
type CachedAttrs struct {
// contains filtered or unexported fields
}
CachedAttrs represents cached file attributes with expiration. When attrs is nil, this represents a negative cache entry (file not found).
type CachedDirEntry ¶
type CachedDirEntry struct {
// contains filtered or unexported fields
}
CachedDirEntry represents cached directory entries with expiration
type CreateArgs ¶
CreateArgs represents arguments for file creation
type DirCache ¶
type DirCache struct {
// contains filtered or unexported fields
}
DirCache provides caching for directory entries
func NewDirCache ¶
NewDirCache creates a new directory cache with the specified timeout and limits
func (*DirCache) Invalidate ¶
Invalidate removes an entry from the cache
func (*DirCache) Resize ¶
Resize changes the maximum number of entries in the directory cache. If the new size is smaller than current entries, LRU entries will be evicted.
type DirOpArg ¶
type DirOpArg struct {
Handle FileHandle
Name string
}
DirOpArg represents arguments for directory operations
type ExportOptions ¶
type ExportOptions struct {
ReadOnly bool // Export as read-only
Secure bool // Require secure ports (<1024)
AllowedIPs []string // List of allowed client IPs/subnets
Squash string // User mapping (root/all/none)
Async bool // Allow async writes
MaxFileSize int64 // Maximum file size
// TransferSize controls the maximum size in bytes of read/write transfers
// Larger values may improve performance but require more memory
// Default: 65536 (64KB)
TransferSize int
// EnableReadAhead enables read-ahead buffering for improved sequential read performance
// When a client reads a file sequentially, the server prefetches additional data
// Default: true
EnableReadAhead bool
// ReadAheadSize controls the size in bytes of the read-ahead buffer
// Only applicable when EnableReadAhead is true
// Default: 262144 (256KB)
ReadAheadSize int
// ReadAheadMaxFiles controls the maximum number of files that can have active read-ahead buffers
// Helps limit memory usage by read-ahead buffering
// Default: 100 files
ReadAheadMaxFiles int
// ReadAheadMaxMemory controls the maximum amount of memory in bytes that can be used for read-ahead buffers
// Once this limit is reached, least recently used buffers will be evicted
// Default: 104857600 (100MB)
ReadAheadMaxMemory int64
// AttrCacheTimeout controls how long file attributes are cached
// Longer timeouts improve performance but may cause clients to see stale data
// Default: 5 * time.Second
AttrCacheTimeout time.Duration
// AttrCacheSize controls the maximum number of entries in the attribute cache
// Larger values improve performance but consume more memory
// Default: 10000 entries
AttrCacheSize int
// CacheNegativeLookups enables caching of failed lookups (file not found)
// This can significantly reduce filesystem load for repeated lookups of non-existent files
// Negative cache entries use a shorter TTL than positive entries
// Default: false (disabled)
CacheNegativeLookups bool
// NegativeCacheTimeout controls how long negative cache entries are kept
// Shorter timeouts reduce the chance of stale negative cache entries
// Only applicable when CacheNegativeLookups is true
// Default: 5 * time.Second
NegativeCacheTimeout time.Duration
// EnableDirCache enables caching of directory entries for improved performance
// When enabled, directory listings are cached to reduce filesystem calls
// Default: false (disabled)
EnableDirCache bool
// DirCacheTimeout controls how long directory entries are cached
// Longer timeouts improve performance but may cause clients to see stale directory listings
// Only applicable when EnableDirCache is true
// Default: 10 * time.Second
DirCacheTimeout time.Duration
// DirCacheMaxEntries controls the maximum number of directories that can be cached
// Helps limit memory usage by directory entry caching
// Only applicable when EnableDirCache is true
// Default: 1000 directories
DirCacheMaxEntries int
// DirCacheMaxDirSize controls the maximum number of entries in a single directory that will be cached
// Directories with more entries than this will not be cached to prevent memory issues
// Only applicable when EnableDirCache is true
// Default: 10000 entries per directory
DirCacheMaxDirSize int
// AdaptToMemoryPressure enables automatic cache reduction when system memory is under pressure
// When enabled, the server will periodically check system memory usage and reduce cache sizes
// when memory usage exceeds MemoryHighWatermark, until usage falls below MemoryLowWatermark
// Default: false (disabled)
AdaptToMemoryPressure bool
// MemoryHighWatermark defines the threshold (as a fraction of total memory) at which
// memory pressure reduction actions will be triggered
// Only applicable when AdaptToMemoryPressure is true
// Valid range: 0.0 to 1.0 (0% to 100% of total memory)
// Default: 0.8 (80% of total memory)
MemoryHighWatermark float64
// MemoryLowWatermark defines the target memory usage (as a fraction of total memory)
// that the server will try to achieve when reducing cache sizes in response to memory pressure
// Only applicable when AdaptToMemoryPressure is true
// Valid range: 0.0 to MemoryHighWatermark
// Default: 0.6 (60% of total memory)
MemoryLowWatermark float64
// MemoryCheckInterval defines how frequently memory usage is checked for pressure detection
// Only applicable when AdaptToMemoryPressure is true
// Default: 30 * time.Second
MemoryCheckInterval time.Duration
// MaxWorkers controls the maximum number of goroutines used for handling concurrent operations
// More workers can improve performance for concurrent workloads but consume more CPU resources
// Default: runtime.NumCPU() * 4 (number of logical CPUs multiplied by 4)
MaxWorkers int
// BatchOperations enables grouping of similar operations for improved performance
// When enabled, the server will attempt to process multiple read/write operations
// together to reduce context switching and improve throughput
// Default: true
BatchOperations bool
// MaxBatchSize controls the maximum number of operations that can be included in a single batch
// Larger batches can improve performance but may increase latency for individual operations
// Only applicable when BatchOperations is true
// Default: 10 operations
MaxBatchSize int
// MaxConnections limits the number of simultaneous client connections
// Setting to 0 means unlimited connections (limited only by system resources)
// Default: 100
MaxConnections int
// IdleTimeout defines how long to keep inactive connections before closing them
// This helps reclaim resources from abandoned connections
// Default: 5 * time.Minute
IdleTimeout time.Duration
// TCPKeepAlive enables TCP keep-alive probes on NFS connections
// Keep-alive helps detect dead connections when clients disconnect improperly
// Default: true
TCPKeepAlive bool
// TCPNoDelay disables Nagle's algorithm on TCP connections to reduce latency
// This may improve performance for small requests at the cost of increased bandwidth usage
// Default: true
TCPNoDelay bool
// SendBufferSize controls the size of the TCP send buffer in bytes
// Larger buffers can improve throughput but consume more memory
// Default: 262144 (256KB)
SendBufferSize int
// ReceiveBufferSize controls the size of the TCP receive buffer in bytes
// Larger buffers can improve throughput but consume more memory
// Default: 262144 (256KB)
ReceiveBufferSize int
// EnableRateLimiting enables rate limiting and DoS protection
// When enabled, the server will limit requests per IP, per connection, and per operation type
// Default: true
EnableRateLimiting bool
// RateLimitConfig provides detailed rate limiting configuration
// Only applicable when EnableRateLimiting is true
// If nil, default configuration will be used
RateLimitConfig *RateLimiterConfig
// TLS holds the TLS/SSL configuration for encrypted connections
// When TLS.Enabled is true, all NFS connections will be encrypted using TLS
// Provides confidentiality, integrity, and optional mutual authentication
// If nil, TLS is disabled and connections are unencrypted (default NFSv3 behavior)
TLS *TLSConfig
// Log holds the logging configuration for the NFS server
// When nil, logging is disabled (no-op logger is used)
// When provided, enables structured logging with configurable level, format, and output
Log *LogConfig
// Timeouts controls operation-specific timeout durations
// When nil, default timeouts are used for all operations
// Allows fine-grained control over how long each operation type can take
Timeouts *TimeoutConfig
// contains filtered or unexported fields
}
ExportOptions defines the configuration for an NFS export
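A sketch of a performance-oriented configuration using fields documented above; fields left at their zero value are presumably filled with the documented defaults. Assumes fs is an absfs filesystem as in the overview and that time and log are imported:

options := absnfs.ExportOptions{
    TransferSize:       131072, // 128KB read/write transfers
    EnableReadAhead:    true,
    ReadAheadSize:      524288, // 512KB read-ahead per file
    AttrCacheTimeout:   10 * time.Second,
    AttrCacheSize:      20000,
    EnableDirCache:     true,
    DirCacheTimeout:    15 * time.Second,
    MaxConnections:     250,
    EnableRateLimiting: true, // nil RateLimitConfig uses the defaults
}
server, err := absnfs.New(fs, options)
if err != nil {
    log.Fatal(err)
}
_ = server // export with server.Export(...) as in the overview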
type FSInfo ¶
type FSInfo struct {
MaxFileSize uint64
SpaceAvail uint64
SpaceTotal uint64
SpaceFree uint64
FileSlotsFree uint32
FileSlotsTotal uint32
Properties uint32
}
FSInfo represents filesystem information
type FSStats ¶
type FSStats struct {
TotalBytes uint64
FreeBytes uint64
AvailBytes uint64
TotalFiles uint64
FreeFiles uint64
AvailFiles uint64
InvarSec uint32
}
FSStats represents filesystem statistics
type FileAttribute ¶
type FileAttribute struct {
Type uint32
Mode uint32
Nlink uint32
Uid uint32
Gid uint32
Size uint64
Used uint64
SpecData [2]uint32
Fsid uint64
Fileid uint64
Atime, Mtime, Ctime uint32
}
FileAttribute represents NFS file attributes
type FileBuffer ¶
type FileBuffer struct {
// contains filtered or unexported fields
}
FileBuffer represents a read-ahead buffer for a specific file
type FileHandleMap ¶
FileHandleMap manages the mapping between NFS file handles and absfs files
func (*FileHandleMap) Allocate ¶
func (fm *FileHandleMap) Allocate(f absfs.File) uint64
Allocate creates a new file handle for the given absfs.File. Optimized to O(log n) or O(1) using a free list instead of O(n) linear search.
Example (Performance) ¶
Example demonstrates the performance characteristics of the optimized allocation
fs, _ := memfs.NewFS()
f, _ := fs.Create("/test.txt")
f.Close()
fm := &FileHandleMap{
handles: make(map[uint64]absfs.File),
nextHandle: 1,
freeHandles: NewUint64MinHeap(),
}
// Allocate handles - O(1) time
handles := make([]uint64, 5)
for i := 0; i < 5; i++ {
f, _ := fs.OpenFile("/test.txt", 0, 0)
handles[i] = fm.Allocate(f)
fmt.Printf("Allocated handle: %d\n", handles[i])
}
// Release some handles - O(log n) time
fm.Release(handles[2]) // Release handle 3
fmt.Println("Released handle 3")
// Allocate again - reuses handle 3 in O(log n) time
f, _ = fs.OpenFile("/test.txt", 0, 0)
reused := fm.Allocate(f)
fmt.Printf("Reused handle: %d\n", reused)
Output:

Allocated handle: 1
Allocated handle: 2
Allocated handle: 3
Allocated handle: 4
Allocated handle: 5
Released handle 3
Reused handle: 3
func (*FileHandleMap) Count ¶
func (fm *FileHandleMap) Count() int
Count returns the number of active file handles
func (*FileHandleMap) Get ¶
func (fm *FileHandleMap) Get(handle uint64) (absfs.File, bool)
Get retrieves the absfs.File associated with the given handle
func (*FileHandleMap) GetOrError ¶
func (fm *FileHandleMap) GetOrError(handle uint64) (absfs.File, error)
GetOrError retrieves the absfs.File associated with the given handle. Returns an InvalidFileHandleError if the handle is not found.
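A sketch of distinguishing stale handles from other errors. It assumes fm is a *FileHandleMap, handle is a uint64 returned by Allocate, and that errors and log are imported:

f, err := fm.GetOrError(handle)
if err != nil {
    var invalid *absnfs.InvalidFileHandleError
    if errors.As(err, &invalid) {
        log.Printf("stale or unknown handle %d: %v", handle, err)
    }
} else {
    defer fm.Release(handle) // Release also closes the underlying file
    _ = f                    // use the absfs.File
}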
func (*FileHandleMap) Release ¶
func (fm *FileHandleMap) Release(handle uint64)
Release removes the file handle mapping and closes the associated file
func (*FileHandleMap) ReleaseAll ¶
func (fm *FileHandleMap) ReleaseAll()
ReleaseAll closes and removes all file handles
type InvalidFileHandleError ¶
InvalidFileHandleError represents an error when a file handle is invalid
func (*InvalidFileHandleError) Error ¶
func (e *InvalidFileHandleError) Error() string
Error implements the error interface for InvalidFileHandleError
type LogConfig ¶
type LogConfig struct {
// Level sets the minimum log level to output
// Valid values: "debug", "info", "warn", "error"
// Default: "info"
Level string
// Format sets the log output format
// Valid values: "json", "text"
// Default: "text"
Format string
// Output sets the log destination
// Valid values: "stdout", "stderr", or a file path
// Default: "stderr"
Output string
// LogClientIPs enables logging of client IP addresses
// When true, client IPs are included in connection and authentication logs
// Default: false (for privacy)
LogClientIPs bool
// LogOperations enables detailed logging of NFS operations
// When true, logs each NFS operation (LOOKUP, READ, WRITE, etc.) with timing
// Default: false (reduces log volume)
LogOperations bool
// LogFileAccess enables logging of file access patterns
// When true, logs file opens, closes, and access patterns
// Default: false (reduces log volume)
LogFileAccess bool
// MaxSize defines the maximum size of log file in megabytes before rotation
// NOTE: File rotation is not yet implemented. This field is reserved for future enhancement.
// Only applicable when Output is a file path
// Default: 100 MB
MaxSize int
// MaxBackups defines the maximum number of old log files to retain
// NOTE: File rotation is not yet implemented. This field is reserved for future enhancement.
// Only applicable when Output is a file path
// Default: 3
MaxBackups int
// MaxAge defines the maximum number of days to retain old log files
// NOTE: File rotation is not yet implemented. This field is reserved for future enhancement.
// Only applicable when Output is a file path
// Default: 28 days
MaxAge int
// Compress enables gzip compression of rotated log files
// NOTE: File rotation is not yet implemented. This field is reserved for future enhancement.
// Only applicable when Output is a file path
// Default: false
Compress bool
}
LogConfig defines the logging configuration for the NFS server
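A sketch of enabling verbose structured logging through ExportOptions.Log; the values are illustrative and fs is an absfs filesystem as in the overview:

options := absnfs.ExportOptions{
    Log: &absnfs.LogConfig{
        Level:         "debug", // log everything during bring-up
        Format:        "json",  // machine-readable output
        Output:        "stderr",
        LogOperations: true, // per-operation logging (verbose)
        LogClientIPs:  true, // include client IPs (consider privacy)
    },
}
server, err := absnfs.New(fs, options)
if err != nil {
    log.Fatal(err)
}
_ = server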
type LogField ¶
type LogField struct {
Key string
Value interface{}
}
LogField represents a structured logging field with a key-value pair
type Logger ¶
type Logger interface {
// Debug logs a debug-level message with optional structured fields
Debug(msg string, fields ...LogField)
// Info logs an info-level message with optional structured fields
Info(msg string, fields ...LogField)
// Warn logs a warning-level message with optional structured fields
Warn(msg string, fields ...LogField)
// Error logs an error-level message with optional structured fields
Error(msg string, fields ...LogField)
}
Logger defines the interface for logging in ABSNFS. Applications can provide their own implementation to integrate with existing logging systems.
Thread Safety: All Logger implementations must be safe for concurrent use by multiple goroutines. The provided SlogLogger implementation uses mutex protection to ensure thread safety.
Performance Notes:
- Logging operations should be non-blocking to avoid impacting NFS performance
- Consider using buffered I/O for file-based loggers
- Structured fields (LogField) enable efficient filtering and analysis
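A minimal sketch of a custom Logger backed by the standard library log package, whose Logger is itself safe for concurrent use. The stdLogger name and its print helper are hypothetical; only the four interface methods, LogField, and SetLogger come from this package:

type stdLogger struct{ l *log.Logger }

func (s stdLogger) Debug(msg string, fields ...absnfs.LogField) { s.print("DEBUG", msg, fields) }
func (s stdLogger) Info(msg string, fields ...absnfs.LogField)  { s.print("INFO", msg, fields) }
func (s stdLogger) Warn(msg string, fields ...absnfs.LogField)  { s.print("WARN", msg, fields) }
func (s stdLogger) Error(msg string, fields ...absnfs.LogField) { s.print("ERROR", msg, fields) }

func (s stdLogger) print(level, msg string, fields []absnfs.LogField) {
    s.l.Printf("%s %s %v", level, msg, fields)
}

// Install the adapter on a running *AbsfsNFS:
// err := server.SetLogger(stdLogger{l: log.New(os.Stderr, "nfs ", log.LstdFlags)})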
func NewNoopLogger ¶
func NewNoopLogger() Logger
NewNoopLogger creates a logger that discards all log messages
type MemoryMonitor ¶
type MemoryMonitor struct {
// contains filtered or unexported fields
}
MemoryMonitor tracks system memory usage and manages memory pressure responses
func NewMemoryMonitor ¶
func NewMemoryMonitor(nfs *AbsfsNFS) *MemoryMonitor
NewMemoryMonitor creates a new memory monitor for the given AbsfsNFS instance
func (*MemoryMonitor) GetMemoryStats ¶
func (m *MemoryMonitor) GetMemoryStats() memoryStats
GetMemoryStats returns a copy of the current memory statistics
func (*MemoryMonitor) IsActive ¶
func (m *MemoryMonitor) IsActive() bool
IsActive returns true if monitoring is active
func (*MemoryMonitor) Start ¶
func (m *MemoryMonitor) Start(interval time.Duration)
Start begins monitoring system memory usage at the specified interval
type MetricsCollector ¶
type MetricsCollector struct {
// contains filtered or unexported fields
}
MetricsCollector handles collecting and aggregating metrics
func NewMetricsCollector ¶
func NewMetricsCollector(server *AbsfsNFS) *MetricsCollector
NewMetricsCollector creates a new metrics collector
func (*MetricsCollector) GetMetrics ¶
func (m *MetricsCollector) GetMetrics() NFSMetrics
GetMetrics returns a snapshot of the current metrics
func (*MetricsCollector) IncrementOperationCount ¶
func (m *MetricsCollector) IncrementOperationCount(opType string)
IncrementOperationCount increments the count for the specified operation type
func (*MetricsCollector) IsHealthy ¶
func (m *MetricsCollector) IsHealthy() bool
IsHealthy checks if the server is in a healthy state
func (*MetricsCollector) RecordAttrCacheHit ¶
func (m *MetricsCollector) RecordAttrCacheHit()
RecordAttrCacheHit records a hit in the attribute cache
func (*MetricsCollector) RecordAttrCacheMiss ¶
func (m *MetricsCollector) RecordAttrCacheMiss()
RecordAttrCacheMiss records a miss in the attribute cache
func (*MetricsCollector) RecordConnection ¶
func (m *MetricsCollector) RecordConnection()
RecordConnection records a new connection
func (*MetricsCollector) RecordConnectionClosed ¶
func (m *MetricsCollector) RecordConnectionClosed()
RecordConnectionClosed records a closed connection
func (*MetricsCollector) RecordDirCacheHit ¶
func (m *MetricsCollector) RecordDirCacheHit()
RecordDirCacheHit records a hit in the directory cache
func (*MetricsCollector) RecordDirCacheMiss ¶
func (m *MetricsCollector) RecordDirCacheMiss()
RecordDirCacheMiss records a miss in the directory cache
func (*MetricsCollector) RecordError ¶
func (m *MetricsCollector) RecordError(errorType string)
RecordError records an error
func (*MetricsCollector) RecordLatency ¶
func (m *MetricsCollector) RecordLatency(opType string, duration time.Duration)
RecordLatency records the latency for an operation
func (*MetricsCollector) RecordNegativeCacheHit ¶
func (m *MetricsCollector) RecordNegativeCacheHit()
RecordNegativeCacheHit records a hit in the negative cache
func (*MetricsCollector) RecordNegativeCacheMiss ¶
func (m *MetricsCollector) RecordNegativeCacheMiss()
RecordNegativeCacheMiss records a miss in the negative cache
func (*MetricsCollector) RecordRateLimitExceeded ¶
func (m *MetricsCollector) RecordRateLimitExceeded()
RecordRateLimitExceeded records a rate limit rejection
func (*MetricsCollector) RecordReadAheadHit ¶
func (m *MetricsCollector) RecordReadAheadHit()
RecordReadAheadHit records a hit in the read-ahead buffer
func (*MetricsCollector) RecordReadAheadMiss ¶
func (m *MetricsCollector) RecordReadAheadMiss()
RecordReadAheadMiss records a miss in the read-ahead buffer
func (*MetricsCollector) RecordRejectedConnection ¶
func (m *MetricsCollector) RecordRejectedConnection()
RecordRejectedConnection records a rejected connection
func (*MetricsCollector) RecordTLSClientCert ¶
func (m *MetricsCollector) RecordTLSClientCert(validated bool)
RecordTLSClientCert records a connection with a client certificate
func (*MetricsCollector) RecordTLSHandshake ¶
func (m *MetricsCollector) RecordTLSHandshake()
RecordTLSHandshake records a successful TLS handshake
func (*MetricsCollector) RecordTLSHandshakeFailure ¶
func (m *MetricsCollector) RecordTLSHandshakeFailure()
RecordTLSHandshakeFailure records a failed TLS handshake
func (*MetricsCollector) RecordTLSSessionReused ¶
func (m *MetricsCollector) RecordTLSSessionReused()
RecordTLSSessionReused records a TLS session resumption
func (*MetricsCollector) RecordTLSVersion ¶
func (m *MetricsCollector) RecordTLSVersion(version uint16)
RecordTLSVersion records the TLS version used for a connection
func (*MetricsCollector) RecordTimeout ¶
func (m *MetricsCollector) RecordTimeout(opType string)
RecordTimeout records a timeout for a specific operation type
type NFSAttrs ¶
type NFSAttrs struct {
Mode os.FileMode
Size int64
FileId uint64 // Unique file identifier (inode number)
Uid uint32
Gid uint32
// contains filtered or unexported fields
}
NFSAttrs holds the NFS attributes for a file or directory with caching
func NewNFSAttrs ¶
NewNFSAttrs creates a new NFSAttrs with the specified values
func (*NFSAttrs) Invalidate ¶
func (a *NFSAttrs) Invalidate()
Invalidate marks the attributes as invalid
type NFSMetrics ¶
type NFSMetrics struct {
// Operation counts
TotalOperations uint64
ReadOperations uint64
WriteOperations uint64
LookupOperations uint64
GetAttrOperations uint64
CreateOperations uint64
RemoveOperations uint64
RenameOperations uint64
MkdirOperations uint64
RmdirOperations uint64
ReaddirOperations uint64
AccessOperations uint64
// Latency metrics
AvgReadLatency time.Duration
AvgWriteLatency time.Duration
MaxReadLatency time.Duration
MaxWriteLatency time.Duration
P95ReadLatency time.Duration
P95WriteLatency time.Duration
// Cache metrics
CacheHitRate float64
ReadAheadHitRate float64
AttrCacheSize int
AttrCacheCapacity int
ReadAheadBufferSize int64
DirCacheHitRate float64
NegativeCacheSize int // Number of negative cache entries
NegativeCacheHitRate float64 // Hit rate for negative cache lookups
// Connection metrics
ActiveConnections int
TotalConnections uint64
RejectedConnections uint64
// TLS metrics
TLSHandshakes uint64 // Successful TLS handshakes
TLSHandshakeFailures uint64 // Failed TLS handshakes
TLSClientCertProvided uint64 // Connections with client certificates
TLSClientCertValidated uint64 // Successfully validated client certificates
TLSClientCertRejected uint64 // Rejected client certificates
TLSSessionReused uint64 // TLS session resumptions
TLSVersion12 uint64 // Connections using TLS 1.2
TLSVersion13 uint64 // Connections using TLS 1.3
// Error metrics
ErrorCount uint64
AuthFailures uint64
AccessViolations uint64
StaleHandles uint64
ResourceErrors uint64
RateLimitExceeded uint64
// Timeout metrics
ReadTimeouts uint64
WriteTimeouts uint64
LookupTimeouts uint64
ReaddirTimeouts uint64
CreateTimeouts uint64
RemoveTimeouts uint64
RenameTimeouts uint64
HandleTimeouts uint64
TotalTimeouts uint64
// Time-based metrics
StartTime time.Time
UptimeSeconds int64
}
NFSMetrics holds all metrics for the NFS server
type NFSNode ¶
type NFSNode struct {
absfs.SymlinkFileSystem
// contains filtered or unexported fields
}
NFSNode represents a file or directory in the NFS tree
func (*NFSNode) Readdirnames ¶
Readdirnames implements absfs.File
type NFSProcedureHandler ¶
type NFSProcedureHandler struct {
// contains filtered or unexported fields
}
NFSProcedureHandler handles NFS procedure calls
func (*NFSProcedureHandler) HandleCall ¶
func (h *NFSProcedureHandler) HandleCall(call *RPCCall, body io.Reader, authCtx *AuthContext) (*RPCReply, error)
HandleCall processes an NFS RPC call and returns a reply
type NotSupportedError ¶
NotSupportedError represents an error when an operation is not supported
func (*NotSupportedError) Error ¶
func (e *NotSupportedError) Error() string
Error implements the error interface for NotSupportedError
type OperationType ¶
type OperationType string
OperationType represents different NFS operation types for rate limiting
const (
    OpTypeReadLarge  OperationType = "read_large"  // READ >64KB
    OpTypeWriteLarge OperationType = "write_large" // WRITE >64KB
    OpTypeReaddir    OperationType = "readdir"     // READDIR
    OpTypeMount      OperationType = "mount"       // MOUNT operations
)
type PerIPLimiter ¶
type PerIPLimiter struct {
// contains filtered or unexported fields
}
PerIPLimiter manages rate limiters per IP address
func NewPerIPLimiter ¶
func NewPerIPLimiter(rate float64, burst int, cleanupInterval time.Duration) *PerIPLimiter
NewPerIPLimiter creates a new per-IP rate limiter
func (*PerIPLimiter) Allow ¶
func (pl *PerIPLimiter) Allow(ip string) bool
Allow checks if a request from the given IP can proceed
func (*PerIPLimiter) GetStats ¶
func (pl *PerIPLimiter) GetStats() map[string]float64
GetStats returns statistics about the limiter
type PerOperationLimiter ¶
type PerOperationLimiter struct {
// contains filtered or unexported fields
}
PerOperationLimiter manages rate limiters per operation type per IP
func NewPerOperationLimiter ¶
func NewPerOperationLimiter(config RateLimiterConfig) *PerOperationLimiter
NewPerOperationLimiter creates a new per-operation rate limiter
func (*PerOperationLimiter) Allow ¶
func (pol *PerOperationLimiter) Allow(ip string, opType OperationType) bool
Allow checks if an operation from the given IP can proceed
type PortMapping ¶ added in v1.0.0
type PortMapping struct {
Program uint32
Version uint32
Protocol uint32 // IPPROTO_TCP or IPPROTO_UDP
Port uint32
}
PortMapping represents a registered RPC service
type Portmapper ¶ added in v1.0.0
type Portmapper struct {
// contains filtered or unexported fields
}
Portmapper implements the RFC 1833 portmapper service
func NewPortmapper ¶ added in v1.0.0
func NewPortmapper() *Portmapper
NewPortmapper creates a new portmapper instance
func (*Portmapper) GetMappings ¶ added in v1.0.0
func (pm *Portmapper) GetMappings() []PortMapping
GetMappings returns a copy of all registered mappings
func (*Portmapper) GetPort ¶ added in v1.0.0
func (pm *Portmapper) GetPort(prog, vers, prot uint32) uint32
GetPort returns the port for a registered service (0 if not found)
func (*Portmapper) RegisterService ¶ added in v1.0.0
func (pm *Portmapper) RegisterService(prog, vers, prot, port uint32)
RegisterService registers an RPC service with the portmapper
func (*Portmapper) SetDebug ¶ added in v1.0.0
func (pm *Portmapper) SetDebug(debug bool)
SetDebug enables or disables debug logging
func (*Portmapper) Start ¶ added in v1.0.0
func (pm *Portmapper) Start() error
Start starts the portmapper service on port 111
func (*Portmapper) StartOnPort ¶ added in v1.0.0
func (pm *Portmapper) StartOnPort(port int) error
StartOnPort starts the portmapper service on a custom port. This is useful for testing without root privileges.
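A sketch of running the portmapper on an unprivileged port for testing and advertising the NFS and MOUNT services over TCP; the ports are illustrative:

pm := absnfs.NewPortmapper()
if err := pm.StartOnPort(11111); err != nil { // avoids needing root for port 111
    log.Fatal(err)
}
defer pm.Stop()
// Advertise NFSv3 and MOUNTv3 on the server's listening port.
pm.RegisterService(absnfs.NFS_PROGRAM, absnfs.NFS_V3, absnfs.IPPROTO_TCP, 2049)
pm.RegisterService(absnfs.MOUNT_PROGRAM, absnfs.MOUNT_V3, absnfs.IPPROTO_TCP, 2049)
fmt.Println("NFS port:", pm.GetPort(absnfs.NFS_PROGRAM, absnfs.NFS_V3, absnfs.IPPROTO_TCP))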
func (*Portmapper) Stop ¶ added in v1.0.0
func (pm *Portmapper) Stop() error
Stop stops the portmapper service
func (*Portmapper) UnregisterService ¶ added in v1.0.0
func (pm *Portmapper) UnregisterService(prog, vers, prot uint32)
UnregisterService unregisters an RPC service
type RPCCall ¶
type RPCCall struct {
Header RPCMsgHeader
Credential RPCCredential
Verifier RPCVerifier
}
RPCCall represents an incoming RPC call
type RPCCredential ¶
RPCCredential represents RPC authentication credentials
type RPCMsgHeader ¶
type RPCMsgHeader struct {
Xid uint32
MsgType uint32
RPCVersion uint32
Program uint32
Version uint32
Procedure uint32
}
RPC message header
type RPCReply ¶
type RPCReply struct {
Header RPCMsgHeader
Status uint32 // reply_stat: MSG_ACCEPTED or MSG_DENIED
AcceptStatus uint32 // accept_stat: SUCCESS, PROG_UNAVAIL, etc. (only when Status == MSG_ACCEPTED)
Verifier RPCVerifier
Data interface{}
}
RPCReply represents an RPC reply message
type RPCVerifier ¶
RPCVerifier represents RPC authentication verifier
type RateLimiter ¶
type RateLimiter struct {
// contains filtered or unexported fields
}
RateLimiter manages all rate limiting for the NFS server
func NewRateLimiter ¶
func NewRateLimiter(config RateLimiterConfig) *RateLimiter
NewRateLimiter creates a new rate limiter with the given configuration
func (*RateLimiter) AllocateFileHandle ¶
func (rl *RateLimiter) AllocateFileHandle(ip string) bool
AllocateFileHandle attempts to allocate a file handle for an IP
func (*RateLimiter) AllowOperation ¶
func (rl *RateLimiter) AllowOperation(ip string, opType OperationType) bool
AllowOperation checks if a specific operation type should be allowed
func (*RateLimiter) AllowRequest ¶
func (rl *RateLimiter) AllowRequest(ip string, connID string) bool
AllowRequest checks if a request should be allowed
func (*RateLimiter) CleanupConnection ¶
func (rl *RateLimiter) CleanupConnection(connID string)
CleanupConnection removes rate limiter for a connection
func (*RateLimiter) GetStats ¶
func (rl *RateLimiter) GetStats() map[string]interface{}
GetStats returns rate limiter statistics
func (*RateLimiter) ReleaseFileHandle ¶
func (rl *RateLimiter) ReleaseFileHandle(ip string)
ReleaseFileHandle releases a file handle for an IP
type RateLimiterConfig ¶
type RateLimiterConfig struct {
// Global limits
GlobalRequestsPerSecond int // Maximum requests per second across all clients
// Per-IP limits
PerIPRequestsPerSecond int // Maximum requests per second per IP
PerIPBurstSize int // Burst allowance per IP
// Per-connection limits
PerConnectionRequestsPerSecond int // Maximum requests per second per connection
PerConnectionBurstSize int // Burst allowance per connection
// Per-operation type limits
ReadLargeOpsPerSecond int // Large reads (>64KB) per second per IP
WriteLargeOpsPerSecond int // Large writes (>64KB) per second per IP
ReaddirOpsPerSecond int // READDIR operations per second per IP
// Mount operation limits
MountOpsPerMinute int // MOUNT operations per minute per IP
// File handle limits
FileHandlesPerIP int // Maximum file handles per IP
FileHandlesGlobal int // Maximum file handles globally
// Cleanup
CleanupInterval time.Duration // How often to cleanup old entries
}
RateLimiterConfig defines rate limiting parameters
func DefaultRateLimiterConfig ¶
func DefaultRateLimiterConfig() RateLimiterConfig
DefaultRateLimiterConfig returns sensible defaults
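A sketch of customizing the defaults and checking a request; the IP address and connection ID are illustrative:

cfg := absnfs.DefaultRateLimiterConfig()
cfg.PerIPRequestsPerSecond = 200 // loosen the per-IP limit
cfg.MountOpsPerMinute = 10       // keep MOUNT attempts rare
rl := absnfs.NewRateLimiter(cfg)
if !rl.AllowRequest("192.168.1.50", "conn-1") {
    fmt.Println("request throttled")
}
fmt.Println(rl.GetStats())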
type ReadAheadBuffer ¶
type ReadAheadBuffer struct {
// contains filtered or unexported fields
}
ReadAheadBuffer implements a multi-file read-ahead buffer with memory management
func NewReadAheadBuffer ¶
func NewReadAheadBuffer(size int) *ReadAheadBuffer
NewReadAheadBuffer creates a new read-ahead buffer with specified size and limits
func (*ReadAheadBuffer) ClearPath ¶
func (b *ReadAheadBuffer) ClearPath(path string)
ClearPath clears the buffer for a specific path
func (*ReadAheadBuffer) Configure ¶
func (b *ReadAheadBuffer) Configure(maxFiles int, maxMemory int64)
Configure sets the configuration options for the read-ahead buffer
func (*ReadAheadBuffer) Fill ¶
func (b *ReadAheadBuffer) Fill(path string, data []byte, offset int64)
Fill fills the buffer for a file with data from the given offset
func (*ReadAheadBuffer) Read ¶
func (b *ReadAheadBuffer) Read(path string, offset int64, count int, server ...*AbsfsNFS) ([]byte, bool)
Read attempts to read from the buffer for a file
func (*ReadAheadBuffer) Resize ¶
func (b *ReadAheadBuffer) Resize(maxFiles int, maxMemory int64)
Resize changes the maximum number of files and total memory for the read-ahead buffer. If the new limits are smaller, buffers will be evicted to meet the new limits.
func (*ReadAheadBuffer) Size ¶
func (b *ReadAheadBuffer) Size() int64
Size returns the current memory usage of all read-ahead buffers
func (*ReadAheadBuffer) Stats ¶
func (b *ReadAheadBuffer) Stats() (int, int64)
Stats returns the number of files and memory usage of the read-ahead buffer
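A sketch of the fill/read cycle. Here data stands for bytes just read from the backing filesystem, and the path and sizes are illustrative:

buf := absnfs.NewReadAheadBuffer(262144)   // 256KB buffer size
buf.Configure(100, 100<<20)                // at most 100 files, 100MB total
buf.Fill("/export/test/file.txt", data, 0) // cache data starting at offset 0
if chunk, ok := buf.Read("/export/test/file.txt", 0, 4096); ok {
    fmt.Printf("served %d bytes from read-ahead\n", len(chunk))
}
files, mem := buf.Stats()
fmt.Printf("%d buffered files using %d bytes\n", files, mem)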
type ReadArgs ¶
type ReadArgs struct {
Handle FileHandle
Offset uint64
Count uint32
}
ReadArgs represents arguments for read operations
type ReadDirArgs ¶
type ReadDirArgs struct {
Handle FileHandle
Cookie uint64
Count uint32
}
ReadDirArgs represents arguments for readdir operations
type ReadDirRes ¶
ReadDirRes represents the result of a readdir operation
type RecordMarkingConn ¶ added in v1.0.0
type RecordMarkingConn struct {
// contains filtered or unexported fields
}
RecordMarkingConn wraps a connection to provide record marking semantics
func NewRecordMarkingConn ¶ added in v1.0.0
func NewRecordMarkingConn(r io.Reader, w io.Writer) *RecordMarkingConn
NewRecordMarkingConn creates a new record marking connection wrapper
func (*RecordMarkingConn) ReadRecord ¶ added in v1.0.0
func (c *RecordMarkingConn) ReadRecord() ([]byte, error)
ReadRecord reads a complete RPC record
func (*RecordMarkingConn) WriteRecord ¶ added in v1.0.0
func (c *RecordMarkingConn) WriteRecord(data []byte) error
WriteRecord writes a complete RPC record
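As a sketch only (the address and the RPC payload are placeholders), a client-side TCP connection could be wrapped so that both directions use record marking:
conn, err := net.Dial("tcp", "127.0.0.1:2049")
if err != nil {
	log.Fatal(err)
}
defer conn.Close()

var call []byte // a marshalled RPC call produced elsewhere
rmc := absnfs.NewRecordMarkingConn(conn, conn) // the same conn serves as reader and writer
if err := rmc.WriteRecord(call); err != nil {
	log.Fatal(err)
}
reply, err := rmc.ReadRecord() // blocks until a complete record has arrived
if err != nil {
	log.Fatal(err)
}
_ = reply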
type RecordMarkingReader ¶ added in v1.0.0
type RecordMarkingReader struct {
// contains filtered or unexported fields
}
RecordMarkingReader wraps a reader to handle RPC record marking. RPC over TCP uses "record marking" where each message is preceded by a 4-byte header containing the fragment length and last-fragment flag.
func NewRecordMarkingReader ¶ added in v1.0.0
func NewRecordMarkingReader(r io.Reader) *RecordMarkingReader
NewRecordMarkingReader creates a new record marking reader
func (*RecordMarkingReader) ReadRecord ¶ added in v1.0.0
func (rm *RecordMarkingReader) ReadRecord() ([]byte, error)
ReadRecord reads a complete RPC record (all fragments) from the underlying reader. It returns the complete record data.
type RecordMarkingWriter ¶ added in v1.0.0
type RecordMarkingWriter struct {
// contains filtered or unexported fields
}
RecordMarkingWriter wraps a writer to add RPC record marking.
func NewRecordMarkingWriter ¶ added in v1.0.0
func NewRecordMarkingWriter(w io.Writer) *RecordMarkingWriter
NewRecordMarkingWriter creates a new record marking writer
func NewRecordMarkingWriterWithSize ¶ added in v1.0.0
func NewRecordMarkingWriterWithSize(w io.Writer, maxFragment int) *RecordMarkingWriter
NewRecordMarkingWriterWithSize creates a new record marking writer with custom max fragment size
func (*RecordMarkingWriter) WriteRecord ¶ added in v1.0.0
func (rm *RecordMarkingWriter) WriteRecord(data []byte) error
WriteRecord writes a complete RPC record with record marking. For records larger than maxFragment, multiple fragments are sent.
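The framing can also be exercised entirely in memory; this round trip is a sketch assuming bytes, fmt, and log from the standard library.
var wire bytes.Buffer

w := absnfs.NewRecordMarkingWriter(&wire)
if err := w.WriteRecord([]byte("one rpc message")); err != nil { // the writer adds the 4-byte fragment header(s)
	log.Fatal(err)
}

r := absnfs.NewRecordMarkingReader(&wire)
record, err := r.ReadRecord() // the reader reassembles the fragments into one record
if err != nil {
	log.Fatal(err)
}
fmt.Printf("%s\n", record) // prints: one rpc message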
type RenameArgs ¶
RenameArgs represents arguments for rename operations
type Server ¶
type Server struct {
// contains filtered or unexported fields
}
Server represents an NFS server instance
func NewServer ¶
func NewServer(options ServerOptions) (*Server, error)
NewServer creates a new NFS server
func (*Server) SetHandler ¶
SetHandler sets the filesystem handler for the server
func (*Server) StartWithPortmapper ¶ added in v1.0.0
StartWithPortmapper starts the NFS server together with the portmapper service. This is required for standard NFS clients that expect to query the portmapper. Note: the portmapper requires root/administrator privileges to bind to port 111.
type ServerOptions ¶
type ServerOptions struct {
Name string // Server name
UID uint32 // Server UID
GID uint32 // Server GID
ReadOnly bool // Read-only mode
Port int // Port to listen on (0 = random port, default NFS port is 2049)
MountPort int // Port for mount daemon (0 = same as NFS port, 635 = standard mountd port)
Hostname string // Hostname to bind to
Debug bool // Enable debug logging
UsePortmapper bool // Whether to start portmapper service (requires root for port 111)
UseRecordMarking bool // Use RPC record marking (required for standard NFS clients)
}
ServerOptions defines the configuration for the NFS server
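A hedged sketch of constructing a server from these options; the name and addresses are illustrative, and attaching a filesystem via SetHandler is omitted because its signature is not shown in this listing.
srv, err := absnfs.NewServer(absnfs.ServerOptions{
	Name:             "absnfs-example",
	Port:             2049, // standard NFS port
	Hostname:         "0.0.0.0",
	UseRecordMarking: true, // standard NFS clients expect RPC record marking over TCP
	Debug:            true,
})
if err != nil {
	log.Fatal(err)
}
_ = srv // a filesystem handler still needs to be set before serving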
type SlidingWindow ¶
type SlidingWindow struct {
// contains filtered or unexported fields
}
SlidingWindow implements a sliding window rate limiter
func NewSlidingWindow ¶
func NewSlidingWindow(window time.Duration, maxCount int) *SlidingWindow
NewSlidingWindow creates a new sliding window rate limiter
func (*SlidingWindow) Allow ¶
func (sw *SlidingWindow) Allow() bool
Allow checks if a request can proceed
func (*SlidingWindow) Count ¶
func (sw *SlidingWindow) Count() int
Count returns the current request count in the window
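For illustration, a window admitting at most 100 requests in any rolling one-second period might be used like this (the values are arbitrary):
sw := absnfs.NewSlidingWindow(time.Second, 100)
if sw.Allow() {
	// handle the request
} else {
	// reject or delay: the window is full
}
fmt.Println("requests in current window:", sw.Count())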
type SlogLogger ¶
type SlogLogger struct {
// contains filtered or unexported fields
}
SlogLogger is the default Logger implementation using Go's stdlib slog package (Go 1.21+)
func NewSlogLogger ¶
func NewSlogLogger(config *LogConfig) (*SlogLogger, error)
NewSlogLogger creates a new SlogLogger with the provided configuration
func (*SlogLogger) Close ¶
func (l *SlogLogger) Close() error
Close closes the logger and any associated resources
func (*SlogLogger) Debug ¶
func (l *SlogLogger) Debug(msg string, fields ...LogField)
Debug logs a debug-level message
func (*SlogLogger) Error ¶
func (l *SlogLogger) Error(msg string, fields ...LogField)
Error logs an error-level message
func (*SlogLogger) Info ¶
func (l *SlogLogger) Info(msg string, fields ...LogField)
Info logs an info-level message
func (*SlogLogger) Warn ¶
func (l *SlogLogger) Warn(msg string, fields ...LogField)
Warn logs a warning-level message
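A sketch of the logger API; LogConfig's fields are documented elsewhere in this package, so the zero-value config below is purely illustrative and may not match a real deployment.
logger, err := absnfs.NewSlogLogger(&absnfs.LogConfig{}) // zero-value config used only for illustration
if err != nil {
	log.Fatal(err)
}
defer logger.Close()

logger.Info("export ready")
logger.Warn("client retried mount")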
type SymlinkFileSystem ¶
type SymlinkFileSystem interface {
Symlink(oldname, newname string) error
Readlink(name string) (string, error)
Lstat(name string) (os.FileInfo, error)
}
SymlinkFileSystem represents a filesystem that supports symbolic links. Deprecated: Use absfs.SymlinkFileSystem instead.
type TLSConfig ¶
type TLSConfig struct {
// Enabled indicates whether TLS is enabled
Enabled bool
// CertFile is the path to the server certificate file (PEM format)
CertFile string
// KeyFile is the path to the server private key file (PEM format)
KeyFile string
// CAFile is the path to the CA certificate file for client verification (optional)
CAFile string
// ClientAuth specifies the client authentication policy
// Options: NoClientCert, RequestClientCert, RequireAnyClientCert,
// VerifyClientCertIfGiven, RequireAndVerifyClientCert
ClientAuth tls.ClientAuthType
// MinVersion specifies the minimum TLS version to accept
// Default: TLS 1.2
MinVersion uint16
// MaxVersion specifies the maximum TLS version to accept
// Default: TLS 1.3
MaxVersion uint16
// CipherSuites is a list of enabled cipher suites
// If empty, a secure default list will be used
CipherSuites []uint16
// PreferServerCipherSuites controls whether the server's cipher suite
// preferences should be used instead of the client's
PreferServerCipherSuites bool
// InsecureSkipVerify controls whether the client should skip verification
// of the server's certificate chain and host name
// WARNING: Only for testing purposes
InsecureSkipVerify bool
// contains filtered or unexported fields
}
TLSConfig holds the TLS configuration for the NFS server
func DefaultTLSConfig ¶
func DefaultTLSConfig() *TLSConfig
DefaultTLSConfig returns a TLS configuration with secure defaults
func (*TLSConfig) BuildConfig ¶
BuildConfig creates and returns a Go TLS configuration
func (*TLSConfig) GetClientAuthString ¶
GetClientAuthString returns a string representation of the ClientAuth setting
func (*TLSConfig) GetConfig ¶
GetConfig returns the cached TLS configuration. If it has not been built yet, it builds and caches it.
func (*TLSConfig) ReloadCertificates ¶
ReloadCertificates reloads the server certificates without changing other settings. This is useful for certificate rotation without restarting the server.
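A hedged sketch of enabling TLS with mutual authentication; the certificate paths are placeholders, and the constants come from the standard crypto/tls package.
tlsCfg := absnfs.DefaultTLSConfig()
tlsCfg.Enabled = true
tlsCfg.CertFile = "/etc/absnfs/server.crt"
tlsCfg.KeyFile = "/etc/absnfs/server.key"
tlsCfg.CAFile = "/etc/absnfs/ca.crt" // CA used to verify client certificates
tlsCfg.ClientAuth = tls.RequireAndVerifyClientCert
tlsCfg.MinVersion = tls.VersionTLS12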
type Task ¶
type Task struct {
// The function to execute
Execute func() interface{}
// Channel to receive the result
ResultChan chan interface{}
// contains filtered or unexported fields
}
Task represents a unit of work to be processed by a worker
type TimeoutConfig ¶
type TimeoutConfig struct {
// ReadTimeout is the maximum time allowed for read operations
// Default: 30 seconds
ReadTimeout time.Duration
// WriteTimeout is the maximum time allowed for write operations
// Default: 60 seconds
WriteTimeout time.Duration
// LookupTimeout is the maximum time allowed for lookup operations
// Default: 10 seconds
LookupTimeout time.Duration
// ReaddirTimeout is the maximum time allowed for readdir operations
// Default: 30 seconds
ReaddirTimeout time.Duration
// CreateTimeout is the maximum time allowed for create operations
// Default: 15 seconds
CreateTimeout time.Duration
// RemoveTimeout is the maximum time allowed for remove operations
// Default: 15 seconds
RemoveTimeout time.Duration
// RenameTimeout is the maximum time allowed for rename operations
// Default: 20 seconds
RenameTimeout time.Duration
// HandleTimeout is the maximum time allowed for file handle operations
// Default: 5 seconds
HandleTimeout time.Duration
// DefaultTimeout is the fallback timeout for operations without a specific timeout
// Default: 30 seconds
DefaultTimeout time.Duration
}
TimeoutConfig defines timeout durations for various NFS operations
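A sketch that mirrors the documented defaults; whether fields left at zero fall back to those defaults is not stated in this listing, so every value is set explicitly here.
timeouts := absnfs.TimeoutConfig{
	ReadTimeout:    30 * time.Second,
	WriteTimeout:   60 * time.Second,
	LookupTimeout:  10 * time.Second,
	ReaddirTimeout: 30 * time.Second,
	CreateTimeout:  15 * time.Second,
	RemoveTimeout:  15 * time.Second,
	RenameTimeout:  20 * time.Second,
	HandleTimeout:  5 * time.Second,
	DefaultTimeout: 30 * time.Second,
}
_ = timeouts // how this is wired into the server is not shown in this listing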
type TokenBucket ¶
type TokenBucket struct {
// contains filtered or unexported fields
}
TokenBucket implements a token bucket rate limiter
func NewTokenBucket ¶
func NewTokenBucket(rate float64, burst int) *TokenBucket
NewTokenBucket creates a new token bucket
func (*TokenBucket) Allow ¶
func (tb *TokenBucket) Allow() bool
Allow checks if a request can proceed and consumes a token if so
func (*TokenBucket) AllowN ¶
func (tb *TokenBucket) AllowN(n int) bool
AllowN checks if N requests can proceed and consumes N tokens if so
func (*TokenBucket) Tokens ¶
func (tb *TokenBucket) Tokens() float64
Tokens returns the current token count (for testing/metrics)
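For illustration, a bucket refilled at 100 tokens per second with a burst of 20 (the values are arbitrary):
tb := absnfs.NewTokenBucket(100, 20)
if tb.Allow() {
	// one request permitted, one token consumed
}
if tb.AllowN(8) {
	// a batch of eight permitted at once
}
fmt.Println("tokens remaining:", tb.Tokens())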
type WorkerPool ¶
type WorkerPool struct {
// contains filtered or unexported fields
}
WorkerPool manages a pool of worker goroutines for handling concurrent operations
func NewWorkerPool ¶
func NewWorkerPool(maxWorkers int, logger *AbsfsNFS) *WorkerPool
NewWorkerPool creates a new worker pool with the specified number of workers
func (*WorkerPool) Resize ¶
func (p *WorkerPool) Resize(maxWorkers int)
Resize changes the number of workers in the pool. This operation requires stopping and restarting the worker pool.
func (*WorkerPool) Stats ¶
func (p *WorkerPool) Stats() (maxWorkers int, activeWorkers int, queuedTasks int)
Stats returns statistics about the worker pool
func (*WorkerPool) Submit ¶
func (p *WorkerPool) Submit(execute func() interface{}) chan interface{}
Submit adds a task to the worker pool. It returns a channel that will receive the result, or nil if the task was rejected.
func (*WorkerPool) SubmitWait ¶
func (p *WorkerPool) SubmitWait(execute func() interface{}) (interface{}, bool)
SubmitWait adds a task to the worker pool and waits for the result. It returns the result and a boolean indicating whether the task was successfully processed.
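A sketch of both submission styles; note that the second argument to NewWorkerPool is typed *AbsfsNFS, and passing nil here only keeps the example short and may not be valid in real use.
pool := absnfs.NewWorkerPool(8, nil)

resCh := pool.Submit(func() interface{} { return 40 + 2 })
if resCh != nil {
	fmt.Println(<-resCh) // 42
}

if res, ok := pool.SubmitWait(func() interface{} { return "done" }); ok {
	fmt.Println(res) // done
}

workers, active, queued := pool.Stats()
fmt.Println(workers, active, queued)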
type WriteArgs ¶
type WriteArgs struct {
Handle FileHandle
Offset uint64
Data []byte
}
WriteArgs represents arguments for write operations
Source Files ¶
- attributes.go
- auth.go
- batch.go
- cache.go
- errors.go
- filehandle.go
- logger.go
- memory_monitor.go
- metrics.go
- metrics_api.go
- minheap.go
- mount_handlers.go
- nfs_handlers.go
- nfs_node.go
- nfs_operations.go
- nfs_proc_handlers.go
- nfs_types.go
- operations.go
- portmapper.go
- rate_limiter.go
- rpc_transport.go
- rpc_types.go
- server.go
- tls_config.go
- types.go
- worker_pool.go
Directories ¶
| Path | Synopsis |
|---|---|
| cmd | |
| cmd/testserver (command) | Example NFS server using absnfs with an in-memory filesystem. |
| examples | |
| examples/composed-workspace (command) | Composed Workspace NFS Server |
| examples/fusion-drive (command) | Fusion Drive NFS Server |