diff --git a/api/grpc/mpi/v1/command_grpc.pb.go b/api/grpc/mpi/v1/command_grpc.pb.go index dbf61a337..ba20831d9 100644 --- a/api/grpc/mpi/v1/command_grpc.pb.go +++ b/api/grpc/mpi/v1/command_grpc.pb.go @@ -8,7 +8,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.5.1 +// - protoc-gen-go-grpc v1.6.0 // - protoc (unknown) // source: mpi/v1/command.proto @@ -144,16 +144,16 @@ type CommandServiceServer interface { type UnimplementedCommandServiceServer struct{} func (UnimplementedCommandServiceServer) CreateConnection(context.Context, *CreateConnectionRequest) (*CreateConnectionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateConnection not implemented") + return nil, status.Error(codes.Unimplemented, "method CreateConnection not implemented") } func (UnimplementedCommandServiceServer) UpdateDataPlaneStatus(context.Context, *UpdateDataPlaneStatusRequest) (*UpdateDataPlaneStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateDataPlaneStatus not implemented") + return nil, status.Error(codes.Unimplemented, "method UpdateDataPlaneStatus not implemented") } func (UnimplementedCommandServiceServer) UpdateDataPlaneHealth(context.Context, *UpdateDataPlaneHealthRequest) (*UpdateDataPlaneHealthResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateDataPlaneHealth not implemented") + return nil, status.Error(codes.Unimplemented, "method UpdateDataPlaneHealth not implemented") } func (UnimplementedCommandServiceServer) Subscribe(grpc.BidiStreamingServer[DataPlaneResponse, ManagementPlaneRequest]) error { - return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") + return status.Error(codes.Unimplemented, "method Subscribe not implemented") } func (UnimplementedCommandServiceServer) testEmbeddedByValue() {} @@ -165,7 +165,7 @@ type UnsafeCommandServiceServer interface { } func RegisterCommandServiceServer(s 
grpc.ServiceRegistrar, srv CommandServiceServer) { - // If the following call pancis, it indicates UnimplementedCommandServiceServer was + // If the following call panics, it indicates UnimplementedCommandServiceServer was // embedded by pointer and is nil. This will cause panics if an // unimplemented method is ever invoked, so we test this at initialization // time to prevent it from happening at runtime later due to I/O. diff --git a/api/grpc/mpi/v1/files_grpc.pb.go b/api/grpc/mpi/v1/files_grpc.pb.go index 69efda491..80ed54f75 100644 --- a/api/grpc/mpi/v1/files_grpc.pb.go +++ b/api/grpc/mpi/v1/files_grpc.pb.go @@ -5,7 +5,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.5.1 +// - protoc-gen-go-grpc v1.6.0 // - protoc (unknown) // source: mpi/v1/files.proto @@ -174,22 +174,22 @@ type FileServiceServer interface { type UnimplementedFileServiceServer struct{} func (UnimplementedFileServiceServer) GetOverview(context.Context, *GetOverviewRequest) (*GetOverviewResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetOverview not implemented") + return nil, status.Error(codes.Unimplemented, "method GetOverview not implemented") } func (UnimplementedFileServiceServer) UpdateOverview(context.Context, *UpdateOverviewRequest) (*UpdateOverviewResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateOverview not implemented") + return nil, status.Error(codes.Unimplemented, "method UpdateOverview not implemented") } func (UnimplementedFileServiceServer) GetFile(context.Context, *GetFileRequest) (*GetFileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetFile not implemented") + return nil, status.Error(codes.Unimplemented, "method GetFile not implemented") } func (UnimplementedFileServiceServer) UpdateFile(context.Context, *UpdateFileRequest) (*UpdateFileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateFile not 
implemented") + return nil, status.Error(codes.Unimplemented, "method UpdateFile not implemented") } func (UnimplementedFileServiceServer) GetFileStream(*GetFileRequest, grpc.ServerStreamingServer[FileDataChunk]) error { - return status.Errorf(codes.Unimplemented, "method GetFileStream not implemented") + return status.Error(codes.Unimplemented, "method GetFileStream not implemented") } func (UnimplementedFileServiceServer) UpdateFileStream(grpc.ClientStreamingServer[FileDataChunk, UpdateFileResponse]) error { - return status.Errorf(codes.Unimplemented, "method UpdateFileStream not implemented") + return status.Error(codes.Unimplemented, "method UpdateFileStream not implemented") } func (UnimplementedFileServiceServer) testEmbeddedByValue() {} @@ -201,7 +201,7 @@ type UnsafeFileServiceServer interface { } func RegisterFileServiceServer(s grpc.ServiceRegistrar, srv FileServiceServer) { - // If the following call pancis, it indicates UnimplementedFileServiceServer was + // If the following call panics, it indicates UnimplementedFileServiceServer was // embedded by pointer and is nil. This will cause panics if an // unimplemented method is ever invoked, so we test this at initialization // time to prevent it from happening at runtime later due to I/O. 
diff --git a/internal/config/config.go b/internal/config/config.go index 75c829e64..d765bdcf0 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -50,6 +50,10 @@ const ( regexLabelPattern = "^[a-zA-Z0-9]([a-zA-Z0-9-_]{0,254}[a-zA-Z0-9])?$" ) +var domainRegex = regexp.MustCompile( + `^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`, +) + var viperInstance = viper.NewWithOptions(viper.KeyDelimiter(KeyDelimiter)) func RegisterRunner(r func(cmd *cobra.Command, args []string)) { @@ -157,6 +161,7 @@ func ResolveConfig() (*Config, error) { Features: viperInstance.GetStringSlice(FeaturesKey), Labels: resolveLabels(), LibDir: viperInstance.GetString(LibDirPathKey), + ExternalDataSource: resolveExternalDataSource(), SyslogServer: resolveSyslogServer(), } @@ -475,6 +480,7 @@ func registerFlags() { registerCollectorFlags(fs) registerClientFlags(fs) registerDataPlaneFlags(fs) + registerExternalDataSourceFlags(fs) fs.SetNormalizeFunc(normalizeFunc) @@ -489,6 +495,24 @@ func registerFlags() { }) } +func registerExternalDataSourceFlags(fs *flag.FlagSet) { + fs.String( + ExternalDataSourceProxyUrlKey, + DefExternalDataSourceProxyUrl, + "Url to the proxy service for fetching external files.", + ) + fs.StringSlice( + ExternalDataSourceAllowDomainsKey, + []string{}, + "List of allowed domains for external data sources.", + ) + fs.Int64( + ExternalDataSourceMaxBytesKey, + DefExternalDataSourceMaxBytes, + "Maximum size in bytes for external data sources.", + ) +} + func registerDataPlaneFlags(fs *flag.FlagSet) { fs.Duration( NginxReloadMonitoringPeriodKey, @@ -628,6 +652,11 @@ func registerClientFlags(fs *flag.FlagSet) { DefMaxFileSize, "Max file size in bytes.", ) + fs.Duration( + ClientFileDownloadTimeoutKey, + DefClientFileDownloadTimeout, + "Timeout value in seconds, for downloading a file during a config apply.", + ) fs.Int( ClientGRPCMaxParallelFileOperationsKey, @@ -1120,6 +1149,7 @@ func 
resolveClient() *Client { RandomizationFactor: viperInstance.GetFloat64(ClientBackoffRandomizationFactorKey), Multiplier: viperInstance.GetFloat64(ClientBackoffMultiplierKey), }, + FileDownloadTimeout: viperInstance.GetDuration(ClientFileDownloadTimeoutKey), } } @@ -1560,3 +1590,36 @@ func areCommandServerProxyTLSSettingsSet() bool { viperInstance.IsSet(CommandServerProxyTLSSkipVerifyKey) || viperInstance.IsSet(CommandServerProxyTLSServerNameKey) } + +func resolveExternalDataSource() *ExternalDataSource { + proxyURLStruct := ProxyURL{ + URL: viperInstance.GetString(ExternalDataSourceProxyUrlKey), + } + externalDataSource := &ExternalDataSource{ + ProxyURL: proxyURLStruct, + AllowedDomains: viperInstance.GetStringSlice(ExternalDataSourceAllowDomainsKey), + MaxBytes: viperInstance.GetInt64(ExternalDataSourceMaxBytesKey), + } + + if err := validateAllowedDomains(externalDataSource.AllowedDomains); err != nil { + return nil + } + + return externalDataSource +} + +func validateAllowedDomains(domains []string) error { + if len(domains) == 0 { + return nil + } + + for _, domain := range domains { + // Validating syntax using the RFC-compliant regex + if !domainRegex.MatchString(domain) || domain == "" { + slog.Error("domain specified in allowed_domains is invalid", "domain", domain) + return errors.New("invalid domain found in allowed_domains") + } + } + + return nil +} diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 5c4384ba8..8fd3dd1fe 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -1425,6 +1425,13 @@ func createConfig() *Config { config.FeatureCertificates, config.FeatureFileWatcher, config.FeatureMetrics, config.FeatureAPIAction, config.FeatureLogsNap, }, + ExternalDataSource: &ExternalDataSource{ + ProxyURL: ProxyURL{ + URL: "http://proxy.example.com", + }, + AllowedDomains: []string{"example.com", "api.example.com"}, + MaxBytes: 1048576, + }, } } @@ -1569,3 +1576,73 @@ func TestValidateLabel(t 
*testing.T) { }) } } + +func TestValidateAllowedDomains(t *testing.T) { + tests := []struct { + name string + domains []string + wantErr bool + }{ + { + name: "Test 1: Success: Empty slice", + domains: []string{}, + wantErr: false, + }, + { + name: "Test 2: Success: Nil slice", + domains: nil, + wantErr: false, + }, + { + name: "Test 3: Success: Valid domains", + domains: []string{"example.com", "api.nginx.com", "sub.domain.io"}, + wantErr: false, + }, + { + name: "Test 4: Failure: Domain contains space", + domains: []string{"valid.com", "bad domain.com"}, + wantErr: true, + }, + { + name: "Test 5: Failure: Empty string domain", + domains: []string{"valid.com", ""}, + wantErr: true, + }, + { + name: "Test 6: Failure: Domain contains forward slash /", + domains: []string{"domain.com/path"}, + wantErr: true, + }, + { + name: "Test 7: Failure: Domain contains backward slash \\", + domains: []string{"domain.com\\path"}, + wantErr: true, + }, + { + name: "Test 8: Failure: Mixed valid and invalid (first is invalid)", + domains: []string{" only.com", "good.com"}, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var logBuffer bytes.Buffer + logHandler := slog.NewTextHandler(&logBuffer, &slog.HandlerOptions{Level: slog.LevelError}) + + originalLogger := slog.Default() + slog.SetDefault(slog.New(logHandler)) + defer slog.SetDefault(originalLogger) + + actualErr := validateAllowedDomains(tt.domains) + + if tt.wantErr { + require.Error(t, actualErr, "Expected an error but got nil.") + assert.Contains(t, logBuffer.String(), "domain specified in allowed_domains is invalid", + "Expected the error log message to be present in the output.") + } else { + assert.NoError(t, actualErr, "Did not expect an error but got one: %v", actualErr) + } + }) + } +} diff --git a/internal/config/defaults.go b/internal/config/defaults.go index 0f1e08075..70b32fbd6 100644 --- a/internal/config/defaults.go +++ b/internal/config/defaults.go @@ -82,6 +82,8 
@@ const ( DefBackoffMaxInterval = 20 * time.Second DefBackoffMaxElapsedTime = 1 * time.Minute + DefClientFileDownloadTimeout = 60 * time.Second + // Watcher defaults DefInstanceWatcherMonitoringFrequency = 5 * time.Second DefInstanceHealthWatcherMonitoringFrequency = 5 * time.Second @@ -114,6 +116,9 @@ const ( // File defaults DefLibDir = "/var/lib/nginx-agent" + + DefExternalDataSourceProxyUrl = "" + DefExternalDataSourceMaxBytes = 100 * 1024 * 1024 // default 100MB ) func DefaultFeatures() []string { diff --git a/internal/config/flags.go b/internal/config/flags.go index d0f664540..135cacb71 100644 --- a/internal/config/flags.go +++ b/internal/config/flags.go @@ -25,6 +25,7 @@ const ( InstanceHealthWatcherMonitoringFrequencyKey = "watchers_instance_health_watcher_monitoring_frequency" FileWatcherKey = "watchers_file_watcher" LibDirPathKey = "lib_dir" + ExternalDataSourceRootKey = "external_data_source" ) var ( @@ -47,6 +48,7 @@ var ( ClientBackoffMaxElapsedTimeKey = pre(ClientRootKey) + "backoff_max_elapsed_time" ClientBackoffRandomizationFactorKey = pre(ClientRootKey) + "backoff_randomization_factor" ClientBackoffMultiplierKey = pre(ClientRootKey) + "backoff_multiplier" + ClientFileDownloadTimeoutKey = pre(ClientRootKey) + "file_download_timeout" CollectorConfigPathKey = pre(CollectorRootKey) + "config_path" CollectorAdditionalConfigPathsKey = pre(CollectorRootKey) + "additional_config_paths" @@ -141,6 +143,11 @@ var ( FileWatcherMonitoringFrequencyKey = pre(FileWatcherKey) + "monitoring_frequency" NginxExcludeFilesKey = pre(FileWatcherKey) + "exclude_files" + + ExternalDataSourceProxyKey = pre(ExternalDataSourceRootKey) + "proxy" + ExternalDataSourceProxyUrlKey = pre(ExternalDataSourceProxyKey) + "url" + ExternalDataSourceMaxBytesKey = pre(ExternalDataSourceRootKey) + "max_bytes" + ExternalDataSourceAllowDomainsKey = pre(ExternalDataSourceRootKey) + "allowed_domains" ) func pre(prefixes ...string) string { diff --git a/internal/config/testdata/nginx-agent.conf 
b/internal/config/testdata/nginx-agent.conf index 2ac87b9ee..d7892c4c6 100644 --- a/internal/config/testdata/nginx-agent.conf +++ b/internal/config/testdata/nginx-agent.conf @@ -183,3 +183,11 @@ collector: log: level: "INFO" path: "/var/log/nginx-agent/opentelemetry-collector-agent.log" + +external_data_source: + proxy: + url: "http://proxy.example.com" + allowed_domains: + - example.com + - api.example.com + max_bytes: 1048576 diff --git a/internal/config/types.go b/internal/config/types.go index 72eda1369..e553e0d40 100644 --- a/internal/config/types.go +++ b/internal/config/types.go @@ -36,21 +36,22 @@ func parseServerType(str string) (ServerType, bool) { type ( Config struct { - Command *Command `yaml:"command" mapstructure:"command"` - AuxiliaryCommand *Command `yaml:"auxiliary_command" mapstructure:"auxiliary_command"` - Log *Log `yaml:"log" mapstructure:"log"` - DataPlaneConfig *DataPlaneConfig `yaml:"data_plane_config" mapstructure:"data_plane_config"` - Client *Client `yaml:"client" mapstructure:"client"` - Collector *Collector `yaml:"collector" mapstructure:"collector"` - Watchers *Watchers `yaml:"watchers" mapstructure:"watchers"` - SyslogServer *SyslogServer `yaml:"syslog_server" mapstructure:"syslog_server"` - Labels map[string]any `yaml:"labels" mapstructure:"labels"` - Version string `yaml:"-"` - Path string `yaml:"-"` - UUID string `yaml:"-"` - LibDir string `yaml:"-"` - AllowedDirectories []string `yaml:"allowed_directories" mapstructure:"allowed_directories"` - Features []string `yaml:"features" mapstructure:"features"` + Command *Command `yaml:"command" mapstructure:"command"` + AuxiliaryCommand *Command `yaml:"auxiliary_command" mapstructure:"auxiliary_command"` + Log *Log `yaml:"log" mapstructure:"log"` + DataPlaneConfig *DataPlaneConfig `yaml:"data_plane_config" mapstructure:"data_plane_config"` + Client *Client `yaml:"client" mapstructure:"client"` + Collector *Collector `yaml:"collector" mapstructure:"collector"` + Watchers *Watchers 
`yaml:"watchers" mapstructure:"watchers"` + SyslogServer *SyslogServer `yaml:"syslog_server" mapstructure:"syslog_server"` + ExternalDataSource *ExternalDataSource `yaml:"external_data_source" mapstructure:"external_data_source"` + Labels map[string]any `yaml:"labels" mapstructure:"labels"` + Version string `yaml:"-"` + Path string `yaml:"-"` + UUID string `yaml:"-"` + LibDir string `yaml:"-"` + AllowedDirectories []string `yaml:"allowed_directories" mapstructure:"allowed_directories"` + Features []string `yaml:"features" mapstructure:"features"` } Log struct { @@ -74,9 +75,10 @@ type ( } Client struct { - HTTP *HTTP `yaml:"http" mapstructure:"http"` - Grpc *GRPC `yaml:"grpc" mapstructure:"grpc"` - Backoff *BackOff `yaml:"backoff" mapstructure:"backoff"` + HTTP *HTTP `yaml:"http" mapstructure:"http"` + Grpc *GRPC `yaml:"grpc" mapstructure:"grpc"` + Backoff *BackOff `yaml:"backoff" mapstructure:"backoff"` + FileDownloadTimeout time.Duration `yaml:"file_download_timeout" mapstructure:"file_download_timeout"` } HTTP struct { @@ -358,6 +360,16 @@ type ( Token string `yaml:"token,omitempty" mapstructure:"token"` Timeout time.Duration `yaml:"timeout" mapstructure:"timeout"` } + + ProxyURL struct { + URL string `yaml:"url" mapstructure:"url"` + } + + ExternalDataSource struct { + ProxyURL ProxyURL `yaml:"proxy" mapstructure:"proxy"` + AllowedDomains []string `yaml:"allowed_domains" mapstructure:"allowed_domains"` + MaxBytes int64 `yaml:"max_bytes" mapstructure:"max_bytes"` + } ) func (col *Collector) Validate(allowedDirectories []string) error { diff --git a/internal/file/file_manager_service.go b/internal/file/file_manager_service.go index af0e67a91..224ebb142 100644 --- a/internal/file/file_manager_service.go +++ b/internal/file/file_manager_service.go @@ -11,10 +11,14 @@ import ( "encoding/json" "errors" "fmt" + "io" "log/slog" + "net/http" + "net/url" "os" "path/filepath" "strconv" + "strings" "sync" "golang.org/x/sync/errgroup" @@ -33,6 +37,9 @@ import ( 
//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6@v6.8.1 -generate //counterfeiter:generate . fileManagerServiceInterface +//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6@v6.8.1 -generate +//counterfeiter:generate . fileServiceOperatorInterface + const ( maxAttempts = 5 dirPerm = 0o755 @@ -40,6 +47,11 @@ const ( executePerm = 0o111 ) +type DownloadHeader struct { + ETag string + LastModified string +} + type ( fileOperator interface { Write(ctx context.Context, fileContent []byte, fileName, filePermissions string) error @@ -73,7 +85,8 @@ type ( fileToUpdate *mpi.File, ) error SetIsConnected(isConnected bool) - RenameFile(ctx context.Context, hash, fileName, tempDir string) error + RenameFile(ctx context.Context, fileName, tempDir string) error + ValidateFileHash(ctx context.Context, fileName, expectedHash string) error UpdateClient(ctx context.Context, fileServiceClient mpi.FileServiceClient) } @@ -106,6 +119,7 @@ type FileManagerService struct { // map of the files currently on disk, used to determine the file action during config apply currentFilesOnDisk map[string]*mpi.File // key is file path previousManifestFiles map[string]*model.ManifestFile + externalFileHeaders map[string]DownloadHeader manifestFilePath string rollbackManifest bool filesMutex sync.RWMutex @@ -121,6 +135,7 @@ func NewFileManagerService(fileServiceClient mpi.FileServiceClient, agentConfig fileActions: make(map[string]*model.FileCache), currentFilesOnDisk: make(map[string]*mpi.File), previousManifestFiles: make(map[string]*model.ManifestFile), + externalFileHeaders: make(map[string]DownloadHeader), rollbackManifest: true, manifestFilePath: agentConfig.LibDir + "/manifest.json", manifestLock: manifestLock, @@ -233,7 +248,7 @@ func (fms *FileManagerService) Rollback(ctx context.Context, instanceID string) delete(fms.currentFilesOnDisk, fileAction.File.GetFileMeta().GetName()) continue - case model.Delete, model.Update: + case model.Delete, model.Update, 
model.ExternalFile: content, err := fms.restoreFiles(fileAction) if err != nil { return err @@ -390,6 +405,17 @@ func (fms *FileManagerService) DetermineFileActions( slog.DebugContext(ctx, "Skipping unmanaged file updates", "file_name", fileName) continue } + + // If either the modified file or the current file is an external data source, + // treat this as an ExternalFile and skip the regular Add/Update checks. This + // ensures external files are downloaded/validated every single time. + if modifiedFile.File.GetExternalDataSource() != nil || (ok && currentFile.GetExternalDataSource() != nil) { + modifiedFile.Action = model.ExternalFile + fileDiff[fileName] = modifiedFile + + continue + } + // if file doesn't exist in the current files, file has been added // set file action if _, statErr := os.Stat(fileName); errors.Is(statErr, os.ErrNotExist) { @@ -581,7 +607,8 @@ func (fms *FileManagerService) executeFileActions(ctx context.Context) (actionEr func (fms *FileManagerService) downloadUpdatedFilesToTempLocation(ctx context.Context) (updateError error) { var downloadFiles []*model.FileCache for _, fileAction := range fms.fileActions { - if fileAction.Action == model.Add || fileAction.Action == model.Update { + if fileAction.Action == model.ExternalFile || fileAction.Action == model.Add || + fileAction.Action == model.Update { downloadFiles = append(downloadFiles, fileAction) } } @@ -590,7 +617,6 @@ func (fms *FileManagerService) downloadUpdatedFilesToTempLocation(ctx context.Co slog.DebugContext(ctx, "No updated files to download") return nil } - errGroup, errGroupCtx := errgroup.WithContext(ctx) errGroup.SetLimit(fms.agentConfig.Client.Grpc.MaxParallelFileOperations) @@ -598,26 +624,39 @@ func (fms *FileManagerService) downloadUpdatedFilesToTempLocation(ctx context.Co errGroup.Go(func() error { tempFilePath := tempFilePath(fileAction.File.GetFileMeta().GetName()) - slog.DebugContext( - errGroupCtx, - "Downloading file to temp location", - "file", tempFilePath, - ) 
+ switch fileAction.Action { + case model.ExternalFile: + return fms.downloadExternalFile(errGroupCtx, fileAction, tempFilePath) + case model.Add, model.Update: + slog.DebugContext( + errGroupCtx, + "Downloading file to temp location", + "file", tempFilePath, + ) - return fms.fileUpdate(errGroupCtx, fileAction.File, tempFilePath) + return fms.fileUpdate(errGroupCtx, fileAction.File, tempFilePath) + case model.Delete, model.Unchanged: // had to add for linter + return nil + default: + return nil + } }) } return errGroup.Wait() } +//nolint:revive // cognitive-complexity of 14 max is 12, loop is needed cant be broken up func (fms *FileManagerService) moveOrDeleteFiles(ctx context.Context, actionError error) error { actionsLoop: for _, fileAction := range fms.fileActions { + var err error + fileMeta := fileAction.File.GetFileMeta() + tempFilePath := tempFilePath(fileAction.File.GetFileMeta().GetName()) switch fileAction.Action { case model.Delete: slog.DebugContext(ctx, "Deleting file", "file", fileAction.File.GetFileMeta().GetName()) - if err := os.Remove(fileAction.File.GetFileMeta().GetName()); err != nil && !os.IsNotExist(err) { + if err = os.Remove(fileAction.File.GetFileMeta().GetName()); err != nil && !os.IsNotExist(err) { actionError = fmt.Errorf("error deleting file: %s error: %w", fileAction.File.GetFileMeta().GetName(), err) @@ -626,17 +665,21 @@ actionsLoop: continue case model.Add, model.Update: - fileMeta := fileAction.File.GetFileMeta() - tempFilePath := tempFilePath(fileAction.File.GetFileMeta().GetName()) - err := fms.fileServiceOperator.RenameFile(ctx, fileMeta.GetHash(), tempFilePath, fileMeta.GetName()) + err = fms.fileServiceOperator.RenameFile(ctx, tempFilePath, fileMeta.GetName()) if err != nil { actionError = err - break actionsLoop } + err = fms.fileServiceOperator.ValidateFileHash(ctx, fileMeta.GetName(), fileMeta.GetHash()) + case model.ExternalFile: + err = fms.fileServiceOperator.RenameFile(ctx, tempFilePath, fileMeta.GetName()) case 
model.Unchanged: slog.DebugContext(ctx, "File unchanged") } + if err != nil { + actionError = err + break actionsLoop + } } return actionError @@ -792,3 +835,207 @@ func tempBackupFilePath(fileName string) string { tempFileName := "." + filepath.Base(fileName) + ".agent.backup" return filepath.Join(filepath.Dir(fileName), tempFileName) } + +func (fms *FileManagerService) downloadExternalFile(ctx context.Context, fileAction *model.FileCache, + filePath string, +) error { + location := fileAction.File.GetExternalDataSource().GetLocation() + permission := fileAction.File.GetFileMeta().GetPermissions() + + slog.InfoContext(ctx, "Downloading external file from", "location", location) + + var contentToWrite []byte + var downloadErr, updateError error + var headers DownloadHeader + + contentToWrite, headers, downloadErr = fms.downloadFileContent(ctx, fileAction.File) + + if downloadErr != nil { + updateError = fmt.Errorf("failed to download file %s from %s: %w", + fileAction.File.GetFileMeta().GetName(), location, downloadErr) + + return updateError + } + + if contentToWrite == nil { + slog.DebugContext(ctx, "External file unchanged (304), skipping disk write.", + "file", fileAction.File.GetFileMeta().GetName()) + + fileAction.Action = model.Unchanged + + return nil + } + + fileName := fileAction.File.GetFileMeta().GetName() + fms.externalFileHeaders[fileName] = headers // NOTE(review): data race - this map is written from concurrent errgroup goroutines in downloadUpdatedFilesToTempLocation; guard with a mutex + + writeErr := fms.fileOperator.Write( + ctx, + contentToWrite, + filePath, + permission, + ) + + if writeErr != nil { + return fmt.Errorf("failed to write downloaded content to temp file %s: %w", filePath, writeErr) + } + + return nil +} + +// downloadFileContent performs an HTTP GET request to the given URL and returns the file content as a byte slice.
+func (fms *FileManagerService) downloadFileContent( + ctx context.Context, + file *mpi.File, +) (content []byte, headers DownloadHeader, err error) { + fileName := file.GetFileMeta().GetName() + downloadURL := file.GetExternalDataSource().GetLocation() + externalConfig := fms.agentConfig.ExternalDataSource // NOTE(review): may be nil when external_data_source is unset or fails validation (resolveExternalDataSource returns nil); guard before dereferencing below + + if !isDomainAllowed(downloadURL, externalConfig.AllowedDomains) { + return nil, DownloadHeader{}, fmt.Errorf("download URL %s is not in the allowed domains list", downloadURL) + } + + httpClient, err := fms.setupHTTPClient(ctx, externalConfig.ProxyURL.URL) + if err != nil { + return nil, DownloadHeader{}, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, nil) + if err != nil { + return nil, DownloadHeader{}, fmt.Errorf("failed to create request for %s: %w", downloadURL, err) + } + + if externalConfig.ProxyURL.URL != "" { + fms.addConditionalHeaders(ctx, req, fileName) + } else { + slog.DebugContext(ctx, "No proxy configured; sending plain HTTP request without caching headers.") + } + + resp, err := httpClient.Do(req) + if err != nil { + return nil, DownloadHeader{}, fmt.Errorf("failed to execute download request for %s: %w", downloadURL, err) + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + headers.ETag = resp.Header.Get("ETag") + headers.LastModified = resp.Header.Get("Last-Modified") + case http.StatusNotModified: + slog.DebugContext(ctx, "File content unchanged (304 Not Modified)", "file_name", fileName) + return nil, DownloadHeader{}, nil + default: + return nil, DownloadHeader{}, fmt.Errorf("download failed with status code %d", resp.StatusCode) + } + + reader := io.Reader(resp.Body) + if fms.agentConfig.ExternalDataSource.MaxBytes > 0 { + reader = io.LimitReader(resp.Body, fms.agentConfig.ExternalDataSource.MaxBytes) + } + + content, err = io.ReadAll(reader) + if err != nil { + return nil, DownloadHeader{}, fmt.Errorf("failed to read content from response body: %w", err) + } + + 
slog.InfoContext(ctx, "Successfully downloaded file content", "file_name", fileName, "size", len(content)) + + return content, headers, nil +} + +func isDomainAllowed(downloadURL string, allowedDomains []string) bool { + u, err := url.Parse(downloadURL) + if err != nil { + slog.Debug("Failed to parse download URL for domain check", "url", downloadURL, "error", err) + return false + } + + hostname := u.Hostname() + if hostname == "" { + return false + } + + for _, domain := range allowedDomains { + if domain == "" { + continue + } + + if domain == hostname || isMatchesWildcardDomain(hostname, domain) { + return true + } + } + + return false +} + +func (fms *FileManagerService) setupHTTPClient(ctx context.Context, proxyURLString string) (*http.Client, error) { + var transport *http.Transport + + if proxyURLString != "" { + proxyURL, err := url.Parse(proxyURLString) + if err != nil { + return nil, fmt.Errorf("invalid proxy URL configured: %w", err) + } + slog.DebugContext(ctx, "Configuring HTTP client to use proxy", "proxy_url", proxyURLString) + transport = &http.Transport{ + Proxy: http.ProxyURL(proxyURL), + } + } else { + slog.DebugContext(ctx, "Configuring HTTP client for direct connection (no proxy)") + transport = &http.Transport{ + Proxy: nil, + } + } + + httpClient := &http.Client{ + Transport: transport, + Timeout: fms.agentConfig.Client.FileDownloadTimeout, + } + + return httpClient, nil +} + +func (fms *FileManagerService) addConditionalHeaders(ctx context.Context, req *http.Request, fileName string) { + slog.DebugContext(ctx, "Proxy configured; adding headers to GET request.") + + manifestFiles, _, manifestFileErr := fms.manifestFile() + + if manifestFileErr != nil && !errors.Is(manifestFileErr, os.ErrNotExist) { + slog.WarnContext(ctx, "Error reading manifest file for headers", "error", manifestFileErr) + } + + manifestFile, ok := manifestFiles[fileName] + + if ok && manifestFile != nil && manifestFile.ManifestFileMeta != nil { + fileMeta := 
manifestFile.ManifestFileMeta + + if fileMeta.ETag != "" { + req.Header.Set("If-None-Match", fileMeta.ETag) + } + if fileMeta.LastModified != "" { + req.Header.Set("If-Modified-Since", fileMeta.LastModified) + } + } else { + slog.DebugContext(ctx, "File not found in manifest or missing metadata; skipping conditional headers.", + "file", fileName) + } +} + +func isMatchesWildcardDomain(hostname, pattern string) bool { + if !strings.HasPrefix(pattern, "*.") { + return false + } + + baseDomain := pattern[2:] + if strings.HasSuffix(hostname, baseDomain) { + // Check to ensure it's a true subdomain match (e.g., must have a '.' + // before baseDomain unless it IS the baseDomain) + // This handles cases like preventing 'foo.com' matching '*.oo.com' + if hostname == baseDomain || hostname[len(hostname)-len(baseDomain)-1] == '.' { + return true + } + } + + return false +} diff --git a/internal/file/file_manager_service_test.go b/internal/file/file_manager_service_test.go index e42128063..904b58086 100644 --- a/internal/file/file_manager_service_test.go +++ b/internal/file/file_manager_service_test.go @@ -10,11 +10,17 @@ import ( "encoding/json" "errors" "fmt" + "net/http" + "net/http/httptest" + "net/url" "os" "path/filepath" + "strings" "sync" "testing" + "time" + "github.com/nginx/agent/v3/internal/config" "github.com/nginx/agent/v3/internal/model" "github.com/nginx/agent/v3/pkg/files" @@ -1173,3 +1179,378 @@ rQHX6DP4w6IwZY8JB8LS }) } } + +func TestFileManagerService_DetermineFileActions_ExternalFile(t *testing.T) { + ctx := context.Background() + tempDir := t.TempDir() + fileName := filepath.Join(tempDir, "external.conf") + + modifiedFiles := map[string]*model.FileCache{ + fileName: { + File: &mpi.File{ + FileMeta: &mpi.FileMeta{ + Name: fileName, + }, + ExternalDataSource: &mpi.ExternalDataSource{Location: "http://example.com/file"}, + }, + }, + } + + fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} + fileManagerService := 
NewFileManagerService(fakeFileServiceClient, types.AgentConfig(), &sync.RWMutex{}) + fileManagerService.agentConfig.AllowedDirectories = []string{tempDir} + + diff, err := fileManagerService.DetermineFileActions(ctx, make(map[string]*mpi.File), modifiedFiles) + require.NoError(t, err) + + fc, ok := diff[fileName] + require.True(t, ok, "expected file to be present in diff") + assert.Equal(t, model.ExternalFile, fc.Action) +} + +//nolint:gocognit,revive,govet // cognitive complexity is 22 +func TestFileManagerService_downloadExternalFiles(t *testing.T) { + type tc struct { + allowedDomains []string + expectContent []byte + name string + expectHeaderETag string + expectHeaderLastMod string + expectErrContains string + handler http.HandlerFunc + maxBytes int + expectError bool + expectTempFile bool + } + + tests := []tc{ + { + name: "Test 1: Success", + handler: func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("ETag", "test-etag") + w.Header().Set("Last-Modified", time.RFC1123) + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("external file content")) + }, + allowedDomains: nil, + maxBytes: 0, + expectError: false, + expectTempFile: true, + expectContent: []byte("external file content"), + expectHeaderETag: "test-etag", + expectHeaderLastMod: time.RFC1123, + }, + { + name: "Test 2: NotModified", + handler: func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotModified) + }, + allowedDomains: nil, + maxBytes: 0, + expectError: false, + expectTempFile: false, + expectContent: nil, + expectHeaderETag: "", + expectHeaderLastMod: "", + }, + { + name: "Test 3: NotAllowedDomain", + handler: func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("external file content")) + }, + allowedDomains: []string{"not-the-host"}, + maxBytes: 0, + expectError: true, + expectErrContains: "not in the allowed domains", + expectTempFile: false, + }, + { + name: "Test 4: NotFound", + handler: func(w 
http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + }, + allowedDomains: nil, + maxBytes: 0, + expectError: true, + expectErrContains: "status code 404", + expectTempFile: false, + }, + { + name: "Test 5: ProxyWithConditionalHeaders", + handler: func(w http.ResponseWriter, r *http.Request) { + // verify conditional headers from manifest are added + if r.Header.Get("If-None-Match") != "manifest-test-etag" { + http.Error(w, "missing If-None-Match", http.StatusBadRequest) + return + } + if r.Header.Get("If-Modified-Since") != time.RFC1123 { + http.Error(w, "missing If-Modified-Since", http.StatusBadRequest) + return + } + w.Header().Set("ETag", "resp-etag") + w.Header().Set("Last-Modified", time.RFC1123) + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("external file via proxy")) + }, + allowedDomains: nil, + maxBytes: 0, + expectError: false, + expectTempFile: true, + expectContent: []byte("external file via proxy"), + expectHeaderETag: "resp-etag", + expectHeaderLastMod: time.RFC1123, + expectErrContains: "", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + tempDir := t.TempDir() + fileName := filepath.Join(tempDir, "external.conf") + + ts := httptest.NewServer(test.handler) + defer ts.Close() + + u, err := url.Parse(ts.URL) + require.NoError(t, err) + host := u.Hostname() + + fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} + fileManagerService := NewFileManagerService(fakeFileServiceClient, types.AgentConfig(), &sync.RWMutex{}) + + eds := &config.ExternalDataSource{ + ProxyURL: config.ProxyURL{URL: ""}, + AllowedDomains: []string{host}, + MaxBytes: int64(test.maxBytes), + } + + if test.allowedDomains != nil { + eds.AllowedDomains = test.allowedDomains + } + + if test.name == "Test 5: ProxyWithConditionalHeaders" { + manifestFiles := map[string]*model.ManifestFile{ + fileName: { + ManifestFileMeta: &model.ManifestFileMeta{ + Name: fileName, + ETag: 
"manifest-test-etag", + LastModified: time.RFC1123, + }, + }, + } + manifestJSON, mErr := json.MarshalIndent(manifestFiles, "", " ") + require.NoError(t, mErr) + + manifestFile, mErr := os.CreateTemp(tempDir, "manifest.json") + require.NoError(t, mErr) + _, mErr = manifestFile.Write(manifestJSON) + require.NoError(t, mErr) + _ = manifestFile.Close() + + fileManagerService.agentConfig.LibDir = tempDir + fileManagerService.manifestFilePath = manifestFile.Name() + + eds.ProxyURL = config.ProxyURL{URL: ts.URL} + } + + fileManagerService.agentConfig.ExternalDataSource = eds + + fileManagerService.fileActions = map[string]*model.FileCache{ + fileName: { + File: &mpi.File{ + FileMeta: &mpi.FileMeta{Name: fileName}, + ExternalDataSource: &mpi.ExternalDataSource{Location: ts.URL}, + }, + Action: model.ExternalFile, + }, + } + + err = fileManagerService.downloadUpdatedFilesToTempLocation(ctx) + + if test.expectError { + require.Error(t, err) + if test.expectErrContains != "" { + assert.Contains(t, err.Error(), test.expectErrContains) + } + _, statErr := os.Stat(tempFilePath(fileName)) + assert.True(t, os.IsNotExist(statErr)) + + return + } + + require.NoError(t, err) + + if test.expectTempFile { + b, readErr := os.ReadFile(tempFilePath(fileName)) + require.NoError(t, readErr) + assert.Equal(t, test.expectContent, b) + + h, ok := fileManagerService.externalFileHeaders[fileName] + require.True(t, ok) + assert.Equal(t, test.expectHeaderETag, h.ETag) + assert.Equal(t, test.expectHeaderLastMod, h.LastModified) + + _ = os.Remove(tempFilePath(fileName)) + } else { + _, statErr := os.Stat(tempFilePath(fileName)) + assert.True(t, os.IsNotExist(statErr)) + } + }) + } +} + +func TestFileManagerService_DownloadFileContent_MaxBytesLimit(t *testing.T) { + ctx := context.Background() + fms := NewFileManagerService(nil, types.AgentConfig(), &sync.RWMutex{}) + + // test server returns 10 bytes, we set MaxBytes to 4 and expect only 4 bytes returned + ts := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("ETag", "etag-1") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("0123456789")) + })) + defer ts.Close() + + u, err := url.Parse(ts.URL) + require.NoError(t, err) + + fms.agentConfig.ExternalDataSource = &config.ExternalDataSource{ + AllowedDomains: []string{u.Hostname()}, + MaxBytes: 4, + } + + fileName := filepath.Join(t.TempDir(), "external.conf") + file := &mpi.File{ + FileMeta: &mpi.FileMeta{Name: fileName}, + ExternalDataSource: &mpi.ExternalDataSource{Location: ts.URL}, + } + + content, headers, err := fms.downloadFileContent(ctx, file) + require.NoError(t, err) + assert.Len(t, content, 4) + assert.Equal(t, "etag-1", headers.ETag) +} + +func TestFileManagerService_TestDownloadFileContent_InvalidProxyURL(t *testing.T) { + ctx := context.Background() + fms := NewFileManagerService(nil, types.AgentConfig(), &sync.RWMutex{}) + + downURL := "http://example.com/file" + fms.agentConfig.ExternalDataSource = &config.ExternalDataSource{ + AllowedDomains: []string{"example.com"}, + ProxyURL: config.ProxyURL{URL: "http://:"}, + } + + file := &mpi.File{ + FileMeta: &mpi.FileMeta{Name: "/tmp/file"}, + ExternalDataSource: &mpi.ExternalDataSource{Location: downURL}, + } + + _, _, err := fms.downloadFileContent(ctx, file) + require.Error(t, err) + if !strings.Contains(err.Error(), "invalid proxy URL configured") && + !strings.Contains(err.Error(), "failed to execute download request") && + !strings.Contains(err.Error(), "proxyconnect") { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestFileManagerService_IsDomainAllowed(t *testing.T) { + type testCase struct { + name string + url string + allowedDomains []string + expected bool + } + + tests := []testCase{ + { + name: "Invalid URL (Percent)", + url: "http://%", + allowedDomains: []string{"example.com"}, + expected: false, + }, + { + name: "Invalid URL (Empty Host)", + url: "http://", + allowedDomains: 
[]string{"example.com"}, + expected: false, + }, + { + name: "Empty Allowed List", + url: "http://example.com/path", + allowedDomains: []string{""}, + expected: false, + }, + { + name: "Basic Match", + url: "http://example.com/path", + allowedDomains: []string{"example.com"}, + expected: true, + }, + { + name: "Wildcard Subdomain Match", + url: "http://sub.example.com/path", + allowedDomains: []string{"*.example.com"}, + expected: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + actual := isDomainAllowed(tc.url, tc.allowedDomains) + assert.Equal(t, tc.expected, actual, "for URL: %s and domains: %v", tc.url, tc.allowedDomains) + }) + } +} + +func TestFileManagerService_IsMatchesWildcardDomain(t *testing.T) { + type testCase struct { + name string + hostname string + pattern string + expected bool + } + + tests := []testCase{ + { + name: "True Match - Subdomain", + hostname: "sub.example.com", + pattern: "*.example.com", + expected: true, + }, + { + name: "True Match - Exact Base Domain", + hostname: "example.com", + pattern: "*.example.com", + expected: true, + }, + { + name: "False Match - Bad Domain Suffix", + hostname: "badexample.com", + pattern: "*.example.com", + expected: false, + }, + { + name: "False Match - No Wildcard Prefix", + hostname: "test.com", + pattern: "google.com", + expected: false, + }, + { + name: "False Match - Different Suffix", + hostname: "sub.anotherexample.com", + pattern: "*.example.com", + expected: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + actual := isMatchesWildcardDomain(tc.hostname, tc.pattern) + assert.Equal(t, tc.expected, actual, "Hostname: %s, Pattern: %s", tc.hostname, tc.pattern) + }) + } +} diff --git a/internal/file/file_service_operator.go b/internal/file/file_service_operator.go index 19211c600..35e4731da 100644 --- a/internal/file/file_service_operator.go +++ b/internal/file/file_service_operator.go @@ -79,7 +79,10 @@ func (fso 
*FileServiceOperator) File( defer backoffCancel() getFile := func() (*mpi.GetFileResponse, error) { - return fso.fileServiceClient.GetFile(ctx, &mpi.GetFileRequest{ + grpcCtx, cancel := context.WithTimeout(ctx, fso.agentConfig.Client.FileDownloadTimeout) + defer cancel() + + return fso.fileServiceClient.GetFile(grpcCtx, &mpi.GetFileRequest{ MessageMeta: &mpi.MessageMeta{ MessageId: id.GenerateMessageID(), CorrelationId: logger.CorrelationID(ctx), @@ -107,7 +110,7 @@ func (fso *FileServiceOperator) File( return writeErr } - return fso.validateFileHash(tempFilePath, expectedHash) + return fso.ValidateFileHash(ctx, tempFilePath, expectedHash) } func (fso *FileServiceOperator) UpdateOverview( @@ -225,7 +228,10 @@ func (fso *FileServiceOperator) ChunkedFile( ) error { slog.DebugContext(ctx, "Getting chunked file", "file", file.GetFileMeta().GetName()) - stream, err := fso.fileServiceClient.GetFileStream(ctx, &mpi.GetFileRequest{ + grpcCtx, cancel := context.WithTimeout(ctx, fso.agentConfig.Client.FileDownloadTimeout) + defer cancel() + + stream, err := fso.fileServiceClient.GetFileStream(grpcCtx, &mpi.GetFileRequest{ MessageMeta: &mpi.MessageMeta{ MessageId: id.GenerateMessageID(), CorrelationId: logger.CorrelationID(ctx), @@ -254,7 +260,7 @@ func (fso *FileServiceOperator) ChunkedFile( return writeChunkedFileError } - return fso.validateFileHash(tempFilePath, expectedHash) + return fso.ValidateFileHash(ctx, tempFilePath, expectedHash) } func (fso *FileServiceOperator) UpdateFile( @@ -278,7 +284,7 @@ func (fso *FileServiceOperator) UpdateFile( // renameFile, renames (moves) file from tempDir to new location to update file. 
func (fso *FileServiceOperator) RenameFile( - ctx context.Context, hash, source, desination string, + ctx context.Context, source, desination string, ) error { slog.DebugContext(ctx, fmt.Sprintf("Renaming file %s to %s", source, desination)) @@ -292,10 +298,11 @@ func (fso *FileServiceOperator) RenameFile( return fmt.Errorf("failed to rename file: %w", moveErr) } - return fso.validateFileHash(desination, hash) + return nil } -func (fso *FileServiceOperator) validateFileHash(filePath, expectedHash string) error { +func (fso *FileServiceOperator) ValidateFileHash(ctx context.Context, filePath, expectedHash string) error { + slog.DebugContext(ctx, "Validating file hash for file ", "file_path", filePath) content, err := os.ReadFile(filePath) if err != nil { return err @@ -371,12 +378,15 @@ func (fso *FileServiceOperator) sendUpdateFileRequest( return nil, errors.New("CreateConnection rpc has not being called yet") } - response, updateError := fso.fileServiceClient.UpdateFile(ctx, request) + grpcCtx, cancel := context.WithTimeout(ctx, fso.agentConfig.Client.FileDownloadTimeout) + defer cancel() + + response, updateError := fso.fileServiceClient.UpdateFile(grpcCtx, request) validatedError := internalgrpc.ValidateGrpcError(updateError) if validatedError != nil { - slog.ErrorContext(ctx, "Failed to send update file", "error", validatedError) + slog.ErrorContext(grpcCtx, "Failed to send update file", "error", validatedError) return nil, validatedError } @@ -406,7 +416,10 @@ func (fso *FileServiceOperator) sendUpdateFileStream( return errors.New("file chunk size must be greater than zero") } - updateFileStreamClient, err := fso.fileServiceClient.UpdateFileStream(ctx) + grpcCtx, cancel := context.WithTimeout(ctx, fso.agentConfig.Client.FileDownloadTimeout) + defer cancel() + + updateFileStreamClient, err := fso.fileServiceClient.UpdateFileStream(grpcCtx) if err != nil { return err } diff --git a/internal/file/filefakes/fake_file_service_operator_interface.go 
b/internal/file/filefakes/fake_file_service_operator_interface.go new file mode 100644 index 000000000..dc559e41d --- /dev/null +++ b/internal/file/filefakes/fake_file_service_operator_interface.go @@ -0,0 +1,662 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package filefakes + +import ( + "context" + "sync" + + v1 "github.com/nginx/agent/v3/api/grpc/mpi/v1" +) + +type FakeFileServiceOperatorInterface struct { + ChunkedFileStub func(context.Context, *v1.File, string, string) error + chunkedFileMutex sync.RWMutex + chunkedFileArgsForCall []struct { + arg1 context.Context + arg2 *v1.File + arg3 string + arg4 string + } + chunkedFileReturns struct { + result1 error + } + chunkedFileReturnsOnCall map[int]struct { + result1 error + } + FileStub func(context.Context, *v1.File, string, string) error + fileMutex sync.RWMutex + fileArgsForCall []struct { + arg1 context.Context + arg2 *v1.File + arg3 string + arg4 string + } + fileReturns struct { + result1 error + } + fileReturnsOnCall map[int]struct { + result1 error + } + IsConnectedStub func() bool + isConnectedMutex sync.RWMutex + isConnectedArgsForCall []struct { + } + isConnectedReturns struct { + result1 bool + } + isConnectedReturnsOnCall map[int]struct { + result1 bool + } + RenameFileStub func(context.Context, string, string) error + renameFileMutex sync.RWMutex + renameFileArgsForCall []struct { + arg1 context.Context + arg2 string + arg3 string + } + renameFileReturns struct { + result1 error + } + renameFileReturnsOnCall map[int]struct { + result1 error + } + SetIsConnectedStub func(bool) + setIsConnectedMutex sync.RWMutex + setIsConnectedArgsForCall []struct { + arg1 bool + } + UpdateClientStub func(context.Context, v1.FileServiceClient) + updateClientMutex sync.RWMutex + updateClientArgsForCall []struct { + arg1 context.Context + arg2 v1.FileServiceClient + } + UpdateFileStub func(context.Context, string, *v1.File) error + updateFileMutex sync.RWMutex + updateFileArgsForCall []struct { + arg1 
context.Context + arg2 string + arg3 *v1.File + } + updateFileReturns struct { + result1 error + } + updateFileReturnsOnCall map[int]struct { + result1 error + } + UpdateOverviewStub func(context.Context, string, []*v1.File, string, int) error + updateOverviewMutex sync.RWMutex + updateOverviewArgsForCall []struct { + arg1 context.Context + arg2 string + arg3 []*v1.File + arg4 string + arg5 int + } + updateOverviewReturns struct { + result1 error + } + updateOverviewReturnsOnCall map[int]struct { + result1 error + } + ValidateFileHashStub func(context.Context, string, string) error + validateFileHashMutex sync.RWMutex + validateFileHashArgsForCall []struct { + arg1 context.Context + arg2 string + arg3 string + } + validateFileHashReturns struct { + result1 error + } + validateFileHashReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeFileServiceOperatorInterface) ChunkedFile(arg1 context.Context, arg2 *v1.File, arg3 string, arg4 string) error { + fake.chunkedFileMutex.Lock() + ret, specificReturn := fake.chunkedFileReturnsOnCall[len(fake.chunkedFileArgsForCall)] + fake.chunkedFileArgsForCall = append(fake.chunkedFileArgsForCall, struct { + arg1 context.Context + arg2 *v1.File + arg3 string + arg4 string + }{arg1, arg2, arg3, arg4}) + stub := fake.ChunkedFileStub + fakeReturns := fake.chunkedFileReturns + fake.recordInvocation("ChunkedFile", []interface{}{arg1, arg2, arg3, arg4}) + fake.chunkedFileMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeFileServiceOperatorInterface) ChunkedFileCallCount() int { + fake.chunkedFileMutex.RLock() + defer fake.chunkedFileMutex.RUnlock() + return len(fake.chunkedFileArgsForCall) +} + +func (fake *FakeFileServiceOperatorInterface) ChunkedFileCalls(stub func(context.Context, *v1.File, string, string) error) { + 
fake.chunkedFileMutex.Lock() + defer fake.chunkedFileMutex.Unlock() + fake.ChunkedFileStub = stub +} + +func (fake *FakeFileServiceOperatorInterface) ChunkedFileArgsForCall(i int) (context.Context, *v1.File, string, string) { + fake.chunkedFileMutex.RLock() + defer fake.chunkedFileMutex.RUnlock() + argsForCall := fake.chunkedFileArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *FakeFileServiceOperatorInterface) ChunkedFileReturns(result1 error) { + fake.chunkedFileMutex.Lock() + defer fake.chunkedFileMutex.Unlock() + fake.ChunkedFileStub = nil + fake.chunkedFileReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeFileServiceOperatorInterface) ChunkedFileReturnsOnCall(i int, result1 error) { + fake.chunkedFileMutex.Lock() + defer fake.chunkedFileMutex.Unlock() + fake.ChunkedFileStub = nil + if fake.chunkedFileReturnsOnCall == nil { + fake.chunkedFileReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.chunkedFileReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeFileServiceOperatorInterface) File(arg1 context.Context, arg2 *v1.File, arg3 string, arg4 string) error { + fake.fileMutex.Lock() + ret, specificReturn := fake.fileReturnsOnCall[len(fake.fileArgsForCall)] + fake.fileArgsForCall = append(fake.fileArgsForCall, struct { + arg1 context.Context + arg2 *v1.File + arg3 string + arg4 string + }{arg1, arg2, arg3, arg4}) + stub := fake.FileStub + fakeReturns := fake.fileReturns + fake.recordInvocation("File", []interface{}{arg1, arg2, arg3, arg4}) + fake.fileMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeFileServiceOperatorInterface) FileCallCount() int { + fake.fileMutex.RLock() + defer fake.fileMutex.RUnlock() + return len(fake.fileArgsForCall) +} + +func (fake *FakeFileServiceOperatorInterface) FileCalls(stub 
func(context.Context, *v1.File, string, string) error) { + fake.fileMutex.Lock() + defer fake.fileMutex.Unlock() + fake.FileStub = stub +} + +func (fake *FakeFileServiceOperatorInterface) FileArgsForCall(i int) (context.Context, *v1.File, string, string) { + fake.fileMutex.RLock() + defer fake.fileMutex.RUnlock() + argsForCall := fake.fileArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *FakeFileServiceOperatorInterface) FileReturns(result1 error) { + fake.fileMutex.Lock() + defer fake.fileMutex.Unlock() + fake.FileStub = nil + fake.fileReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeFileServiceOperatorInterface) FileReturnsOnCall(i int, result1 error) { + fake.fileMutex.Lock() + defer fake.fileMutex.Unlock() + fake.FileStub = nil + if fake.fileReturnsOnCall == nil { + fake.fileReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.fileReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeFileServiceOperatorInterface) IsConnected() bool { + fake.isConnectedMutex.Lock() + ret, specificReturn := fake.isConnectedReturnsOnCall[len(fake.isConnectedArgsForCall)] + fake.isConnectedArgsForCall = append(fake.isConnectedArgsForCall, struct { + }{}) + stub := fake.IsConnectedStub + fakeReturns := fake.isConnectedReturns + fake.recordInvocation("IsConnected", []interface{}{}) + fake.isConnectedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeFileServiceOperatorInterface) IsConnectedCallCount() int { + fake.isConnectedMutex.RLock() + defer fake.isConnectedMutex.RUnlock() + return len(fake.isConnectedArgsForCall) +} + +func (fake *FakeFileServiceOperatorInterface) IsConnectedCalls(stub func() bool) { + fake.isConnectedMutex.Lock() + defer fake.isConnectedMutex.Unlock() + fake.IsConnectedStub = stub +} + +func (fake *FakeFileServiceOperatorInterface) 
IsConnectedReturns(result1 bool) { + fake.isConnectedMutex.Lock() + defer fake.isConnectedMutex.Unlock() + fake.IsConnectedStub = nil + fake.isConnectedReturns = struct { + result1 bool + }{result1} +} + +func (fake *FakeFileServiceOperatorInterface) IsConnectedReturnsOnCall(i int, result1 bool) { + fake.isConnectedMutex.Lock() + defer fake.isConnectedMutex.Unlock() + fake.IsConnectedStub = nil + if fake.isConnectedReturnsOnCall == nil { + fake.isConnectedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.isConnectedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *FakeFileServiceOperatorInterface) RenameFile(arg1 context.Context, arg2 string, arg3 string) error { + fake.renameFileMutex.Lock() + ret, specificReturn := fake.renameFileReturnsOnCall[len(fake.renameFileArgsForCall)] + fake.renameFileArgsForCall = append(fake.renameFileArgsForCall, struct { + arg1 context.Context + arg2 string + arg3 string + }{arg1, arg2, arg3}) + stub := fake.RenameFileStub + fakeReturns := fake.renameFileReturns + fake.recordInvocation("RenameFile", []interface{}{arg1, arg2, arg3}) + fake.renameFileMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeFileServiceOperatorInterface) RenameFileCallCount() int { + fake.renameFileMutex.RLock() + defer fake.renameFileMutex.RUnlock() + return len(fake.renameFileArgsForCall) +} + +func (fake *FakeFileServiceOperatorInterface) RenameFileCalls(stub func(context.Context, string, string) error) { + fake.renameFileMutex.Lock() + defer fake.renameFileMutex.Unlock() + fake.RenameFileStub = stub +} + +func (fake *FakeFileServiceOperatorInterface) RenameFileArgsForCall(i int) (context.Context, string, string) { + fake.renameFileMutex.RLock() + defer fake.renameFileMutex.RUnlock() + argsForCall := fake.renameFileArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func 
(fake *FakeFileServiceOperatorInterface) RenameFileReturns(result1 error) { + fake.renameFileMutex.Lock() + defer fake.renameFileMutex.Unlock() + fake.RenameFileStub = nil + fake.renameFileReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeFileServiceOperatorInterface) RenameFileReturnsOnCall(i int, result1 error) { + fake.renameFileMutex.Lock() + defer fake.renameFileMutex.Unlock() + fake.RenameFileStub = nil + if fake.renameFileReturnsOnCall == nil { + fake.renameFileReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.renameFileReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeFileServiceOperatorInterface) SetIsConnected(arg1 bool) { + fake.setIsConnectedMutex.Lock() + fake.setIsConnectedArgsForCall = append(fake.setIsConnectedArgsForCall, struct { + arg1 bool + }{arg1}) + stub := fake.SetIsConnectedStub + fake.recordInvocation("SetIsConnected", []interface{}{arg1}) + fake.setIsConnectedMutex.Unlock() + if stub != nil { + fake.SetIsConnectedStub(arg1) + } +} + +func (fake *FakeFileServiceOperatorInterface) SetIsConnectedCallCount() int { + fake.setIsConnectedMutex.RLock() + defer fake.setIsConnectedMutex.RUnlock() + return len(fake.setIsConnectedArgsForCall) +} + +func (fake *FakeFileServiceOperatorInterface) SetIsConnectedCalls(stub func(bool)) { + fake.setIsConnectedMutex.Lock() + defer fake.setIsConnectedMutex.Unlock() + fake.SetIsConnectedStub = stub +} + +func (fake *FakeFileServiceOperatorInterface) SetIsConnectedArgsForCall(i int) bool { + fake.setIsConnectedMutex.RLock() + defer fake.setIsConnectedMutex.RUnlock() + argsForCall := fake.setIsConnectedArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeFileServiceOperatorInterface) UpdateClient(arg1 context.Context, arg2 v1.FileServiceClient) { + fake.updateClientMutex.Lock() + fake.updateClientArgsForCall = append(fake.updateClientArgsForCall, struct { + arg1 context.Context + arg2 v1.FileServiceClient + }{arg1, arg2}) + stub := 
fake.UpdateClientStub + fake.recordInvocation("UpdateClient", []interface{}{arg1, arg2}) + fake.updateClientMutex.Unlock() + if stub != nil { + fake.UpdateClientStub(arg1, arg2) + } +} + +func (fake *FakeFileServiceOperatorInterface) UpdateClientCallCount() int { + fake.updateClientMutex.RLock() + defer fake.updateClientMutex.RUnlock() + return len(fake.updateClientArgsForCall) +} + +func (fake *FakeFileServiceOperatorInterface) UpdateClientCalls(stub func(context.Context, v1.FileServiceClient)) { + fake.updateClientMutex.Lock() + defer fake.updateClientMutex.Unlock() + fake.UpdateClientStub = stub +} + +func (fake *FakeFileServiceOperatorInterface) UpdateClientArgsForCall(i int) (context.Context, v1.FileServiceClient) { + fake.updateClientMutex.RLock() + defer fake.updateClientMutex.RUnlock() + argsForCall := fake.updateClientArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeFileServiceOperatorInterface) UpdateFile(arg1 context.Context, arg2 string, arg3 *v1.File) error { + fake.updateFileMutex.Lock() + ret, specificReturn := fake.updateFileReturnsOnCall[len(fake.updateFileArgsForCall)] + fake.updateFileArgsForCall = append(fake.updateFileArgsForCall, struct { + arg1 context.Context + arg2 string + arg3 *v1.File + }{arg1, arg2, arg3}) + stub := fake.UpdateFileStub + fakeReturns := fake.updateFileReturns + fake.recordInvocation("UpdateFile", []interface{}{arg1, arg2, arg3}) + fake.updateFileMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeFileServiceOperatorInterface) UpdateFileCallCount() int { + fake.updateFileMutex.RLock() + defer fake.updateFileMutex.RUnlock() + return len(fake.updateFileArgsForCall) +} + +func (fake *FakeFileServiceOperatorInterface) UpdateFileCalls(stub func(context.Context, string, *v1.File) error) { + fake.updateFileMutex.Lock() + defer fake.updateFileMutex.Unlock() + fake.UpdateFileStub = 
stub +} + +func (fake *FakeFileServiceOperatorInterface) UpdateFileArgsForCall(i int) (context.Context, string, *v1.File) { + fake.updateFileMutex.RLock() + defer fake.updateFileMutex.RUnlock() + argsForCall := fake.updateFileArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeFileServiceOperatorInterface) UpdateFileReturns(result1 error) { + fake.updateFileMutex.Lock() + defer fake.updateFileMutex.Unlock() + fake.UpdateFileStub = nil + fake.updateFileReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeFileServiceOperatorInterface) UpdateFileReturnsOnCall(i int, result1 error) { + fake.updateFileMutex.Lock() + defer fake.updateFileMutex.Unlock() + fake.UpdateFileStub = nil + if fake.updateFileReturnsOnCall == nil { + fake.updateFileReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateFileReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeFileServiceOperatorInterface) UpdateOverview(arg1 context.Context, arg2 string, arg3 []*v1.File, arg4 string, arg5 int) error { + var arg3Copy []*v1.File + if arg3 != nil { + arg3Copy = make([]*v1.File, len(arg3)) + copy(arg3Copy, arg3) + } + fake.updateOverviewMutex.Lock() + ret, specificReturn := fake.updateOverviewReturnsOnCall[len(fake.updateOverviewArgsForCall)] + fake.updateOverviewArgsForCall = append(fake.updateOverviewArgsForCall, struct { + arg1 context.Context + arg2 string + arg3 []*v1.File + arg4 string + arg5 int + }{arg1, arg2, arg3Copy, arg4, arg5}) + stub := fake.UpdateOverviewStub + fakeReturns := fake.updateOverviewReturns + fake.recordInvocation("UpdateOverview", []interface{}{arg1, arg2, arg3Copy, arg4, arg5}) + fake.updateOverviewMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4, arg5) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeFileServiceOperatorInterface) UpdateOverviewCallCount() int { + 
fake.updateOverviewMutex.RLock() + defer fake.updateOverviewMutex.RUnlock() + return len(fake.updateOverviewArgsForCall) +} + +func (fake *FakeFileServiceOperatorInterface) UpdateOverviewCalls(stub func(context.Context, string, []*v1.File, string, int) error) { + fake.updateOverviewMutex.Lock() + defer fake.updateOverviewMutex.Unlock() + fake.UpdateOverviewStub = stub +} + +func (fake *FakeFileServiceOperatorInterface) UpdateOverviewArgsForCall(i int) (context.Context, string, []*v1.File, string, int) { + fake.updateOverviewMutex.RLock() + defer fake.updateOverviewMutex.RUnlock() + argsForCall := fake.updateOverviewArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5 +} + +func (fake *FakeFileServiceOperatorInterface) UpdateOverviewReturns(result1 error) { + fake.updateOverviewMutex.Lock() + defer fake.updateOverviewMutex.Unlock() + fake.UpdateOverviewStub = nil + fake.updateOverviewReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeFileServiceOperatorInterface) UpdateOverviewReturnsOnCall(i int, result1 error) { + fake.updateOverviewMutex.Lock() + defer fake.updateOverviewMutex.Unlock() + fake.UpdateOverviewStub = nil + if fake.updateOverviewReturnsOnCall == nil { + fake.updateOverviewReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateOverviewReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeFileServiceOperatorInterface) ValidateFileHash(arg1 context.Context, arg2 string, arg3 string) error { + fake.validateFileHashMutex.Lock() + ret, specificReturn := fake.validateFileHashReturnsOnCall[len(fake.validateFileHashArgsForCall)] + fake.validateFileHashArgsForCall = append(fake.validateFileHashArgsForCall, struct { + arg1 context.Context + arg2 string + arg3 string + }{arg1, arg2, arg3}) + stub := fake.ValidateFileHashStub + fakeReturns := fake.validateFileHashReturns + fake.recordInvocation("ValidateFileHash", []interface{}{arg1, arg2, 
arg3}) + fake.validateFileHashMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeFileServiceOperatorInterface) ValidateFileHashCallCount() int { + fake.validateFileHashMutex.RLock() + defer fake.validateFileHashMutex.RUnlock() + return len(fake.validateFileHashArgsForCall) +} + +func (fake *FakeFileServiceOperatorInterface) ValidateFileHashCalls(stub func(context.Context, string, string) error) { + fake.validateFileHashMutex.Lock() + defer fake.validateFileHashMutex.Unlock() + fake.ValidateFileHashStub = stub +} + +func (fake *FakeFileServiceOperatorInterface) ValidateFileHashArgsForCall(i int) (context.Context, string, string) { + fake.validateFileHashMutex.RLock() + defer fake.validateFileHashMutex.RUnlock() + argsForCall := fake.validateFileHashArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeFileServiceOperatorInterface) ValidateFileHashReturns(result1 error) { + fake.validateFileHashMutex.Lock() + defer fake.validateFileHashMutex.Unlock() + fake.ValidateFileHashStub = nil + fake.validateFileHashReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeFileServiceOperatorInterface) ValidateFileHashReturnsOnCall(i int, result1 error) { + fake.validateFileHashMutex.Lock() + defer fake.validateFileHashMutex.Unlock() + fake.ValidateFileHashStub = nil + if fake.validateFileHashReturnsOnCall == nil { + fake.validateFileHashReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.validateFileHashReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeFileServiceOperatorInterface) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.chunkedFileMutex.RLock() + defer fake.chunkedFileMutex.RUnlock() + fake.fileMutex.RLock() + defer fake.fileMutex.RUnlock() + fake.isConnectedMutex.RLock() + 
defer fake.isConnectedMutex.RUnlock() + fake.renameFileMutex.RLock() + defer fake.renameFileMutex.RUnlock() + fake.setIsConnectedMutex.RLock() + defer fake.setIsConnectedMutex.RUnlock() + fake.updateClientMutex.RLock() + defer fake.updateClientMutex.RUnlock() + fake.updateFileMutex.RLock() + defer fake.updateFileMutex.RUnlock() + fake.updateOverviewMutex.RLock() + defer fake.updateOverviewMutex.RUnlock() + fake.validateFileHashMutex.RLock() + defer fake.validateFileHashMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeFileServiceOperatorInterface) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} diff --git a/internal/model/config.go b/internal/model/config.go index d13c299fd..324a5ffd6 100644 --- a/internal/model/config.go +++ b/internal/model/config.go @@ -42,6 +42,10 @@ type ManifestFileMeta struct { Name string `json:"name"` // The hash of the file contents sha256, hex encoded Hash string `json:"hash"` + // ETag of the 3rd Party external file + ETag string `json:"etag"` + // Last modified time of the 3rd Party external file + LastModified string `json:"last_modified"` // The size of the file in bytes Size int64 `json:"size"` // File referenced in the NGINX config diff --git a/internal/model/file.go b/internal/model/file.go index fc6c5baca..671a4ff85 100644 --- a/internal/model/file.go +++ b/internal/model/file.go @@ -19,4 +19,5 @@ const ( Update Delete Unchanged + ExternalFile ) diff --git a/test/mock/grpc/mock_management_command_service.go b/test/mock/grpc/mock_management_command_service.go index 
f68c4c7cd..b1badb443 100644 --- a/test/mock/grpc/mock_management_command_service.go +++ b/test/mock/grpc/mock_management_command_service.go @@ -577,7 +577,8 @@ func processConfigApplyRequestBody(c *gin.Context, initialFiles []*mpi.File) ([] } else { newFile := &mpi.File{ FileMeta: &mpi.FileMeta{ - Name: ed.FilePath, + Name: ed.FilePath, + Permissions: "0644", }, ExternalDataSource: &mpi.ExternalDataSource{ Location: ed.Location,