-
}
- endIcon={
- getAppliedFiltersCount(searchParams) > 0 ? (
-
- ) : null
- }
- size="sm"
- onClick={() => {
- setFiltersExpanded((prev) => !prev);
- }}
- >
- Filter
-
+
+ }
+ onClick={() => {
+ const formData = new FormData();
+ formData.append('actionType', ActionEnumType.DOWNLOAD);
+ fetcher.submit(formData, {
+ method: 'post',
+ });
+ }}
+ >
+ Download
+
+ }
+ endIcon={
+ getAppliedFiltersCount(searchParams) > 0 ? (
+
+ ) : null
+ }
+ size="sm"
+ onClick={() => {
+ setFiltersExpanded((prev) => !prev);
+ }}
+ >
+ Filter
+
+
{filtersExpanded ?
: null}
}>
@@ -467,4 +598,5 @@ const MostExploitableVulnerabilities = () => {
export const module = {
element:
,
+ action,
};
diff --git a/deepfence_server/apiDocs/operation.go b/deepfence_server/apiDocs/operation.go
index 7350d1c78d..744567dbcb 100644
--- a/deepfence_server/apiDocs/operation.go
+++ b/deepfence_server/apiDocs/operation.go
@@ -758,6 +758,9 @@ func (d *OpenApiDocs) AddSettingsOperations() {
d.AddOperation("addScheduledTask", http.MethodPost, "/deepfence/scheduled-task",
"Add scheduled task", "Add scheduled task",
http.StatusNoContent, []string{tagSettings}, bearerToken, new(AddScheduledTaskRequest), nil)
+ d.AddOperation("deleteCustomScheduledTask", http.MethodDelete, "/deepfence/scheduled-task/{id}",
+ "Delete Custom Schedule task", "Delete Custom Schedule task",
+ http.StatusNoContent, []string{tagSettings}, bearerToken, new(ScheduleJobId), nil)
// Database upload
d.AddOperation("uploadVulnerabilityDatabase", http.MethodPut, "/deepfence/database/vulnerability",
diff --git a/deepfence_server/controls/agent.go b/deepfence_server/controls/agent.go
index a0182ab91c..bf0c4ea279 100644
--- a/deepfence_server/controls/agent.go
+++ b/deepfence_server/controls/agent.go
@@ -81,10 +81,7 @@ func GetPendingAgentScans(ctx context.Context, nodeId string, availableWorkload
return res, err
}
- session, err := client.Session(neo4j.AccessModeWrite)
- if err != nil {
- return res, err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -99,7 +96,9 @@ func GetPendingAgentScans(ctx context.Context, nodeId string, availableWorkload
AND s.retries < 3
SET s.retries = s.retries + 1
WITH s
- RETURN s.trigger_action`, map[string]interface{}{"id": nodeId})
+ RETURN s.trigger_action
+ ORDER BY s.is_priority DESC, s.updated_at ASC`,
+ map[string]interface{}{"id": nodeId})
if err != nil {
return res, err
@@ -134,10 +133,8 @@ func GetPendingAgentScans(ctx context.Context, nodeId string, availableWorkload
}
func hasAgentDiagnosticLogRequests(client neo4j.Driver, nodeId string, nodeType controls.ScanResource, max_work int) (bool, error) {
- session, err := client.Session(neo4j.AccessModeRead)
- if err != nil {
- return false, err
- }
+
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -178,10 +175,7 @@ func ExtractAgentDiagnosticLogRequests(ctx context.Context, nodeId string, nodeT
return res, err
}
- session, err := client.Session(neo4j.AccessModeWrite)
- if err != nil {
- return res, err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -230,10 +224,7 @@ func ExtractAgentDiagnosticLogRequests(ctx context.Context, nodeId string, nodeT
}
func hasPendingAgentScans(client neo4j.Driver, nodeId string, max_work int) (bool, error) {
- session, err := client.Session(neo4j.AccessModeRead)
- if err != nil {
- return false, err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -274,10 +265,7 @@ func ExtractStartingAgentScans(ctx context.Context, nodeId string,
return res, err
}
- session, err := client.Session(neo4j.AccessModeWrite)
- if err != nil {
- return res, err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -289,7 +277,7 @@ func ExtractStartingAgentScans(ctx context.Context, nodeId string,
r, err := tx.Run(`MATCH (s) -[:SCHEDULED]-> (n:Node{node_id:$id})
WHERE s.status = '`+utils.SCAN_STATUS_STARTING+`'
AND s.retries < 3
- WITH s LIMIT $max_work
+ WITH s ORDER BY s.is_priority DESC, s.updated_at ASC LIMIT $max_work
SET s.status = '`+utils.SCAN_STATUS_INPROGRESS+`', s.updated_at = TIMESTAMP()
WITH s
RETURN s.trigger_action`,
@@ -340,10 +328,7 @@ func ExtractStoppingAgentScans(ctx context.Context, nodeId string,
return res, err
}
- session, err := client.Session(neo4j.AccessModeWrite)
- if err != nil {
- return res, err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -406,10 +391,7 @@ func ExtractStoppingAgentScans(ctx context.Context, nodeId string,
}
func hasPendingAgentUpgrade(client neo4j.Driver, nodeId string, max_work int) (bool, error) {
- session, err := client.Session(neo4j.AccessModeRead)
- if err != nil {
- return false, err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -448,10 +430,7 @@ func ExtractPendingAgentUpgrade(ctx context.Context, nodeId string, max_work int
return res, err
}
- session, err := client.Session(neo4j.AccessModeWrite)
- if err != nil {
- return res, err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -512,10 +491,7 @@ func CheckNodeExist(ctx context.Context, nodeId string) error {
return err
}
- session, err := client.Session(neo4j.AccessModeRead)
- if err != nil {
- return err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
diff --git a/deepfence_server/controls/kubernetes_cluster.go b/deepfence_server/controls/kubernetes_cluster.go
index a72a5e64cf..73f137c704 100644
--- a/deepfence_server/controls/kubernetes_cluster.go
+++ b/deepfence_server/controls/kubernetes_cluster.go
@@ -113,10 +113,7 @@ func ExtractPendingKubernetesClusterUpgrade(ctx context.Context, nodeId string,
return res, err
}
- session, err := client.Session(neo4j.AccessModeWrite)
- if err != nil {
- return res, err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
diff --git a/deepfence_server/controls/version.go b/deepfence_server/controls/version.go
index d6ff9bbbe0..8a3a9312f9 100644
--- a/deepfence_server/controls/version.go
+++ b/deepfence_server/controls/version.go
@@ -25,10 +25,7 @@ func ScheduleAgentUpgrade(ctx context.Context, version string, nodeIds []string,
return err
}
- session, err := client.Session(neo4j.AccessModeWrite)
- if err != nil {
- return err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -69,10 +66,7 @@ func GetAgentVersionTarball(ctx context.Context, version string) (string, error)
return "", err
}
- session, err := client.Session(neo4j.AccessModeRead)
- if err != nil {
- return "", err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -83,7 +77,7 @@ func GetAgentVersionTarball(ctx context.Context, version string) (string, error)
res, err := tx.Run(`
MATCH (v:AgentVersion{node_id: $version})
- return v.url`,
+ RETURN v.url`,
map[string]interface{}{
"version": version,
})
@@ -108,10 +102,7 @@ func GetAgentPluginVersionTarball(ctx context.Context, version, plugin_name stri
return "", err
}
- session, err := client.Session(neo4j.AccessModeRead)
- if err != nil {
- return "", err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -148,10 +139,7 @@ func hasPendingUpgradeOrNew(ctx context.Context, version string, nodeId string)
return false, err
}
- session, err := client.Session(neo4j.AccessModeRead)
- if err != nil {
- return false, err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -199,10 +187,7 @@ func CompleteAgentUpgrade(ctx context.Context, version string, nodeId string) er
return err
}
- session, err := client.Session(neo4j.AccessModeWrite)
- if err != nil {
- return err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -248,10 +233,7 @@ func ScheduleAgentPluginEnable(ctx context.Context, version, plugin_name string,
return err
}
- session, err := client.Session(neo4j.AccessModeWrite)
- if err != nil {
- return err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -299,10 +281,7 @@ func ScheduleAgentPluginDisable(ctx context.Context, plugin_name string, nodeIds
return err
}
- session, err := client.Session(neo4j.AccessModeWrite)
- if err != nil {
- return err
- }
+ session := client.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
diff --git a/deepfence_server/diagnosis/agent-diagnosis/agent_diagnosis.go b/deepfence_server/diagnosis/agent-diagnosis/agent_diagnosis.go
index 2df8e41ed5..51ab0fbd2b 100644
--- a/deepfence_server/diagnosis/agent-diagnosis/agent_diagnosis.go
+++ b/deepfence_server/diagnosis/agent-diagnosis/agent_diagnosis.go
@@ -3,7 +3,6 @@ package agent_diagnosis
import (
"context"
"encoding/json"
- "errors"
"fmt"
"net/url"
"path/filepath"
@@ -18,19 +17,6 @@ import (
"github.com/neo4j/neo4j-go-driver/v4/neo4j"
)
-func missing(a, b []string) string {
- ma := make(map[string]bool, len(a))
- for _, ka := range a {
- ma[ka] = true
- }
- for _, kb := range b {
- if !ma[kb] {
- return kb
- }
- }
- return ""
-}
-
func verifyNodeIds(ctx context.Context, nodeIdentifiers []diagnosis.NodeIdentifier) (map[string]struct{}, error) {
inProgressNodeIds := map[string]struct{}{}
driver, err := directory.Neo4jClient(ctx)
@@ -82,7 +68,7 @@ func verifyNodeIds(ctx context.Context, nodeIdentifiers []diagnosis.NodeIdentifi
}
}
if len(missingNodes) > 0 {
- return inProgressNodeIds, errors.New(fmt.Sprintf("could not find nodes %v", missingNodes))
+ return inProgressNodeIds, fmt.Errorf("could not find nodes %v", missingNodes)
}
return inProgressNodeIds, nil
}
@@ -98,6 +84,9 @@ func UpdateAgentDiagnosticLogsStatus(ctx context.Context, status diagnosis.Diagn
}
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
+ if err != nil {
+ return err
+ }
defer tx.Close()
_, err = tx.Run(`
@@ -148,6 +137,9 @@ func GenerateAgentDiagnosticLogs(ctx context.Context, nodeIdentifiers []diagnosi
}
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
+ if err != nil {
+ return err
+ }
defer tx.Close()
fileNameSuffix := "-" + time.Now().Format("2006-01-02-15-04-05") + ".zip"
diff --git a/deepfence_server/diagnosis/cloudscanner-diagnosis/cloudscanner_diagnosis.go b/deepfence_server/diagnosis/cloudscanner-diagnosis/cloudscanner_diagnosis.go
index efafd90fc8..b9a6f4c028 100644
--- a/deepfence_server/diagnosis/cloudscanner-diagnosis/cloudscanner_diagnosis.go
+++ b/deepfence_server/diagnosis/cloudscanner-diagnosis/cloudscanner_diagnosis.go
@@ -3,7 +3,6 @@ package cloudscanner_diagnosis
import (
"context"
"encoding/json"
- "errors"
"fmt"
"net/url"
"time"
@@ -25,10 +24,6 @@ func getInProgressCloudScannerNodeIds(ctx context.Context, nodeIdentifiers []dia
}
session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
- if err != nil {
- return inProgressNodeIds, err
- }
-
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -77,7 +72,7 @@ func getInProgressCloudScannerNodeIds(ctx context.Context, nodeIdentifiers []dia
}
if len(missingNodes) > 0 {
- return inProgressNodeIds, errors.New(fmt.Sprintf("could not find nodes %v", missingNodes))
+ return inProgressNodeIds, fmt.Errorf("could not find nodes %v", missingNodes)
}
return inProgressNodeIds, nil
@@ -89,11 +84,12 @@ func UpdateCloudScannerDiagnosticLogsStatus(ctx context.Context, status diagnosi
return err
}
session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
+ defer session.Close()
+
+ tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
if err != nil {
return err
}
- defer session.Close()
- tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
defer tx.Close()
_, err = tx.Run(`
@@ -139,11 +135,11 @@ func GenerateCloudScannerDiagnosticLogs(ctx context.Context, nodeIdentifiers []d
return err
}
session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
+ defer session.Close()
+ tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
if err != nil {
return err
}
- defer session.Close()
- tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
defer tx.Close()
fileNameSuffix := "-" + time.Now().Format("2006-01-02-15-04-05") + ".zip"
@@ -189,12 +185,12 @@ func GetQueuedCloudScannerDiagnosticLogs(ctx context.Context, nodeIDs []string)
return ctl.Action{}, err
}
session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
- if err != nil {
- return ctl.Action{}, err
- }
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
+ if err != nil {
+ return ctl.Action{}, err
+ }
defer tx.Close()
res, err := tx.Run(`MATCH (n:CloudScannerDiagnosticLogs)
diff --git a/deepfence_server/diagnosis/common.go b/deepfence_server/diagnosis/common.go
index 6789b5bf46..eb06ddc9b9 100644
--- a/deepfence_server/diagnosis/common.go
+++ b/deepfence_server/diagnosis/common.go
@@ -20,7 +20,6 @@ const (
ConsoleDiagnosisFileServerPrefix = "diagnosis/console-diagnosis/"
AgentDiagnosisFileServerPrefix = "diagnosis/agent-diagnosis/"
CloudScannerDiagnosticLogsPrefix = "diagnosis/cloud-scanner-diagnosis/"
-
)
type DiagnosticNotification struct {
@@ -75,12 +74,16 @@ func GetDiagnosticLogs(ctx context.Context) (*GetDiagnosticLogsResponse, error)
if err != nil {
return nil, err
}
+ diagLogs, err := getCloudScannerDiagnosticLogs(ctx, mc, CloudScannerDiagnosticLogsPrefix)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
diagnosticLogs := GetDiagnosticLogsResponse{
ConsoleLogs: getDiagnosticLogsHelper(ctx, mc, ConsoleDiagnosisFileServerPrefix),
AgentLogs: getAgentDiagnosticLogs(ctx, mc, AgentDiagnosisFileServerPrefix),
- CloudScannerLogs: getCloudScannerDiagnosticLogs(ctx, mc, CloudScannerDiagnosticLogsPrefix),
+ CloudScannerLogs: diagLogs,
}
- return &diagnosticLogs, err
+ return &diagnosticLogs, nil
}
func getDiagnosticLogsHelper(ctx context.Context, mc directory.FileManager, pathPrefix string) []DiagnosticLogsLink {
@@ -129,9 +132,6 @@ func getAgentDiagnosticLogs(ctx context.Context, mc directory.FileManager, pathP
return diagnosticLogs
}
session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
- if err != nil {
- return diagnosticLogs
- }
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
if err != nil {
@@ -199,7 +199,7 @@ func getAgentDiagnosticLogs(ctx context.Context, mc directory.FileManager, pathP
return diagnosticLogs
}
-func getCloudScannerDiagnosticLogs(ctx context.Context, mc directory.FileManager, pathPrefix string) []DiagnosticLogsLink {
+func getCloudScannerDiagnosticLogs(ctx context.Context, mc directory.FileManager, pathPrefix string) ([]DiagnosticLogsLink, error) {
diagnosticLogs := getDiagnosticLogsHelper(ctx, mc, CloudScannerDiagnosticLogsPrefix)
minioAgentLogsKeys := make(map[string]int)
for i, log := range diagnosticLogs {
@@ -209,25 +209,29 @@ func getCloudScannerDiagnosticLogs(ctx context.Context, mc directory.FileManager
// Get in progress ones from neo4j
driver, err := directory.Neo4jClient(ctx)
if err != nil {
- return diagnosticLogs
+ return diagnosticLogs, err
}
session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
- if err != nil {
- return diagnosticLogs
- }
defer session.Close()
+
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
+ if err != nil {
+ return diagnosticLogs, err
+ }
defer tx.Close()
r, err := tx.Run(`
MATCH (n:CloudScannerDiagnosticLogs)-[:SCHEDULEDLOGS]->(m)
RETURN n.node_id, n.minio_file_name, n.message, n.status, n.updated_at, m.node_name`, map[string]interface{}{})
if err != nil {
- return diagnosticLogs
+ return diagnosticLogs, err
}
nodeIdToName := make(map[string]string)
records, err := r.Collect()
+ if err != nil {
+ return diagnosticLogs, err
+ }
for _, rec := range records {
var nodeId, fileName, message, status, updatedAt, nodeName interface{}
var ok bool
@@ -271,5 +275,5 @@ func getCloudScannerDiagnosticLogs(ctx context.Context, mc directory.FileManager
sort.Slice(diagnosticLogs, func(i, j int) bool {
return diagnosticLogs[i].CreatedAt > diagnosticLogs[j].CreatedAt
})
- return diagnosticLogs
+ return diagnosticLogs, nil
}
diff --git a/deepfence_server/diagnosis/console-diagnosis/docker.go b/deepfence_server/diagnosis/console-diagnosis/docker.go
index 71ae008669..c411658771 100644
--- a/deepfence_server/diagnosis/console-diagnosis/docker.go
+++ b/deepfence_server/diagnosis/console-diagnosis/docker.go
@@ -76,6 +76,7 @@ func (d *DockerConsoleDiagnosisHandler) GenerateDiagnosticLogs(ctx context.Conte
_, err = mc.UploadLocalFile(ctx,
filepath.Join(diagnosis.ConsoleDiagnosisFileServerPrefix, filepath.Base(zipFile.Name())),
zipFile.Name(),
+ true,
minio.PutObjectOptions{ContentType: "application/zip"})
if err != nil {
return err
diff --git a/deepfence_server/diagnosis/console-diagnosis/kubernetes.go b/deepfence_server/diagnosis/console-diagnosis/kubernetes.go
index 67f007bf86..da4641f217 100644
--- a/deepfence_server/diagnosis/console-diagnosis/kubernetes.go
+++ b/deepfence_server/diagnosis/console-diagnosis/kubernetes.go
@@ -125,6 +125,7 @@ func (k *KubernetesConsoleDiagnosisHandler) GenerateDiagnosticLogs(ctx context.C
_, err = mc.UploadLocalFile(ctx,
filepath.Join(diagnosis.ConsoleDiagnosisFileServerPrefix, filepath.Base(zipFile.Name())),
zipFile.Name(),
+ true,
minio.PutObjectOptions{ContentType: "application/zip"})
if err != nil {
return err
@@ -185,6 +186,10 @@ func (k *KubernetesConsoleDiagnosisHandler) CopyFromPod(pod *apiv1.Pod, srcPath
}
return filepath.Walk(tmpFolder,
func(file string, fi os.FileInfo, err error) error {
+ if err != nil {
+ log.Error().Msg(err.Error())
+ return nil
+ }
if !fi.IsDir() {
// here number 3 has been used to cut some nested path values in tar writer
// like if path is /tmp/some1/some2/some3 then dir structure in tar will be /some2/some3
diff --git a/deepfence_server/go.mod b/deepfence_server/go.mod
index b7132da7b1..1e5ec3409a 100644
--- a/deepfence_server/go.mod
+++ b/deepfence_server/go.mod
@@ -1,6 +1,6 @@
module github.com/deepfence/ThreatMapper/deepfence_server
-go 1.21.0
+go 1.21
replace github.com/deepfence/ThreatMapper/deepfence_utils => ../deepfence_utils
diff --git a/deepfence_server/handler/agent_report.go b/deepfence_server/handler/agent_report.go
index 3fd8e28263..ccfbe87e1a 100644
--- a/deepfence_server/handler/agent_report.go
+++ b/deepfence_server/handler/agent_report.go
@@ -110,11 +110,10 @@ func (h *Handler) IngestAgentReport(w http.ResponseWriter, r *http.Request) {
func (h *Handler) IngestSyncAgentReport(w http.ResponseWriter, r *http.Request) {
var (
- buf = &bytes.Buffer{}
- reader = io.TeeReader(r.Body, buf)
+ buf = &bytes.Buffer{}
)
- reader = io.TeeReader(r.Body, gzip.NewWriter(buf))
+ reader := io.TeeReader(r.Body, gzip.NewWriter(buf))
ctx := r.Context()
diff --git a/deepfence_server/handler/audit_log.go b/deepfence_server/handler/audit_log.go
index 0ebb3d80f9..6c7197b96c 100644
--- a/deepfence_server/handler/audit_log.go
+++ b/deepfence_server/handler/audit_log.go
@@ -104,7 +104,7 @@ func (h *Handler) AuditUserActivity(
namespace = user.CompanyNamespace
}
- var resourceStr string = ""
+ var resourceStr string
if resources != nil {
rStr, err := json.Marshal(resources)
if err != nil {
@@ -139,10 +139,11 @@ func (h *Handler) AuditUserActivity(
go h.AddAuditLog(namespace, params)
}
-func (h *Handler) AddAuditLog(namespace string, params postgresql_db.CreateAuditLogParams) error {
+func (h *Handler) AddAuditLog(namespace string, params postgresql_db.CreateAuditLogParams) {
data, err := json.Marshal(params)
if err != nil {
- return err
+ log.Error().Msg(err.Error())
+ return
}
h.IngestChan <- &kgo.Record{
@@ -152,8 +153,6 @@ func (h *Handler) AddAuditLog(namespace string, params postgresql_db.CreateAudit
{Key: "namespace", Value: []byte(namespace)},
},
}
-
- return nil
}
func (h *Handler) GetAuditLogsCount(w http.ResponseWriter, r *http.Request) {
@@ -172,9 +171,12 @@ func (h *Handler) GetAuditLogsCount(w http.ResponseWriter, r *http.Request) {
return
}
- httpext.JSON(w, http.StatusOK, reporters_search.SearchCountResp{
+ err = httpext.JSON(w, http.StatusOK, reporters_search.SearchCountResp{
Count: int(count),
})
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
func (h *Handler) GetAuditLogs(w http.ResponseWriter, r *http.Request) {
diff --git a/deepfence_server/handler/auth.go b/deepfence_server/handler/auth.go
index 9f0adedfef..182ca2a840 100644
--- a/deepfence_server/handler/auth.go
+++ b/deepfence_server/handler/auth.go
@@ -9,6 +9,7 @@ import (
"github.com/deepfence/ThreatMapper/deepfence_server/model"
"github.com/deepfence/ThreatMapper/deepfence_utils/directory"
+ "github.com/deepfence/ThreatMapper/deepfence_utils/log"
"github.com/deepfence/ThreatMapper/deepfence_utils/utils"
"github.com/go-chi/jwtauth/v5"
httpext "github.com/go-playground/pkg/v5/net/http"
@@ -65,7 +66,10 @@ func (h *Handler) ApiAuthHandler(w http.ResponseWriter, r *http.Request) {
user.Password = ""
h.AuditUserActivity(r, EVENT_AUTH, ACTION_TOKEN_AUTH, user, true)
- httpext.JSON(w, http.StatusOK, accessTokenResponse)
+ err = httpext.JSON(w, http.StatusOK, accessTokenResponse)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
func (h *Handler) RefreshTokenHandler(w http.ResponseWriter, r *http.Request) {
@@ -83,7 +87,10 @@ func (h *Handler) RefreshTokenHandler(w http.ResponseWriter, r *http.Request) {
h.respondError(err, w)
return
}
- httpext.JSON(w, http.StatusOK, accessTokenResponse)
+ err = httpext.JSON(w, http.StatusOK, accessTokenResponse)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
func (h *Handler) parseRefreshToken(requestContext context.Context) (*model.User, string, error) {
@@ -106,7 +113,7 @@ func (h *Handler) parseRefreshToken(requestContext context.Context) (*model.User
if err != nil {
return nil, "", err
}
- if revoked == true {
+ if revoked {
return nil, "", &accessTokenRevokedError
}
userId, err := utils.GetInt64ValueFromInterfaceMap(claims, "user")
@@ -117,7 +124,7 @@ func (h *Handler) parseRefreshToken(requestContext context.Context) (*model.User
if err != nil {
return nil, "", err
}
- if user.IsActive == false {
+ if !user.IsActive {
return nil, "", &userInactiveError
}
grantType, err := utils.GetStringValueFromInterfaceMap(claims, "grant_type")
@@ -147,7 +154,7 @@ func (h *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) {
h.respondWithErrorCode(err, w, statusCode)
return
}
- if u.IsActive == false {
+ if !u.IsActive {
h.respondError(&userInactiveError, w)
return
}
@@ -165,11 +172,14 @@ func (h *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) {
u.Password = ""
h.AuditUserActivity(r, EVENT_AUTH, ACTION_LOGIN, u, true)
- httpext.JSON(w, http.StatusOK, model.LoginResponse{
+ err = httpext.JSON(w, http.StatusOK, model.LoginResponse{
ResponseAccessToken: *accessTokenResponse,
OnboardingRequired: model.IsOnboardingRequired(ctx),
PasswordInvalidated: u.PasswordInvalidated,
})
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
func (h *Handler) LogoutHandler(w http.ResponseWriter, r *http.Request) {
diff --git a/deepfence_server/handler/cloud_node.go b/deepfence_server/handler/cloud_node.go
index bd8bfb6cb9..258880da04 100644
--- a/deepfence_server/handler/cloud_node.go
+++ b/deepfence_server/handler/cloud_node.go
@@ -4,10 +4,11 @@ import (
"context"
"errors"
"fmt"
- ctl "github.com/deepfence/ThreatMapper/deepfence_utils/controls"
"net/http"
"net/http/httputil"
+ ctl "github.com/deepfence/ThreatMapper/deepfence_utils/controls"
+
cloudscanner_diagnosis "github.com/deepfence/ThreatMapper/deepfence_server/diagnosis/cloudscanner-diagnosis"
"github.com/deepfence/ThreatMapper/deepfence_server/model"
reporters_scan "github.com/deepfence/ThreatMapper/deepfence_server/reporters/scan"
@@ -19,7 +20,7 @@ import (
)
func (h *Handler) RegisterCloudNodeAccountCount(w http.ResponseWriter, r *http.Request) {
- return
+ //TODO: Is this used?
}
func (h *Handler) RegisterCloudNodeAccountHandler(w http.ResponseWriter, r *http.Request) {
@@ -122,6 +123,7 @@ func (h *Handler) RegisterCloudNodeAccountHandler(w http.ResponseWriter, r *http
if err != nil {
logrus.Infof("Error while upserting node: %+v", err)
h.complianceError(w, err.Error())
+ return
}
// get log request for cloudscanner, if any
logRequestAction, err := cloudscanner_diagnosis.GetQueuedCloudScannerDiagnosticLogs(ctx, []string{nodeId})
@@ -131,9 +133,12 @@ func (h *Handler) RegisterCloudNodeAccountHandler(w http.ResponseWriter, r *http
pendingScansList, err := reporters_scan.GetCloudCompliancePendingScansList(ctx, utils.NEO4J_CLOUD_COMPLIANCE_SCAN, nodeId)
if err != nil || len(pendingScansList.ScansInfo) == 0 {
logrus.Debugf("No pending scans found for node id: %s", nodeId)
- httpext.JSON(w, http.StatusOK,
+ err = httpext.JSON(w, http.StatusOK,
model.CloudNodeAccountRegisterResp{Data: model.CloudNodeAccountRegisterRespData{Scans: scanList,
CloudtrailTrails: cloudtrailTrails, Refresh: doRefresh, LogAction: logRequestAction}})
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
return
}
for _, scan := range pendingScansList.ScansInfo {
@@ -157,12 +162,14 @@ func (h *Handler) RegisterCloudNodeAccountHandler(w http.ResponseWriter, r *http
}
logrus.Debugf("Pending scans for node: %+v", scanList)
}
- logrus.Debugf("Returning response: Scan List %+v cloudtrailTrails %+v Refresh %s", scanList, cloudtrailTrails, doRefresh)
+ log.Debug().Msgf("Returning response: Scan List %+v cloudtrailTrails %+v Refresh %s", scanList, cloudtrailTrails, doRefresh)
- httpext.JSON(w, http.StatusOK,
+ err = httpext.JSON(w, http.StatusOK,
model.CloudNodeAccountRegisterResp{Data: model.CloudNodeAccountRegisterRespData{Scans: scanList,
CloudtrailTrails: cloudtrailTrails, Refresh: doRefresh, LogAction: logRequestAction}})
- return
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
func (h *Handler) ListCloudNodeAccountHandler(w http.ResponseWriter, r *http.Request) {
@@ -191,7 +198,10 @@ func (h *Handler) ListCloudNodeAccountHandler(w http.ResponseWriter, r *http.Req
return
}
- httpext.JSON(w, http.StatusOK, infos)
+ err = httpext.JSON(w, http.StatusOK, infos)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) ListCloudNodeProvidersHandler(w http.ResponseWriter, r *http.Request) {
@@ -203,18 +213,16 @@ func (h *Handler) ListCloudNodeProvidersHandler(w http.ResponseWriter, r *http.R
return
}
- httpext.JSON(w, http.StatusOK, model.CloudNodeProvidersListResp{Providers: providers})
-}
-
-func (h *Handler) complianceError(w http.ResponseWriter, errorString string) {
- err := h.respondError(errors.New(errorString), w)
+ err = httpext.JSON(w, http.StatusOK, model.CloudNodeProvidersListResp{Providers: providers})
if err != nil {
log.Error().Msgf("%v", err)
- w.WriteHeader(http.StatusInternalServerError)
- w.Write([]byte(errorString))
}
}
+func (h *Handler) complianceError(w http.ResponseWriter, errorString string) {
+ h.respondError(errors.New(errorString), w)
+}
+
func (h *Handler) extractCloudNodeDetails(w http.ResponseWriter, r *http.Request) (model.CloudNodeAccountRegisterReq, error) {
defer r.Body.Close()
var req model.CloudNodeAccountRegisterReq
@@ -245,5 +253,5 @@ func (h *Handler) CachePostureProviders(ctx context.Context) error {
if err != nil {
return err
}
- return worker.Enqueue(utils.CachePostureProviders, []byte{})
+ return worker.Enqueue(utils.CachePostureProviders, []byte{}, utils.CritialTaskOpts()...)
}
diff --git a/deepfence_server/handler/cloud_node_controls.go b/deepfence_server/handler/cloud_node_controls.go
index 1b77b689cc..7981c23d5b 100644
--- a/deepfence_server/handler/cloud_node_controls.go
+++ b/deepfence_server/handler/cloud_node_controls.go
@@ -9,6 +9,7 @@ import (
api_messages "github.com/deepfence/ThreatMapper/deepfence_server/constants/api-messages"
"github.com/deepfence/ThreatMapper/deepfence_server/controls"
+ "github.com/deepfence/ThreatMapper/deepfence_utils/log"
"github.com/deepfence/ThreatMapper/deepfence_server/model"
)
@@ -36,9 +37,12 @@ func (h *Handler) GetCloudNodeControls(w http.ResponseWriter, r *http.Request) {
return
}
- httpext.JSON(w, http.StatusOK, model.CloudNodeControlResp{
+ err = httpext.JSON(w, http.StatusOK, model.CloudNodeControlResp{
Controls: controls,
})
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
func (h *Handler) EnableCloudNodeControls(w http.ResponseWriter, r *http.Request) {
@@ -64,7 +68,10 @@ func (h *Handler) EnableCloudNodeControls(w http.ResponseWriter, r *http.Request
return
}
- httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: api_messages.SuccessCloudControlsEnabled})
+ err = httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: api_messages.SuccessCloudControlsEnabled})
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
func (h *Handler) DisableCloudNodeControls(w http.ResponseWriter, r *http.Request) {
@@ -90,5 +97,9 @@ func (h *Handler) DisableCloudNodeControls(w http.ResponseWriter, r *http.Reques
return
}
- httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: api_messages.SuccessCloudControlsDisabled})
+ err = httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: api_messages.SuccessCloudControlsDisabled})
+
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
diff --git a/deepfence_server/handler/common.go b/deepfence_server/handler/common.go
index 17b887f51c..616d546df5 100644
--- a/deepfence_server/handler/common.go
+++ b/deepfence_server/handler/common.go
@@ -25,11 +25,14 @@ const (
func (h *Handler) Ping(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
- w.Write([]byte("pong"))
+ _, _ = w.Write([]byte("pong"))
}
func (h *Handler) EULAHandler(w http.ResponseWriter, r *http.Request) {
- httpext.JSON(w, http.StatusOK, model.EULAResponse)
+ err := httpext.JSON(w, http.StatusOK, model.EULAResponse)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
func (h *Handler) OpenApiDocsHandler(w http.ResponseWriter, r *http.Request) {
@@ -38,7 +41,10 @@ func (h *Handler) OpenApiDocsHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
return
}
- httpext.JSONBytes(w, http.StatusOK, apiDocs)
+ err = httpext.JSONBytes(w, http.StatusOK, apiDocs)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
func respondWith(ctx context.Context, w http.ResponseWriter, code int, response interface{}) {
@@ -122,7 +128,7 @@ func isTransientError(err error) bool {
return false
}
-func (h *Handler) respondWithErrorCode(err error, w http.ResponseWriter, code int) error {
+func (h *Handler) respondWithErrorCode(err error, w http.ResponseWriter, code int) {
var errorFields map[string]string
var errMsg string
if code == http.StatusBadRequest {
@@ -131,13 +137,17 @@ func (h *Handler) respondWithErrorCode(err error, w http.ResponseWriter, code in
errMsg = err.Error()
}
if len(errorFields) > 0 {
- return httpext.JSON(w, code, model.ErrorResponse{Message: "", ErrorFields: errorFields})
+ err = httpext.JSON(w, code, model.ErrorResponse{Message: "", ErrorFields: errorFields})
} else {
- return httpext.JSON(w, code, model.ErrorResponse{Message: errMsg, ErrorFields: errorFields})
+ err = httpext.JSON(w, code, model.ErrorResponse{Message: errMsg, ErrorFields: errorFields})
+ }
+ if err != nil {
+ log.Error().Msg(err.Error())
+ w.WriteHeader(http.StatusInternalServerError)
}
}
-func (h *Handler) respondError(err error, w http.ResponseWriter) error {
+func (h *Handler) respondError(err error, w http.ResponseWriter) {
code := http.StatusInternalServerError
var errorFields map[string]string
errMsg := err.Error()
@@ -172,8 +182,12 @@ func (h *Handler) respondError(err error, w http.ResponseWriter) error {
}
if len(errorFields) > 0 {
- return httpext.JSON(w, code, model.ErrorResponse{Message: "", ErrorFields: errorFields, ErrorIndex: errorIndex})
+ err = httpext.JSON(w, code, model.ErrorResponse{Message: "", ErrorFields: errorFields, ErrorIndex: errorIndex})
} else {
- return httpext.JSON(w, code, model.ErrorResponse{Message: errMsg, ErrorFields: errorFields, ErrorIndex: errorIndex})
+ err = httpext.JSON(w, code, model.ErrorResponse{Message: errMsg, ErrorFields: errorFields, ErrorIndex: errorIndex})
+ }
+ if err != nil {
+ log.Error().Msg(err.Error())
+ w.WriteHeader(http.StatusInternalServerError)
}
}
diff --git a/deepfence_server/handler/container_registry.go b/deepfence_server/handler/container_registry.go
index be379076d9..0138ed31e1 100644
--- a/deepfence_server/handler/container_registry.go
+++ b/deepfence_server/handler/container_registry.go
@@ -65,8 +65,10 @@ func (h *Handler) ListRegistry(w http.ResponseWriter, r *http.Request) {
registriesResponse = append(registriesResponse, registryResponse)
}
- httpext.JSON(w, http.StatusOK, registriesResponse)
-
+ err = httpext.JSON(w, http.StatusOK, registriesResponse)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) AddRegistry(w http.ResponseWriter, r *http.Request) {
@@ -104,7 +106,10 @@ func (h *Handler) AddRegistry(w http.ResponseWriter, r *http.Request) {
// validate if registry credential is correct
if !registry.IsValidCredential() {
- httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrRegistryAuthFailed})
+ err = httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrRegistryAuthFailed})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
return
}
@@ -123,7 +128,11 @@ func (h *Handler) AddRegistry(w http.ResponseWriter, r *http.Request) {
return
}
if registryExists {
- httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrRegistryExists})
+ err = httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrRegistryExists})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
+
return
}
@@ -171,7 +180,10 @@ func (h *Handler) AddRegistry(w http.ResponseWriter, r *http.Request) {
req.Secret = map[string]interface{}{}
h.AuditUserActivity(r, EVENT_REGISTRY, ACTION_CREATE, req, true)
- httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: api_messages.SuccessRegistryCreated})
+ err = httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: api_messages.SuccessRegistryCreated})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
// update registry
@@ -187,7 +199,10 @@ func (h *Handler) UpdateRegistry(w http.ResponseWriter, r *http.Request) {
idStr := chi.URLParam(r, "registry_id")
if idStr == "" {
- httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrRegistryIdMissing})
+ err = httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrRegistryIdMissing})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
return
}
@@ -214,7 +229,10 @@ func (h *Handler) UpdateRegistry(w http.ResponseWriter, r *http.Request) {
return
}
if !registryExists {
- httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrRegistryNotExists})
+ err = httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrRegistryNotExists})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
return
}
@@ -267,7 +285,10 @@ func (h *Handler) UpdateRegistry(w http.ResponseWriter, r *http.Request) {
// validate if registry credential is correct
if !registry.IsValidCredential() {
- httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrRegistryAuthFailed})
+ err = httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrRegistryAuthFailed})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
return
}
@@ -294,7 +315,10 @@ func (h *Handler) UpdateRegistry(w http.ResponseWriter, r *http.Request) {
req.Secret = map[string]interface{}{}
h.AuditUserActivity(r, EVENT_REGISTRY, ACTION_UPDATE, req, true)
- httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: api_messages.SuccessRegistryUpdated})
+ err = httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: api_messages.SuccessRegistryUpdated})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) AddGoogleContainerRegistry(w http.ResponseWriter, r *http.Request) {
@@ -313,7 +337,10 @@ func (h *Handler) AddGoogleContainerRegistry(w http.ResponseWriter, r *http.Requ
defer file.Close()
if (fileHeader.Header.Get("Content-Type")) != "application/json" {
- httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: "uploaded file is not json"})
+ err = httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: "uploaded file is not json"})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
return
}
@@ -325,13 +352,19 @@ func (h *Handler) AddGoogleContainerRegistry(w http.ResponseWriter, r *http.Requ
registryName := r.FormValue("name")
if registryName == "" {
- httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: "registry name cannot be empty"})
+ err = httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: "registry name cannot be empty"})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
return
}
registryURL := r.FormValue("registry_url")
if registryName == "" {
- httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: "registry url cannot be empty"})
+ err = httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: "registry url cannot be empty"})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
return
}
@@ -374,7 +407,10 @@ func (h *Handler) AddGoogleContainerRegistry(w http.ResponseWriter, r *http.Requ
// validate if registry credential is correct
if !registry.IsValidCredential() {
- httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrRegistryAuthFailed})
+ err = httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrRegistryAuthFailed})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
return
}
@@ -393,7 +429,10 @@ func (h *Handler) AddGoogleContainerRegistry(w http.ResponseWriter, r *http.Requ
return
}
if registryExists {
- httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrRegistryExists})
+ err = httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrRegistryExists})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
return
}
@@ -448,7 +487,10 @@ func (h *Handler) AddGoogleContainerRegistry(w http.ResponseWriter, r *http.Requ
h.AuditUserActivity(r, EVENT_REGISTRY, ACTION_CREATE, req, true)
- httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: api_messages.SuccessRegistryCreated})
+ err = httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: api_messages.SuccessRegistryCreated})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) DeleteRegistry(w http.ResponseWriter, r *http.Request) {
@@ -519,11 +561,15 @@ func (h *Handler) RefreshRegistry(w http.ResponseWriter, r *http.Request) {
}
}
if len(syncErrs) > 0 {
- httpext.JSON(w, http.StatusInternalServerError,
+ err = httpext.JSON(w, http.StatusInternalServerError,
model.MessageResponse{Message: strings.Join(syncErrs, ",")})
+ } else {
+ err = httpext.JSON(w, http.StatusOK,
+ model.MessageResponse{Message: "started sync registry"})
+ }
+ if err != nil {
+ log.Error().Msgf("%v", err)
}
- httpext.JSON(w, http.StatusOK,
- model.MessageResponse{Message: "started sync registry"})
}
func (h *Handler) getImages(w http.ResponseWriter, r *http.Request) ([]model.ContainerImage, error) {
@@ -556,7 +602,10 @@ func (h *Handler) ListImages(w http.ResponseWriter, r *http.Request) {
images, err := h.getImages(w, r)
if err == nil {
- httpext.JSON(w, http.StatusOK, images)
+ err = httpext.JSON(w, http.StatusOK, images)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
}
@@ -564,9 +613,12 @@ func (h *Handler) CountImages(w http.ResponseWriter, r *http.Request) {
images, err := h.getImages(w, r)
if err == nil {
- httpext.JSON(w, http.StatusOK, model.RegistryCountResp{
+ err = httpext.JSON(w, http.StatusOK, model.RegistryCountResp{
Count: len(images),
})
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
}
@@ -597,27 +649,27 @@ func (h *Handler) getImageStubs(w http.ResponseWriter, r *http.Request) ([]model
func (h *Handler) ListImageStubs(w http.ResponseWriter, r *http.Request) {
images, err := h.getImageStubs(w, r)
if err == nil {
- httpext.JSON(w, http.StatusOK, images)
+ err = httpext.JSON(w, http.StatusOK, images)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
}
func (h *Handler) CountImageStubs(w http.ResponseWriter, r *http.Request) {
images, err := h.getImageStubs(w, r)
if err == nil {
- httpext.JSON(w, http.StatusOK, model.RegistryCountResp{
+ err = httpext.JSON(w, http.StatusOK, model.RegistryCountResp{
Count: len(images),
})
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
}
-func getIntPointer(val int32) *int32 {
- return &val
-}
-
func (h *Handler) RegistrySummary(w http.ResponseWriter, r *http.Request) {
- counts := model.Summary{}
-
req := model.RegistryIDPathReq{
RegistryId: chi.URLParam(r, "registry_id"),
}
@@ -628,7 +680,7 @@ func (h *Handler) RegistrySummary(w http.ResponseWriter, r *http.Request) {
}
// count registry resource
- counts, err = model.RegistrySummary(r.Context(), mo.Some(req.RegistryId), mo.None[string]())
+ counts, err := model.RegistrySummary(r.Context(), mo.Some(req.RegistryId), mo.None[string]())
if err != nil {
log.Error().Msgf("failed registry summary: %v", err)
h.respondError(err, w)
@@ -637,13 +689,14 @@ func (h *Handler) RegistrySummary(w http.ResponseWriter, r *http.Request) {
log.Info().Msgf("registry %s summary %+v", req.RegistryId, counts)
- httpext.JSON(w, http.StatusOK, counts)
+ err = httpext.JSON(w, http.StatusOK, counts)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) SummaryByRegistryType(w http.ResponseWriter, r *http.Request) {
- counts := model.Summary{}
-
req := model.RegistryTypeReq{
RegistryType: chi.URLParam(r, "registry_type"),
}
@@ -654,7 +707,7 @@ func (h *Handler) SummaryByRegistryType(w http.ResponseWriter, r *http.Request)
}
// count registry resource
- counts, err = model.RegistrySummary(r.Context(), mo.None[string](), mo.Some(req.RegistryType))
+ counts, err := model.RegistrySummary(r.Context(), mo.None[string](), mo.Some(req.RegistryType))
if err != nil {
log.Error().Msgf("failed registry summary: %v", err)
h.respondError(err, w)
@@ -663,13 +716,14 @@ func (h *Handler) SummaryByRegistryType(w http.ResponseWriter, r *http.Request)
log.Info().Msgf("registries %s summary %+v", req.RegistryType, counts)
- httpext.JSON(w, http.StatusOK, counts)
+ err = httpext.JSON(w, http.StatusOK, counts)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) Summary(w http.ResponseWriter, r *http.Request) {
- counts := model.RegistrySummaryAllResp{}
-
// count registry resource
counts, err := model.RegistrySummaryAll(r.Context())
if err != nil {
@@ -680,7 +734,10 @@ func (h *Handler) Summary(w http.ResponseWriter, r *http.Request) {
log.Info().Msgf("all registries summary %+v", counts)
- httpext.JSON(w, http.StatusOK, counts)
+ err = httpext.JSON(w, http.StatusOK, counts)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) SyncRegistry(rCtx context.Context, pgID int32) error {
@@ -699,7 +756,7 @@ func (h *Handler) SyncRegistry(rCtx context.Context, pgID int32) error {
return err
}
- err = worker.Enqueue(utils.SyncRegistryTask, payload)
+ err = worker.Enqueue(utils.SyncRegistryTask, payload, utils.CritialTaskOpts()...)
if err != nil {
log.Error().Msgf("cannot publish message: %v", err)
return err
diff --git a/deepfence_server/handler/diagnosis.go b/deepfence_server/handler/diagnosis.go
index 666f4ea3e7..dde2e7f151 100644
--- a/deepfence_server/handler/diagnosis.go
+++ b/deepfence_server/handler/diagnosis.go
@@ -7,6 +7,7 @@ import (
"github.com/deepfence/ThreatMapper/deepfence_server/diagnosis"
agentdiagnosis "github.com/deepfence/ThreatMapper/deepfence_server/diagnosis/agent-diagnosis"
cloudscannerdiagnosis "github.com/deepfence/ThreatMapper/deepfence_server/diagnosis/cloudscanner-diagnosis"
+ "github.com/deepfence/ThreatMapper/deepfence_utils/log"
"github.com/go-chi/chi/v5"
httpext "github.com/go-playground/pkg/v5/net/http"
)
@@ -137,5 +138,8 @@ func (h *Handler) GetDiagnosticLogs(w http.ResponseWriter, r *http.Request) {
h.respondError(&BadDecoding{err}, w)
return
}
- httpext.JSON(w, http.StatusOK, resp)
+ err = httpext.JSON(w, http.StatusOK, resp)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
diff --git a/deepfence_server/handler/export_reports.go b/deepfence_server/handler/export_reports.go
index b3fe0c37cf..be494a7abe 100644
--- a/deepfence_server/handler/export_reports.go
+++ b/deepfence_server/handler/export_reports.go
@@ -172,7 +172,10 @@ func (h *Handler) GetReport(w http.ResponseWriter, r *http.Request) {
var report model.ExportReport
utils.FromMap(da.Props, &report)
- httpext.JSON(w, http.StatusOK, report)
+ err = httpext.JSON(w, http.StatusOK, report)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
func (h *Handler) ListReports(w http.ResponseWriter, r *http.Request) {
@@ -234,7 +237,10 @@ func (h *Handler) ListReports(w http.ResponseWriter, r *http.Request) {
return reports[i].CreatedAt > reports[j].CreatedAt
})
- httpext.JSON(w, http.StatusOK, reports)
+ err = httpext.JSON(w, http.StatusOK, reports)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
func (h *Handler) GenerateReport(w http.ResponseWriter, r *http.Request) {
@@ -317,7 +323,7 @@ func (h *Handler) GenerateReport(w http.ResponseWriter, r *http.Request) {
return
}
- err = worker.Enqueue(utils.ReportGeneratorTask, payload)
+ err = worker.Enqueue(utils.ReportGeneratorTask, payload, utils.DefaultTaskOpts()...)
if err != nil {
log.Error().Msgf("failed to publish task: %+v", err)
h.respondError(err, w)
@@ -326,5 +332,8 @@ func (h *Handler) GenerateReport(w http.ResponseWriter, r *http.Request) {
h.AuditUserActivity(r, EVENT_REPORTS, ACTION_CREATE, req, true)
- httpext.JSON(w, http.StatusOK, model.GenerateReportResp{ReportID: report_id})
+ err = httpext.JSON(w, http.StatusOK, model.GenerateReportResp{ReportID: report_id})
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
diff --git a/deepfence_server/handler/integration.go b/deepfence_server/handler/integration.go
index 4f60f0924d..23ee3f7eff 100644
--- a/deepfence_server/handler/integration.go
+++ b/deepfence_server/handler/integration.go
@@ -61,7 +61,10 @@ func (h *Handler) AddIntegration(w http.ResponseWriter, r *http.Request) {
return
}
if integrationExists {
- httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrIntegrationExists})
+ err = httpext.JSON(w, http.StatusBadRequest, model.ErrorResponse{Message: api_messages.ErrIntegrationExists})
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
return
}
@@ -89,21 +92,20 @@ func (h *Handler) AddIntegration(w http.ResponseWriter, r *http.Request) {
h.AuditUserActivity(r, EVENT_INTEGRATION, ACTION_CREATE, req, true)
- httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: api_messages.SuccessIntegrationCreated})
-
+ err = httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: api_messages.SuccessIntegrationCreated})
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
func (h *Handler) GetIntegrations(w http.ResponseWriter, r *http.Request) {
- defer r.Body.Close()
- var req model.IntegrationListReq
- httpext.DecodeJSON(r, httpext.NoQueryParams, MaxPostRequestSize, &req)
-
ctx := r.Context()
pgClient, err := directory.PostgresClient(ctx)
if err != nil {
h.respondError(&InternalServerError{err}, w)
return
}
+ req := model.IntegrationListReq{}
integrations, err := req.GetIntegrations(ctx, pgClient)
if err != nil {
log.Error().Msgf(err.Error())
@@ -147,7 +149,10 @@ func (h *Handler) GetIntegrations(w http.ResponseWriter, r *http.Request) {
integrationList = append(integrationList, newIntegration)
}
- httpext.JSON(w, http.StatusOK, integrationList)
+ err = httpext.JSON(w, http.StatusOK, integrationList)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
func (h *Handler) DeleteIntegration(w http.ResponseWriter, r *http.Request) {
@@ -155,6 +160,11 @@ func (h *Handler) DeleteIntegration(w http.ResponseWriter, r *http.Request) {
// id to int32
idInt, err := strconv.ParseInt(id, 10, 32)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ h.respondError(&BadDecoding{err}, w)
+ return
+ }
ctx := r.Context()
pgClient, err := directory.PostgresClient(ctx)
diff --git a/deepfence_server/handler/kubernetes-cluster.go b/deepfence_server/handler/kubernetes-cluster.go
index 9d0c55ebe9..3214708145 100644
--- a/deepfence_server/handler/kubernetes-cluster.go
+++ b/deepfence_server/handler/kubernetes-cluster.go
@@ -36,8 +36,11 @@ func (h *Handler) GetKubernetesClusterControls(w http.ResponseWriter, r *http.Re
}
}
- httpext.JSON(w, http.StatusOK, ctl.AgentControls{
+ err = httpext.JSON(w, http.StatusOK, ctl.AgentControls{
BeatRateSec: 30,
Commands: actions,
})
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
diff --git a/deepfence_server/handler/lookup_reports.go b/deepfence_server/handler/lookup_reports.go
index f8c02c8f7a..cbbc4af34f 100644
--- a/deepfence_server/handler/lookup_reports.go
+++ b/deepfence_server/handler/lookup_reports.go
@@ -16,6 +16,11 @@ func getGeneric[T any](h *Handler, w http.ResponseWriter, r *http.Request, gette
defer r.Body.Close()
var req reporters_lookup.LookupFilter
err := httpext.DecodeJSON(r, httpext.NoQueryParams, MaxPostRequestSize, &req)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ http.Error(w, "Error processing request body", http.StatusBadRequest)
+ return
+ }
hosts, err := getter(r.Context(), req)
if err != nil {
@@ -34,6 +39,11 @@ func (h *Handler) GetPods(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
var req reporters_lookup.LookupFilter
err := httpext.DecodeJSON(r, httpext.NoQueryParams, MaxPostRequestSize, &req)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ http.Error(w, "Error processing request body", http.StatusBadRequest)
+ return
+ }
pods, err := reporters_lookup.GetPodsReport(r.Context(), req)
if err != nil {
@@ -47,7 +57,7 @@ func (h *Handler) GetPods(w http.ResponseWriter, r *http.Request) {
Filters: reporters.FieldsFilters{
ContainsFilter: reporters.ContainsFilter{
FieldsValues: map[string][]interface{}{
- "pod_name": []interface{}{pods[i].PodName},
+ "pod_name": {pods[i].PodName},
},
},
},
diff --git a/deepfence_server/handler/scan_reports.go b/deepfence_server/handler/scan_reports.go
index ac3751181d..44e4ec77ba 100644
--- a/deepfence_server/handler/scan_reports.go
+++ b/deepfence_server/handler/scan_reports.go
@@ -58,7 +58,7 @@ func cloudComplianceScanId(nodeId string) string {
func bulkScanId() string {
random_id := uuid.New()
- return fmt.Sprintf("%s", random_id.String())
+ return random_id.String()
}
func GetImageFromId(ctx context.Context, node_id string) (string, string, error) {
@@ -274,7 +274,10 @@ func (h *Handler) DiffAddVulnerabilityScan(w http.ResponseWriter, r *http.Reques
h.respondError(err, w)
return
}
- httpext.JSON(w, http.StatusOK, model.ScanCompareRes[model.Vulnerability]{New: new})
+ err = httpext.JSON(w, http.StatusOK, model.ScanCompareRes[model.Vulnerability]{New: new})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) DiffAddSecretScan(w http.ResponseWriter, r *http.Request) {
@@ -291,7 +294,10 @@ func (h *Handler) DiffAddSecretScan(w http.ResponseWriter, r *http.Request) {
h.respondError(err, w)
return
}
- httpext.JSON(w, http.StatusOK, model.ScanCompareRes[model.Secret]{New: new})
+ err = httpext.JSON(w, http.StatusOK, model.ScanCompareRes[model.Secret]{New: new})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) DiffAddComplianceScan(w http.ResponseWriter, r *http.Request) {
@@ -308,7 +314,10 @@ func (h *Handler) DiffAddComplianceScan(w http.ResponseWriter, r *http.Request)
h.respondError(err, w)
return
}
- httpext.JSON(w, http.StatusOK, model.ScanCompareRes[model.Compliance]{New: new})
+ err = httpext.JSON(w, http.StatusOK, model.ScanCompareRes[model.Compliance]{New: new})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) DiffAddMalwareScan(w http.ResponseWriter, r *http.Request) {
@@ -325,7 +334,10 @@ func (h *Handler) DiffAddMalwareScan(w http.ResponseWriter, r *http.Request) {
h.respondError(err, w)
return
}
- httpext.JSON(w, http.StatusOK, model.ScanCompareRes[model.Malware]{New: new})
+ err = httpext.JSON(w, http.StatusOK, model.ScanCompareRes[model.Malware]{New: new})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) DiffAddCloudComplianceScan(w http.ResponseWriter, r *http.Request) {
@@ -342,7 +354,10 @@ func (h *Handler) DiffAddCloudComplianceScan(w http.ResponseWriter, r *http.Requ
h.respondError(err, w)
return
}
- httpext.JSON(w, http.StatusOK, model.ScanCompareRes[model.CloudCompliance]{New: new})
+ err = httpext.JSON(w, http.StatusOK, model.ScanCompareRes[model.CloudCompliance]{New: new})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) StartSecretScanHandler(w http.ResponseWriter, r *http.Request) {
@@ -430,7 +445,7 @@ func (h *Handler) StartComplianceScanHandler(w http.ResponseWriter, r *http.Requ
if scanTrigger.NodeType == controls.ResourceTypeToString(controls.CloudAccount) ||
scanTrigger.NodeType == controls.ResourceTypeToString(controls.KubernetesCluster) ||
scanTrigger.NodeType == controls.ResourceTypeToString(controls.Host) {
- scanIds, bulkId, err = StartMultiCloudComplianceScan(ctx, nodes, reqs.BenchmarkTypes)
+ scanIds, bulkId, err = StartMultiCloudComplianceScan(ctx, nodes, reqs.BenchmarkTypes, reqs.IsPriority)
scanStatusType = utils.CLOUD_COMPLIANCE_SCAN_STATUS
} else {
scanIds, bulkId, err = startMultiComplianceScan(ctx, nodes, reqs.BenchmarkTypes)
@@ -502,11 +517,12 @@ func NewScanStatus(scanId, status, message string) map[string]interface{} {
}
func (h *Handler) SendScanStatus(
- ctx context.Context, scanStatusType string, status map[string]interface{}) error {
+ ctx context.Context, scanStatusType string, status map[string]interface{}) {
tenantID, err := directory.ExtractNamespace(ctx)
if err != nil {
- return err
+ log.Error().Msg(err.Error())
+ return
}
rh := []kgo.RecordHeader{
@@ -516,15 +532,13 @@ func (h *Handler) SendScanStatus(
cb, err := json.Marshal(status)
if err != nil {
log.Error().Msg(err.Error())
- } else {
- h.IngestChan <- &kgo.Record{
- Topic: scanStatusType,
- Value: cb,
- Headers: rh,
- }
+ return
+ }
+ h.IngestChan <- &kgo.Record{
+ Topic: scanStatusType,
+ Value: cb,
+ Headers: rh,
}
-
- return nil
}
func (h *Handler) StopVulnerabilityScanHandler(w http.ResponseWriter, r *http.Request) {
@@ -604,8 +618,11 @@ func (h *Handler) IngestSbomHandler(w http.ResponseWriter, r *http.Request) {
if params.ScanId == "" {
log.Error().Msgf("error scan id is empty, params: %+v", params)
- httpext.JSON(w, http.StatusBadRequest,
+ err = httpext.JSON(w, http.StatusBadRequest,
model.ErrorResponse{Message: "scan_id is required to process sbom"})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
return
}
@@ -617,46 +634,22 @@ func (h *Handler) IngestSbomHandler(w http.ResponseWriter, r *http.Request) {
}
sbomFile := path.Join("sbom", utils.ScanIdReplacer.Replace(params.ScanId)+".json.gz")
- info, err := mc.UploadFile(r.Context(), sbomFile, b64,
+ info, err := mc.UploadFile(r.Context(), sbomFile, b64, true,
minio.PutObjectOptions{ContentType: "application/gzip"})
-
if err != nil {
- logError := true
- if strings.Contains(err.Error(), "Already exists here") {
- /*If the file already exists, we will delete the old file and upload the new one
- File can exists in 2 conditions:
- - When the earlier scan was stuck during the scan phase
- - When the service was restarted
- - Bug/Race conditon in the worker service
- */
- log.Warn().Msg(err.Error() + ", Will try to overwrite the file: " + sbomFile)
- err = mc.DeleteFile(r.Context(), sbomFile, true, minio.RemoveObjectOptions{ForceDelete: true})
- if err == nil {
- info, err = mc.UploadFile(r.Context(), sbomFile, b64,
- minio.PutObjectOptions{ContentType: "application/gzip"})
-
- if err == nil {
- log.Info().Msgf("Successfully overwritten the file: %s", sbomFile)
- logError = false
- } else {
- log.Error().Msgf("Failed to upload the file, error is: %v", err)
- }
- } else {
- log.Error().Msgf("Failed to delete the old file, error is: %v", err)
- }
- }
-
- if logError == true {
- log.Error().Msg(err.Error())
- h.respondError(err, w)
- return
- }
+ log.Error().Err(err).Msg("failed to upload sbom")
+ h.respondError(err, w)
+ return
}
// check if sbom has to be scanned
if params.SkipScan {
log.Info().Msgf("skip sbom scan for id %s", params.ScanId)
- httpext.JSON(w, http.StatusOK, info)
+ err = httpext.JSON(w, http.StatusOK, info)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
+ return
}
params.SBOMFilePath = sbomFile
@@ -675,15 +668,18 @@ func (h *Handler) IngestSbomHandler(w http.ResponseWriter, r *http.Request) {
return
}
- err = worker.Enqueue(utils.ScanSBOMTask, payload)
+ err = worker.Enqueue(utils.ScanSBOMTask, payload, utils.DefaultTaskOpts()...)
if err != nil {
- log.Error().Msgf("cannot publish message:", err)
+ log.Error().Msgf("cannot publish message: %v", err)
h.respondError(err, w)
return
}
log.Info().Msgf("scan_id: %s, minio file info: %+v", params.ScanId, info)
- httpext.JSON(w, http.StatusOK, info)
+ err = httpext.JSON(w, http.StatusOK, info)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) IngestVulnerabilityReportHandler(w http.ResponseWriter, r *http.Request) {
@@ -777,7 +773,10 @@ func ingest_scan_report_kafka[T any](
// respWrite.WriteHeader(http.StatusOK)
// fmt.Fprint(respWrite, "Ok")
- httpext.JSON(respWrite, http.StatusOK, map[string]string{"status": "ok"})
+ err = httpext.JSON(respWrite, http.StatusOK, map[string]string{"status": "ok"})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) stopScan(w http.ResponseWriter, r *http.Request, tag string) {
@@ -868,7 +867,10 @@ func (h *Handler) statusScanHandler(w http.ResponseWriter, r *http.Request, scan
return
}
- httpext.JSON(w, http.StatusOK, statuses)
+ err = httpext.JSON(w, http.StatusOK, statuses)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) complianceStatusScanHandler(w http.ResponseWriter, r *http.Request, scan_type utils.Neo4jScanType) {
@@ -894,7 +896,10 @@ func (h *Handler) complianceStatusScanHandler(w http.ResponseWriter, r *http.Req
return
}
- httpext.JSON(w, http.StatusOK, statuses)
+ err = httpext.JSON(w, http.StatusOK, statuses)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) ListVulnerabilityScansHandler(w http.ResponseWriter, r *http.Request) {
@@ -946,7 +951,10 @@ func (h *Handler) listScansHandler(w http.ResponseWriter, r *http.Request, scan_
}
}
- httpext.JSON(w, http.StatusOK, infos)
+ err = httpext.JSON(w, http.StatusOK, infos)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) GetScanReportFields(w http.ResponseWriter, r *http.Request) {
@@ -1001,7 +1009,10 @@ func (h *Handler) GetScanReportFields(w http.ResponseWriter, r *http.Request) {
Malware: malwareFields,
}
- httpext.JSON(w, http.StatusOK, response)
+ err := httpext.JSON(w, http.StatusOK, response)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) ListVulnerabilityScanResultsHandler(w http.ResponseWriter, r *http.Request) {
@@ -1015,8 +1026,11 @@ func (h *Handler) ListVulnerabilityScanResultsHandler(w http.ResponseWriter, r *
log.Error().Err(err).Msg("Counts computation issue")
}
- httpext.JSON(w, http.StatusOK, model.VulnerabilityScanResult{
+ err = httpext.JSON(w, http.StatusOK, model.VulnerabilityScanResult{
Vulnerabilities: entries, ScanResultsCommon: common, SeverityCounts: counts})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) ListSecretScanResultsHandler(w http.ResponseWriter, r *http.Request) {
@@ -1031,8 +1045,11 @@ func (h *Handler) ListSecretScanResultsHandler(w http.ResponseWriter, r *http.Re
log.Error().Err(err).Msg("Counts computation issue")
}
- httpext.JSON(w, http.StatusOK, model.SecretScanResult{
+ err = httpext.JSON(w, http.StatusOK, model.SecretScanResult{
Secrets: entries, ScanResultsCommon: common, SeverityCounts: counts})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) ListSecretScanResultRulesHandler(w http.ResponseWriter, r *http.Request) {
@@ -1047,7 +1064,10 @@ func (h *Handler) ListSecretScanResultRulesHandler(w http.ResponseWriter, r *htt
rules = append(rules, e.Name)
}
- httpext.JSON(w, http.StatusOK, model.SecretScanResultRules{Rules: lo.Uniq(rules)})
+ err = httpext.JSON(w, http.StatusOK, model.SecretScanResultRules{Rules: lo.Uniq(rules)})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) ListComplianceScanResultsHandler(w http.ResponseWriter, r *http.Request) {
@@ -1061,8 +1081,11 @@ func (h *Handler) ListComplianceScanResultsHandler(w http.ResponseWriter, r *htt
log.Error().Err(err).Msg("Counts computation issue")
}
- httpext.JSON(w, http.StatusOK, model.ComplianceScanResult{Compliances: entries, ScanResultsCommon: common,
+ err = httpext.JSON(w, http.StatusOK, model.ComplianceScanResult{Compliances: entries, ScanResultsCommon: common,
ComplianceAdditionalInfo: additionalInfo})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) ListMalwareScanResultsHandler(w http.ResponseWriter, r *http.Request) {
@@ -1077,7 +1100,10 @@ func (h *Handler) ListMalwareScanResultsHandler(w http.ResponseWriter, r *http.R
log.Error().Err(err).Msg("Counts computation issue")
}
- httpext.JSON(w, http.StatusOK, model.MalwareScanResult{Malwares: entries, ScanResultsCommon: common, SeverityCounts: counts})
+ err = httpext.JSON(w, http.StatusOK, model.MalwareScanResult{Malwares: entries, ScanResultsCommon: common, SeverityCounts: counts})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) ListMalwareScanResultRulesHandler(w http.ResponseWriter, r *http.Request) {
@@ -1092,7 +1118,10 @@ func (h *Handler) ListMalwareScanResultRulesHandler(w http.ResponseWriter, r *ht
rules = append(rules, e.RuleName)
}
- httpext.JSON(w, http.StatusOK, model.MalwareScanResultRules{Rules: lo.Uniq(rules)})
+ err = httpext.JSON(w, http.StatusOK, model.MalwareScanResultRules{Rules: lo.Uniq(rules)})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) ListMalwareScanResultClassHandler(w http.ResponseWriter, r *http.Request) {
@@ -1107,7 +1136,10 @@ func (h *Handler) ListMalwareScanResultClassHandler(w http.ResponseWriter, r *ht
class = append(class, e.Class)
}
- httpext.JSON(w, http.StatusOK, model.MalwareScanResultClass{Class: lo.Uniq(class)})
+ err = httpext.JSON(w, http.StatusOK, model.MalwareScanResultClass{Class: lo.Uniq(class)})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) ListCloudComplianceScanResultsHandler(w http.ResponseWriter, r *http.Request) {
@@ -1122,8 +1154,11 @@ func (h *Handler) ListCloudComplianceScanResultsHandler(w http.ResponseWriter, r
log.Error().Err(err).Msg("Counts computation issue")
}
- httpext.JSON(w, http.StatusOK, model.CloudComplianceScanResult{Compliances: entries, ScanResultsCommon: common,
+ err = httpext.JSON(w, http.StatusOK, model.CloudComplianceScanResult{Compliances: entries, ScanResultsCommon: common,
ComplianceAdditionalInfo: additionalInfo})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) CountVulnerabilityScanResultsHandler(w http.ResponseWriter, r *http.Request) {
@@ -1133,9 +1168,12 @@ func (h *Handler) CountVulnerabilityScanResultsHandler(w http.ResponseWriter, r
return
}
- httpext.JSON(w, http.StatusOK, reporters_search.SearchCountResp{
+ err = httpext.JSON(w, http.StatusOK, reporters_search.SearchCountResp{
Count: len(entries),
})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) CountSecretScanResultsHandler(w http.ResponseWriter, r *http.Request) {
@@ -1145,9 +1183,12 @@ func (h *Handler) CountSecretScanResultsHandler(w http.ResponseWriter, r *http.R
return
}
- httpext.JSON(w, http.StatusOK, reporters_search.SearchCountResp{
+ err = httpext.JSON(w, http.StatusOK, reporters_search.SearchCountResp{
Count: len(entries),
})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) CountComplianceScanResultsHandler(w http.ResponseWriter, r *http.Request) {
@@ -1157,9 +1198,12 @@ func (h *Handler) CountComplianceScanResultsHandler(w http.ResponseWriter, r *ht
return
}
- httpext.JSON(w, http.StatusOK, reporters_search.SearchCountResp{
+ err = httpext.JSON(w, http.StatusOK, reporters_search.SearchCountResp{
Count: len(entries),
})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) CountMalwareScanResultsHandler(w http.ResponseWriter, r *http.Request) {
@@ -1169,9 +1213,12 @@ func (h *Handler) CountMalwareScanResultsHandler(w http.ResponseWriter, r *http.
return
}
- httpext.JSON(w, http.StatusOK, reporters_search.SearchCountResp{
+ err = httpext.JSON(w, http.StatusOK, reporters_search.SearchCountResp{
Count: len(entries),
})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) CountCloudComplianceScanResultsHandler(w http.ResponseWriter, r *http.Request) {
@@ -1181,9 +1228,12 @@ func (h *Handler) CountCloudComplianceScanResultsHandler(w http.ResponseWriter,
return
}
- httpext.JSON(w, http.StatusOK, reporters_search.SearchCountResp{
+ err = httpext.JSON(w, http.StatusOK, reporters_search.SearchCountResp{
Count: len(entries),
})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func groupSecrets(ctx context.Context) ([]reporters_search.ResultGroup, error) {
@@ -1244,9 +1294,12 @@ func (h *Handler) GroupSecretResultsHandler(w http.ResponseWriter, r *http.Reque
return
}
- httpext.JSON(w, http.StatusOK, reporters_search.ResultGroupResp{
+ err = httpext.JSON(w, http.StatusOK, reporters_search.ResultGroupResp{
Groups: groups,
})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func groupMalwares(ctx context.Context, byClass bool) ([]reporters_search.ResultGroup, error) {
@@ -1315,9 +1368,12 @@ func (h *Handler) GroupMalwareResultsHandler(w http.ResponseWriter, r *http.Requ
return
}
- httpext.JSON(w, http.StatusOK, reporters_search.ResultGroupResp{
+ err = httpext.JSON(w, http.StatusOK, reporters_search.ResultGroupResp{
Groups: groups,
})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) GroupMalwareClassResultsHandler(w http.ResponseWriter, r *http.Request) {
@@ -1329,9 +1385,12 @@ func (h *Handler) GroupMalwareClassResultsHandler(w http.ResponseWriter, r *http
return
}
- httpext.JSON(w, http.StatusOK, reporters_search.ResultGroupResp{
+ err = httpext.JSON(w, http.StatusOK, reporters_search.ResultGroupResp{
Groups: groups,
})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) CloudComplianceFiltersHandler(w http.ResponseWriter, r *http.Request) {
@@ -1347,7 +1406,10 @@ func (h *Handler) CloudComplianceFiltersHandler(w http.ResponseWriter, r *http.R
log.Error().Msgf("%v", err)
h.respondError(err, w)
}
- httpext.JSON(w, http.StatusOK, model.FiltersResult{Filters: res})
+ err = httpext.JSON(w, http.StatusOK, model.FiltersResult{Filters: res})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) ComplianceFiltersHandler(w http.ResponseWriter, r *http.Request) {
@@ -1363,7 +1425,10 @@ func (h *Handler) ComplianceFiltersHandler(w http.ResponseWriter, r *http.Reques
log.Error().Msgf("%v", err)
h.respondError(err, w)
}
- httpext.JSON(w, http.StatusOK, model.FiltersResult{Filters: res})
+ err = httpext.JSON(w, http.StatusOK, model.FiltersResult{Filters: res})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func listScanResultsHandler[T any](w http.ResponseWriter, r *http.Request, scan_type utils.Neo4jScanType) ([]T, model.ScanResultsCommon, error) {
@@ -1595,7 +1660,10 @@ func (h *Handler) scanIdActionHandler(w http.ResponseWriter, r *http.Request, ac
"attachment; filename="+strconv.Quote(utils.ScanIdReplacer.Replace(req.ScanID)+".json"))
w.Header().Set("Content-Type", "application/octet-stream")
w.WriteHeader(http.StatusOK)
- w.Write(data)
+ _, err = w.Write(data)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
h.AuditUserActivity(r, req.ScanType, ACTION_DOWNLOAD, req, true)
case "delete":
@@ -1694,7 +1762,10 @@ func (h *Handler) GetAllNodesInScanResultBulkHandler(w http.ResponseWriter, r *h
h.respondError(err, w)
return
}
- httpext.JSON(w, http.StatusOK, resp)
+ err = httpext.JSON(w, http.StatusOK, resp)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) sbomHandler(w http.ResponseWriter, r *http.Request, action string) {
@@ -1734,7 +1805,10 @@ func (h *Handler) sbomHandler(w http.ResponseWriter, r *http.Request, action str
h.respondError(err, w)
return
}
- httpext.JSON(w, http.StatusOK, sbom)
+ err = httpext.JSON(w, http.StatusOK, sbom)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
case "download":
resp := model.DownloadReportResponse{}
sbomFile := path.Join("/sbom", utils.ScanIdReplacer.Replace(req.ScanID)+".json.gz")
@@ -1749,7 +1823,10 @@ func (h *Handler) sbomHandler(w http.ResponseWriter, r *http.Request, action str
return
}
resp.UrlLink = url
- httpext.JSON(w, http.StatusOK, resp)
+ err = httpext.JSON(w, http.StatusOK, resp)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
h.AuditUserActivity(r, EVENT_VULNERABILITY_SCAN, ACTION_DOWNLOAD, req, true)
}
}
@@ -1998,6 +2075,7 @@ func StartMultiScan(ctx context.Context,
if err != nil {
return nil, "", err
}
+ isPriority := req.IsPriority
regular, k8s, registry, pods := extractBulksNodes(req.NodeIds)
@@ -2080,6 +2158,7 @@ func StartMultiScan(ctx context.Context,
scanId,
ctl.StringToResourceType(req.NodeType),
req.NodeId,
+ isPriority,
action)
if err != nil {
@@ -2111,7 +2190,8 @@ func StartMultiScan(ctx context.Context,
return scanIds, bulkId, tx.Commit()
}
-func StartMultiCloudComplianceScan(ctx context.Context, reqs []model.NodeIdentifier, benchmarkTypes []string) ([]string, string, error) {
+func StartMultiCloudComplianceScan(ctx context.Context, reqs []model.NodeIdentifier,
+ benchmarkTypes []string, isPriority bool) ([]string, string, error) {
driver, err := directory.Neo4jClient(ctx)
if err != nil {
@@ -2139,7 +2219,8 @@ func StartMultiCloudComplianceScan(ctx context.Context, reqs []model.NodeIdentif
scanId,
benchmarkTypes,
req.NodeId,
- reqs[0].NodeType)
+ reqs[0].NodeType,
+ isPriority)
if err != nil {
log.Info().Msgf("Error in AddNewCloudComplianceScan:%v", err)
@@ -2159,9 +2240,7 @@ func StartMultiCloudComplianceScan(ctx context.Context, reqs []model.NodeIdentif
return []string{}, "", nil
}
- var bulkId string
-
- bulkId = bulkScanId()
+ bulkId := bulkScanId()
scanType := utils.NEO4J_CLOUD_COMPLIANCE_SCAN
if reqs[0].NodeType == controls.ResourceTypeToString(controls.KubernetesCluster) || reqs[0].NodeType == controls.ResourceTypeToString(controls.Host) {
scanType = utils.NEO4J_COMPLIANCE_SCAN
diff --git a/deepfence_server/handler/scheduled_tasks.go b/deepfence_server/handler/scheduled_tasks.go
index aee05d0050..7d91d7b62c 100644
--- a/deepfence_server/handler/scheduled_tasks.go
+++ b/deepfence_server/handler/scheduled_tasks.go
@@ -16,7 +16,10 @@ func (h *Handler) GetScheduledTask(w http.ResponseWriter, r *http.Request) {
h.respondError(err, w)
return
}
- httpext.JSON(w, http.StatusOK, scheduledTasks)
+ err = httpext.JSON(w, http.StatusOK, scheduledTasks)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) UpdateScheduledTask(w http.ResponseWriter, r *http.Request) {
@@ -46,6 +49,21 @@ func (h *Handler) UpdateScheduledTask(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNoContent)
}
+func (h *Handler) DeleteCustomScheduledTask(w http.ResponseWriter, r *http.Request) { // HTTP DELETE handler: remove a custom scheduled task by numeric id
+ id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) // "id" path parameter must parse as a base-10 int64
+ if err != nil {
+ h.respondError(&BadDecoding{err}, w) // malformed id -> decoding error response
+ return
+ }
+ defer r.Body.Close() // body is never read here; close it so the connection can be reused
+ err = model.DeleteCustomSchedule(r.Context(), id) // performs the actual delete — NOTE(review): presumably rejects builtin (non-custom) schedules; confirm in model layer
+ if err != nil {
+ h.respondError(err, w)
+ return
+ }
+ w.WriteHeader(http.StatusNoContent) // success: 204, empty body (matches the registered OpenAPI operation)
+}
+
func (h *Handler) AddScheduledTask(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
diff --git a/deepfence_server/handler/search_reports.go b/deepfence_server/handler/search_reports.go
index b9912cc3cb..a76b9adb19 100644
--- a/deepfence_server/handler/search_reports.go
+++ b/deepfence_server/handler/search_reports.go
@@ -19,6 +19,9 @@ func (h *Handler) NodeCountHandler(w http.ResponseWriter, r *http.Request) {
return
}
err = httpext.JSON(w, http.StatusOK, counts)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
func SearchCountHandler[T reporters.CypherableAndCategorizable](w http.ResponseWriter, r *http.Request, h *Handler) {
diff --git a/deepfence_server/handler/settings.go b/deepfence_server/handler/settings.go
index a0cb03c1ce..5aa0553eb1 100644
--- a/deepfence_server/handler/settings.go
+++ b/deepfence_server/handler/settings.go
@@ -27,7 +27,7 @@ var (
invalidIntegerError = ValidatorError{
err: errors.New("value:must be integer"), skipOverwriteErrorMessage: true}
invalidEmailConfigTypeError = ValidatorError{
- err: errors.New(fmt.Sprintf("email_provider:must be %s or %s", model.EmailSettingSMTP, model.EmailSettingSES)), skipOverwriteErrorMessage: true}
+ err: fmt.Errorf("email_provider:must be %s or %s", model.EmailSettingSMTP, model.EmailSettingSES), skipOverwriteErrorMessage: true}
)
func (h *Handler) AddEmailConfiguration(w http.ResponseWriter, r *http.Request) {
@@ -84,7 +84,10 @@ func (h *Handler) AddEmailConfiguration(w http.ResponseWriter, r *http.Request)
req.AmazonAccessKey = ""
req.AmazonSecretKey = ""
h.AuditUserActivity(r, EVENT_SETTINGS, ACTION_CREATE, req, true)
- httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: api_messages.SuccessEmailConfigCreated})
+ err = httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: api_messages.SuccessEmailConfigCreated})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) GetEmailConfiguration(w http.ResponseWriter, r *http.Request) {
@@ -98,7 +101,10 @@ func (h *Handler) GetEmailConfiguration(w http.ResponseWriter, r *http.Request)
resp := []model.EmailConfigurationResp{}
setting, err := pgClient.GetSetting(ctx, model.EmailConfigurationKey)
if errors.Is(err, sql.ErrNoRows) {
- httpext.JSON(w, http.StatusOK, resp)
+ err = httpext.JSON(w, http.StatusOK, resp)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
return
} else if err != nil {
h.respondError(&InternalServerError{err}, w)
@@ -112,7 +118,10 @@ func (h *Handler) GetEmailConfiguration(w http.ResponseWriter, r *http.Request)
}
emailConfig.ID = setting.ID
resp = append(resp, emailConfig)
- httpext.JSON(w, http.StatusOK, resp)
+ err = httpext.JSON(w, http.StatusOK, resp)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) DeleteEmailConfiguration(w http.ResponseWriter, r *http.Request) {
@@ -150,7 +159,10 @@ func (h *Handler) GetGlobalSettings(w http.ResponseWriter, r *http.Request) {
h.respondError(err, w)
return
}
- httpext.JSON(w, http.StatusOK, settings)
+ err = httpext.JSON(w, http.StatusOK, settings)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) UpdateGlobalSettings(w http.ResponseWriter, r *http.Request) {
diff --git a/deepfence_server/handler/threat_graph.go b/deepfence_server/handler/threat_graph.go
index a86a1a5888..5a144729f1 100644
--- a/deepfence_server/handler/threat_graph.go
+++ b/deepfence_server/handler/threat_graph.go
@@ -33,6 +33,11 @@ func (h *Handler) GetThreatGraph(w http.ResponseWriter, r *http.Request) {
AzureFilter: reporters_graph.CloudProviderFilter{AccountIds: nil},
}
err = json.Unmarshal(body, &filters)
+ if err != nil {
+ log.Error().Msgf("Error Adding report: %v", err)
+ respondWith(ctx, w, http.StatusBadRequest, err)
+ return
+ }
reporter, err := reporters_graph.NewThreatGraphReporter(ctx)
@@ -98,5 +103,5 @@ func (h *Handler) GetIndividualThreatGraph(w http.ResponseWriter, r *http.Reques
h.respondError(err, w)
return
}
- httpext.JSON(w, http.StatusOK, individualThreatGraph)
+ _ = httpext.JSON(w, http.StatusOK, individualThreatGraph)
}
diff --git a/deepfence_server/handler/user.go b/deepfence_server/handler/user.go
index dbbfb7f575..ed2e1ed9d0 100644
--- a/deepfence_server/handler/user.go
+++ b/deepfence_server/handler/user.go
@@ -178,11 +178,14 @@ func (h *Handler) RegisterUser(w http.ResponseWriter, r *http.Request) {
}
user.Password = ""
h.AuditUserActivity(r, EVENT_AUTH, ACTION_CREATE, &user, true)
- httpext.JSON(w, http.StatusOK, model.LoginResponse{
+ err = httpext.JSON(w, http.StatusOK, model.LoginResponse{
ResponseAccessToken: *accessTokenResponse,
OnboardingRequired: model.IsOnboardingRequired(ctx),
PasswordInvalidated: user.PasswordInvalidated,
})
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
}
func (h *Handler) RegisterInvitedUser(w http.ResponseWriter, r *http.Request) {
@@ -205,6 +208,10 @@ func (h *Handler) RegisterInvitedUser(w http.ResponseWriter, r *http.Request) {
return
}
code, err := utils.UUIDFromString(registerRequest.Code)
+ if err != nil {
+ h.respondError(err, w)
+ return
+ }
userInvite, err := pgClient.GetUserInviteByCode(ctx, code)
if errors.Is(err, sql.ErrNoRows) {
h.respondError(&userInviteInvalidCodeError, w)
@@ -219,6 +226,10 @@ func (h *Handler) RegisterInvitedUser(w http.ResponseWriter, r *http.Request) {
return
}
role, err := pgClient.GetRoleByID(ctx, userInvite.RoleID)
+ if err != nil {
+ h.respondError(err, w)
+ return
+ }
user := model.User{
FirstName: registerRequest.FirstName,
LastName: registerRequest.LastName,
@@ -266,7 +277,7 @@ func (h *Handler) RegisterInvitedUser(w http.ResponseWriter, r *http.Request) {
user.Password = ""
h.AuditUserActivity(r, EVENT_AUTH, ACTION_CREATE, &user, true)
- httpext.JSON(w, http.StatusOK, model.LoginResponse{
+ _ = httpext.JSON(w, http.StatusOK, model.LoginResponse{
ResponseAccessToken: *accessTokenResponse,
OnboardingRequired: model.IsOnboardingRequired(ctx),
PasswordInvalidated: user.PasswordInvalidated,
@@ -381,7 +392,7 @@ func (h *Handler) InviteUser(w http.ResponseWriter, r *http.Request) {
h.AuditUserActivity(r, EVENT_AUTH, ACTION_INVITE, userInvite, true)
- httpext.JSON(w, http.StatusOK, model.InviteUserResponse{InviteExpiryHours: 48, InviteURL: inviteURL, Message: "Invite sent"})
+ _ = httpext.JSON(w, http.StatusOK, model.InviteUserResponse{InviteExpiryHours: 48, InviteURL: inviteURL, Message: "Invite sent"})
}
func (h *Handler) userModel(pgUser postgresql_db.GetUsersRow) model.User {
@@ -418,7 +429,7 @@ func (h *Handler) GetUsers(w http.ResponseWriter, r *http.Request) {
users[i].CurrentUser = False
}
}
- httpext.JSON(w, http.StatusOK, users)
+ _ = httpext.JSON(w, http.StatusOK, users)
}
func (h *Handler) GetUser(w http.ResponseWriter, r *http.Request) {
@@ -427,7 +438,7 @@ func (h *Handler) GetUser(w http.ResponseWriter, r *http.Request) {
h.respondWithErrorCode(err, w, statusCode)
return
}
- httpext.JSON(w, http.StatusOK, user)
+ _ = httpext.JSON(w, http.StatusOK, user)
}
func (h *Handler) GetUserByUserID(w http.ResponseWriter, r *http.Request) {
@@ -441,7 +452,7 @@ func (h *Handler) GetUserByUserID(w http.ResponseWriter, r *http.Request) {
h.respondWithErrorCode(err, w, statusCode)
return
}
- httpext.JSON(w, http.StatusOK, user)
+ _ = httpext.JSON(w, http.StatusOK, user)
}
func (h *Handler) updateUserHandler(w http.ResponseWriter, r *http.Request, ctx context.Context, pgClient *postgresql_db.Queries, user *model.User, isCurrentUser bool) {
@@ -462,6 +473,10 @@ func (h *Handler) updateUserHandler(w http.ResponseWriter, r *http.Request, ctx
user.LastName = req.LastName
if user.Role != req.Role {
activeAdminUsersCount, err := pgClient.CountActiveAdminUsers(ctx)
+ if err != nil {
+ h.respondWithErrorCode(err, w, http.StatusInternalServerError)
+ return
+ }
if user.Role == model.AdminRole && activeAdminUsersCount < 2 {
h.respondWithErrorCode(deleteLastAdminError, w, http.StatusForbidden)
return
@@ -489,11 +504,14 @@ func (h *Handler) updateUserHandler(w http.ResponseWriter, r *http.Request, ctx
return
}
if toLogout {
- LogoutHandler(ctx)
+ err = LogoutHandler(ctx)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
user.Password = ""
h.AuditUserActivity(r, EVENT_AUTH, ACTION_UPDATE, user, true)
- httpext.JSON(w, http.StatusOK, user)
+ _ = httpext.JSON(w, http.StatusOK, user)
}
func (h *Handler) UpdateUser(w http.ResponseWriter, r *http.Request) {
@@ -586,7 +604,10 @@ func (h *Handler) deleteUserHandler(w http.ResponseWriter, r *http.Request, ctx
return
}
if isCurrentUser {
- LogoutHandler(ctx)
+ err = LogoutHandler(ctx)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
user.Password = ""
@@ -640,13 +661,13 @@ func (h *Handler) ResetPasswordRequest(w http.ResponseWriter, r *http.Request) {
}
resetPasswordRequest.Email = strings.ToLower(resetPasswordRequest.Email)
if resetPasswordRequest.Email == constants.DeepfenceCommunityEmailId {
- httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: passwordResetResponse})
+ _ = httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: passwordResetResponse})
return
}
ctx := directory.NewContextWithNameSpace(directory.FetchNamespace(resetPasswordRequest.Email))
user, statusCode, pgClient, err := model.GetUserByEmail(ctx, resetPasswordRequest.Email)
if errors.Is(err, model.UserNotFoundErr) {
- httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: passwordResetResponse})
+ _ = httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: passwordResetResponse})
return
} else if err != nil {
h.respondWithErrorCode(err, w, statusCode)
@@ -692,7 +713,10 @@ func (h *Handler) ResetPasswordRequest(w http.ResponseWriter, r *http.Request) {
},
)
if err != nil {
- pgClient.DeletePasswordResetByUserEmail(ctx, user.Email)
+ e := pgClient.DeletePasswordResetByUserEmail(ctx, user.Email)
+ if e != nil {
+ log.Error().Msg(e.Error())
+ }
log.Error().Err(err).Msg("error rendering PasswordResetTemplate")
h.respondError(err, w)
return
@@ -706,7 +730,10 @@ func (h *Handler) ResetPasswordRequest(w http.ResponseWriter, r *http.Request) {
nil,
)
if err != nil {
- pgClient.DeletePasswordResetByUserEmail(ctx, user.Email)
+ e := pgClient.DeletePasswordResetByUserEmail(ctx, user.Email)
+ if e != nil {
+ log.Error().Msg(e.Error())
+ }
log.Error().Err(err).Msg("error sending password reset email")
h.respondError(err, w)
return
@@ -714,7 +741,7 @@ func (h *Handler) ResetPasswordRequest(w http.ResponseWriter, r *http.Request) {
h.AuditUserActivity(r, EVENT_AUTH, ACTION_RESET_PASSWORD, resetPasswordRequest, true)
- httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: passwordResetResponse})
+ _ = httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: passwordResetResponse})
}
func (h *Handler) ResetPasswordVerification(w http.ResponseWriter, r *http.Request) {
@@ -737,6 +764,10 @@ func (h *Handler) ResetPasswordVerification(w http.ResponseWriter, r *http.Reque
return
}
code, err := utils.UUIDFromString(passwordResetVerifyRequest.Code)
+ if err != nil {
+ h.respondWithErrorCode(err, w, http.StatusInternalServerError)
+ return
+ }
passwordReset, err := pgClient.GetPasswordResetByCode(ctx, code)
if errors.Is(err, sql.ErrNoRows) {
h.respondError(&passwordResetCodeNotFoundError, w)
@@ -789,7 +820,7 @@ func (h *Handler) GetApiTokens(w http.ResponseWriter, r *http.Request) {
CreatedAt: apiToken.CreatedAt,
}
}
- httpext.JSON(w, http.StatusOK, apiTokenResponse)
+ _ = httpext.JSON(w, http.StatusOK, apiTokenResponse)
}
func (h *Handler) ResetApiToken(w http.ResponseWriter, r *http.Request) {
@@ -832,7 +863,7 @@ func (h *Handler) ResetApiToken(w http.ResponseWriter, r *http.Request) {
}
user.Password = ""
h.AuditUserActivity(r, EVENT_AUTH, ACTION_VERIFY_PASSWORD, user, true)
- httpext.JSON(w, http.StatusOK, apiTokenResponse)
+ _ = httpext.JSON(w, http.StatusOK, apiTokenResponse)
}
func (h *Handler) GetUserFromJWT(requestContext context.Context) (*model.User, int, *postgresql_db.Queries, error) {
@@ -869,5 +900,5 @@ func (h *Handler) GetApiTokenForConsoleAgent(w http.ResponseWriter, r *http.Requ
h.respondError(err, w)
return
}
- httpext.JSON(w, http.StatusOK, model.ApiAuthRequest{ApiToken: model.GetApiToken(string(directory.NonSaaSDirKey), token)})
+ _ = httpext.JSON(w, http.StatusOK, model.ApiAuthRequest{ApiToken: model.GetApiToken(string(directory.NonSaaSDirKey), token)})
}
diff --git a/deepfence_server/handler/vulnerability_database.go b/deepfence_server/handler/vulnerability_database.go
index e69d3b903e..42d26ca3df 100644
--- a/deepfence_server/handler/vulnerability_database.go
+++ b/deepfence_server/handler/vulnerability_database.go
@@ -57,5 +57,5 @@ func (h *Handler) UploadVulnerabilityDB(w http.ResponseWriter, r *http.Request)
vulnerability_db.UpdateListing(path, checksum, time.Now())
}()
- httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: path + " " + checksum})
+ _ = httpext.JSON(w, http.StatusOK, model.MessageResponse{Message: path + " " + checksum})
}
diff --git a/deepfence_server/ingesters/agent.go b/deepfence_server/ingesters/agent.go
index 362db0f256..61b339c5d0 100644
--- a/deepfence_server/ingesters/agent.go
+++ b/deepfence_server/ingesters/agent.go
@@ -78,15 +78,6 @@ type EndpointResolvers struct {
ipport_ippid map[string]string
}
-func (er *EndpointResolvers) clean() {
- for k := range er.network_map {
- delete(er.network_map, k)
- }
- for k := range er.ipport_ippid {
- delete(er.ipport_ippid, k)
- }
-}
-
type EndpointResolversCache struct {
rdb *redis2.Client
net_cache sync.Map
@@ -928,11 +919,7 @@ func (nc *neo4jIngester) runDBPusher(
log.Error().Msgf("Failed to get client: %v", err)
return
}
- session, err := driver.Session(neo4j.AccessModeWrite)
- if err != nil {
- log.Error().Msgf("Failed to open session: %v", err)
- return
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
for batches := range db_pusher {
span := telemetry.NewSpan(context.Background(), "ingester", "PushAgentReportsToDB")
@@ -944,12 +931,8 @@ func (nc *neo4jIngester) runDBPusher(
log.Warn().Msg("Renew session")
new_driver, err := directory.Neo4jClient(ctx)
if err == nil {
- new_session, err := new_driver.Session(neo4j.AccessModeWrite)
- if err == nil {
- driver = new_driver
- session.Close()
- session = new_session
- }
+ _ = session.Close()
+ session = new_driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
}
}
if batches.Retries == 2 || !isTransientError(err) {
@@ -1169,13 +1152,6 @@ func FetchPushBack(ctx context.Context) int32 {
return prev_push_back
}
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
func max(a, b int32) int32 {
if a < b {
return b
@@ -1215,11 +1191,7 @@ func UpdatePushBack(ctx context.Context, newValue *atomic.Int32, prev int32) err
return err
}
- session, err := driver.Session(neo4j.AccessModeWrite)
- if err != nil {
- log.Error().Msgf("Fail to get session for push back: %v", err)
- return err
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(5 * time.Second))
if err != nil {
@@ -1252,18 +1224,16 @@ func UpdatePushBack(ctx context.Context, newValue *atomic.Int32, prev int32) err
if err != nil {
return err
}
- newValue.Store(int32(rec.Values[0].(int64)))
}
+ newValue.Store(int32(rec.Values[0].(int64)))
return tx.Commit()
}
func GetPushBack(driver neo4j.Driver) (int32, error) {
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return 0, err
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
+
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(5 * time.Second))
if err != nil {
return 0, err
diff --git a/deepfence_server/ingesters/common.go b/deepfence_server/ingesters/common.go
index f04866deb9..a601a3bd09 100644
--- a/deepfence_server/ingesters/common.go
+++ b/deepfence_server/ingesters/common.go
@@ -1,15 +1 @@
package ingesters
-
-import "encoding/json"
-
-func convertStructFieldToJSONString(bb map[string]interface{}, key string) map[string]interface{} {
- if val, ok := bb[key]; ok && val != nil {
- v, e := json.Marshal(val)
- if e == nil {
- bb[key] = string(v)
- } else {
- bb[key] = "error"
- }
- }
- return bb
-}
diff --git a/deepfence_server/ingesters/scan_status.go b/deepfence_server/ingesters/scan_status.go
index 1db3f26ecd..6a670c3a6c 100644
--- a/deepfence_server/ingesters/scan_status.go
+++ b/deepfence_server/ingesters/scan_status.go
@@ -62,6 +62,7 @@ func AddNewScan(tx WriteDBTransaction,
scan_id string,
node_type controls.ScanResource,
node_id string,
+ isPriority bool,
action controls.Action) error {
res, err := tx.Run(fmt.Sprintf(`
@@ -115,7 +116,7 @@ func AddNewScan(tx WriteDBTransaction,
}
}
if rec.Values[1] != nil {
- if rec.Values[1].(bool) == false {
+ if !rec.Values[1].(bool) {
return &AgentNotInstalledError{
NodeId: node_id,
}
@@ -128,14 +129,15 @@ func AddNewScan(tx WriteDBTransaction,
}
if _, err = tx.Run(fmt.Sprintf(`
- MERGE (n:%s{node_id: $scan_id, status: $status, status_message: "", retries: 0, trigger_action: $action, updated_at: TIMESTAMP(), created_at: TIMESTAMP()})
+ MERGE (n:%s{node_id: $scan_id, status: $status, status_message: "", retries: 0, trigger_action: $action, updated_at: TIMESTAMP(), created_at: TIMESTAMP(), is_priority: $is_priority})
MERGE (m:%s{node_id:$node_id})
MERGE (n)-[:SCANNED]->(m)`, scan_type, controls.ResourceTypeToNeo4j(node_type)),
map[string]interface{}{
- "scan_id": scan_id,
- "status": utils.SCAN_STATUS_STARTING,
- "node_id": node_id,
- "action": string(b)}); err != nil {
+ "scan_id": scan_id,
+ "status": utils.SCAN_STATUS_STARTING,
+ "node_id": node_id,
+ "action": string(b),
+ "is_priority": isPriority}); err != nil {
return err
}
@@ -221,7 +223,8 @@ func AddNewCloudComplianceScan(tx WriteDBTransaction,
scanId string,
benchmarkTypes []string,
nodeId string,
- nodeType string) error {
+ nodeType string,
+ isPriority bool) error {
neo4jNodeType := "CloudNode"
scanType := utils.NEO4J_CLOUD_COMPLIANCE_SCAN
@@ -291,7 +294,7 @@ func AddNewCloudComplianceScan(tx WriteDBTransaction,
}
}
if rec.Values[1] != nil {
- if rec.Values[1].(bool) == false {
+ if !rec.Values[1].(bool) {
return &AgentNotInstalledError{
NodeId: nodeId,
}
@@ -311,7 +314,7 @@ func AddNewCloudComplianceScan(tx WriteDBTransaction,
action, _ = json.Marshal(ctl.Action{ID: ctl.StartComplianceScan, RequestPayload: string(internalReq)})
}
if _, err = tx.Run(fmt.Sprintf(`
- MERGE (n:%s{node_id: $scan_id, status: $status, status_message: "", retries: 0, updated_at: TIMESTAMP(), benchmark_types: $benchmark_types, trigger_action: $action, created_at:TIMESTAMP()})
+MERGE (n:%s{node_id: $scan_id, status: $status, status_message: "", retries: 0, updated_at: TIMESTAMP(), benchmark_types: $benchmark_types, trigger_action: $action, created_at:TIMESTAMP(), is_priority: $is_priority})
MERGE (m:%s{node_id:$node_id})
MERGE (n)-[:SCANNED]->(m)`, scanType, neo4jNodeType),
map[string]interface{}{
@@ -320,6 +323,7 @@ func AddNewCloudComplianceScan(tx WriteDBTransaction,
"node_id": nodeId,
"benchmark_types": benchmarkTypes,
"action": string(action),
+ "is_priority": isPriority,
}); err != nil {
return err
}
diff --git a/deepfence_server/main.go b/deepfence_server/main.go
index bf42c11495..cf31dc8073 100644
--- a/deepfence_server/main.go
+++ b/deepfence_server/main.go
@@ -105,7 +105,7 @@ func main() {
log.Fatal().Msg(err.Error())
}
- if *resetPassword == true {
+ if *resetPassword {
if directory.IsNonSaaSDeployment() {
ctx := directory.NewContextWithNameSpace(directory.NonSaaSDirKey)
err = resetUserPassword(ctx)
diff --git a/deepfence_server/model/cloud_node.go b/deepfence_server/model/cloud_node.go
index 3bd359c88a..738d5b651f 100644
--- a/deepfence_server/model/cloud_node.go
+++ b/deepfence_server/model/cloud_node.go
@@ -235,6 +235,26 @@ func UpsertCloudComplianceNode(ctx context.Context, nodeDetails map[string]inter
return tx.Commit()
}
+func getPostureProviderCache(ctx context.Context) []PostureProvider { // best-effort read of the posture-provider list cached in Redis; any failure is logged and an empty (or partial) slice is returned
+
+ var postureProvidersCache []PostureProvider
+ rdb, err := directory.RedisClient(ctx)
+ if err != nil {
+ log.Error().Msgf("GetCloudProvidersList redis : %v", err) // no Redis client -> return nil slice, caller falls back to live query
+ return postureProvidersCache
+ }
+ postureProvidersStr, err := rdb.Get(ctx, constants.RedisKeyPostureProviders).Result() // single well-known cache key
+ if err != nil {
+ log.Error().Msgf("GetCloudProvidersList redis : %v", err) // cache miss or Redis error — treated the same
+ return postureProvidersCache
+ }
+ err = json.Unmarshal([]byte(postureProvidersStr), &postureProvidersCache)
+ if err != nil {
+ log.Error().Msgf("GetCloudProvidersList redis : %v", err) // decode failure: fall through and return whatever was decoded (typically nil)
+ }
+ return postureProvidersCache
+}
+
func GetCloudProvidersList(ctx context.Context) ([]PostureProvider, error) {
driver, err := directory.Neo4jClient(ctx)
if err != nil {
@@ -253,18 +273,7 @@ func GetCloudProvidersList(ctx context.Context) ([]PostureProvider, error) {
}
defer tx.Close()
- var postureProvidersCache []PostureProvider
- rdb, err := directory.RedisClient(ctx)
- if err == nil {
- postureProvidersStr, err := rdb.Get(ctx, constants.RedisKeyPostureProviders).Result()
- if err == nil {
- json.Unmarshal([]byte(postureProvidersStr), &postureProvidersCache)
- } else {
- log.Warn().Msgf("GetCloudProvidersList redis : %v", err)
- }
- } else {
- log.Warn().Msgf("GetCloudProvidersList redis : %v", err)
- }
+ postureProvidersCache := getPostureProviderCache(ctx)
postureProviders := []PostureProvider{
{Name: PostureProviderAWS, NodeLabel: "Accounts"},
@@ -292,7 +301,7 @@ func GetCloudProvidersList(ctx context.Context) ([]PostureProvider, error) {
records, err := r.Collect()
if err == nil {
for _, record := range records {
- if record.Values[0].(bool) == true {
+ if record.Values[0].(bool) {
postureProviders[providersIndex[PostureProviderLinux]].NodeCount = record.Values[1].(int64)
} else {
postureProviders[providersIndex[PostureProviderLinux]].NodeCountInactive = record.Values[1].(int64)
@@ -312,7 +321,7 @@ func GetCloudProvidersList(ctx context.Context) ([]PostureProvider, error) {
records, err := r.Collect()
if err == nil {
for _, record := range records {
- if record.Values[0].(bool) == true {
+ if record.Values[0].(bool) {
postureProviders[providersIndex[PostureProviderKubernetes]].NodeCount = record.Values[1].(int64)
} else {
postureProviders[providersIndex[PostureProviderKubernetes]].NodeCountInactive = record.Values[1].(int64)
@@ -335,7 +344,7 @@ func GetCloudProvidersList(ctx context.Context) ([]PostureProvider, error) {
if slices.Contains([]string{PostureProviderAWSOrg, PostureProviderGCPOrg}, provider) {
continue
}
- if record.Values[1].(bool) == true {
+ if record.Values[1].(bool) {
postureProviders[providersIndex[provider]].NodeCount = record.Values[2].(int64)
} else {
postureProviders[providersIndex[provider]].NodeCountInactive = record.Values[2].(int64)
@@ -448,6 +457,9 @@ func GetCloudComplianceNodesList(ctx context.Context, cloudProvider string, fw F
RETURN COUNT(*)`, neo4jNodeType),
map[string]interface{}{"cloud_provider": cloudProvider})
}
+ if err != nil {
+ return CloudNodeAccountsListResp{Total: 0}, err
+ }
countRec, err := countRes.Single()
if err != nil {
@@ -480,7 +492,7 @@ func GetActiveCloudControls(ctx context.Context, complianceTypes []string, cloud
var res neo4j.Result
res, err = tx.Run(`
- MATCH (n:CloudComplianceBenchmark) -[:INCLUDES]-> (m:CloudComplianceControl)
+ MATCH (n:CloudComplianceBenchmark) -[:PARENT]-> (m:CloudComplianceControl)
WHERE m.active = true
AND m.disabled = false
AND m.compliance_type IN $compliance_types
diff --git a/deepfence_server/model/integration.go b/deepfence_server/model/integration.go
index 3d633c36c8..da36251f0e 100644
--- a/deepfence_server/model/integration.go
+++ b/deepfence_server/model/integration.go
@@ -39,7 +39,7 @@ func (i *IntegrationAddReq) IntegrationExists(ctx context.Context, pgClient *pos
}
var config map[string]interface{}
- var found = false
+ var found bool
for _, integration := range integrations {
// json.rawmessage to map[string]interface{}
diff --git a/deepfence_server/model/registry.go b/deepfence_server/model/registry.go
index 32b5ac8b4d..013fdb1aa7 100644
--- a/deepfence_server/model/registry.go
+++ b/deepfence_server/model/registry.go
@@ -213,11 +213,17 @@ func (ra *RegistryAddReq) CreateRegistry(ctx context.Context, rContext context.C
NonSecret: bNonSecret, // rawNonSecretJSON,
Extras: bExtras, // rawExtrasJSON,
})
+ if err != nil {
+ return 0, err
+ }
cr, err := pgClient.GetContainerRegistryByTypeAndName(ctx, postgresqlDb.GetContainerRegistryByTypeAndNameParams{
RegistryType: ra.RegistryType,
Name: ra.Name,
})
+ if err != nil {
+ return 0, err
+ }
driver, err := directory.Neo4jClient(rContext)
if err != nil {
diff --git a/deepfence_server/model/scans.go b/deepfence_server/model/scans.go
index 9626406271..b20f1138ee 100644
--- a/deepfence_server/model/scans.go
+++ b/deepfence_server/model/scans.go
@@ -57,8 +57,9 @@ type ScanFilter struct {
}
type ScanTriggerCommon struct {
- NodeIds []NodeIdentifier `json:"node_ids" required:"true"`
- Filters ScanFilter `json:"filters" required:"true"`
+ NodeIds []NodeIdentifier `json:"node_ids" required:"true"`
+ Filters ScanFilter `json:"filters" required:"true"`
+ IsPriority bool `json:"is_priority"`
}
type NodeIdentifier struct {
diff --git a/deepfence_server/model/scheduled_tasks.go b/deepfence_server/model/scheduled_tasks.go
index 4451f0fe50..71a2c23a47 100644
--- a/deepfence_server/model/scheduled_tasks.go
+++ b/deepfence_server/model/scheduled_tasks.go
@@ -31,11 +31,20 @@ var (
)
type AddScheduledTaskRequest struct {
- NodeType string `json:"node_type"`
- Action string `json:"action"`
+ Action string `json:"action" validate:"required,oneof=SecretScan VulnerabilityScan MalwareScan ComplianceScan CloudComplianceScan" required:"true" enum:"SecretScan,VulnerabilityScan,MalwareScan,ComplianceScan,CloudComplianceScan"`
Description string `json:"description"`
CronExpr string `json:"cron_expr"`
- Filters string `json:"filters"`
+ ScheduleTaskPayload
+}
+
+type ScheduleTaskPayload struct {
+ ScanTriggerCommon
+ ScanConfigLanguages []VulnerabilityScanConfigLanguage `json:"scan_config" required:"true"`
+ ComplianceBenchmarkTypes
+}
+
+type ScheduleJobId struct {
+ ID int64 `path:"id"`
}
type UpdateScheduledTaskRequest struct {
@@ -76,15 +85,30 @@ func UpdateScheduledTask(ctx context.Context, id int64, updateScheduledTask Upda
})
}
+func DeleteCustomSchedule(ctx context.Context, id int64) error {
+ pgClient, err := directory.PostgresClient(ctx)
+ if err != nil {
+ return err
+ }
+ return pgClient.DeleteCustomSchedule(ctx, id)
+}
+
func AddScheduledTask(ctx context.Context, req AddScheduledTaskRequest) error {
pgClient, err := directory.PostgresClient(ctx)
if err != nil {
return err
}
- payload := make(map[string]string)
- payload["node_type"] = req.NodeType
- payload["filters"] = req.Filters
- payloadJson, _ := json.Marshal(payload)
+
+ payload := ScheduleTaskPayload{}
+ payload.NodeIds = req.NodeIds
+ payload.Filters = req.Filters
+ payload.ScanConfigLanguages = req.ScanConfigLanguages
+ payload.BenchmarkTypes = req.BenchmarkTypes
+ payload.IsPriority = req.IsPriority
+ payloadJson, err := json.Marshal(payload)
+ if err != nil {
+ return err
+ }
params := postgresqlDb.CreateScheduleParams{}
params.Action = req.Action
diff --git a/deepfence_server/model/setting.go b/deepfence_server/model/setting.go
index 942c36db56..7c6e1ae0c1 100644
--- a/deepfence_server/model/setting.go
+++ b/deepfence_server/model/setting.go
@@ -3,12 +3,12 @@ package model
import (
"context"
"crypto/aes"
+ "crypto/rand"
"database/sql"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
- "math/rand"
"time"
"github.com/deepfence/ThreatMapper/deepfence_server/constants/common"
diff --git a/deepfence_server/model/user.go b/deepfence_server/model/user.go
index c11a989fba..cda7f02376 100644
--- a/deepfence_server/model/user.go
+++ b/deepfence_server/model/user.go
@@ -300,6 +300,9 @@ func (u *User) CompareHashAndPassword(ctx context.Context, pgClient *postgresqlD
func GetUserByID(ctx context.Context, userID int64) (*User, int, *postgresqlDb.Queries, error) {
user := User{ID: userID}
pgClient, err := directory.PostgresClient(ctx)
+ if err != nil {
+ return nil, 0, nil, err
+ }
err = user.LoadFromDbByID(ctx, pgClient)
if errors.Is(err, sql.ErrNoRows) {
return nil, http.StatusNotFound, pgClient, errors.New(utils.ErrorUserNotFound)
diff --git a/deepfence_server/pkg/integration/aws-security-hub/awssecurityhub.go b/deepfence_server/pkg/integration/aws-security-hub/awssecurityhub.go
index 31be35a4c4..fd8fc8bb4a 100644
--- a/deepfence_server/pkg/integration/aws-security-hub/awssecurityhub.go
+++ b/deepfence_server/pkg/integration/aws-security-hub/awssecurityhub.go
@@ -4,10 +4,11 @@ import (
"context"
"encoding/json"
"fmt"
- "github.com/aws/aws-sdk-go/service/sts"
"strings"
"time"
+ "github.com/aws/aws-sdk-go/service/sts"
+
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
@@ -143,11 +144,7 @@ func getResourceForVulnerability(ctx context.Context, scanID, region, accountID
return nil, err
}
- session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
- if err != nil {
- log.Error().Msg(err.Error())
- return nil, err
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -173,18 +170,22 @@ func getResourceForVulnerability(ctx context.Context, scanID, region, accountID
return nil, err
}
- if len(records) > 0 {
- for _, rec := range records {
- if rec.Values[0].(string) != "aws" {
- return nil, fmt.Errorf("not aws")
- }
- return []*securityhub.Resource{
- {
- Type: aws.String("AwsEc2Instance"),
- Id: aws.String(fmt.Sprintf("arn:aws:ec2:%s:%s:instance/%s", region, rec.Values[2].(string), rec.Values[1].(string))),
- },
- }, nil
+ if len(records) > 1 {
+ log.Error().Msgf("Too many results: %v", len(records))
+ return nil, fmt.Errorf("not aws")
+ }
+
+ if len(records) == 1 {
+ rec := records[0]
+ if rec.Values[0].(string) != "aws" {
+ return nil, fmt.Errorf("not aws")
}
+ return []*securityhub.Resource{
+ {
+ Type: aws.String("AwsEc2Instance"),
+ Id: aws.String(fmt.Sprintf("arn:aws:ec2:%s:%s:instance/%s", region, rec.Values[2].(string), rec.Values[1].(string))),
+ },
+ }, nil
}
// query for containerImage
@@ -202,18 +203,22 @@ func getResourceForVulnerability(ctx context.Context, scanID, region, accountID
return nil, err
}
- if len(records) > 0 {
- for _, rec := range records {
- if rec.Values[1].(string) != "ecr" {
- return nil, fmt.Errorf("not aws")
- }
- return []*securityhub.Resource{
- {
- Type: aws.String("AwsEcrContainerImage"),
- Id: aws.String(fmt.Sprintf("arn:aws:ecr:%s:%s:repository/%s", region, accountID, rec.Values[0].(string))),
- },
- }, nil
+ if len(records) > 1 {
+ log.Error().Msgf("Too many results: %v", len(records))
+ return nil, fmt.Errorf("not aws")
+ }
+
+ if len(records) == 1 {
+ rec := records[0]
+ if rec.Values[1].(string) != "ecr" {
+ return nil, fmt.Errorf("not aws")
}
+ return []*securityhub.Resource{
+ {
+ Type: aws.String("AwsEcrContainerImage"),
+ Id: aws.String(fmt.Sprintf("arn:aws:ecr:%s:%s:repository/%s", region, accountID, rec.Values[0].(string))),
+ },
+ }, nil
}
return nil, fmt.Errorf("not aws")
}
@@ -255,18 +260,22 @@ func getResourceForCompliance(ctx context.Context, scanID, region, accountID str
return nil, err
}
- if len(records) > 0 {
- for _, rec := range records {
- if rec.Values[0].(string) != "aws" {
- return nil, fmt.Errorf("not aws")
- }
- return []*securityhub.Resource{
- {
- Type: aws.String("AwsEc2Instance"),
- Id: aws.String(fmt.Sprintf("arn:aws:ec2:%s:%s:instance/%s", region, rec.Values[2].(string), rec.Values[1].(string))),
- },
- }, nil
+ if len(records) > 1 {
+ log.Error().Msgf("Too many results: %v", len(records))
+ return nil, fmt.Errorf("not aws")
+ }
+
+ if len(records) == 1 {
+ rec := records[0]
+ if rec.Values[0].(string) != "aws" {
+ return nil, fmt.Errorf("not aws")
}
+ return []*securityhub.Resource{
+ {
+ Type: aws.String("AwsEc2Instance"),
+ Id: aws.String(fmt.Sprintf("arn:aws:ec2:%s:%s:instance/%s", region, rec.Values[2].(string), rec.Values[1].(string))),
+ },
+ }, nil
}
return nil, fmt.Errorf("not aws")
}
diff --git a/deepfence_server/pkg/integration/slack/slack.go b/deepfence_server/pkg/integration/slack/slack.go
index d532fec47e..ae1428e542 100644
--- a/deepfence_server/pkg/integration/slack/slack.go
+++ b/deepfence_server/pkg/integration/slack/slack.go
@@ -171,8 +171,12 @@ func (s Slack) SendNotification(ctx context.Context, message string, extras map[
errorMsg := ""
if resp.Body != nil {
buf := new(bytes.Buffer)
- buf.ReadFrom(resp.Body)
- errorMsg = buf.String()
+ _, err = buf.ReadFrom(resp.Body)
+ if err != nil {
+ errorMsg = err.Error()
+ } else {
+ errorMsg = buf.String()
+ }
}
resp.Body.Close()
return fmt.Errorf("failed to send notification batch %d, status code: %d , error: %s", i+1, resp.StatusCode, errorMsg)
diff --git a/deepfence_server/pkg/integration/splunk/splunk.go b/deepfence_server/pkg/integration/splunk/splunk.go
index d70680ae0b..3a1421f6b1 100644
--- a/deepfence_server/pkg/integration/splunk/splunk.go
+++ b/deepfence_server/pkg/integration/splunk/splunk.go
@@ -93,33 +93,27 @@ func (s Splunk) Sender(in chan []byte, wg *sync.WaitGroup) {
defer wg.Done()
authToken := "Splunk " + s.Config.Token
-SenderLoop:
for {
- var data []byte
- var ok bool
-
- select {
- case data, ok = <-in:
- if !ok {
- break SenderLoop
- }
+ data, ok := <-in
+ if !ok {
+ break
}
req, err := http.NewRequest("POST", s.Config.EndpointURL, bytes.NewReader(data))
if err != nil {
- log.Info().Msgf("Failed to create HTTP request: %v", err)
- continue SenderLoop
+ log.Error().Msgf("Failed to create HTTP request: %v", err)
+ continue
}
req.Header.Set("Authorization", authToken)
resp, err := s.client.Do(req)
if err != nil {
- log.Info().Msgf("Failed to send data to Splunk: %v", err)
- continue SenderLoop
+ log.Error().Msgf("Failed to send data to Splunk: %v", err)
+ continue
}
// Check the response status code
if resp.StatusCode != http.StatusOK {
- log.Info().Msgf("Failed to send data to Splunk %s", resp.Status)
+ log.Error().Msgf("Failed to send data to Splunk %s", resp.Status)
}
resp.Body.Close()
}
diff --git a/deepfence_server/pkg/integration/teams/teams.go b/deepfence_server/pkg/integration/teams/teams.go
index b7a1cad39c..87c2d5c897 100644
--- a/deepfence_server/pkg/integration/teams/teams.go
+++ b/deepfence_server/pkg/integration/teams/teams.go
@@ -108,34 +108,29 @@ func (t Teams) enqueueNotification(payloads []map[string]interface{},
func (t Teams) Sender(in chan *Payload, wg *sync.WaitGroup) {
defer wg.Done()
- var payload *Payload
- var ok bool
-SenderLoop:
for {
- select {
- case payload, ok = <-in:
- if !ok {
- break SenderLoop
- }
+ payload, ok := <-in
+ if !ok {
+ break
}
payloadBytes, err := json.Marshal(payload)
if err != nil {
- continue SenderLoop
+ continue
}
req, err := http.NewRequest("POST", t.Config.WebhookURL, bytes.NewBuffer(payloadBytes))
if err != nil {
log.Info().Msgf("Failed to create HTTP request: %v", err)
- continue SenderLoop
+ continue
}
req.Header.Set("Content-Type", "application/json")
resp, err := t.client.Do(req)
if err != nil {
log.Info().Msgf("Failed to send data to Teams: %v", err)
- continue SenderLoop
+ continue
}
if resp.StatusCode != http.StatusOK {
diff --git a/deepfence_server/pkg/integration/teams/types.go b/deepfence_server/pkg/integration/teams/types.go
index 3bb953add3..6506b585dc 100644
--- a/deepfence_server/pkg/integration/teams/types.go
+++ b/deepfence_server/pkg/integration/teams/types.go
@@ -25,7 +25,7 @@ type Config struct {
type Payload struct {
CardType string `json:"@type"`
Context string `json:"@context"`
- Markdown bool `json:"markdown,bool"`
+ Markdown bool `json:"markdown"`
Text string `json:"text,omitempty"`
Title string `json:"title,omitempty"`
Summary string `json:"summary,omitempty"`
diff --git a/deepfence_server/pkg/registry/dockerhub/docker.go b/deepfence_server/pkg/registry/dockerhub/docker.go
index 028e54f992..4ed10472db 100644
--- a/deepfence_server/pkg/registry/dockerhub/docker.go
+++ b/deepfence_server/pkg/registry/dockerhub/docker.go
@@ -87,8 +87,14 @@ func (d *RegistryDockerHub) FetchImagesFromRegistry() ([]model.IngestedContainer
// getters
func (d *RegistryDockerHub) GetSecret() map[string]interface{} {
var secret map[string]interface{}
- b, _ := json.Marshal(d.Secret)
- json.Unmarshal(b, &secret)
+ b, err := json.Marshal(d.Secret)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
+ err = json.Unmarshal(b, &secret)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
return secret
}
diff --git a/deepfence_server/pkg/registry/dockerprivate/client.go b/deepfence_server/pkg/registry/dockerprivate/client.go
index e37497688e..9ddaa44e83 100644
--- a/deepfence_server/pkg/registry/dockerprivate/client.go
+++ b/deepfence_server/pkg/registry/dockerprivate/client.go
@@ -65,22 +65,26 @@ func listCatalogRegistryV2(url, userName, password string) ([]string, error) {
resp, err := client.Do(req)
if err != nil {
log.Error().Msg(err.Error())
+ return nil, err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
log.Error().Msg(err.Error())
+ return nil, err
}
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("error bad status code %d", resp.StatusCode)
log.Error().Msg(err.Error())
+ return nil, err
}
var repos ReposResp
- if err := json.Unmarshal(body, &repos); err != nil {
+ if err = json.Unmarshal(body, &repos); err != nil {
log.Error().Msg(err.Error())
+ return nil, err
}
repositories = append(repositories, repos.Repositories...)
diff --git a/deepfence_server/pkg/registry/dockerprivate/dockerprivate.go b/deepfence_server/pkg/registry/dockerprivate/dockerprivate.go
index de0db5cadc..d6815a6aaf 100644
--- a/deepfence_server/pkg/registry/dockerprivate/dockerprivate.go
+++ b/deepfence_server/pkg/registry/dockerprivate/dockerprivate.go
@@ -81,8 +81,14 @@ func (d *RegistryDockerPrivate) FetchImagesFromRegistry() ([]model.IngestedConta
// getters
func (d *RegistryDockerPrivate) GetSecret() map[string]interface{} {
var secret map[string]interface{}
- b, _ := json.Marshal(d.Secret)
- json.Unmarshal(b, &secret)
+ b, err := json.Marshal(d.Secret)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
+ err = json.Unmarshal(b, &secret)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
return secret
}
diff --git a/deepfence_server/pkg/registry/ecr/ecr.go b/deepfence_server/pkg/registry/ecr/ecr.go
index 02684dc6c9..6867e39dc7 100644
--- a/deepfence_server/pkg/registry/ecr/ecr.go
+++ b/deepfence_server/pkg/registry/ecr/ecr.go
@@ -6,6 +6,7 @@ import (
"github.com/deepfence/ThreatMapper/deepfence_server/model"
"github.com/deepfence/ThreatMapper/deepfence_utils/encryption"
"github.com/go-playground/validator/v10"
+ "github.com/rs/zerolog/log"
)
func New(requestByte []byte) (*RegistryECR, error) {
@@ -56,8 +57,14 @@ func (e *RegistryECR) FetchImagesFromRegistry() ([]model.IngestedContainerImage,
// getters
func (e *RegistryECR) GetSecret() map[string]interface{} {
var secret map[string]interface{}
- b, _ := json.Marshal(e.Secret)
- json.Unmarshal(b, &secret)
+ b, err := json.Marshal(e.Secret)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
+ err = json.Unmarshal(b, &secret)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
return secret
}
diff --git a/deepfence_server/pkg/registry/gcr/client.go b/deepfence_server/pkg/registry/gcr/client.go
index 78fe729468..d20faa6c41 100644
--- a/deepfence_server/pkg/registry/gcr/client.go
+++ b/deepfence_server/pkg/registry/gcr/client.go
@@ -57,22 +57,26 @@ func listCatalogRegistryV2(url, namespace, userName, password string) ([]string,
resp, err := client.Do(req)
if err != nil {
log.Error().Msg(err.Error())
+ return nil, err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
log.Error().Msg(err.Error())
+ return nil, err
}
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("error bad status code %d", resp.StatusCode)
log.Error().Msg(err.Error())
+ return nil, err
}
var repos ReposResp
- if err := json.Unmarshal(body, &repos); err != nil {
+ if err = json.Unmarshal(body, &repos); err != nil {
log.Error().Msg(err.Error())
+ return nil, err
}
repositories = append(repositories, repos.Repositories...)
diff --git a/deepfence_server/pkg/registry/gitlab/gitlab.go b/deepfence_server/pkg/registry/gitlab/gitlab.go
index 9a66555fd3..5f30385fe1 100644
--- a/deepfence_server/pkg/registry/gitlab/gitlab.go
+++ b/deepfence_server/pkg/registry/gitlab/gitlab.go
@@ -5,6 +5,7 @@ import (
"github.com/deepfence/ThreatMapper/deepfence_server/model"
"github.com/deepfence/ThreatMapper/deepfence_utils/encryption"
+ "github.com/deepfence/ThreatMapper/deepfence_utils/log"
"github.com/go-playground/validator/v10"
)
@@ -52,8 +53,14 @@ func (e *RegistryGitlab) FetchImagesFromRegistry() ([]model.IngestedContainerIma
// getters
func (e *RegistryGitlab) GetSecret() map[string]interface{} {
var secret map[string]interface{}
- b, _ := json.Marshal(e.Secret)
- json.Unmarshal(b, &secret)
+ b, err := json.Marshal(e.Secret)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
+ err = json.Unmarshal(b, &secret)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
return secret
}
diff --git a/deepfence_server/pkg/registry/jfrog/client.go b/deepfence_server/pkg/registry/jfrog/client.go
index d74b39dfeb..4866fe3089 100644
--- a/deepfence_server/pkg/registry/jfrog/client.go
+++ b/deepfence_server/pkg/registry/jfrog/client.go
@@ -63,22 +63,26 @@ func listCatalogRegistryV2(url, repository, userName, password string) ([]string
resp, err := client.Do(req)
if err != nil {
log.Error().Msg(err.Error())
+ return nil, err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
log.Error().Msg(err.Error())
+ return nil, err
}
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("error bad status code %d", resp.StatusCode)
log.Error().Msg(err.Error())
+ return nil, err
}
var repos ReposResp
- if err := json.Unmarshal(body, &repos); err != nil {
+ if err = json.Unmarshal(body, &repos); err != nil {
log.Error().Msg(err.Error())
+ return nil, err
}
repositories = append(repositories, repos.Repositories...)
diff --git a/deepfence_server/pkg/registry/jfrog/jfrog.go b/deepfence_server/pkg/registry/jfrog/jfrog.go
index 95c8f11cdf..7c496076a4 100644
--- a/deepfence_server/pkg/registry/jfrog/jfrog.go
+++ b/deepfence_server/pkg/registry/jfrog/jfrog.go
@@ -82,8 +82,14 @@ func (d *RegistryJfrog) FetchImagesFromRegistry() ([]model.IngestedContainerImag
// getters
func (d *RegistryJfrog) GetSecret() map[string]interface{} {
var secret map[string]interface{}
- b, _ := json.Marshal(d.Secret)
- json.Unmarshal(b, &secret)
+ b, err := json.Marshal(d.Secret)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
+ err = json.Unmarshal(b, &secret)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
return secret
}
diff --git a/deepfence_server/pkg/scope/report/map_helpers.go b/deepfence_server/pkg/scope/report/map_helpers.go
index a290150282..bdd2914205 100644
--- a/deepfence_server/pkg/scope/report/map_helpers.go
+++ b/deepfence_server/pkg/scope/report/map_helpers.go
@@ -10,31 +10,6 @@ import (
// Helper functions for ps.Map, without considering what is inside
-// Return a new map containing all the elements of the two input maps
-// and where the same Key is in both, pick 'b' where prefer(a,b) is true
-func mergeMaps(m, n *ps.Tree, prefer func(a, b interface{}) bool) *ps.Tree {
- switch {
- case m == nil:
- return n
- case n == nil:
- return m
- case m.Size() < n.Size():
- m, n = n, m
- }
-
- n.ForEach(func(Key string, val interface{}) {
- if existingVal, found := m.Lookup(Key); found {
- if prefer(existingVal, val) {
- m = m.Set(Key, val)
- }
- } else {
- m = m.Set(Key, val)
- }
- })
-
- return m
-}
-
func mapEqual(m, n *ps.Tree, equalf func(a, b interface{}) bool) bool {
var mSize, nSize int
if m != nil {
diff --git a/deepfence_server/pkg/scope/report/map_keys.go b/deepfence_server/pkg/scope/report/map_keys.go
index ad1e41118c..27362c8a98 100644
--- a/deepfence_server/pkg/scope/report/map_keys.go
+++ b/deepfence_server/pkg/scope/report/map_keys.go
@@ -125,123 +125,3 @@ const (
WeavePeerName = "weave_peer_name"
WeavePeerNickName = "weave_peer_nick_name"
)
-
-/* Lookup table to allow msgpack/json decoder to avoid heap allocation
- for common ps.Map keys. The map is static so we don't have to lock
- access from multiple threads and don't have to worry about it
- getting clogged with values that are only used once.
-*/
-var commonKeys = map[string]string{
- Endpoint: Endpoint,
- Process: Process,
- Container: Container,
- Pod: Pod,
- Service: Service,
- ContainerImage: ContainerImage,
- Host: Host,
- Overlay: Overlay,
-
- HostNodeID: HostNodeID,
- ControlProbeID: ControlProbeID,
- DoesNotMakeConnections: DoesNotMakeConnections,
-
- ReverseDNSNames: ReverseDNSNames,
- SnoopedDNSNames: SnoopedDNSNames,
- CopyOf: CopyOf,
-
- PID: PID,
- Name: Name,
- PPID: PPID,
- Cmdline: Cmdline,
- Threads: Threads,
- OpenFiles: OpenFiles,
-
- DockerContainerID: DockerContainerID,
- DockerImageID: DockerImageID,
- DockerImageName: DockerImageName,
- DockerImageTag: DockerImageTag,
- DockerImageSize: DockerImageSize,
- DockerImageVirtualSize: DockerImageVirtualSize,
- DockerIsInHostNetwork: DockerIsInHostNetwork,
- DockerServiceName: DockerServiceName,
- DockerStackNamespace: DockerStackNamespace,
- DockerStopContainer: DockerStopContainer,
- DockerStartContainer: DockerStartContainer,
- DockerRestartContainer: DockerRestartContainer,
- DockerPauseContainer: DockerPauseContainer,
- DockerUnpauseContainer: DockerUnpauseContainer,
- DockerRemoveContainer: DockerRemoveContainer,
- DockerAttachContainer: DockerAttachContainer,
- DockerExecContainer: DockerExecContainer,
- DockerContainerName: DockerContainerName,
- DockerContainerCommand: DockerContainerCommand,
- DockerContainerPorts: DockerContainerPorts,
- DockerContainerCreated: DockerContainerCreated,
- DockerContainerNetworks: DockerContainerNetworks,
- DockerContainerIPs: DockerContainerIPs,
- DockerContainerHostname: DockerContainerHostname,
- DockerContainerIPsWithScopes: DockerContainerIPsWithScopes,
- DockerContainerState: DockerContainerState,
- DockerContainerStateHuman: DockerContainerStateHuman,
- DockerContainerUptime: DockerContainerUptime,
- DockerContainerRestartCount: DockerContainerRestartCount,
- DockerContainerNetworkMode: DockerContainerNetworkMode,
-
- KubernetesName: KubernetesName,
- KubernetesNamespace: KubernetesNamespace,
- KubernetesCreated: KubernetesCreated,
- KubernetesIP: KubernetesIP,
- KubernetesObservedGeneration: KubernetesObservedGeneration,
- KubernetesReplicas: KubernetesReplicas,
- KubernetesDesiredReplicas: KubernetesDesiredReplicas,
- KubernetesNodeType: KubernetesNodeType,
- KubernetesGetLogs: KubernetesGetLogs,
- KubernetesDeletePod: KubernetesDeletePod,
- KubernetesScaleUp: KubernetesScaleUp,
- KubernetesScaleDown: KubernetesScaleDown,
- KubernetesUpdatedReplicas: KubernetesUpdatedReplicas,
- KubernetesAvailableReplicas: KubernetesAvailableReplicas,
- KubernetesUnavailableReplicas: KubernetesUnavailableReplicas,
- KubernetesStrategy: KubernetesStrategy,
- KubernetesFullyLabeledReplicas: KubernetesFullyLabeledReplicas,
- KubernetesState: KubernetesState,
- KubernetesIsInHostNetwork: KubernetesIsInHostNetwork,
- KubernetesRestartCount: KubernetesRestartCount,
- KubernetesMisscheduledReplicas: KubernetesMisscheduledReplicas,
- KubernetesPublicIP: KubernetesPublicIP,
- KubernetesSchedule: KubernetesSchedule,
- KubernetesSuspended: KubernetesSuspended,
- KubernetesLastScheduled: KubernetesLastScheduled,
- KubernetesActiveJobs: KubernetesActiveJobs,
- KubernetesType: KubernetesType,
- KubernetesPorts: KubernetesPorts,
-
- ECSCluster: ECSCluster,
- ECSCreatedAt: ECSCreatedAt,
- ECSTaskFamily: ECSTaskFamily,
- ECSServiceDesiredCount: ECSServiceDesiredCount,
- ECSServiceRunningCount: ECSServiceRunningCount,
- ECSScaleUp: ECSScaleUp,
- ECSScaleDown: ECSScaleDown,
-
- Timestamp: Timestamp,
- HostName: HostName,
- HostLocalNetworks: HostLocalNetworks,
- OS: OS,
- KernelVersion: KernelVersion,
- Uptime: Uptime,
- Load1: Load1,
- HostCPUUsage: HostCPUUsage,
- HostMemoryUsage: HostMemoryUsage,
- ScopeVersion: ScopeVersion,
-
- WeavePeerName: WeavePeerName,
- WeavePeerNickName: WeavePeerNickName,
-}
-
-func lookupCommonKey(b []byte) string {
- if key, ok := commonKeys[string(b)]; ok {
- return key
- }
- return string(b)
-}
diff --git a/deepfence_server/pkg/scope/report/networks.go b/deepfence_server/pkg/scope/report/networks.go
index 4315f131f9..fa3e6be782 100644
--- a/deepfence_server/pkg/scope/report/networks.go
+++ b/deepfence_server/pkg/scope/report/networks.go
@@ -5,6 +5,7 @@ import (
"net"
"strings"
+ "github.com/deepfence/ThreatMapper/deepfence_utils/log"
"github.com/k-sone/critbitgo"
)
@@ -82,7 +83,10 @@ func AddLocalBridge(name string) error {
}
for _, ipnet := range ipv4Nets(addrs) {
- LocalNetworks.Add(ipnet)
+ err = LocalNetworks.Add(ipnet)
+ if err != nil {
+ log.Error().Msgf("Add local bridge err: %v", err)
+ }
}
return nil
diff --git a/deepfence_server/pkg/scope/report/ps/map.go b/deepfence_server/pkg/scope/report/ps/map.go
index 6ba142ad1d..8b51dd77e5 100644
--- a/deepfence_server/pkg/scope/report/ps/map.go
+++ b/deepfence_server/pkg/scope/report/ps/map.go
@@ -155,8 +155,7 @@ func (self *Tree3) IsNil() bool {
// clone returns an exact duplicate of a Tree3 node
func (self *Tree3) clone() *Tree3 {
- var m Tree3
- m = *self
+ m := *self
return &m
}
@@ -394,17 +393,6 @@ func (m *Tree3) isLeaf() bool {
return m.Size() == 1
}
-// returns the number of child subtrees we have
-func (m *Tree3) subtreeCount() int {
- Count := 0
- for _, t := range m.Children {
- if t != nilMap {
- Count++
- }
- }
- return Count
-}
-
func (m *Tree3) Lookup(Key string) (interface{}, bool) {
Hash := hashKey(Key)
return lookupLowLevel(m, Hash, Hash)
diff --git a/deepfence_server/pkg/scope/report/report.go b/deepfence_server/pkg/scope/report/report.go
index ac4f55a849..6908359d27 100644
--- a/deepfence_server/pkg/scope/report/report.go
+++ b/deepfence_server/pkg/scope/report/report.go
@@ -254,14 +254,14 @@ type TopologyNode struct {
Sets *Sets `json:"sets,omitempty"`
}
-func (t TopologyNode) Merge(o TopologyNode) {
+func (t *TopologyNode) Merge(o TopologyNode) {
t.Metadata = o.Metadata
t.Adjacency = o.Adjacency
t.Parents = o.Parents
t.Sets = o.Sets
}
-func (t TopologyNode) UnsafeMerge(o TopologyNode) {
+func (t *TopologyNode) UnsafeMerge(o TopologyNode) {
t.Metadata = o.Metadata
t.Adjacency = o.Adjacency
t.Parents = o.Parents
@@ -381,7 +381,7 @@ func (t TopologySets) UnsafeUnMerge(o TopologySets) {
}
-func (p Parent) Merge(o Parent) {
+func (p *Parent) Merge(o Parent) {
p.CloudProvider = o.CloudProvider
p.CloudRegion = o.CloudRegion
p.KubernetesCluster = o.KubernetesCluster
diff --git a/deepfence_server/pkg/sendemail/sendemail.go b/deepfence_server/pkg/sendemail/sendemail.go
index b61326967e..291f88e541 100644
--- a/deepfence_server/pkg/sendemail/sendemail.go
+++ b/deepfence_server/pkg/sendemail/sendemail.go
@@ -75,14 +75,14 @@ func (c *emailSenderCommon) getEmailBody(from string, recipients []string, subje
if withAttachments {
buf.WriteString(fmt.Sprintf("Content-Type: multipart/mixed; boundary=%s\r\n", boundary))
buf.WriteString(fmt.Sprintf("--%s\r\n", boundary))
- } else if isPlainText == true {
+ } else if isPlainText {
buf.WriteString("Content-Type: text/plain; charset=utf-8\r\n")
} else {
buf.WriteString("Content-Type: text/html; charset=utf-8\r\n")
}
buf.WriteString("\r\n")
- if isPlainText == true {
+ if isPlainText {
buf.WriteString(text)
} else {
buf.WriteString(html)
diff --git a/deepfence_server/reporters/filter.go b/deepfence_server/reporters/filter.go
index 9599465b8e..f1f211fff4 100644
--- a/deepfence_server/reporters/filter.go
+++ b/deepfence_server/reporters/filter.go
@@ -157,7 +157,7 @@ func formatOrderField(format string, input []OrderSpec, ignoreOrder bool, ignore
if _, has := severity_fields[input[i].FieldName]; has && !ignoreSort {
fieldName := "severity" + strconv.Itoa(sevSortFieldsNum)
sevSortFieldsNum += 1
- orderByEntry = fmt.Sprintf("%s", extractOrderDescFormattedField(fieldName, input[i].Descending && !ignoreOrder))
+ orderByEntry = extractOrderDescFormattedField(fieldName, input[i].Descending && !ignoreOrder)
} else {
fieldName := input[i].FieldName
orderByEntry = fmt.Sprintf(format, extractOrderDescFormattedField(fieldName, input[i].Descending && !ignoreOrder))
@@ -279,7 +279,7 @@ func FieldFilterCypher(nodeName string, fields []string) string {
for i := range fields {
if fields[i] != "" {
if nodeName == "" {
- tmp = append(tmp, fmt.Sprintf("%s", fields[i]))
+ tmp = append(tmp, fields[i])
} else {
tmp = append(tmp, fmt.Sprintf("%s.%s", nodeName, fields[i]))
}
diff --git a/deepfence_server/reporters/graph/threat_graph.go b/deepfence_server/reporters/graph/threat_graph.go
index 97db114cdd..22cb487dab 100644
--- a/deepfence_server/reporters/graph/threat_graph.go
+++ b/deepfence_server/reporters/graph/threat_graph.go
@@ -118,8 +118,7 @@ func (tc *ThreatGraphReporter) GetRawThreatGraph(ctx context.Context, filters Th
return nil, err
}
- session, err := driver.Session(neo4j.AccessModeWrite)
-
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
if err != nil {
return nil, err
}
@@ -220,15 +219,15 @@ func (tc *ThreatGraphReporter) GetRawThreatGraph(ctx context.Context, filters Th
if cloudAccountIdsFilterSet {
switch cloudProvider {
case CLOUD_AWS:
- if awsAccountIdsFilterSet == false {
+ if !awsAccountIdsFilterSet {
continue
}
case CLOUD_GCP:
- if gcpAccountIdsFilterSet == false {
+ if !gcpAccountIdsFilterSet {
continue
}
case CLOUD_AZURE:
- if azureAccountIdsFilterSet == false {
+ if !azureAccountIdsFilterSet {
continue
}
case CLOUD_PRIVATE:
@@ -237,7 +236,7 @@ func (tc *ThreatGraphReporter) GetRawThreatGraph(ctx context.Context, filters Th
}
var res neo4j.Result
if cloudProvider != CLOUD_PRIVATE {
- if res, err = tx.Run(`
+ res, err = tx.Run(`
CALL apoc.nodes.group(['ThreatCloudResource','ThreatNode'], ['node_type', 'depth', 'cloud_provider'],
[{`+"`*`"+`: 'count', sum_cve: 'sum', sum_secrets: 'sum', sum_compliance: 'sum', sum_cloud_compliance: 'sum',
node_id:'collect', vulnerabilities_count: 'collect', secrets_count:'collect', compliances_count:'collect', cloud_compliances_count: 'collect'},
@@ -245,10 +244,9 @@ func (tc *ThreatGraphReporter) GetRawThreatGraph(ctx context.Context, filters Th
YIELD node, relationships
WHERE apoc.any.property(node, 'cloud_provider') = '`+cloudProvider+`'
RETURN node, relationships
- `, map[string]interface{}{}); err != nil {
- }
+ `, map[string]interface{}{})
} else if !filters.CloudResourceOnly {
- if res, err = tx.Run(`
+ res, err = tx.Run(`
CALL apoc.nodes.group(['ThreatNode'], ['node_type', 'depth', 'cloud_provider'],
[{`+"`*`"+`: 'count', sum_cve: 'sum', sum_secrets: 'sum', sum_compliance: 'sum', sum_cloud_compliance: 'sum',
node_id:'collect', vulnerabilities_count: 'collect', secrets_count:'collect', compliances_count:'collect', cloud_compliances_count:'collect'},
@@ -257,8 +255,7 @@ func (tc *ThreatGraphReporter) GetRawThreatGraph(ctx context.Context, filters Th
WHERE NOT apoc.any.property(node, 'cloud_provider') IN ['aws', 'gcp', 'azure']
AND apoc.any.property(node, 'cloud_provider') <> 'internet'
RETURN node, relationships
- `, map[string]interface{}{}); err != nil {
- }
+ `, map[string]interface{}{})
} else {
continue
}
@@ -309,19 +306,19 @@ type AttackPaths struct {
func record2struct(node dbtype.Node) AttackPathData {
record := node.Props
- Node_type, _ := record["node_type"]
- depth, _ := record["depth"]
- cloud_provider, _ := record["cloud_provider"]
- sum_sum_cve_, _ := record["sum_sum_cve"]
- sum_sum_secrets_, _ := record["sum_sum_secrets"]
- sum_sum_compliance_, _ := record["sum_sum_compliance"]
- sum_sum_cloud_compliance_, _ := record["sum_sum_cloud_compliance"]
- node_count, _ := record["count_*"]
- collect_node_id_, _ := record["collect_node_id"]
- collect_num_cve_, _ := record["collect_vulnerabilities_count"]
- collect_num_secrets_, _ := record["collect_secrets_count"]
- collect_num_compliance_, _ := record["collect_compliances_count"]
- collect_num_cloud_compliance_, _ := record["collect_cloud_compliances_count"]
+ Node_type := record["node_type"]
+ depth := record["depth"]
+ cloud_provider := record["cloud_provider"]
+ sum_sum_cve_ := record["sum_sum_cve"]
+ sum_sum_secrets_ := record["sum_sum_secrets"]
+ sum_sum_compliance_ := record["sum_sum_compliance"]
+ sum_sum_cloud_compliance_ := record["sum_sum_cloud_compliance"]
+ node_count := record["count_*"]
+ collect_node_id_ := record["collect_node_id"]
+ collect_num_cve_ := record["collect_vulnerabilities_count"]
+ collect_num_secrets_ := record["collect_secrets_count"]
+ collect_num_compliance_ := record["collect_compliances_count"]
+ collect_num_cloud_compliance_ := record["collect_cloud_compliances_count"]
collect_node_id := []string{}
for _, v := range collect_node_id_.([]interface{}) {
@@ -402,9 +399,7 @@ type AttackPathData struct {
func getThreatNodeId(apd AttackPathData) string {
h := sha256.New()
v := []string{}
- for i := range apd.collect_node_id {
- v = append(v, apd.collect_node_id[i])
- }
+ v = append(v, apd.collect_node_id...)
sort.Strings(v)
for _, s := range v {
diff --git a/deepfence_server/reporters/graph/threat_graph_nodes.go b/deepfence_server/reporters/graph/threat_graph_nodes.go
index 981fd33066..645f159f7a 100644
--- a/deepfence_server/reporters/graph/threat_graph_nodes.go
+++ b/deepfence_server/reporters/graph/threat_graph_nodes.go
@@ -67,11 +67,7 @@ func GetIndividualThreatGraph[T reporters.Cypherable](ctx context.Context, graph
return individualThreatGraph, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
-
- if err != nil {
- return individualThreatGraph, err
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(60 * time.Second))
diff --git a/deepfence_server/reporters/graph/topology_reporter.go b/deepfence_server/reporters/graph/topology_reporter.go
index 568684067b..c7b3c4817a 100644
--- a/deepfence_server/reporters/graph/topology_reporter.go
+++ b/deepfence_server/reporters/graph/topology_reporter.go
@@ -892,10 +892,8 @@ func (nc *neo4jTopologyReporter) getContainerGraph(
if err != nil {
return res, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return res, err
- }
+
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -933,10 +931,8 @@ func (nc *neo4jTopologyReporter) getPodGraph(
if err != nil {
return res, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return res, err
- }
+
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -973,20 +969,23 @@ func (nc *neo4jTopologyReporter) getKubernetesGraph(ctx context.Context, filters
if err != nil {
return res, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return res, err
- }
+
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
if !filters.SkipConnections {
connTx, err := session.BeginTransaction(neo4j.WithTxTimeout(10 * time.Second))
- res.Connections, err = nc.GetKubernetesConnections(connTx, kubernetes_filter, host_filter)
if err != nil {
- log.Error().Msgf("Topology get connections: %v", err)
+			log.Error().Msgf("Topology begin transaction: %v", err)
res.SkippedConnections = true
+ } else {
+ res.Connections, err = nc.GetKubernetesConnections(connTx, kubernetes_filter, host_filter)
+ if err != nil {
+ log.Error().Msgf("Topology get connections: %v", err)
+ res.SkippedConnections = true
+ }
+ connTx.Close()
}
- connTx.Close()
} else {
res.SkippedConnections = true
}
@@ -1032,20 +1031,23 @@ func (nc *neo4jTopologyReporter) getHostGraph(ctx context.Context, filters Topol
if err != nil {
return res, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return res, err
- }
+
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
if !filters.SkipConnections {
connTx, err := session.BeginTransaction(neo4j.WithTxTimeout(10 * time.Second))
- res.Connections, err = nc.GetHostConnections(connTx, nil, nil)
if err != nil {
- log.Error().Msgf("Topology get connections: %v", err)
+			log.Error().Msgf("Topology begin transaction: %v", err)
res.SkippedConnections = true
+ } else {
+ res.Connections, err = nc.GetHostConnections(connTx, nil, nil)
+ if err != nil {
+ log.Error().Msgf("Topology get connections: %v", err)
+ res.SkippedConnections = true
+ }
+ connTx.Close()
}
- connTx.Close()
} else {
res.SkippedConnections = true
}
@@ -1095,10 +1097,8 @@ func (nc *neo4jTopologyReporter) getGraph(ctx context.Context, filters TopologyF
if err != nil {
return res, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return res, err
- }
+
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
if !filters.SkipConnections {
@@ -1196,10 +1196,8 @@ func GetTopologyDelta(ctx context.Context,
return deltaResp, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return deltaResp, err
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
+ defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
if err != nil {
@@ -1224,10 +1222,10 @@ func GetTopologyDelta(ctx context.Context,
nodeType := record.Values[1].(string)
if isAdd {
deltaResp.Additions = append(deltaResp.Additions,
- model.NodeIdentifier{nodeid, nodeType})
+ model.NodeIdentifier{NodeId: nodeid, NodeType: nodeType})
} else {
deltaResp.Deletions = append(deltaResp.Deletions,
- model.NodeIdentifier{nodeid, nodeType})
+ model.NodeIdentifier{NodeId: nodeid, NodeType: nodeType})
}
ts := record.Values[2].(int64)
if ts > maxTime {
@@ -1259,7 +1257,7 @@ func GetTopologyDelta(ctx context.Context,
}
}
- if deltaReq.Addition == true {
+ if deltaReq.Addition {
additionQuery := `MATCH (n) WHERE ` + nodeTypeQueryStr + `
AND n.active=true AND n.created_at > %d
RETURN n.node_id, n.node_type, n.created_at`
@@ -1271,7 +1269,7 @@ func GetTopologyDelta(ctx context.Context,
}
}
- if deltaReq.Deletion == true {
+ if deltaReq.Deletion {
deletionQuery := `MATCH (n) WHERE ` + nodeTypeQueryStr + `
AND n.active=false AND n.updated_at > %d
RETURN n.node_id, n.node_type, n.updated_at`
diff --git a/deepfence_server/reporters/lookup/lookup.go b/deepfence_server/reporters/lookup/lookup.go
index 0ac977cb84..ecace5cb59 100644
--- a/deepfence_server/reporters/lookup/lookup.go
+++ b/deepfence_server/reporters/lookup/lookup.go
@@ -55,7 +55,7 @@ func GetHostsReport(ctx context.Context, filter LookupFilter) ([]model.Host, err
}
var index int
- if getProcesses == true {
+ if getProcesses {
processes, matched, err := getHostProcesses(ctx, hostIds)
if err == nil {
for _, process := range processes {
@@ -64,7 +64,7 @@ func GetHostsReport(ctx context.Context, filter LookupFilter) ([]model.Host, err
}
}
}
- if getContainers == true {
+ if getContainers {
containers, matched, err := getHostContainers(ctx, hostIds)
if err == nil {
for _, container := range containers {
@@ -73,7 +73,7 @@ func GetHostsReport(ctx context.Context, filter LookupFilter) ([]model.Host, err
}
}
}
- if getContainerImages == true {
+ if getContainerImages {
containerImages, matched, err := getHostContainerImages(ctx, hostIds)
if err == nil {
for _, containerImage := range containerImages {
@@ -82,7 +82,7 @@ func GetHostsReport(ctx context.Context, filter LookupFilter) ([]model.Host, err
}
}
}
- if getPods == true {
+ if getPods {
pods, matched, err := getHostPods(ctx, hostIds)
if err == nil {
for _, pod := range pods {
@@ -91,7 +91,7 @@ func GetHostsReport(ctx context.Context, filter LookupFilter) ([]model.Host, err
}
}
}
- if getConnections == true {
+ if getConnections {
inboundConnections, outboundConnections, err := getNodeConnections[model.Host](ctx, hostIds)
if err == nil {
for _, conn := range inboundConnections {
@@ -137,7 +137,7 @@ func GetContainersReport(ctx context.Context, filter LookupFilter) ([]model.Cont
}
var index int
- if getProcesses == true {
+ if getProcesses {
processes, matched, err := getContainerProcesses(ctx, containerIds)
if err == nil {
for _, process := range processes {
@@ -146,7 +146,7 @@ func GetContainersReport(ctx context.Context, filter LookupFilter) ([]model.Cont
}
}
}
- if getContainerImages == true {
+ if getContainerImages {
images, matched, err := getContainerContainerImages(ctx, containerIds)
if err == nil {
for _, image := range images {
@@ -194,7 +194,7 @@ func GetContainerImagesReport(ctx context.Context, filter LookupFilter) ([]model
}
var index int
- if getContainers == true {
+ if getContainers {
containers, matched, err := getContainerImageContainers(ctx, imagesIds)
if err == nil {
for _, container := range containers {
@@ -226,7 +226,7 @@ func GetKubernetesClustersReport(ctx context.Context, filter LookupFilter) ([]mo
}
var index int
- if getHosts == true {
+ if getHosts {
hosts, matched, err := getClusterHosts(ctx, clusterIds)
if err == nil {
for _, host := range hosts {
@@ -273,7 +273,7 @@ func GetRegistryAccountReport(ctx context.Context, filter LookupFilter) ([]model
}
var index int
- if getImages == true {
+ if getImages {
images, matched, err := getRegistryImages(ctx, registryIds)
if err == nil {
for _, image := range images {
@@ -295,10 +295,7 @@ func getGenericDirectNodeReport[T reporters.Cypherable](ctx context.Context, fil
return res, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return res, err
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -415,10 +412,7 @@ func getNodeConnections[T reporters.Cypherable](ctx context.Context, ids []strin
return inbound, outbound, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return inbound, outbound, err
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -471,10 +465,7 @@ func getIndirectFromIDs[T any](ctx context.Context, query string, ids []string)
return res, matchedId, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return res, matchedId, err
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
diff --git a/deepfence_server/reporters/scan/notify.go b/deepfence_server/reporters/scan/notify.go
index a123762367..a540ceea00 100644
--- a/deepfence_server/reporters/scan/notify.go
+++ b/deepfence_server/reporters/scan/notify.go
@@ -52,7 +52,10 @@ func Notify[T any](ctx context.Context, res []T, common model.ScanResultsCommon,
// add scantype
extras["scan_type"] = scanType
- integrationModel.SendNotification(ctx, string(messageByte), extras)
+ err = integrationModel.SendNotification(ctx, string(messageByte), extras)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
return nil
@@ -83,7 +86,7 @@ func injectNodeData[T any](results []T, common model.ScanResultsCommon,
if _, ok := m["updated_at"]; ok {
flag := integration.IsMessagingFormat(integrationType)
- if flag == true {
+ if flag {
ts := m["updated_at"].(int64)
tm := time.Unix(0, ts*int64(time.Millisecond))
m["updated_at"] = tm
diff --git a/deepfence_server/reporters/scan/scan_reporters.go b/deepfence_server/reporters/scan/scan_reporters.go
index 2f20c4f2ba..a64393a11d 100644
--- a/deepfence_server/reporters/scan/scan_reporters.go
+++ b/deepfence_server/reporters/scan/scan_reporters.go
@@ -833,7 +833,7 @@ func GetScanResults[T any](ctx context.Context, scan_type utils.Neo4jScanType, s
func GetFilters(ctx context.Context, having map[string]interface{}, detectedType string, filters []string) (map[string][]string, error) {
andQuery := "{"
index := 0
- for key, _ := range having {
+ for key := range having {
if index == 0 {
andQuery += fmt.Sprintf("%s:$%s", key, key)
} else {
diff --git a/deepfence_server/reporters/search/search.go b/deepfence_server/reporters/search/search.go
index 76b4ec13fc..50764e619c 100644
--- a/deepfence_server/reporters/search/search.go
+++ b/deepfence_server/reporters/search/search.go
@@ -76,10 +76,7 @@ func CountNodes(ctx context.Context) (NodeCountResp, error) {
return res, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return res, err
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -201,10 +198,7 @@ func searchGenericDirectNodeReport[T reporters.Cypherable](ctx context.Context,
return res, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return res, err
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -284,10 +278,7 @@ func searchCloudNode(ctx context.Context, filter SearchFilter, fw model.FetchWin
return res, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return res, err
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -393,10 +384,7 @@ func getScanStatusMap(ctx context.Context, id string, cloudProvider string) (map
return res, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return res, err
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
@@ -435,10 +423,7 @@ func searchGenericScanInfoReport(ctx context.Context, scan_type utils.Neo4jScanT
return res, err
}
- session, err := driver.Session(neo4j.AccessModeRead)
- if err != nil {
- return res, err
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
diff --git a/deepfence_server/router/router.go b/deepfence_server/router/router.go
index 0b04ba939f..6e214beeab 100644
--- a/deepfence_server/router/router.go
+++ b/deepfence_server/router/router.go
@@ -487,7 +487,9 @@ func SetupRoutes(r *chi.Mux, serverPort string, serveOpenapiDocs bool, ingestC c
r.Route("/scheduled-task", func(r chi.Router) {
r.Get("/", dfHandler.AuthHandler(ResourceAllUsers, PermissionRead, dfHandler.GetScheduledTask))
r.Patch("/{id}", dfHandler.AuthHandler(ResourceAllUsers, PermissionWrite, dfHandler.UpdateScheduledTask))
- r.Post("/", dfHandler.AuthHandler(ResourceIntegration, PermissionWrite, dfHandler.AddScheduledTask))
+ r.Delete("/{id}", dfHandler.AuthHandler(ResourceAllUsers, PermissionDelete, dfHandler.DeleteCustomScheduledTask))
+
+ r.Post("/", dfHandler.AuthHandler(ResourceAllUsers, PermissionWrite, dfHandler.AddScheduledTask))
})
// Integration
diff --git a/deepfence_utils/directory/minio.go b/deepfence_utils/directory/minio.go
index 64fd6dbdf6..1da07e73e9 100644
--- a/deepfence_utils/directory/minio.go
+++ b/deepfence_utils/directory/minio.go
@@ -48,10 +48,18 @@ func (e PathDoesNotExistsError) Error() string {
return fmt.Sprintf("Path doesnot exists here: %s", e.Path)
}
+type FileDeleteError struct {
+ Path string
+}
+
+func (e FileDeleteError) Error() string {
+ return fmt.Sprintf("Failed to delete file: %s", e.Path)
+}
+
type FileManager interface {
ListFiles(ctx context.Context, pathPrefix string, recursive bool, maxKeys int, skipDir bool) []ObjectInfo
- UploadLocalFile(ctx context.Context, filename string, localFilename string, extra interface{}) (UploadResult, error)
- UploadFile(ctx context.Context, filename string, data []byte, extra interface{}) (UploadResult, error)
+ UploadLocalFile(ctx context.Context, filename string, localFilename string, overwrite bool, extra interface{}) (UploadResult, error)
+ UploadFile(ctx context.Context, filename string, data []byte, overwrite bool, extra interface{}) (UploadResult, error)
DeleteFile(ctx context.Context, filename string, addFilePathPrefix bool, extra interface{}) error
DownloadFile(ctx context.Context, remoteFile string, localFile string, extra interface{}) error
DownloadFileTo(ctx context.Context, remoteFile string, localFile io.WriteCloser, extra interface{}) error
@@ -141,7 +149,9 @@ func (mfm *MinioFileManager) ListFiles(ctx context.Context, pathPrefix string, r
return objectsInfo
}
-func (mfm *MinioFileManager) UploadLocalFile(ctx context.Context, filename string, localFilename string, extra interface{}) (UploadResult, error) {
+func (mfm *MinioFileManager) UploadLocalFile(ctx context.Context,
+ filename string, localFilename string, overwrite bool, extra interface{}) (UploadResult, error) {
+
err := mfm.createBucketIfNeeded(ctx)
if err != nil {
return UploadResult{}, err
@@ -149,8 +159,18 @@ func (mfm *MinioFileManager) UploadLocalFile(ctx context.Context, filename strin
objectName := mfm.addNamespacePrefix(filename)
- if key, has := checkIfFileExists(ctx, mfm.client, mfm.bucket, objectName); has {
- return UploadResult{}, AlreadyPresentError{Path: key}
+ key, has := checkIfFileExists(ctx, mfm.client, mfm.bucket, objectName)
+ if has {
+ if !overwrite {
+ return UploadResult{}, AlreadyPresentError{Path: key}
+ } else {
+ log.Info().Msgf("overwrite file %s", key)
+ err := mfm.DeleteFile(ctx, objectName, false, minio.RemoveObjectOptions{ForceDelete: true})
+ if err != nil {
+ log.Error().Err(err).Msg("failed to delete file while overwriting")
+ return UploadResult{}, FileDeleteError{Path: key}
+ }
+ }
}
info, err := mfm.client.FPutObject(ctx, mfm.bucket, objectName, localFilename, extra.(minio.PutObjectOptions))
@@ -169,7 +189,9 @@ func (mfm *MinioFileManager) UploadLocalFile(ctx context.Context, filename strin
}, nil
}
-func (mfm *MinioFileManager) UploadFile(ctx context.Context, filename string, data []byte, extra interface{}) (UploadResult, error) {
+func (mfm *MinioFileManager) UploadFile(ctx context.Context,
+ filename string, data []byte, overwrite bool, extra interface{}) (UploadResult, error) {
+
err := mfm.createBucketIfNeeded(ctx)
if err != nil {
return UploadResult{}, err
@@ -177,8 +199,18 @@ func (mfm *MinioFileManager) UploadFile(ctx context.Context, filename string, da
objectName := mfm.addNamespacePrefix(filename)
- if key, has := checkIfFileExists(ctx, mfm.client, mfm.bucket, objectName); has {
- return UploadResult{}, AlreadyPresentError{Path: key}
+ key, has := checkIfFileExists(ctx, mfm.client, mfm.bucket, objectName)
+ if has {
+ if !overwrite {
+ return UploadResult{}, AlreadyPresentError{Path: key}
+ } else {
+ log.Info().Msgf("overwrite file %s", key)
+ err := mfm.DeleteFile(ctx, objectName, false, minio.RemoveObjectOptions{ForceDelete: true})
+ if err != nil {
+ log.Error().Err(err).Msg("failed to delete file while overwriting")
+ return UploadResult{}, FileDeleteError{Path: key}
+ }
+ }
}
info, err := mfm.client.PutObject(ctx, mfm.bucket, objectName, bytes.NewReader(data), int64(len(data)), extra.(minio.PutObjectOptions))
diff --git a/deepfence_utils/directory/worker.go b/deepfence_utils/directory/worker.go
index b76692af9d..a10fdb814d 100644
--- a/deepfence_utils/directory/worker.go
+++ b/deepfence_utils/directory/worker.go
@@ -40,7 +40,7 @@ func new_asynq_client(endpoints DBConfigs) (*asyncq_clients, error) {
}, nil
}
-func (ws WorkEnqueuer) Enqueue(task_enum string, data []byte) error {
+func (ws WorkEnqueuer) Enqueue(task_enum string, data []byte, opts ...asynq.Option) error {
client := ws.clients.client
inspector := ws.clients.inspector
@@ -62,7 +62,7 @@ func (ws WorkEnqueuer) Enqueue(task_enum string, data []byte) error {
return ErrExhaustedResources
}
- _, err = client.Enqueue(asynq.NewTask(task_enum, data))
+ _, err = client.Enqueue(asynq.NewTask(task_enum, data), opts...)
return err
}
diff --git a/deepfence_utils/postgresql/postgresql-db/db.go b/deepfence_utils/postgresql/postgresql-db/db.go
index 16bc59cfee..f1d5ea77f4 100644
--- a/deepfence_utils/postgresql/postgresql-db/db.go
+++ b/deepfence_utils/postgresql/postgresql-db/db.go
@@ -1,6 +1,6 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
-// sqlc v1.21.0
+// sqlc v1.23.0
package postgresql_db
diff --git a/deepfence_utils/postgresql/postgresql-db/models.go b/deepfence_utils/postgresql/postgresql-db/models.go
index 8da0a45762..1b0bbb741c 100644
--- a/deepfence_utils/postgresql/postgresql-db/models.go
+++ b/deepfence_utils/postgresql/postgresql-db/models.go
@@ -1,6 +1,6 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
-// sqlc v1.21.0
+// sqlc v1.23.0
package postgresql_db
diff --git a/deepfence_utils/postgresql/postgresql-db/queries.sql.go b/deepfence_utils/postgresql/postgresql-db/queries.sql.go
index d9d6fea8c7..135a736937 100644
--- a/deepfence_utils/postgresql/postgresql-db/queries.sql.go
+++ b/deepfence_utils/postgresql/postgresql-db/queries.sql.go
@@ -1,6 +1,6 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
-// sqlc v1.21.0
+// sqlc v1.23.0
// source: queries.sql
package postgresql_db
@@ -549,6 +549,18 @@ func (q *Queries) DeleteContainerRegistry(ctx context.Context, id int32) error {
return err
}
+const deleteCustomSchedule = `-- name: DeleteCustomSchedule :exec
+DELETE
+FROM scheduler
+WHERE id = $1
+ AND is_system='f'
+`
+
+func (q *Queries) DeleteCustomSchedule(ctx context.Context, id int64) error {
+ _, err := q.db.ExecContext(ctx, deleteCustomSchedule, id)
+ return err
+}
+
const deleteIntegration = `-- name: DeleteIntegration :exec
DELETE
FROM integration
diff --git a/deepfence_utils/postgresql/queries.sql b/deepfence_utils/postgresql/queries.sql
index 15e2637486..bf2fdc11c5 100644
--- a/deepfence_utils/postgresql/queries.sql
+++ b/deepfence_utils/postgresql/queries.sql
@@ -637,4 +637,10 @@ WHERE id = $6;
-- name: DeleteSchedule :exec
DELETE
FROM scheduler
-WHERE id = $1;
\ No newline at end of file
+WHERE id = $1;
+
+-- name: DeleteCustomSchedule :exec
+DELETE
+FROM scheduler
+WHERE id = $1
+ AND is_system='f';
diff --git a/deepfence_utils/utils/asynq.go b/deepfence_utils/utils/asynq.go
new file mode 100644
index 0000000000..cfbe3e969b
--- /dev/null
+++ b/deepfence_utils/utils/asynq.go
@@ -0,0 +1,47 @@
+package utils
+
+import (
+ "sync"
+
+ "github.com/hibiken/asynq"
+)
+
+const (
+ DefaultTaskMaxRetries = 3
+
+ Q_CRITICAL = "critical"
+ Q_DEFAULT = "default"
+ Q_LOW = "low"
+)
+
+var (
+ maxRetry int
+ once sync.Once
+)
+
+func TasksMaxRetries() asynq.Option {
+ once.Do(
+ func() {
+ maxRetry = GetEnvOrDefaultInt("DEEPFENCE_TASKS_MAX_RETRIES", DefaultTaskMaxRetries)
+ },
+ )
+ return asynq.MaxRetry(maxRetry)
+}
+
+func TaskOptions(queue string, opts ...asynq.Option) []asynq.Option {
+ newOpts := []asynq.Option{asynq.Queue(queue)}
+ newOpts = append(newOpts, opts...)
+ return newOpts
+}
+
+func CritialTaskOpts(opts ...asynq.Option) []asynq.Option { // TODO: typo — rename to CriticalTaskOpts (with all call sites)
+ return TaskOptions(Q_CRITICAL, append(opts, TasksMaxRetries())...)
+}
+
+func DefaultTaskOpts(opts ...asynq.Option) []asynq.Option {
+ return TaskOptions(Q_DEFAULT, append(opts, TasksMaxRetries())...)
+}
+
+func LowTaskOpts(opts ...asynq.Option) []asynq.Option {
+ return TaskOptions(Q_LOW, append(opts, TasksMaxRetries())...)
+}
diff --git a/deepfence_utils/utils/structs.go b/deepfence_utils/utils/structs.go
index 9281a4a445..b9eb7038a7 100644
--- a/deepfence_utils/utils/structs.go
+++ b/deepfence_utils/utils/structs.go
@@ -83,6 +83,7 @@ type ReportFilters struct {
NodeType string `json:"node_type" validate:"required" required:"true" enum:"host,container,container_image,linux,cluster,aws,gcp,azure"`
SeverityOrCheckType []string `json:"severity_or_check_type" enum:"critical,high,medium,low,cis,gdpr,nist,hipaa,pci,soc_2"`
IncludeDeadNode bool `json:"include_dead_nodes"`
+ MostExploitableReport bool `json:"most_exploitable_report"`
AdvancedReportFilters AdvancedReportFilters `json:"advanced_report_filters,omitempty"`
}
diff --git a/deepfence_utils/utils/utils.go b/deepfence_utils/utils/utils.go
index 18540ef340..c18403bf8d 100644
--- a/deepfence_utils/utils/utils.go
+++ b/deepfence_utils/utils/utils.go
@@ -55,7 +55,7 @@ var (
func GetHttpClient() *http.Client {
once1.Do(func() {
- secureClient = &http.Client{Timeout: time.Second * 30}
+ secureClient = &http.Client{Timeout: time.Second * 10}
})
return secureClient
@@ -65,7 +65,7 @@ func GetInsecureHttpClient() *http.Client {
once2.Do(func() {
tlsConfig := &tls.Config{RootCAs: x509.NewCertPool(), InsecureSkipVerify: true}
insecureClient = &http.Client{
- Timeout: time.Second * 30,
+ Timeout: time.Second * 10,
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
WriteBufferSize: 10240,
@@ -113,10 +113,7 @@ func UUIDFromString(uuidStr string) (uuid.UUID, error) {
func IsUUIDValid(uuidStr string) bool {
_, err := UUIDFromString(uuidStr)
- if err != nil {
- return false
- }
- return true
+ return err == nil
}
func NewUUID() uuid.UUID {
@@ -196,7 +193,7 @@ func IsJWTExpired(token string) bool {
func GetInt64ValueFromInterfaceMap(claims map[string]interface{}, key string) (int64, error) {
val, ok := claims[key]
if !ok {
- return 0, errors.New(fmt.Sprintf("key %s not found in JWT claims", key))
+ return 0, fmt.Errorf("key %s not found in JWT claims", key)
}
number, err := InterfaceToInt(val)
if err != nil {
@@ -208,7 +205,7 @@ func GetInt64ValueFromInterfaceMap(claims map[string]interface{}, key string) (i
func GetStringValueFromInterfaceMap(claims map[string]interface{}, key string) (string, error) {
val, ok := claims[key]
if !ok {
- return "", errors.New(fmt.Sprintf("key %s not found in JWT claims", key))
+ return "", fmt.Errorf("key %s not found in JWT claims", key)
}
return fmt.Sprintf("%v", val), nil
}
@@ -573,6 +570,18 @@ func GetEnvOrDefault(envVar string, defaultValue string) string {
return envValue
}
+func GetEnvOrDefaultInt(envVar string, defaultValue int) int {
+ envValue := os.Getenv(envVar)
+ if len(envValue) == 0 {
+ return defaultValue
+ }
+ val, err := strconv.Atoi(envValue)
+ if err != nil {
+ return defaultValue
+ }
+ return val
+}
+
func URLEncode(s string) string {
return url.QueryEscape(s)
}
diff --git a/deepfence_utils/vulnerability_db/database.go b/deepfence_utils/vulnerability_db/database.go
index d106d8abb3..147c661576 100644
--- a/deepfence_utils/vulnerability_db/database.go
+++ b/deepfence_utils/vulnerability_db/database.go
@@ -107,7 +107,7 @@ func UploadToMinio(ctx context.Context, fb []byte, fName string) (string, string
}
dbFile := path.Join(VulnerabilityDbStore, fName)
- info, err := mc.UploadFile(directory.WithDatabaseContext(ctx), dbFile, fb, minio.PutObjectOptions{})
+ info, err := mc.UploadFile(directory.WithDatabaseContext(ctx), dbFile, fb, true, minio.PutObjectOptions{})
if err != nil {
return "", "", err
}
@@ -163,13 +163,7 @@ func UpdateListing(newFile, newFileCheckSum string, buildTime time.Time) {
return
}
- err = mc.DeleteFile(ctx, ListingPath, true, minio.RemoveObjectOptions{ForceDelete: true})
- if err != nil {
- log.Error().Msgf(err.Error())
- return
- }
-
- _, err = mc.UploadFile(ctx, ListingPath, lb, minio.PutObjectOptions{ContentType: "application/json"})
+ _, err = mc.UploadFile(ctx, ListingPath, lb, true, minio.PutObjectOptions{ContentType: "application/json"})
if err != nil {
log.Error().Msgf(err.Error())
return
diff --git a/deepfence_worker/Dockerfile b/deepfence_worker/Dockerfile
index 7d4b0555e9..70c41324cc 100644
--- a/deepfence_worker/Dockerfile
+++ b/deepfence_worker/Dockerfile
@@ -58,5 +58,7 @@ RUN tar -xzf /usr/local/yara.tar.gz -C /usr/local/ \
RUN export GRYPE_DB_UPDATE_URL=https://threat-intel.deepfence.io/vulnerability-db/listing.json && grype db update
+COPY --from=builder-yara /go/bin/asynq /usr/local/bin/asynq
+
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/usr/local/bin/deepfence_worker"]
diff --git a/deepfence_worker/controls/controls.go b/deepfence_worker/controls/controls.go
index ee02a88a2a..616eb12956 100644
--- a/deepfence_worker/controls/controls.go
+++ b/deepfence_worker/controls/controls.go
@@ -9,6 +9,7 @@ import (
ctl "github.com/deepfence/ThreatMapper/deepfence_utils/controls"
"github.com/deepfence/ThreatMapper/deepfence_utils/directory"
"github.com/deepfence/ThreatMapper/deepfence_utils/log"
+ "github.com/deepfence/ThreatMapper/deepfence_utils/utils"
sdkUtils "github.com/deepfence/ThreatMapper/deepfence_utils/utils"
)
@@ -106,7 +107,7 @@ func GetRegisterControlFunc[T ctl.StartVulnerabilityScanRequest | ctl.StartSecre
controlFunc := func(ctx context.Context, req T) error {
BinArgs := ctl.GetBinArgs(req)
- log.Info().Msgf("%s payload: %+v", task, BinArgs)
+ log.Info().Msgf("enqueue %s payload: %+v", task, BinArgs)
data, err := json.Marshal(BinArgs)
if err != nil {
log.Error().Msg(err.Error())
@@ -117,7 +118,7 @@ func GetRegisterControlFunc[T ctl.StartVulnerabilityScanRequest | ctl.StartSecre
log.Error().Msg(err.Error())
return err
}
- if err := worker.Enqueue(task, data); err != nil {
+ if err := worker.Enqueue(task, data, utils.DefaultTaskOpts()...); err != nil {
log.Error().Msg(err.Error())
return err
}
diff --git a/deepfence_worker/cronjobs/agent.go b/deepfence_worker/cronjobs/agent.go
index db4fcb0d76..3360a83337 100644
--- a/deepfence_worker/cronjobs/agent.go
+++ b/deepfence_worker/cronjobs/agent.go
@@ -40,7 +40,10 @@ func CheckAgentUpgrade(ctx context.Context, task *asynq.Task) error {
log.Info().Msg("Start agent version check")
res := []map[string]interface{}{}
- getVersionMetadata("https://api.github.com/repos/deepfence/ThreatMapper/tags", &res)
+ err := getVersionMetadata("https://api.github.com/repos/deepfence/ThreatMapper/tags", &res)
+ if err != nil {
+ return err
+ }
tags_to_ingest := []string{}
for _, tag := range res {
@@ -128,7 +131,7 @@ func prepareAgentReleases(ctx context.Context, tags_to_ingest []string) (map[str
log.Error().Err(err).Msg("ReadFile")
continue
}
- res, err := minio.UploadFile(ctx, out_file, b, m.PutObjectOptions{ContentType: "application/gzip"})
+ res, err := minio.UploadFile(ctx, out_file, b, false, m.PutObjectOptions{ContentType: "application/gzip"})
key := ""
if err != nil {
ape, ok := err.(directory.AlreadyPresentError)
@@ -246,7 +249,7 @@ func prepareAgentPluginReleases(ctx context.Context, tags_to_ingest []string) (m
log.Error().Err(err).Msg("ReadFile")
continue
}
- res, err := minio.UploadFile(ctx, out_file, b, m.PutObjectOptions{ContentType: "application/gzip"})
+ res, err := minio.UploadFile(ctx, out_file, b, false, m.PutObjectOptions{ContentType: "application/gzip"})
key := ""
if err != nil {
ape, ok := err.(directory.AlreadyPresentError)
diff --git a/deepfence_worker/cronjobs/cloud_compliance.go b/deepfence_worker/cronjobs/cloud_compliance.go
index 3268c99736..22323c65b2 100644
--- a/deepfence_worker/cronjobs/cloud_compliance.go
+++ b/deepfence_worker/cronjobs/cloud_compliance.go
@@ -22,7 +22,8 @@ var BenchmarksAvailableMap = map[string][]string{
"gcp": {"cis"},
"azure": {"cis", "nist", "pci", "hipaa"},
"kubernetes": {"nsa-cisa"},
- "linux": {"hipaa", "nist", "pci", "gdpr"}}
+ "linux": {"hipaa", "nist", "pci", "gdpr"},
+}
type Benchmark struct {
BenchmarkId string `json:"benchmark_id"`
@@ -146,7 +147,7 @@ func AddCloudControls(ctx context.Context, task *asynq.Task) error {
MERGE (n:CloudComplianceExecutable:CloudComplianceBenchmark{
node_id: row.benchmark_id
})
- ON CREATE
+ ON CREATE
SET n.benchmark_id = row.benchmark_id,
n.description = row.description,
n.title = row.title,
@@ -179,6 +180,14 @@ func AddCloudControls(ctx context.Context, task *asynq.Task) error {
}
}
}
+ // connect controls to parent root benchmarks
+ if _, err = tx.Run(`
+ MATCH (n:CloudComplianceControl)
+ MATCH (b:CloudComplianceBenchmark{benchmark_id:n.parent_control_hierarchy[0]})
+ MERGE (b)-[:PARENT]->(n)`, map[string]interface{}{}); err != nil {
+ log.Error().Msgf(err.Error())
+ return nil
+ }
log.Info().Msgf("Updated Cloud Compliance Controls")
return tx.Commit()
}
@@ -258,10 +267,6 @@ func CachePostureProviders(ctx context.Context, task *asynq.Task) error {
RETURN count(distinct c)`
} else if postureProviderName == model.PostureProviderAWSOrg || postureProviderName == model.PostureProviderGCPOrg {
- postureProviderName := model.PostureProviderGCP
- if postureProviderName == model.PostureProviderAWSOrg {
- postureProviderName = model.PostureProviderAWS
- }
postureProvider.NodeLabel = "Organizations"
account_count_query = `
diff --git a/deepfence_worker/cronjobs/console.go b/deepfence_worker/cronjobs/console.go
index 7faeb90331..d303120bd4 100644
--- a/deepfence_worker/cronjobs/console.go
+++ b/deepfence_worker/cronjobs/console.go
@@ -2,6 +2,8 @@ package cronjobs
import (
"context"
+ "os"
+ "strconv"
"github.com/deepfence/ThreatMapper/deepfence_server/controls"
utils_ctl "github.com/deepfence/ThreatMapper/deepfence_utils/controls"
@@ -11,14 +13,30 @@ import (
)
const (
- ConsoleAgentId = "deepfence-console-cron"
- MaxWorkload = 5
+ ConsoleAgentId = "deepfence-console-cron"
+ DefaultMaxWorkload = 5
)
var (
- ScanWorkloadAllocator = utils_ctl.NewWorkloadAllocator(MaxWorkload)
+ MaxWorkload int
+ ScanWorkloadAllocator *utils_ctl.WorkloadAllocator
)
+func init() {
+	numWorkloadStr := os.Getenv("DEEPFENCE_MAX_SCAN_WORKLOAD")
+	if len(numWorkloadStr) == 0 {
+		MaxWorkload = DefaultMaxWorkload
+	} else {
+		numWorkload, err := strconv.Atoi(numWorkloadStr)
+		if err != nil {
+			MaxWorkload = DefaultMaxWorkload
+		} else {
+			MaxWorkload = numWorkload
+		}
+	}
+	ScanWorkloadAllocator = utils_ctl.NewWorkloadAllocator(MaxWorkload)
+}
+
/*
While this functon is a cron job, it is running on the worker's address space
Hence Allocator can be shared across tasks
diff --git a/deepfence_worker/cronjobs/notification.go b/deepfence_worker/cronjobs/notification.go
index a142be9976..2d691b86ed 100644
--- a/deepfence_worker/cronjobs/notification.go
+++ b/deepfence_worker/cronjobs/notification.go
@@ -5,11 +5,13 @@ import (
"database/sql"
"encoding/json"
"errors"
- reporters_search "github.com/deepfence/ThreatMapper/deepfence_server/reporters/search"
+ "os"
"strconv"
"sync"
"time"
+ reporters_search "github.com/deepfence/ThreatMapper/deepfence_server/reporters/search"
+
"github.com/deepfence/ThreatMapper/deepfence_server/model"
"github.com/deepfence/ThreatMapper/deepfence_server/pkg/integration"
"github.com/deepfence/ThreatMapper/deepfence_server/reporters"
@@ -82,7 +84,32 @@ var fieldsMap = map[string]map[string]string{utils.ScanTypeDetectedNode[utils.NE
},
}
-var notificationLock sync.Mutex
+const DefaultNotificationErrorBackoff = 15 * time.Minute
+
+var (
+ NotificationErrorBackoff time.Duration
+ notificationLock sync.Mutex
+)
+
+func init() {
+ backoffTimeStr := os.Getenv("DEEPFENCE_NOTIFICATION_ERROR_BACKOFF_MINUTES")
+ status := false
+ if len(backoffTimeStr) > 0 {
+ value, err := strconv.Atoi(backoffTimeStr)
+ if err == nil && value > 0 {
+ NotificationErrorBackoff = time.Duration(value) * time.Minute
+ status = true
+ log.Info().Msgf("Setting notification err backoff to: %v",
+ NotificationErrorBackoff)
+ }
+ }
+
+ if !status {
+ log.Info().Msgf("Setting notification err backoff to default: %v",
+ DefaultNotificationErrorBackoff)
+ NotificationErrorBackoff = DefaultNotificationErrorBackoff
+ }
+}
func SendNotifications(ctx context.Context, task *asynq.Task) error {
//This lock is to ensure only one notification handler runs at a time
@@ -102,6 +129,16 @@ func SendNotifications(ctx context.Context, task *asynq.Task) error {
wg := sync.WaitGroup{}
wg.Add(len(integrations))
for _, integrationRow := range integrations {
+ if integrationRow.ErrorMsg.String != "" &&
+ time.Since(integrationRow.LastSentTime.Time) < NotificationErrorBackoff {
+ log.Info().Msgf("Skipping integration for %s rowId: %d due to error: %s "+
+ "occured at last attempt, %s ago",
+ integrationRow.IntegrationType, integrationRow.ID,
+ integrationRow.ErrorMsg.String, time.Since(integrationRow.LastSentTime.Time))
+ wg.Done()
+ continue
+ }
+
go func(integration postgresql_db.Integration) {
defer wg.Done()
log.Info().Msgf("Processing integration for %s rowId: %d",
@@ -133,7 +170,10 @@ func SendNotifications(ctx context.Context, task *asynq.Task) error {
},
}
}
- pgClient.UpdateIntegrationStatus(ctx, params)
+ err = pgClient.UpdateIntegrationStatus(ctx, params)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
}(integrationRow)
}
@@ -199,7 +239,7 @@ func injectNodeDatamap(results []map[string]interface{}, common model.ScanResult
if _, ok := r["updated_at"]; ok {
flag := integration.IsMessagingFormat(integrationType)
- if flag == true {
+ if flag {
ts := r["updated_at"].(int64)
tm := time.Unix(0, ts*int64(time.Millisecond))
r["updated_at"] = tm
@@ -275,6 +315,9 @@ func processIntegration[T any](ctx context.Context, task *asynq.Task, integratio
results, common, err := reporters_scan.GetScanResults[T](ctx,
utils.DetectedNodeScanType[integrationRow.Resource], scan.ScanId,
filters.FieldsFilters, model.FetchWindow{})
+ if err != nil {
+ return err
+ }
totalQueryTime = totalQueryTime + time.Since(profileStart).Milliseconds()
if len(results) == 0 {
@@ -296,8 +339,8 @@ func processIntegration[T any](ctx context.Context, task *asynq.Task, integratio
if integration.IsMessagingFormat(integrationRow.IntegrationType) {
updatedResults = FormatForMessagingApps(results, integrationRow.Resource)
} else {
+ updatedResults = []map[string]interface{}{}
for _, r := range results {
- updatedResults = []map[string]interface{}{}
updatedResults = append(updatedResults, utils.ToMap[T](r))
}
}
diff --git a/deepfence_worker/cronjobs/reports.go b/deepfence_worker/cronjobs/reports.go
index 65d86b2a83..7547d9707e 100644
--- a/deepfence_worker/cronjobs/reports.go
+++ b/deepfence_worker/cronjobs/reports.go
@@ -55,11 +55,11 @@ func CleanUpReports(ctx context.Context, task *asynq.Task) error {
cleanup(minioReportsPrefix)
// delete the reports which are in failed state
- deleteFailedReports(ctx, session)
+ err = deleteFailedReports(ctx, session)
log.Info().Msg("Complete reports cleanup")
- return nil
+ return err
}
func deleteReport(ctx context.Context, session neo4j.Session, path string) error {
diff --git a/deepfence_worker/cronjobs/scheduled_tasks.go b/deepfence_worker/cronjobs/scheduled_tasks.go
index d885159329..fc02832ce5 100644
--- a/deepfence_worker/cronjobs/scheduled_tasks.go
+++ b/deepfence_worker/cronjobs/scheduled_tasks.go
@@ -3,6 +3,7 @@ package cronjobs
import (
"context"
"encoding/json"
+ "strings"
"github.com/deepfence/ThreatMapper/deepfence_server/handler"
"github.com/deepfence/ThreatMapper/deepfence_server/model"
@@ -25,17 +26,24 @@ func RunScheduledTasks(ctx context.Context, task *asynq.Task) error {
}
log.Info().Msgf("RunScheduledTasks: %s", messagePayload["description"])
+ log.Info().Msgf("RunScheduledTasks: %v", messagePayload)
scheduleId := int64(messagePayload["id"].(float64))
jobStatus := "Success"
- err := runScheduledTasks(ctx, messagePayload)
+ isSystem := messagePayload["is_system"].(bool)
+ var err error
+ if isSystem {
+ err = runSystemScheduledTasks(ctx, messagePayload)
+ } else {
+ err = runCustomScheduledTasks(ctx, messagePayload)
+ }
if err != nil {
jobStatus = err.Error()
- log.Error().Msg("runScheduledTasks: " + err.Error())
+ log.Error().Msg("RunScheduledTasks: " + err.Error())
}
err = saveJobStatus(ctx, scheduleId, jobStatus)
if err != nil {
- log.Error().Msg("runScheduledTasks saveJobStatus: " + err.Error())
+ log.Error().Msg("RunScheduledTasks saveJobStatus: " + err.Error())
}
return nil
}
@@ -48,43 +56,21 @@ var (
}
)
-func runScheduledTasks(ctx context.Context, messagePayload map[string]interface{}) error {
+func runSystemScheduledTasks(ctx context.Context, messagePayload map[string]interface{}) error {
payload := messagePayload["payload"].(map[string]interface{})
nodeType := payload["node_type"].(string)
+ isPriority := false
+ if _, ok := payload["is_priority"]; ok {
+ isPriority = payload["is_priority"].(bool)
+ }
- var searchFilter reporters_search.SearchFilter
- _, ok := payload["filters"]
- if ok {
- filters := payload["filters"].(string)
- err := json.Unmarshal([]byte(filters), &searchFilter)
- if err != nil {
- log.Error().Msgf("Error Unmarshaling filter: %v", err)
- return err
- }
-
- fieldsValues := searchFilter.Filters.ContainsFilter.FieldsValues
- if fieldsValues == nil {
- fieldsValues = make(map[string][]interface{})
- }
-
- if _, ok := fieldsValues["pseudo"]; !ok {
- fieldsValues["pseudo"] = append(make([]interface{}, 0), false)
- }
-
- if _, ok := fieldsValues["active"]; !ok {
- fieldsValues["active"] = append(make([]interface{}, 0), true)
- }
-
- searchFilter.Filters.ContainsFilter.FieldsValues = fieldsValues
- } else {
- searchFilter = reporters_search.SearchFilter{
- InFieldFilter: []string{"node_id"},
- Filters: reporters.FieldsFilters{
- ContainsFilter: reporters.ContainsFilter{
- FieldsValues: map[string][]interface{}{"pseudo": {false}, "active": {true}},
- },
+ searchFilter := reporters_search.SearchFilter{
+ InFieldFilter: []string{"node_id"},
+ Filters: reporters.FieldsFilters{
+ ContainsFilter: reporters.ContainsFilter{
+ FieldsValues: map[string][]interface{}{"pseudo": {false}, "active": {true}},
},
- }
+ },
}
extSearchFilter := reporters_search.SearchFilter{}
@@ -133,7 +119,8 @@ func runScheduledTasks(ctx context.Context, messagePayload map[string]interface{
return nil
}
- scanTrigger := model.ScanTriggerCommon{NodeIds: nodeIds, Filters: model.ScanFilter{}}
+ scanTrigger := model.ScanTriggerCommon{NodeIds: nodeIds,
+ Filters: model.ScanFilter{}, IsPriority: isPriority}
switch messagePayload["action"].(string) {
case utils.VULNERABILITY_SCAN:
@@ -160,7 +147,77 @@ func runScheduledTasks(ctx context.Context, messagePayload map[string]interface{
log.Warn().Msgf("Unknown node type %s for compliance scan", nodeType)
return nil
}
-		_, _, err := handler.StartMultiCloudComplianceScan(ctx, nodeIds, benchmarkTypes)
+		_, _, err := handler.StartMultiCloudComplianceScan(ctx, nodeIds, benchmarkTypes, isPriority)
 		if err != nil {
 			return err
 		}
+ }
+ return nil
+}
+
+func runCustomScheduledTasks(ctx context.Context, messagePayload map[string]interface{}) error {
+ var payload model.ScheduleTaskPayload
+ val := messagePayload["payload"].(map[string]interface{})
+ payloadRaw, err := json.Marshal(val)
+ if err != nil {
+ log.Error().Msgf("Failed to marshal the payload, error:%v", err)
+ return err
+ }
+
+ err = json.Unmarshal(payloadRaw, &payload)
+ if err != nil {
+ return err
+ }
+
+ nodeIds := payload.NodeIds
+ scanFilter := payload.Filters
+ scheduleJobId := int64(messagePayload["id"].(float64))
+
+ if len(nodeIds) == 0 {
+ log.Info().Msgf("No nodes found for CustomScheduledTasks, jobid:%d, description:%s",
+ scheduleJobId, messagePayload["description"])
+ return nil
+ }
+
+ scanTrigger := model.ScanTriggerCommon{NodeIds: nodeIds,
+ Filters: scanFilter, IsPriority: payload.IsPriority}
+
+ action := utils.Neo4jScanType(messagePayload["action"].(string))
+
+ switch action {
+ case utils.NEO4J_VULNERABILITY_SCAN:
+ binArgs := make(map[string]string, 0)
+ if payload.ScanConfigLanguages != nil && len(payload.ScanConfigLanguages) > 0 {
+ languages := []string{}
+ for _, language := range payload.ScanConfigLanguages {
+ languages = append(languages, language.Language)
+ }
+ binArgs["scan_type"] = strings.Join(languages, ",")
+ }
+
+ actionBuilder := handler.StartScanActionBuilder(ctx, ctl.StartVulnerabilityScan, binArgs)
+ _, _, err := handler.StartMultiScan(ctx, false, utils.NEO4J_VULNERABILITY_SCAN, scanTrigger, actionBuilder)
+ if err != nil {
+ return err
+ }
+ case utils.NEO4J_SECRET_SCAN:
+ actionBuilder := handler.StartScanActionBuilder(ctx, ctl.StartSecretScan, nil)
+ _, _, err := handler.StartMultiScan(ctx, false, utils.NEO4J_SECRET_SCAN, scanTrigger, actionBuilder)
+ if err != nil {
+ return err
+ }
+ case utils.NEO4J_MALWARE_SCAN:
+ actionBuilder := handler.StartScanActionBuilder(ctx, ctl.StartMalwareScan, nil)
+ _, _, err := handler.StartMultiScan(ctx, false, utils.NEO4J_MALWARE_SCAN, scanTrigger, actionBuilder)
+ if err != nil {
+ return err
+ }
+ case utils.NEO4J_COMPLIANCE_SCAN, utils.NEO4J_CLOUD_COMPLIANCE_SCAN:
+ if payload.BenchmarkTypes == nil || len(payload.BenchmarkTypes) == 0 {
+ log.Warn().Msgf("Invalid benchmarkType for compliance scan, job id: %d", scheduleJobId)
+ return nil
+ }
+ _, _, err := handler.StartMultiCloudComplianceScan(ctx, nodeIds, payload.BenchmarkTypes, false)
if err != nil {
return err
}
diff --git a/deepfence_worker/cronscheduler/init_neo4j.go b/deepfence_worker/cronscheduler/init_neo4j.go
index f762ad8d94..be7c648157 100644
--- a/deepfence_worker/cronscheduler/init_neo4j.go
+++ b/deepfence_worker/cronscheduler/init_neo4j.go
@@ -11,6 +11,13 @@ import (
"github.com/neo4j/neo4j-go-driver/v4/neo4j"
)
+func RunDisplayError(session neo4j.Session, statement string) {
+ _, err := session.Run(statement, map[string]interface{}{})
+ if err != nil {
+ log.Error().Msgf("%s, err: %v", statement, err)
+ }
+}
+
func initNeo4jDatabase(ctx context.Context) error {
log.Info().Msgf("Init Neo4j Constraints")
defer log.Info().Msgf("Init Neo4j Constraints - Done")
@@ -22,75 +29,65 @@ func initNeo4jDatabase(ctx context.Context) error {
session := nc.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
- session.Run("CREATE CONSTRAINT ON (n:CloudProvider) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:CloudRegion) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:AgentVersion) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:KubernetesCluster) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:ContainerImage) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:ImageStub) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:ImageTag) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:Node) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:Container) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:Pod) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:Process) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:Secret) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:SecretRule) ASSERT n.rule_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:Malware) ASSERT n.malware_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:MalwareRule) ASSERT n.rule_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:Vulnerability) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:VulnerabilityStub) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:SecurityGroup) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:CloudNode) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:CloudResource) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:RegistryAccount) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:Compliance) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:ComplianceRule) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:CloudCompliance) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:AgentDiagnosticLogs) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:CloudScannerDiagnosticLogs) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:CloudComplianceExecutable) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:CloudComplianceControl) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run("CREATE CONSTRAINT ON (n:CloudComplianceBenchmark) ASSERT n.node_id IS UNIQUE", map[string]interface{}{})
- session.Run(fmt.Sprintf("CREATE CONSTRAINT ON (n:%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_SECRET_SCAN), map[string]interface{}{})
- session.Run(fmt.Sprintf("CREATE CONSTRAINT ON (n:%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_VULNERABILITY_SCAN), map[string]interface{}{})
- session.Run(fmt.Sprintf("CREATE CONSTRAINT ON (n:%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_COMPLIANCE_SCAN), map[string]interface{}{})
- session.Run(fmt.Sprintf("CREATE CONSTRAINT ON (n:%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_CLOUD_COMPLIANCE_SCAN), map[string]interface{}{})
- session.Run(fmt.Sprintf("CREATE CONSTRAINT ON (n:%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_MALWARE_SCAN), map[string]interface{}{})
- session.Run(fmt.Sprintf("CREATE CONSTRAINT ON (n:Bulk%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_SECRET_SCAN), map[string]interface{}{})
- session.Run(fmt.Sprintf("CREATE CONSTRAINT ON (n:Bulk%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_VULNERABILITY_SCAN), map[string]interface{}{})
- session.Run(fmt.Sprintf("CREATE CONSTRAINT ON (n:Bulk%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_COMPLIANCE_SCAN), map[string]interface{}{})
- session.Run(fmt.Sprintf("CREATE CONSTRAINT ON (n:Bulk%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_CLOUD_COMPLIANCE_SCAN), map[string]interface{}{})
- session.Run(fmt.Sprintf("CREATE CONSTRAINT ON (n:Bulk%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_MALWARE_SCAN), map[string]interface{}{})
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:CloudProvider) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:CloudRegion) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:AgentVersion) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:KubernetesCluster) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:ContainerImage) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:ImageStub) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:ImageTag) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:Node) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:Container) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:Pod) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:Process) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:Secret) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:SecretRule) ASSERT n.rule_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:Malware) ASSERT n.malware_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:MalwareRule) ASSERT n.rule_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:Vulnerability) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:VulnerabilityStub) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:SecurityGroup) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:CloudNode) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:CloudResource) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:RegistryAccount) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:Compliance) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:ComplianceRule) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:CloudCompliance) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:AgentDiagnosticLogs) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:CloudScannerDiagnosticLogs) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:CloudComplianceExecutable) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:CloudComplianceControl) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, "CREATE CONSTRAINT ON (n:CloudComplianceBenchmark) ASSERT n.node_id IS UNIQUE")
+ RunDisplayError(session, fmt.Sprintf("CREATE CONSTRAINT ON (n:%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_SECRET_SCAN))
+ RunDisplayError(session, fmt.Sprintf("CREATE CONSTRAINT ON (n:%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_VULNERABILITY_SCAN))
+ RunDisplayError(session, fmt.Sprintf("CREATE CONSTRAINT ON (n:%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_COMPLIANCE_SCAN))
+ RunDisplayError(session, fmt.Sprintf("CREATE CONSTRAINT ON (n:%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_CLOUD_COMPLIANCE_SCAN))
+ RunDisplayError(session, fmt.Sprintf("CREATE CONSTRAINT ON (n:%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_MALWARE_SCAN))
+ RunDisplayError(session, fmt.Sprintf("CREATE CONSTRAINT ON (n:Bulk%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_SECRET_SCAN))
+ RunDisplayError(session, fmt.Sprintf("CREATE CONSTRAINT ON (n:Bulk%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_VULNERABILITY_SCAN))
+ RunDisplayError(session, fmt.Sprintf("CREATE CONSTRAINT ON (n:Bulk%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_COMPLIANCE_SCAN))
+ RunDisplayError(session, fmt.Sprintf("CREATE CONSTRAINT ON (n:Bulk%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_CLOUD_COMPLIANCE_SCAN))
+ RunDisplayError(session, fmt.Sprintf("CREATE CONSTRAINT ON (n:Bulk%s) ASSERT n.node_id IS UNIQUE", utils.NEO4J_MALWARE_SCAN))
- session.Run("MERGE (n:Node{node_id:'in-the-internet'}) SET n.node_name='The Internet (Inbound)', n.pseudo=true, n.cloud_provider='internet', n.cloud_region='internet', n.depth=0, n.active=true", map[string]interface{}{})
- session.Run("MERGE (n:Node{node_id:'out-the-internet'}) SET n.node_name='The Internet (Outbound)', n.pseudo=true, n.cloud_provider='internet', n.cloud_region='internet', n.depth=0, n.active=true", map[string]interface{}{})
- session.Run("MERGE (n:Node{node_id:'"+cronjobs.ConsoleAgentId+"'}) SET n.node_name='Console', n.pseudo=true, n.cloud_provider='internet', n.cloud_region='internet', n.depth=0, n.push_back=COALESCE(n.push_back,1)", map[string]interface{}{})
+ RunDisplayError(session, "MERGE (n:Node{node_id:'in-the-internet'}) SET n.node_name='The Internet (Inbound)', n.pseudo=true, n.cloud_provider='internet', n.cloud_region='internet', n.depth=0, n.active=true")
+ RunDisplayError(session, "MERGE (n:Node{node_id:'out-the-internet'}) SET n.node_name='The Internet (Outbound)', n.pseudo=true, n.cloud_provider='internet', n.cloud_region='internet', n.depth=0, n.active=true")
+ RunDisplayError(session, "MERGE (n:Node{node_id:'"+cronjobs.ConsoleAgentId+"'}) SET n.node_name='Console', n.pseudo=true, n.cloud_provider='internet', n.cloud_region='internet', n.depth=0, n.push_back=COALESCE(n.push_back,1)")
// Indexes for fast searching & ordering
addIndexOnIssuesCount(session, "ContainerImage")
addIndexOnIssuesCount(session, "Container")
- session.Run("CREATE INDEX NodeDepth IF NOT EXISTS FOR (n:Node) ON (n.depth)", map[string]interface{}{})
- session.Run("CREATE INDEX CloudResourceDepth IF NOT EXISTS FOR (n:CloudResource) ON (n.depth)", map[string]interface{}{})
- session.Run("CREATE INDEX CloudResourceLinked IF NOT EXISTS FOR (n:CloudResource) ON (n.linked)", map[string]interface{}{})
+ RunDisplayError(session, "CREATE INDEX NodeDepth IF NOT EXISTS FOR (n:Node) ON (n.depth)")
+ RunDisplayError(session, "CREATE INDEX CloudResourceDepth IF NOT EXISTS FOR (n:CloudResource) ON (n.depth)")
+ RunDisplayError(session, "CREATE INDEX CloudResourceLinked IF NOT EXISTS FOR (n:CloudResource) ON (n.linked)")
return nil
}
func addIndexOnIssuesCount(session neo4j.Session, node_type string) {
- session.Run(fmt.Sprintf("CREATE INDEX %sOrderByVulnerabilitiesCount IF NOT EXISTS FOR (n:%s) ON (n.vulnerabilities_count)",
- node_type, node_type),
- map[string]interface{}{})
- session.Run(fmt.Sprintf("CREATE INDEX %sOrderBySecretsCount IF NOT EXISTS FOR (n:%s) ON (n.vulnerabilities_count)",
- node_type, node_type),
- map[string]interface{}{})
- session.Run(fmt.Sprintf("CREATE INDEX %sOrderByMalwaresCount IF NOT EXISTS FOR (n:%s) ON (n.secrets_count)",
- node_type, node_type),
- map[string]interface{}{})
- session.Run(fmt.Sprintf("CREATE INDEX %sOrderByCompliancesCount IF NOT EXISTS FOR (n:%s) ON (n.compliances_count)",
- node_type, node_type),
- map[string]interface{}{})
- session.Run(fmt.Sprintf("CREATE INDEX %sOrderByCloudCompliancesCount IF NOT EXISTS FOR (n:%s) ON (n.cloud_compliances_count)",
- node_type, node_type),
- map[string]interface{}{})
+ RunDisplayError(session, fmt.Sprintf("CREATE INDEX %sOrderByVulnerabilitiesCount IF NOT EXISTS FOR (n:%s) ON (n.vulnerabilities_count)", node_type, node_type))
+ RunDisplayError(session, fmt.Sprintf("CREATE INDEX %sOrderBySecretsCount IF NOT EXISTS FOR (n:%s) ON (n.vulnerabilities_count)", node_type, node_type))
+ RunDisplayError(session, fmt.Sprintf("CREATE INDEX %sOrderByMalwaresCount IF NOT EXISTS FOR (n:%s) ON (n.secrets_count)", node_type, node_type))
+ RunDisplayError(session, fmt.Sprintf("CREATE INDEX %sOrderByCompliancesCount IF NOT EXISTS FOR (n:%s) ON (n.compliances_count)", node_type, node_type))
+ RunDisplayError(session, fmt.Sprintf("CREATE INDEX %sOrderByCloudCompliancesCount IF NOT EXISTS FOR (n:%s) ON (n.cloud_compliances_count)", node_type, node_type))
}
diff --git a/deepfence_worker/cronscheduler/scheduler.go b/deepfence_worker/cronscheduler/scheduler.go
index 251264581d..4a7bcbf9bd 100644
--- a/deepfence_worker/cronscheduler/scheduler.go
+++ b/deepfence_worker/cronscheduler/scheduler.go
@@ -12,8 +12,9 @@ import (
"github.com/deepfence/ThreatMapper/deepfence_utils/directory"
"github.com/deepfence/ThreatMapper/deepfence_utils/log"
postgresqlDb "github.com/deepfence/ThreatMapper/deepfence_utils/postgresql/postgresql-db"
- sdkUtils "github.com/deepfence/ThreatMapper/deepfence_utils/utils"
+ "github.com/deepfence/ThreatMapper/deepfence_utils/utils"
"github.com/deepfence/ThreatMapper/deepfence_utils/vulnerability_db"
+ "github.com/hibiken/asynq"
"github.com/robfig/cron/v3"
)
@@ -34,8 +35,9 @@ type Jobs struct {
}
type Scheduler struct {
- cron *cron.Cron
- jobs Jobs
+ cron *cron.Cron
+ jobs Jobs
+ tasksMaxRetries asynq.Option
}
func NewScheduler() (*Scheduler, error) {
@@ -50,6 +52,7 @@ func NewScheduler() (*Scheduler, error) {
CronJobs: make(map[directory.NamespaceID]CronJobs),
ScheduledJobs: make(map[directory.NamespaceID]ScheduledJobs),
},
+ tasksMaxRetries: utils.TasksMaxRetries(),
}
return scheduler, nil
}
@@ -112,7 +115,7 @@ func (s *Scheduler) RemoveJobs(ctx context.Context) error {
}
func (s *Scheduler) updateScheduledJobs() {
- ticker := time.NewTicker(15 * time.Minute)
+ ticker := time.NewTicker(1 * time.Minute)
defer ticker.Stop()
for range ticker.C {
@@ -152,18 +155,13 @@ func (s *Scheduler) addScheduledJobs(ctx context.Context) error {
var newHashes []string
newJobHashToId := make(map[string]cron.EntryID)
for _, schedule := range schedules {
- jobHash := sdkUtils.GetScheduledJobHash(schedule)
- if sdkUtils.InSlice(jobHash, scheduledJobs.jobHashes) {
+ jobHash := utils.GetScheduledJobHash(schedule)
+ if utils.InSlice(jobHash, scheduledJobs.jobHashes) {
newHashes = append(newHashes, jobHash)
newJobHashToId[jobHash] = scheduledJobs.jobHashToId[jobHash]
continue
}
- var payload map[string]string
- err = json.Unmarshal(schedule.Payload, &payload)
- if err != nil {
- log.Error().Msg("addScheduledJobs payload: " + err.Error())
- continue
- }
+ payload := schedule.Payload
jobId, err := s.cron.AddFunc(schedule.CronExpr, s.enqueueScheduledTask(namespace, schedule, payload))
if err != nil {
return err
@@ -172,7 +170,8 @@ func (s *Scheduler) addScheduledJobs(ctx context.Context) error {
newJobHashToId[jobHash] = jobId
}
for _, oldJobHash := range scheduledJobs.jobHashes {
- if !sdkUtils.InSlice(oldJobHash, scheduledJobs.jobHashes) {
+ if !utils.InSlice(oldJobHash, newHashes) {
+ log.Info().Msgf("Removing job from cron: %s", oldJobHash)
s.cron.Remove(scheduledJobs.jobHashToId[oldJobHash])
}
}
@@ -196,92 +195,92 @@ func (s *Scheduler) addCronJobs(ctx context.Context) error {
// Documentation: https://pkg.go.dev/github.com/robfig/cron#hdr-Usage
var jobID cron.EntryID
- jobID, err = s.cron.AddFunc("@every 30s", s.enqueueTask(namespace, sdkUtils.TriggerConsoleActionsTask))
+ jobID, err = s.cron.AddFunc("@every 30s", s.enqueueTask(namespace, utils.TriggerConsoleActionsTask, utils.CritialTaskOpts()...))
if err != nil {
return err
}
jobIDs = append(jobIDs, jobID)
- jobID, err = s.cron.AddFunc("@every 120s", s.enqueueTask(namespace, sdkUtils.CleanUpGraphDBTask))
+ jobID, err = s.cron.AddFunc("@every 120s", s.enqueueTask(namespace, utils.CleanUpGraphDBTask, utils.CritialTaskOpts()...))
if err != nil {
return err
}
jobIDs = append(jobIDs, jobID)
- jobID, err = s.cron.AddFunc("@every 120s", s.enqueueTask(namespace, sdkUtils.ComputeThreatTask))
+ jobID, err = s.cron.AddFunc("@every 120s", s.enqueueTask(namespace, utils.ComputeThreatTask, utils.CritialTaskOpts()...))
if err != nil {
return err
}
jobIDs = append(jobIDs, jobID)
- jobID, err = s.cron.AddFunc("@every 120s", s.enqueueTask(namespace, sdkUtils.RetryFailedScansTask))
+ jobID, err = s.cron.AddFunc("@every 120s", s.enqueueTask(namespace, utils.RetryFailedScansTask, utils.CritialTaskOpts()...))
if err != nil {
return err
}
jobIDs = append(jobIDs, jobID)
- jobID, err = s.cron.AddFunc("@every 10m", s.enqueueTask(namespace, sdkUtils.RetryFailedUpgradesTask))
+ jobID, err = s.cron.AddFunc("@every 10m", s.enqueueTask(namespace, utils.RetryFailedUpgradesTask, utils.DefaultTaskOpts()...))
if err != nil {
return err
}
jobIDs = append(jobIDs, jobID)
- jobID, err = s.cron.AddFunc("@every 5m", s.enqueueTask(namespace, sdkUtils.CleanUpPostgresqlTask))
+ jobID, err = s.cron.AddFunc("@every 5m", s.enqueueTask(namespace, utils.CleanUpPostgresqlTask, utils.DefaultTaskOpts()...))
if err != nil {
return err
}
jobIDs = append(jobIDs, jobID)
- jobID, err = s.cron.AddFunc("@every 60m", s.enqueueTask(namespace, sdkUtils.CleanupDiagnosisLogs))
+ jobID, err = s.cron.AddFunc("@every 60m", s.enqueueTask(namespace, utils.CleanupDiagnosisLogs, utils.DefaultTaskOpts()...))
if err != nil {
return err
}
// Adding CloudComplianceTask only to ensure data is ingested if task fails on startup, Retry to be handled by watermill
jobIDs = append(jobIDs, jobID)
- jobID, err = s.cron.AddFunc("@every 60m", s.enqueueTask(namespace, sdkUtils.CloudComplianceTask))
+ jobID, err = s.cron.AddFunc("@every 60m", s.enqueueTask(namespace, utils.CloudComplianceTask, utils.CritialTaskOpts()...))
if err != nil {
return err
}
jobIDs = append(jobIDs, jobID)
- jobID, err = s.cron.AddFunc("@every 60m", s.enqueueTask(namespace, sdkUtils.CheckAgentUpgradeTask))
- if err != nil {
- return err
- }
- jobIDs = append(jobIDs, jobID)
+ //jobID, err = s.cron.AddFunc("@every 60m", s.enqueueTask(namespace, utils.CheckAgentUpgradeTask))
+ //if err != nil {
+ // return err
+ //}
+ //jobIDs = append(jobIDs, jobID)
- jobID, err = s.cron.AddFunc("@every 12h", s.enqueueTask(namespace, sdkUtils.SyncRegistryTask))
+ jobID, err = s.cron.AddFunc("@every 12h", s.enqueueTask(namespace, utils.SyncRegistryTask, utils.CritialTaskOpts()...))
if err != nil {
return err
}
jobIDs = append(jobIDs, jobID)
- jobID, err = s.cron.AddFunc("@every 30s", s.enqueueTask(namespace, sdkUtils.SendNotificationTask))
+ jobID, err = s.cron.AddFunc("@every 30s", s.enqueueTask(namespace, utils.SendNotificationTask, utils.LowTaskOpts()...))
if err != nil {
return err
}
jobIDs = append(jobIDs, jobID)
- jobID, err = s.cron.AddFunc("@every 60m", s.enqueueTask(namespace, sdkUtils.ReportCleanUpTask))
+ jobID, err = s.cron.AddFunc("@every 60m", s.enqueueTask(namespace, utils.ReportCleanUpTask, utils.DefaultTaskOpts()...))
if err != nil {
return err
}
jobIDs = append(jobIDs, jobID)
- jobID, err = s.cron.AddFunc("@every 15m", s.enqueueTask(namespace, sdkUtils.CachePostureProviders))
+ jobID, err = s.cron.AddFunc("@every 15m", s.enqueueTask(namespace, utils.CachePostureProviders, utils.CritialTaskOpts()...))
if err != nil {
return err
}
jobIDs = append(jobIDs, jobID)
- jobID, err = s.cron.AddFunc("@every 30s", s.enqueueTask(namespace, sdkUtils.LinkCloudResourceTask))
+ jobID, err = s.cron.AddFunc("@every 30s", s.enqueueTask(namespace, utils.LinkCloudResourceTask, utils.CritialTaskOpts()...))
if err != nil {
return err
}
jobIDs = append(jobIDs, jobID)
- jobID, err = s.cron.AddFunc("@every 30s", s.enqueueTask(namespace, sdkUtils.LinkNodesTask))
+ jobID, err = s.cron.AddFunc("@every 30s", s.enqueueTask(namespace, utils.LinkNodesTask, utils.CritialTaskOpts()...))
if err != nil {
return err
}
@@ -309,11 +308,11 @@ func (s *Scheduler) startInitJobs(ctx context.Context) error {
}
log.Info().Msgf("Start immediate cronjobs for namespace %s", namespace)
- s.enqueueTask(namespace, sdkUtils.CheckAgentUpgradeTask)()
- s.enqueueTask(namespace, sdkUtils.SyncRegistryTask)()
- s.enqueueTask(namespace, sdkUtils.CloudComplianceTask)()
- s.enqueueTask(namespace, sdkUtils.ReportCleanUpTask)()
- s.enqueueTask(namespace, sdkUtils.CachePostureProviders)()
+ //s.enqueueTask(namespace, utils.CheckAgentUpgradeTask)()
+ s.enqueueTask(namespace, utils.SyncRegistryTask, utils.CritialTaskOpts()...)()
+ s.enqueueTask(namespace, utils.CloudComplianceTask, utils.CritialTaskOpts()...)()
+ s.enqueueTask(namespace, utils.ReportCleanUpTask, utils.CritialTaskOpts()...)()
+ s.enqueueTask(namespace, utils.CachePostureProviders, utils.CritialTaskOpts()...)()
return nil
}
@@ -322,7 +321,8 @@ func (s *Scheduler) Run() {
s.cron.Run()
}
-func (s *Scheduler) enqueueScheduledTask(namespace directory.NamespaceID, schedule postgresqlDb.Scheduler, payload map[string]string) func() {
+func (s *Scheduler) enqueueScheduledTask(namespace directory.NamespaceID,
+ schedule postgresqlDb.Scheduler, payload json.RawMessage) func() {
log.Info().Msgf("Registering task: %s, %s for namespace %s", schedule.Description, schedule.CronExpr, namespace)
return func() {
log.Info().Msgf("Enqueuing task: %s, %s for namespace %s",
@@ -332,6 +332,7 @@ func (s *Scheduler) enqueueScheduledTask(namespace directory.NamespaceID, schedu
"id": schedule.ID,
"payload": payload,
"description": schedule.Description,
+ "is_system": schedule.IsSystem,
}
messageJson, _ := json.Marshal(message)
worker, err := directory.Worker(directory.NewContextWithNameSpace(namespace))
@@ -339,14 +340,14 @@ func (s *Scheduler) enqueueScheduledTask(namespace directory.NamespaceID, schedu
log.Error().Msg(err.Error())
return
}
- err = worker.Enqueue(sdkUtils.ScheduledTasks, messageJson)
+ err = worker.Enqueue(utils.ScheduledTasks, messageJson, utils.DefaultTaskOpts()...)
if err != nil {
log.Error().Msg(err.Error())
}
}
}
-func (s *Scheduler) enqueueTask(namespace directory.NamespaceID, task string) func() {
+func (s *Scheduler) enqueueTask(namespace directory.NamespaceID, task string, taskOpts ...asynq.Option) func() {
log.Info().Msgf("Registering task: %s for namespace %s", task, namespace)
return func() {
log.Info().Msgf("Enqueuing task: %s for namespace %s", task, namespace)
@@ -355,7 +356,7 @@ func (s *Scheduler) enqueueTask(namespace directory.NamespaceID, task string) fu
log.Error().Msg(err.Error())
return
}
- err = worker.Enqueue(task, []byte(strconv.FormatInt(sdkUtils.GetTimestamp(), 10)))
+ err = worker.Enqueue(task, []byte(strconv.FormatInt(utils.GetTimestamp(), 10)), taskOpts...)
if err != nil {
log.Error().Msg(err.Error())
}
diff --git a/deepfence_worker/go.mod b/deepfence_worker/go.mod
index d3a95eb4ac..a9685357a3 100644
--- a/deepfence_worker/go.mod
+++ b/deepfence_worker/go.mod
@@ -1,6 +1,6 @@
module github.com/deepfence/ThreatMapper/deepfence_worker
-go 1.21.0
+go 1.21
replace github.com/deepfence/golang_deepfence_sdk/client => ../golang_deepfence_sdk/client/
diff --git a/deepfence_worker/ingester.go b/deepfence_worker/ingester.go
index ec4a6bbf4e..589c1e77c8 100644
--- a/deepfence_worker/ingester.go
+++ b/deepfence_worker/ingester.go
@@ -50,7 +50,10 @@ func startIngester(cfg config) error {
processors.StartKafkaProcessors(ctx)
// start audit log processor
- processors.StartAuditLogProcessor(ctx)
+ err = processors.StartAuditLogProcessor(ctx)
+ if err != nil {
+ log.Error().Msgf("%v", err)
+ }
// start kafka consumers for all given topics
err = processors.StartKafkaConsumers(
diff --git a/deepfence_worker/ingesters/cloud_resource.go b/deepfence_worker/ingesters/cloud_resource.go
index 88a9b405ec..7929587f2a 100644
--- a/deepfence_worker/ingesters/cloud_resource.go
+++ b/deepfence_worker/ingesters/cloud_resource.go
@@ -28,11 +28,8 @@ func CommitFuncCloudResource(ns string, cs []ingestersUtil.CloudResource) error
if err != nil {
return err
}
- session, err := driver.Session(neo4j.AccessModeWrite)
- if err != nil {
- return err
- }
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
batch, hosts, clusters := ResourceToMaps(cs)
@@ -173,11 +170,11 @@ func ResourceToMaps(ms []ingestersUtil.CloudResource) ([]map[string]interface{},
// TODO: Call somewhere
func LinkNodesWithCloudResources(ctx context.Context) error {
driver, err := directory.Neo4jClient(ctx)
- session, err := driver.Session(neo4j.AccessModeWrite)
-
if err != nil {
return err
}
+
+ session := driver.NewSession(neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
defer session.Close()
tx, err := session.BeginTransaction(neo4j.WithTxTimeout(30 * time.Second))
diff --git a/deepfence_worker/ingesters/common.go b/deepfence_worker/ingesters/common.go
index 925e31cced..7d6baa0f99 100644
--- a/deepfence_worker/ingesters/common.go
+++ b/deepfence_worker/ingesters/common.go
@@ -98,14 +98,14 @@ func CommitFuncStatus[Status any](ts utils.Neo4jScanType) func(ns string, data [
if ts == utils.NEO4J_CLOUD_COMPLIANCE_SCAN {
task = utils.UpdateCloudResourceScanStatusTask
}
- if err := worker.Enqueue(task, b); err != nil {
+ if err := worker.Enqueue(task, b, utils.DefaultTaskOpts()...); err != nil {
log.Error().Err(err).Msgf("failed to enqueue %s", task)
}
}
if (ts == utils.NEO4J_COMPLIANCE_SCAN || ts == utils.NEO4J_CLOUD_COMPLIANCE_SCAN) && anyCompleted(others) {
err := worker.Enqueue(utils.CachePostureProviders,
- []byte(strconv.FormatInt(utils.GetTimestamp(), 10)))
+ []byte(strconv.FormatInt(utils.GetTimestamp(), 10)), utils.CritialTaskOpts()...)
if err != nil {
log.Error().Err(err).Msgf("failed to enqueue %s", utils.CachePostureProviders)
}
diff --git a/deepfence_worker/main.go b/deepfence_worker/main.go
index 70553710be..c3af0d420a 100644
--- a/deepfence_worker/main.go
+++ b/deepfence_worker/main.go
@@ -39,6 +39,8 @@ type config struct {
RedisDbNumber int `default:"0" split_words:"true"`
RedisPort string `default:"6379" split_words:"true"`
RedisPassword string `default:"" split_words:"true"`
+ TasksConcurrency int `default:"50" split_words:"true"`
+ ProcessQueues []string `split_words:"true"`
}
// build info
diff --git a/deepfence_worker/processors/bulk_processor.go b/deepfence_worker/processors/bulk_processor.go
index 9605506c5b..5e9b903259 100644
--- a/deepfence_worker/processors/bulk_processor.go
+++ b/deepfence_worker/processors/bulk_processor.go
@@ -27,11 +27,9 @@ func init() {
neo4j_host := os.Getenv("DEEPFENCE_NEO4J_HOST")
go func() {
for {
- select {
- case <-wait:
- breaker.Lock()
- log.Info().Msgf("Breaker opened")
- }
+ <-wait
+ breaker.Lock()
+ log.Info().Msgf("Breaker opened")
for {
err := utils.WaitServiceTcpConn(neo4j_host, neo4j_port, time.Second*30)
if err != nil {
diff --git a/deepfence_worker/processors/common.go b/deepfence_worker/processors/common.go
index 3fea330f2c..dd46edf6cf 100644
--- a/deepfence_worker/processors/common.go
+++ b/deepfence_worker/processors/common.go
@@ -114,13 +114,16 @@ func StartKafkaProcessors(ctx context.Context) {
1_000)
for i := range processors {
- processors[i].Start(ctx)
+ err := processors[i].Start(ctx)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
}
func StopKafkaProcessors() {
for i := range processors {
- processors[i].Stop()
+ _ = processors[i].Stop()
}
}
diff --git a/deepfence_worker/tasks/malwarescan/malwarescan.go b/deepfence_worker/tasks/malwarescan/malwarescan.go
index 95a36d55f0..bd7197f1c1 100644
--- a/deepfence_worker/tasks/malwarescan/malwarescan.go
+++ b/deepfence_worker/tasks/malwarescan/malwarescan.go
@@ -145,7 +145,10 @@ func (s MalwareScan) StartMalwareScan(ctx context.Context, task *asynq.Task) err
malwareScanner := malwareScan.New(opts, yaraconfig, yrScanner, params.ScanId)
// send inprogress status
- scanCtx.Checkpoint("After initialization")
+ err = scanCtx.Checkpoint("After initialization")
+ if err != nil {
+ return err
+ }
// get registry credentials
authDir, creds, err := workerUtils.GetConfigFileFromRegistry(ctx, params.RegistryId)
@@ -199,7 +202,7 @@ func (s MalwareScan) StartMalwareScan(ctx context.Context, task *asynq.Task) err
return err
}
- scanCtx.Checkpoint("After skopeo download")
+ err = scanCtx.Checkpoint("After skopeo download")
if err != nil {
return err
diff --git a/deepfence_worker/tasks/reports/data.go b/deepfence_worker/tasks/reports/data.go
index f8d3cdfd22..6182fc6e3a 100644
--- a/deepfence_worker/tasks/reports/data.go
+++ b/deepfence_worker/tasks/reports/data.go
@@ -121,6 +121,9 @@ func scanResultFilter(levelKey string, levelValues []string, masked []bool) repo
func getVulnerabilityData(ctx context.Context, params sdkUtils.ReportParams) (*Info[model.Vulnerability], error) {
+ if params.Filters.MostExploitableReport {
+ return getMostExploitableVulnData(ctx, params)
+ }
searchFilter := searchScansFilter(params)
var (
@@ -181,6 +184,51 @@ func getVulnerabilityData(ctx context.Context, params sdkUtils.ReportParams) (*I
return &data, nil
}
+func getMostExploitableVulnData(ctx context.Context, params sdkUtils.ReportParams) (*Info[model.Vulnerability], error) {
+ var req rptSearch.SearchNodeReq
+ req.ExtendedNodeFilter.Filters.OrderFilter.OrderFields = []reporters.OrderSpec{{FieldName: "cve_cvss_score", Descending: true}}
+ req.NodeFilter.Filters.ContainsFilter.FieldsValues = map[string][]interface{}{"exploitability_score": {1, 2, 3}}
+ req.NodeFilter.Filters.OrderFilter.OrderFields = []reporters.OrderSpec{{FieldName: "exploitability_score", Descending: true, Size: 1000}}
+ req.Window.Size = 1000
+ req.Window.Offset = 0
+ entries, err := rptSearch.SearchReport[model.Vulnerability](ctx, req.NodeFilter, req.ExtendedNodeFilter, req.IndirectFilters, req.Window)
+ if err != nil {
+ return nil, err
+ }
+ var (
+ end time.Time = time.Now()
+ start time.Time = time.Now()
+ )
+ nodeWiseData := NodeWiseData[model.Vulnerability]{
+ SeverityCount: make(map[string]map[string]int32),
+ ScanData: make(map[string]ScanData[model.Vulnerability]),
+ }
+ nodeKey := "most_exploitable_vulnerabilities"
+ nodeWiseData.SeverityCount[nodeKey] = make(map[string]int32)
+ nodeWiseData.ScanData[nodeKey] = ScanData[model.Vulnerability]{ScanResults: entries}
+ sevMap := nodeWiseData.SeverityCount[nodeKey]
+ for _, entry := range entries {
+ count, present := sevMap[entry.Cve_severity]
+ if !present {
+ count = 1
+ } else {
+ count += 1
+ }
+ sevMap[entry.Cve_severity] = count
+ }
+
+ data := Info[model.Vulnerability]{
+ ScanType: VULNERABILITY,
+ Title: "Vulnerability Scan Report",
+ StartTime: start.Format(time.RFC3339),
+ EndTime: end.Format(time.RFC3339),
+ AppliedFilters: updateFilters(ctx, params.Filters),
+ NodeWiseData: nodeWiseData,
+ }
+
+ return &data, nil
+}
+
func getSecretData(ctx context.Context, params sdkUtils.ReportParams) (*Info[model.Secret], error) {
searchFilter := searchScansFilter(params)
diff --git a/deepfence_worker/tasks/reports/reports.go b/deepfence_worker/tasks/reports/reports.go
index 34a3dcb98d..19588eecff 100644
--- a/deepfence_worker/tasks/reports/reports.go
+++ b/deepfence_worker/tasks/reports/reports.go
@@ -114,7 +114,7 @@ func GenerateReport(ctx context.Context, task *asynq.Task) error {
reportName := path.Join("/report", reportFileName(params))
res, err := mc.UploadLocalFile(ctx, reportName,
- localReportPath, putOpts(sdkUtils.ReportType(params.ReportType)))
+ localReportPath, false, putOpts(sdkUtils.ReportType(params.ReportType)))
if err != nil {
log.Error().Err(err).Msg("failed to upload file to minio")
return nil
diff --git a/deepfence_worker/tasks/reports/xlsx.go b/deepfence_worker/tasks/reports/xlsx.go
index 7b47672266..030ac9bb54 100644
--- a/deepfence_worker/tasks/reports/xlsx.go
+++ b/deepfence_worker/tasks/reports/xlsx.go
@@ -3,6 +3,7 @@ package reports
import (
"context"
"os"
+ "time"
"github.com/deepfence/ThreatMapper/deepfence_utils/log"
"github.com/deepfence/ThreatMapper/deepfence_utils/utils"
@@ -119,7 +120,10 @@ func xlsxSave(xlsx *excelize.File, params utils.ReportParams) (string, error) {
func xlsxSetHeader(xlsx *excelize.File, sheet string, headers map[string]string) {
for k, v := range headers {
- xlsx.SetCellValue(sheet, k, v)
+ err := xlsx.SetCellValue(sheet, k, v)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
}
@@ -141,13 +145,14 @@ func vulnerabilityXLSX(ctx context.Context, params utils.ReportParams) (string,
offset := 0
for _, nodeScanData := range data.NodeWiseData.ScanData {
+ updatedAt := time.UnixMilli(nodeScanData.ScanInfo.UpdatedAt).String()
for i, v := range nodeScanData.ScanResults {
cellName, err := excelize.CoordinatesToCellName(1, offset+i+2)
if err != nil {
log.Error().Err(err).Msg("error generating cell name")
}
value := []interface{}{
- nodeScanData.ScanInfo.UpdatedAt,
+ updatedAt,
v.Cve_attack_vector,
v.Cve_caused_by_package,
nodeScanData.ScanInfo.NodeName,
@@ -165,7 +170,10 @@ func vulnerabilityXLSX(ctx context.Context, params utils.ReportParams) (string,
nodeScanData.ScanInfo.CloudAccountID,
v.Masked,
}
- xlsx.SetSheetRow("Sheet1", cellName, &value)
+ err = xlsx.SetSheetRow("Sheet1", cellName, &value)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
offset = offset + len(nodeScanData.ScanResults)
}
@@ -207,7 +215,10 @@ func secretXLSX(ctx context.Context, params utils.ReportParams) (string, error)
nodeScanData.ScanInfo.KubernetesClusterName,
s.SignatureToMatch,
}
- xlsx.SetSheetRow("Sheet1", cellName, &value)
+ err = xlsx.SetSheetRow("Sheet1", cellName, &value)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
offset = offset + len(nodeScanData.ScanResults)
}
@@ -251,7 +262,10 @@ func malwareXLSX(ctx context.Context, params utils.ReportParams) (string, error)
nodeScanData.ScanInfo.KubernetesClusterName,
nodeScanData.ScanInfo.NodeType,
}
- xlsx.SetSheetRow("Sheet1", cellName, &value)
+ err = xlsx.SetSheetRow("Sheet1", cellName, &value)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
offset = offset + len(nodeScanData.ScanResults)
}
@@ -277,13 +291,14 @@ func complianceXLSX(ctx context.Context, params utils.ReportParams) (string, err
offset := 0
for _, nodeScanData := range data.NodeWiseData.ScanData {
+ updatedAt := time.UnixMilli(nodeScanData.ScanInfo.UpdatedAt).String()
for i, c := range nodeScanData.ScanResults {
cellName, err := excelize.CoordinatesToCellName(1, offset+i+2)
if err != nil {
log.Error().Err(err).Msg("error generating cell name")
}
value := []interface{}{
- nodeScanData.ScanInfo.UpdatedAt,
+ updatedAt,
c.ComplianceCheckType,
"",
"",
@@ -299,7 +314,10 @@ func complianceXLSX(ctx context.Context, params utils.ReportParams) (string, err
c.TestInfo,
c.TestNumber,
}
- xlsx.SetSheetRow("Sheet1", cellName, &value)
+ err = xlsx.SetSheetRow("Sheet1", cellName, &value)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
offset = offset + len(nodeScanData.ScanResults)
}
@@ -324,13 +342,14 @@ func cloudComplianceXLSX(ctx context.Context, params utils.ReportParams) (string
xlsxSetHeader(xlsx, "Sheet1", complianceHeader)
for _, data := range data.NodeWiseData.ScanData {
+ updatedAt := time.UnixMilli(data.ScanInfo.UpdatedAt).String()
for i, c := range data.ScanResults {
cellName, err := excelize.CoordinatesToCellName(1, i+2)
if err != nil {
log.Error().Err(err).Msg("error generating cell name")
}
value := []interface{}{
- data.ScanInfo.UpdatedAt,
+ updatedAt,
c.ComplianceCheckType,
"",
"",
@@ -346,7 +365,10 @@ func cloudComplianceXLSX(ctx context.Context, params utils.ReportParams) (string
c.Title,
c.ControlID,
}
- xlsx.SetSheetRow("Sheet1", cellName, &value)
+ err = xlsx.SetSheetRow("Sheet1", cellName, &value)
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
}
}
diff --git a/deepfence_worker/tasks/sbom/generate_sbom.go b/deepfence_worker/tasks/sbom/generate_sbom.go
index 240c18381d..7c8a104e61 100644
--- a/deepfence_worker/tasks/sbom/generate_sbom.go
+++ b/deepfence_worker/tasks/sbom/generate_sbom.go
@@ -7,7 +7,6 @@ import (
"encoding/json"
"os"
"path"
- "strings"
"sync"
"time"
@@ -177,7 +176,10 @@ func (s SbomGenerator) GenerateSbom(ctx context.Context, task *asynq.Task) error
log.Debug().Msgf("config: %+v", cfg)
- scanCtx.Checkpoint("Before generating SBOM")
+ err = scanCtx.Checkpoint("Before generating SBOM")
+ if err != nil {
+ return err
+ }
rawSbom, err := syft.GenerateSBOM(scanCtx.Context, cfg)
if err != nil {
@@ -193,7 +195,10 @@ func (s SbomGenerator) GenerateSbom(ctx context.Context, task *asynq.Task) error
}
gzipwriter.Close()
- scanCtx.Checkpoint("Before storing to minio")
+ err = scanCtx.Checkpoint("Before storing to minio")
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
// upload sbom to minio
mc, err := directory.MinioClient(ctx)
@@ -203,39 +208,11 @@ func (s SbomGenerator) GenerateSbom(ctx context.Context, task *asynq.Task) error
}
sbomFile := path.Join("/sbom/", utils.ScanIdReplacer.Replace(params.ScanId)+".json.gz")
- info, err := mc.UploadFile(ctx, sbomFile, gzpb64Sbom.Bytes(),
+ info, err := mc.UploadFile(ctx, sbomFile, gzpb64Sbom.Bytes(), true,
minio.PutObjectOptions{ContentType: "application/gzip"})
-
if err != nil {
- logError := true
- if strings.Contains(err.Error(), "Already exists here") {
- /*If the file already exists, we will delete the old file and upload the new one
- File can exists in 2 conditions:
- - When the earlier scan was stuck during the scan phase
- - When the service was restarted
- - Bug/Race conditon in the worker service
- */
- log.Warn().Msg(err.Error() + ", Will try to overwrite the file: " + sbomFile)
- err = mc.DeleteFile(ctx, sbomFile, true, minio.RemoveObjectOptions{ForceDelete: true})
- if err == nil {
- info, err = mc.UploadFile(ctx, sbomFile, gzpb64Sbom.Bytes(),
- minio.PutObjectOptions{ContentType: "application/gzip"})
-
- if err == nil {
- log.Info().Msgf("Successfully overwritten the file: %s", sbomFile)
- logError = false
- } else {
- log.Error().Msgf("Failed to upload the file, error is: %v", err)
- }
- } else {
- log.Error().Msgf("Failed to delete the old file, error is: %v", err)
- }
- }
-
- if logError == true {
- log.Error().Msg(err.Error())
- return err
- }
+ log.Error().Err(err).Msg("failed to upload sbom")
+ return err
}
log.Info().Msgf("sbom file uploaded %+v", info)
@@ -250,7 +227,7 @@ func (s SbomGenerator) GenerateSbom(ctx context.Context, task *asynq.Task) error
return nil
}
- err = worker.Enqueue(utils.ScanSBOMTask, payload)
+ err = worker.Enqueue(utils.ScanSBOMTask, payload, utils.DefaultTaskOpts()...)
if err != nil {
return err
}
diff --git a/deepfence_worker/tasks/sbom/scan_sbom.go b/deepfence_worker/tasks/sbom/scan_sbom.go
index 756f74db71..8702b5e36a 100644
--- a/deepfence_worker/tasks/sbom/scan_sbom.go
+++ b/deepfence_worker/tasks/sbom/scan_sbom.go
@@ -9,7 +9,6 @@ import (
"io"
"os"
"path"
- "regexp"
"time"
"github.com/anchore/syft/syft/formats"
@@ -29,7 +28,6 @@ import (
)
var (
- attackVectorRegex = regexp.MustCompile(`.*av:n.*`)
grypeConfig = "/usr/local/bin/grype.yaml"
grypeBin = "grype"
minioHost = utils.GetEnvOrDefault("DEEPFENCE_MINIO_HOST", "deepfence-file-server")
@@ -230,7 +228,7 @@ func (s SbomParser) ScanSBOM(ctx context.Context, task *asynq.Task) error {
}
runtimeSbomPath := path.Join("/sbom/", "runtime-"+utils.ScanIdReplacer.Replace(params.ScanId)+".json")
- uploadInfo, err := mc.UploadFile(context.Background(), runtimeSbomPath, runtimeSbomBytes,
+ uploadInfo, err := mc.UploadFile(context.Background(), runtimeSbomPath, runtimeSbomBytes, true,
minio.PutObjectOptions{ContentType: "application/json"})
if err != nil {
log.Error().Err(err).Msgf("failed to upload runtime sbom")
diff --git a/deepfence_worker/tasks/secretscan/secretscan.go b/deepfence_worker/tasks/secretscan/secretscan.go
index 034e4ba283..6035c5a897 100644
--- a/deepfence_worker/tasks/secretscan/secretscan.go
+++ b/deepfence_worker/tasks/secretscan/secretscan.go
@@ -127,7 +127,10 @@ func (s SecretScan) StartSecretScan(ctx context.Context, task *asynq.Task) error
}()
// send inprogress status
- scanCtx.Checkpoint("After initialization")
+ err = scanCtx.Checkpoint("After initialization")
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
// get registry credentials
authDir, creds, err := workerUtils.GetConfigFileFromRegistry(ctx, params.RegistryId)
@@ -186,7 +189,10 @@ func (s SecretScan) StartSecretScan(ctx context.Context, task *asynq.Task) error
return nil
}
- scanCtx.Checkpoint("After skopeo download")
+ err = scanCtx.Checkpoint("After skopeo download")
+ if err != nil {
+ log.Error().Msg(err.Error())
+ }
// init secret scan
scanResult, err := secretScan.ExtractAndScanFromTar(dir, imageName, scanCtx)
diff --git a/deepfence_worker/utils/registry.go b/deepfence_worker/utils/registry.go
index 542518048d..55157d3850 100644
--- a/deepfence_worker/utils/registry.go
+++ b/deepfence_worker/utils/registry.go
@@ -542,39 +542,3 @@ func createAuthFile(registryId, registryUrl, username, password string) (string,
}
return authFilePath, nil
}
-
-func getEcrCredentials(awsAccessKey, awsSecret, awsRegionName, registryId string, useIAMRole bool, targetAccountRoleARN string) (string, string) {
- var awsConfig aws.Config
- var svc *ecr.ECR
- var creds *credentials.Credentials
-
- if !useIAMRole {
- awsConfig.WithCredentials(credentials.NewStaticCredentials(awsAccessKey, awsSecret, ""))
- }
- mySession := session.Must(session.NewSession(&awsConfig))
-
- if useIAMRole {
- creds = stscreds.NewCredentials(mySession, targetAccountRoleARN)
- svc = ecr.New(mySession, &aws.Config{
- Credentials: creds,
- Region: &awsRegionName,
- })
- } else {
- svc = ecr.New(mySession, aws.NewConfig().WithRegion(awsRegionName))
- }
-
- var authorizationTokenRequestInput ecr.GetAuthorizationTokenInput
- if registryId != "" {
- authorizationTokenRequestInput.SetRegistryIds([]*string{®istryId})
- }
- authorizationTokenResponse, err := svc.GetAuthorizationToken(&authorizationTokenRequestInput)
- if err != nil {
- return "", ""
- }
- authorizationData := authorizationTokenResponse.AuthorizationData
- if len(authorizationData) == 0 {
- return "", ""
- }
- authData := *authorizationData[0]
- return *authData.ProxyEndpoint, *authData.AuthorizationToken
-}
diff --git a/deepfence_worker/worker.go b/deepfence_worker/worker.go
index 635810dc33..eb0530a3cf 100644
--- a/deepfence_worker/worker.go
+++ b/deepfence_worker/worker.go
@@ -22,6 +22,14 @@ import (
"github.com/twmb/franz-go/pkg/kgo"
)
+var (
+ DefaultQueues = map[string]int{
+ utils.Q_CRITICAL: 6,
+ utils.Q_DEFAULT: 3,
+ utils.Q_LOW: 1,
+ }
+)
+
type Worker struct {
cfg config
mux *asynq.ServeMux
@@ -71,13 +79,12 @@ func skipRetryCallbackWrapper(taskCallback wtils.WorkerHandler) wtils.WorkerHand
func (w *Worker) AddRetryableHandler(
task string,
taskCallback wtils.WorkerHandler,
-) error {
+) {
w.mux.HandleFunc(
task,
contextInjectorCallbackWrapper(w.namespace,
telemetryCallbackWrapper(task, taskCallback)),
)
- return nil
}
// CronJobHandler do not retry on failure
@@ -85,14 +92,13 @@ func (w *Worker) AddRetryableHandler(
func (w *Worker) AddOneShotHandler(
task string,
taskCallback wtils.WorkerHandler,
-) error {
+) {
w.mux.HandleFunc(
task,
skipRetryCallbackWrapper(
contextInjectorCallbackWrapper(w.namespace,
telemetryCallbackWrapper(task, taskCallback))),
)
- return nil
}
func NewWorker(ns directory.NamespaceID, cfg config) (Worker, context.CancelFunc, error) {
@@ -106,28 +112,43 @@ func NewWorker(ns directory.NamespaceID, cfg config) (Worker, context.CancelFunc
ingestC := make(chan *kgo.Record, 10000)
go utils.StartKafkaProducer(kafkaCtx, cfg.KafkaBrokers, ingestC)
+ // worker config
+ qCfg := asynq.Config{
+ StrictPriority: true,
+ Concurrency: cfg.TasksConcurrency,
+ ErrorHandler: asynq.ErrorHandlerFunc(func(ctx context.Context, task *asynq.Task, err error) {
+ retried, _ := asynq.GetRetryCount(ctx)
+ maxRetry, _ := asynq.GetMaxRetry(ctx)
+ if retried >= maxRetry {
+ err = fmt.Errorf("retry exhausted for task %s: %w", task.Type(), err)
+ }
+ log.Error().Err(err).Msgf("worker task %s, payload: %s", task.Type(), task.Payload())
+ }),
+ }
+
+ if len(cfg.ProcessQueues) > 0 {
+ log.Info().Msgf("process messages from queues %s", cfg.ProcessQueues)
+ processQueues := map[string]int{}
+ for _, qName := range cfg.ProcessQueues {
+ if val, found := DefaultQueues[qName]; found {
+ processQueues[qName] = val
+ } else {
+ log.Error().Msgf("unknown queue name %s", qName)
+ }
+ }
+ qCfg.Queues = processQueues
+ } else {
+ log.Info().Msg("process messages from all queues")
+ qCfg.Queues = DefaultQueues
+ }
+
srv := asynq.NewServer(
asynq.RedisClientOpt{
Addr: fmt.Sprintf("%s:%s", cfg.RedisHost, cfg.RedisPort),
DB: cfg.RedisDbNumber,
Password: cfg.RedisPassword,
},
- asynq.Config{
- Concurrency: 10,
- Queues: map[string]int{
- "critical": 6,
- "default": 3,
- "low": 1,
- },
- ErrorHandler: asynq.ErrorHandlerFunc(func(ctx context.Context, task *asynq.Task, err error) {
- retried, _ := asynq.GetRetryCount(ctx)
- maxRetry, _ := asynq.GetMaxRetry(ctx)
- if retried >= maxRetry {
- err = fmt.Errorf("retry exhausted for task %s: %w", task.Type(), err)
- }
- log.Error().Msgf("worker task error: %v", err)
- }),
- },
+ qCfg,
)
mux := asynq.NewServeMux()
diff --git a/deployment-scripts/docker-compose.yml b/deployment-scripts/docker-compose.yml
index ecef94a67c..5f15336dde 100644
--- a/deployment-scripts/docker-compose.yml
+++ b/deployment-scripts/docker-compose.yml
@@ -37,6 +37,9 @@ x-service-variables: &common-creds
DF_ENABLE_DEBUG: ""
DEEPFENCE_ENABLE_PPROF: ""
DEEPFENCE_TELEMETRY_ENABLED: "true"
+ DEEPFENCE_TASKS_CONCURRENCY: 50
+ DEEPFENCE_TASKS_MAX_RETRIES: 3
+ DEEPFENCE_MAX_SCAN_WORKLOAD: 5
services:
@@ -363,6 +366,23 @@ services:
max-size: "100m"
restart: unless-stopped
+ # deepfence-asynq-mon:
+ # image: ${IMAGE_REPOSITORY:-deepfenceio}/asynqmon:latest
+ # hostname: deepfence-asynq-mon
+ # container_name: deepfence-asynq-mon
+ # environment:
+ # - PORT=8090
+ # - REDIS_ADDR=deepfence-redis:6379
+ # ports:
+ # - "8090:8090"
+ # networks:
+ # - deepfence_net
+ # logging:
+ # driver: "json-file"
+ # options:
+ # max-size: "100m"
+ # restart: unless-stopped
+
volumes:
deepfence_data:
driver: local
diff --git a/docker_builders/Dockerfile-alpine b/docker_builders/Dockerfile-alpine
index f8f121c258..8b7a852586 100644
--- a/docker_builders/Dockerfile-alpine
+++ b/docker_builders/Dockerfile-alpine
@@ -59,3 +59,5 @@ RUN mkdir /home/deepfence
COPY deepfence_agent/build_scripts/*.sh /home/deepfence/
RUN apk add cmake fts-dev openssl-dev openssl-libs-static
+
+RUN go install github.com/hibiken/asynq/tools/asynq@latest
diff --git a/docker_builders/Dockerfile-debian b/docker_builders/Dockerfile-debian
index ac4c2f4bb0..0de35593f1 100644
--- a/docker_builders/Dockerfile-debian
+++ b/docker_builders/Dockerfile-debian
@@ -1,7 +1,7 @@
ARG DF_IMG_TAG=latest
ARG IMAGE_REPOSITORY=deepfenceio
-FROM golang:1.20-bookworm
+FROM golang:1.21-bookworm
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update
diff --git a/docs/docs/architecture/cloudscanner.md b/docs/docs/architecture/cloudscanner.md
index 8a050f1ad7..86ed114a13 100644
--- a/docs/docs/architecture/cloudscanner.md
+++ b/docs/docs/architecture/cloudscanner.md
@@ -25,7 +25,7 @@ Each Cloud Scanner task runs in your cloud environment, gathering inventory and
Cloud Scanner tasks are deployed using the appropriate Terraform module for each cloud, and are configured with the address and API key of your management console. They 'phone home' to your management console and take instructions on demand; they do not listen for remote connections or control.
:::info
-Refer to the Installation Documentation to [Learn how to install Cloud Scanner tasks](/docs/v2.0/cloudscanner)
+Refer to the Installation Documentation to [Learn how to install Cloud Scanner tasks](/docs/cloudscanner)
:::
diff --git a/docs/docs/architecture/sensors.md b/docs/docs/architecture/sensors.md
index 13bd0b3c1f..f762d3fddb 100644
--- a/docs/docs/architecture/sensors.md
+++ b/docs/docs/architecture/sensors.md
@@ -14,5 +14,5 @@ The sensors support the following production platforms:
* **AWS Fargate** The sensor is deployed as a daemon service alongside each serverless instance.
:::info
-Refer to the Installation Documentation to [Learn how to install Sensor Agents](/docs/v2.0/sensors)
+Refer to the Installation Documentation to [Learn how to install Sensor Agents](/docs/sensors)
:::
\ No newline at end of file
diff --git a/docs/docs/cloudscanner/aws.md b/docs/docs/cloudscanner/aws.md
index 1255c9a1ee..afb7a972c2 100644
--- a/docs/docs/cloudscanner/aws.md
+++ b/docs/docs/cloudscanner/aws.md
@@ -102,7 +102,7 @@ Controls are grouped into **benchmarks**. Where multiple benchmarks are availabl
When you run a compliance scan, you can select which benchmarks you wish to measure against, and ThreatMapper will then evaluate the appropriate controls and present the results, by benchmark, once the scan has completed.
-For full information, refer to [Operations: Compliance Scanning](/docs/v2.0/operations/compliance).
+For full information, refer to [Operations: Compliance Scanning](/docs/operations/compliance).
:::tip Maximizing Coverage
For maximum coverage, you can use both Cloud Scanner and local Sensor Agent compliance scans together. You could scan your AWS infrastructure using Cloud Scanner, and [scan selected VMs deployed within AWS](other) using the Sensor Agent.
diff --git a/docs/docs/cloudscanner/azure.md b/docs/docs/cloudscanner/azure.md
index 239af84344..5b65f996e0 100644
--- a/docs/docs/cloudscanner/azure.md
+++ b/docs/docs/cloudscanner/azure.md
@@ -45,7 +45,7 @@ Controls are grouped into **benchmarks**. Where multiple benchmarks are availabl
When you run a compliance scan, you can select which benchmarks you wish to measure against, and ThreatMapper will then evaluate the appropriate controls and present the results, by benchmark, once the scan has completed.
-For full information, refer to [Operations: Compliance Scanning](/docs/v2.0/operations/compliance).
+For full information, refer to [Operations: Compliance Scanning](/docs/operations/compliance).
:::tip Maximizing Coverage
For maximum coverage, you can use both Cloud Scanner and local Sensor Agent compliance scans together. You could scan your Azure infrastructure using Cloud Scanner, and [scan selected VMs deployed within Azure](other) using the Sensor Agent.
diff --git a/docs/docs/cloudscanner/gcp.md b/docs/docs/cloudscanner/gcp.md
index 3bdc080832..77fb7cb6f0 100644
--- a/docs/docs/cloudscanner/gcp.md
+++ b/docs/docs/cloudscanner/gcp.md
@@ -70,7 +70,7 @@ Controls are grouped into **benchmarks**. Where multiple benchmarks are availabl
When you run a compliance scan, you can select which benchmarks you wish to measure against, and ThreatMapper will then evaluate the appropriate controls and present the results, by benchmark, once the scan has completed.
-For full information, refer to [Operations: Compliance Scanning](/docs/v2.0/operations/compliance).
+For full information, refer to [Operations: Compliance Scanning](/docs/operations/compliance).
:::tip Maximizing Coverage
For maximum coverage, you can use both Cloud Scanner and local Sensor Agent compliance scans together. You could scan your GCP infrastructure using Cloud Scanner, and [scan selected VMs deployed within GCP](other) using the Sensor Agent.
diff --git a/docs/docs/cloudscanner/index.md b/docs/docs/cloudscanner/index.md
index 5e02371c47..e77634c98e 100644
--- a/docs/docs/cloudscanner/index.md
+++ b/docs/docs/cloudscanner/index.md
@@ -10,7 +10,7 @@ The Cloud Scanner task interacts with the local cloud APIs under the instruction
## Before You Begin
-Review the architecture for compliance scanning, as described in [Architecture: Cloud Scanner task](/docs/v2.0/architecture/cloudscanner).
+Review the architecture for compliance scanning, as described in [Architecture: Cloud Scanner task](/docs/architecture/cloudscanner).
## Configuring Cloud Posture Management
diff --git a/docs/docs/cloudscanner/other.md b/docs/docs/cloudscanner/other.md
index 9074a6e04c..10558a569a 100644
--- a/docs/docs/cloudscanner/other.md
+++ b/docs/docs/cloudscanner/other.md
@@ -6,7 +6,7 @@ title: Other Platforms
ThreatMapper can perform compliance posture scanning on linux hosts and Kubernetes master and worker nodes.
-Scanning is done directly, using a local [Sensor Agent](/docs/v2.0/sensors) rather than by using the Cloud Scanner task employed by the cloud platform integrations.
+Scanning is done directly, using a local [Sensor Agent](/docs/sensors) rather than by using the Cloud Scanner task employed by the cloud platform integrations.
## What Compliance Scans are Performed?
@@ -15,7 +15,7 @@ The sensor agent has direct visibility into the configuration of the base operat
When you run a compliance scan, you can select which benchmarks you wish to measure against, and ThreatMapper will then evaluate the appropriate controls and present the results, by benchmark, once the scan has completed.
-For full information, refer to [Operations: Compliance Scanning](/docs/v2.0/operations/compliance).
+For full information, refer to [Operations: Compliance Scanning](/docs/operations/compliance).
:::tip Maximizing Coverage
diff --git a/docs/docs/console/index.md b/docs/docs/console/index.md
index 86a0daa543..a3346ce5fc 100644
--- a/docs/docs/console/index.md
+++ b/docs/docs/console/index.md
@@ -8,9 +8,9 @@ The ThreatMapper Management Console ("Console") is a standalone application, imp
## Before You Begin
-Review the architecture for the Management Console, as described in [Architecture: Management Console](/docs/v2.0/architecture/console).
+Review the architecture for the Management Console, as described in [Architecture: Management Console](/docs/architecture/console).
-Review the requirements for the Management Console, as described in [System Requirements](/docs/v2.0/console/requirements).
+Review the requirements for the Management Console, as described in [System Requirements](/docs/console/requirements).
## Installing the Management Console
diff --git a/docs/docs/console/requirements.md b/docs/docs/console/requirements.md
index b369035efc..85b79f2244 100644
--- a/docs/docs/console/requirements.md
+++ b/docs/docs/console/requirements.md
@@ -9,12 +9,12 @@ The Management Console may be installed on a single Docker host or in a dedicate
* A Docker Host is suitable for small-scale deployments, managing up to several hundred production nodes
* A Kubernetes Cluster is suitable for small and large-scale deployments
-| Feature | Requirements (Docker) | Requirements (Kubernetes) |
-|-------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------|
-| CPU: No of cores | 4 (Recommended: 8) | 3 nodes, 4 cores each |
-| RAM | 16 GB | 3 nodes, 8 GB each |
-| Telemetry and data from Deepfence Sensors | Port 443 (configurable), firewalled | Port 443 (configurable), firewalled |
-| Administrative and API access | Port 443 (configurable), firewalled | Port 443 (configurable), firewalled |
-| Docker | *Version 20.10.18 (minimum version 18.06.0) |
+| Feature | Requirements (Docker) | Requirements (Kubernetes) |
+|-------------------------------------------|---------------------------------------------|-------------------------------------|
+| CPU: No of cores | 8 cores | 3 nodes, 4 cores each |
+| RAM | 16 GB | 3 nodes, 8 GB each |
+| Telemetry and data from Deepfence Sensors | Port 443 (configurable), firewalled | Port 443 (configurable), firewalled |
+| Administrative and API access | Port 443 (configurable), firewalled | Port 443 (configurable), firewalled |
+| Docker | *Version 20.10.18 (minimum version 18.06.0) | |
Larger deployments, managing 250 or more production nodes, will require additional CPU and RAM resources. For enterprise-scale deployments, managing 1000+ production nodes, the ThreatMapper Console should be deployed on a Kubernetes cluster of 3 or more nodes.
diff --git a/docs/docs/developers/build.md b/docs/docs/developers/build.md
index 95830d45e8..cbf7b82c01 100644
--- a/docs/docs/developers/build.md
+++ b/docs/docs/developers/build.md
@@ -33,7 +33,7 @@ cd ThreatMapper/deployment-scripts
docker-compose -f docker-compose.yml up --detach
```
-Once started, you can point a web browser at `https://--IP-ADDRESS---/` to register a first user on the Deepfence Management Console. See [Initial Configuration](/docs/v2.0/console/initial-configuration) for more information.
+Once started, you can point a web browser at `https://--IP-ADDRESS---/` to register a first user on the Deepfence Management Console. See [Initial Configuration](/docs/console/initial-configuration) for more information.
To stop the Deepfence Management Console:
diff --git a/docs/docs/developers/deploy-agent.md b/docs/docs/developers/deploy-agent.md
index 31da6de932..0abc028dd9 100644
--- a/docs/docs/developers/deploy-agent.md
+++ b/docs/docs/developers/deploy-agent.md
@@ -4,13 +4,13 @@ title: Deploy Sensors
# Deploy custom ThreatMapper Sensor Agents
-You should first [build the management console and agents](build) and push the images to a suitable repository. You can then adapt the standard installation instructions ([Docker](/docs/v2.0/sensors/docker), [Kubernetes](/docs/v2.0/sensors/kubernetes)) to refer to your custom images rather than the Deepfence-provided ones.
+You should first [build the management console and agents](build) and push the images to a suitable repository. You can then adapt the standard installation instructions ([Docker](/docs/sensors/docker), [Kubernetes](/docs/sensors/kubernetes)) to refer to your custom images rather than the Deepfence-provided ones.
## Installing and Running the Sensor Agents on a Docker Host
:::tip
-Refer to the [Docker Installation Instructions](/docs/v2.0/sensors/docker) along with the modifications below.
+Refer to the [Docker Installation Instructions](/docs/sensors/docker) along with the modifications below.
:::
Execute the following command to install and start the sensors:
@@ -42,7 +42,7 @@ docker run -dit \
## Installing and Running the Sensor Agents in a Kubernetes Cluster
:::tip
-Refer to the [Kubernetes Installation Instructions](/docs/v2.0/sensors/kubernetes) along with the modifications below.
+Refer to the [Kubernetes Installation Instructions](/docs/sensors/kubernetes) along with the modifications below.
:::
You can use these instructions for helm-based installations in standalone and hosted Kubernetes clusters
diff --git a/docs/docs/developers/deploy-console.md b/docs/docs/developers/deploy-console.md
index 77dd5f17d9..0c1505039b 100644
--- a/docs/docs/developers/deploy-console.md
+++ b/docs/docs/developers/deploy-console.md
@@ -4,14 +4,14 @@ title: Deploy Console
# Deploy a custom ThreatMapper Console
-You should first [build the management console](build) and push the images to a suitable repository. You can then adapt the standard installation instructions ([Docker](/docs/v2.0/console/docker), [Kubernetes](/docs/v2.0/console/kubernetes)) to refer to your custom images rather than the Deepfence-provided ones.
+You should first [build the management console](build) and push the images to a suitable repository. You can then adapt the standard installation instructions ([Docker](/docs/console/docker), [Kubernetes](/docs/console/kubernetes)) to refer to your custom images rather than the Deepfence-provided ones.
## Installing and Running the Management Console on a Docker Host
:::tip
-Refer to the [Docker Installation Instructions](/docs/v2.0/console/docker) along with the modifications below.
+Refer to the [Docker Installation Instructions](/docs/console/docker) along with the modifications below.
:::
1. Download the file [docker-compose.yml](https://github.com/deepfence/ThreatMapper/blob/release-2.0/deployment-scripts/docker-compose.yml) to the system that will host the Console
@@ -31,12 +31,12 @@ Refer to the [Docker Installation Instructions](/docs/v2.0/console/docker) along
## Installing and Running the Management Console in a Kubernetes Cluster
:::tip
-Refer to the [Kubernetes Installation Instructions](/docs/v2.0/console/kubernetes) along with the modifications below.
+Refer to the [Kubernetes Installation Instructions](/docs/console/kubernetes) along with the modifications below.
:::
1. Prepare the cluster, installing the storage driver and metrics service
- Follow the instructions to install the OpenEBS storage and metrics server: [Installation Instructions](/docs/v2.0/console/kubernetes)
+ Follow the instructions to install the OpenEBS storage and metrics server: [Installation Instructions](/docs/console/kubernetes)
2. Install your Management Console
diff --git a/docs/docs/integrations/index.md b/docs/docs/integrations/index.md
index e097f40eec..38927d15bc 100644
--- a/docs/docs/integrations/index.md
+++ b/docs/docs/integrations/index.md
@@ -4,7 +4,7 @@ title: Integrations
# Integrations
-You can integrate ThreatMapper with a variety of notification services. Any time a new vulnerability is detected (for example, during [CI](/docs/v2.0/operations/scanning-ci) or an [automated scan](/docs/v2.0/operations/scanning)), ThreatMapper will submit the details to the configured notification services.
+You can integrate ThreatMapper with a variety of notification services. Any time a new vulnerability is detected (for example, during [CI](/docs/operations/scanning-ci) or an [automated scan](/docs/operations/scanning)), ThreatMapper will submit the details to the configured notification services.
| ![Integrations](../img/integrations.png) |
|:----------------------------------------------:|
diff --git a/docs/docs/kubernetes-scanner/index.md b/docs/docs/kubernetes-scanner/index.md
index 5666f3c7e2..99b9099db7 100644
--- a/docs/docs/kubernetes-scanner/index.md
+++ b/docs/docs/kubernetes-scanner/index.md
@@ -10,4 +10,4 @@ NSA & CISA Cybersecurity Technical Report describes the complexities of securely
## Configuring Kubernetes Scanner
-Deepfence Kubernetes Scanner is installed with agent sensors. Follow the documentation [here](/docs/v2.0/sensors/kubernetes) to install Deepfence agent sensors in the kubernetes cluster.
\ No newline at end of file
+Deepfence Kubernetes Scanner is installed with agent sensors. Follow the documentation [here](/docs/sensors/kubernetes) to install Deepfence agent sensors in the Kubernetes cluster.
\ No newline at end of file
diff --git a/docs/docs/operations/compliance.md b/docs/docs/operations/compliance.md
index a970664bda..15c2755dc8 100644
--- a/docs/docs/operations/compliance.md
+++ b/docs/docs/operations/compliance.md
@@ -38,7 +38,7 @@ The benchmarks available vary by cloud provider:
Begin on the **Posture** page in the ThreatMapper console.
-Select a cloud instance that you have [configured previously](/docs/v2.0/cloudscanner/). You may have several instances of a given cloud type:
+Select a cloud instance that you have [configured previously](/docs/cloudscanner/). You may have several instances of a given cloud type:
| ![Cloud Compliance Scan - Select](../img/compliance-scan-1.png) |
|:---------------------------------------------------------------:|
@@ -48,7 +48,7 @@ Select a cloud instance that you have [configured previously](/docs/v2.0/cloudsc
|:---------------------------------------------------------------:|
| Select target for Cloud Compliance Scan |
-If you want to scan a host (Linux host or Kubernetes master or slave node), ensure that the [threatmapper sensor](/docs/v2.0/sensors) is deployed on that host.
+If you want to scan a host (Linux host or Kubernetes master or worker node), ensure that the [threatmapper sensor](/docs/sensors) is deployed on that host.
Select the compliance benchmarks you wish to run on the target cloud instance or host:
diff --git a/docs/docs/sensors/docker.md b/docs/docs/sensors/docker.md
index ac731faed2..94a2831ced 100644
--- a/docs/docs/sensors/docker.md
+++ b/docs/docs/sensors/docker.md
@@ -6,7 +6,7 @@ title: Docker
On a Linux-based Docker host, the ThreatMapper agents are deployed as a lightweight container.
-Install a docker runtime on the Linux host. Refer to the [Prerequisites for the Sensor Agents](/docs/v2.0/architecture#threatmapper-sensor-containers) for minimum supported platforms.
+Install a docker runtime on the Linux host. Refer to the [Prerequisites for the Sensor Agents](/docs/architecture#threatmapper-sensor-containers) for minimum supported platforms.
For Windows Server hosts, experimental support exists, but it is not suitable for production use.
diff --git a/docs/docs/sensors/index.md b/docs/docs/sensors/index.md
index 88889c6989..caf097ea49 100644
--- a/docs/docs/sensors/index.md
+++ b/docs/docs/sensors/index.md
@@ -10,11 +10,11 @@ A single ThreatMapper Console can manage multiple workload types, and on-premise
## Before You Begin
-Before you install the Sensors, obtain the Management Console URL and API key as described in the [Initial Configuration](/docs/v2.0/console/initial-configuration).
+Before you install the Sensors, obtain the Management Console URL and API key as described in the [Initial Configuration](/docs/console/initial-configuration).
You should take care to install the sensor version that matches your Management Console version, as compatibility across versions is not guaranteed.
-Review the architecture for the Sensor Agent, as described in [Architecture: Sensor Agent](/docs/v2.0/architecture/sensors).
+Review the architecture for the Sensor Agent, as described in [Architecture: Sensor Agent](/docs/architecture/sensors).
## System Requirements
diff --git a/docs/docs/sensors/linux-host.md b/docs/docs/sensors/linux-host.md
index 8aefe675b9..d252ec2eaa 100644
--- a/docs/docs/sensors/linux-host.md
+++ b/docs/docs/sensors/linux-host.md
@@ -8,7 +8,7 @@ On a Linux-based bare-metal or virtual machine workload, the ThreatMapper sensor
## ThreatMapper Sensor Agents
-Install a docker runtime on the Linux host. Refer to the [Prerequisites for the Sensor Agents](/docs/v2.0/architecture#threatmapper-sensor-containers) for minimum supported platforms.
+Install a docker runtime on the Linux host. Refer to the [Prerequisites for the Sensor Agents](/docs/architecture#threatmapper-sensor-containers) for minimum supported platforms.
Run the following command to start the Sensor Agent on the host. You can find the Deepfence API key under
`Setting>User Management>API Key`.
diff --git a/docs/docs/tips/automating-scans.md b/docs/docs/tips/automating-scans.md
index 323905760f..e76db5a0e2 100644
--- a/docs/docs/tips/automating-scans.md
+++ b/docs/docs/tips/automating-scans.md
@@ -15,7 +15,7 @@ ThreatMapper can scan your production platforms periodically, using the most up-
## Automating ThreatMapper
-The results of automated scans are added to the **Vulnerability Scans** report, and can be raised through any configured [Notification](/docs/v2.0/integrations) method.
+The results of automated scans are added to the **Vulnerability Scans** report, and can be raised through any configured [Notification](/docs/integrations) method.
ThreatMapper presents a series of APIs that you can use to enumerate nodes and run scans:
diff --git a/docs/vulnerability_feeds/listing.json b/docs/vulnerability_feeds/listing.json
index 219930131b..ae802f7f0a 100644
--- a/docs/vulnerability_feeds/listing.json
+++ b/docs/vulnerability_feeds/listing.json
@@ -2,54 +2,54 @@
"available": {
"3": [
{
- "built": "2023-10-28T00:58:59.985968641Z",
+ "built": "2023-11-05T00:58:35.283561511Z",
"version": 3,
- "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v3-2023-10-28_00-49-16/threatintel-vuln-v3-2023-10-28_00-49-16.tar.gz",
- "checksum": "d356ea0675d5a34d5d97cac44ce42974e4464ff11a12ac62c1e470cee29d6a55"
+ "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v3-2023-11-05_00-49-38/threatintel-vuln-v3-2023-11-05_00-49-38.tar.gz",
+ "checksum": "cc233d9fccc55753a430eb53b29e091f0abd0135a067003d7fca3ad0ec810e8c"
},
{
- "built": "2023-10-29T01:00:33.546072153Z",
+ "built": "2023-11-06T01:00:41.469960214Z",
"version": 3,
- "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v3-2023-10-29_00-49-42/threatintel-vuln-v3-2023-10-29_00-49-42.tar.gz",
- "checksum": "378ee6628e54620407c86cb9a4c91ea903dac1abc525288c8691fea2dc4920c5"
+ "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v3-2023-11-06_00-49-52/threatintel-vuln-v3-2023-11-06_00-49-52.tar.gz",
+ "checksum": "25debd575af879adaa6d8c7e11994a7174d148187e28547a74a5922356bedeb9"
},
{
- "built": "2023-10-30T01:00:47.160990196Z",
+ "built": "2023-11-07T00:59:26.741318124Z",
"version": 3,
- "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v3-2023-10-30_00-49-31/threatintel-vuln-v3-2023-10-30_00-49-31.tar.gz",
- "checksum": "7aed3d4dd014e0b1aa19a821ac66e8f4bd9188f1d57f39663b7d42cca8cb959a"
+ "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v3-2023-11-07_00-49-42/threatintel-vuln-v3-2023-11-07_00-49-42.tar.gz",
+ "checksum": "62717bb183faea1c04812939a084534e73f11603bf678d30456f5136a79c89ab"
},
{
- "built": "2023-10-31T00:59:39.393644376Z",
+ "built": "2023-11-08T00:58:38.298849824Z",
"version": 3,
- "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v3-2023-10-31_00-49-28/threatintel-vuln-v3-2023-10-31_00-49-28.tar.gz",
- "checksum": "3061847f74d3bf238207c72460d96a9998767e2e4b09fba69e8a06b34381cf27"
+ "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v3-2023-11-08_00-49-25/threatintel-vuln-v3-2023-11-08_00-49-25.tar.gz",
+ "checksum": "911b907ef7819be6c4846450d0eb9fff1218f8062d0e370ada488e39432b2880"
}
],
"5": [
{
- "built": "2023-10-28T00:38:17.496648676Z",
+ "built": "2023-11-05T00:37:34.25630835Z",
"version": 5,
- "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v5-2023-10-28_00-16-00/threatintel-vuln-v5-2023-10-28_00-16-00.tar.gz",
- "checksum": "631969678eeb9c033027793dcba4fa6fe64a585bdcfedf26306f9b30900df147"
+ "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v5-2023-11-05_00-18-24/threatintel-vuln-v5-2023-11-05_00-18-24.tar.gz",
+ "checksum": "edae063a9fe2b8a01396134f6d6b8b6a7ecfaad2a0c1cc9dfd6c53efd77a3f78"
},
{
- "built": "2023-10-29T00:40:17.480567019Z",
+ "built": "2023-11-06T00:32:42.102503386Z",
"version": 5,
- "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v5-2023-10-29_00-17-58/threatintel-vuln-v5-2023-10-29_00-17-58.tar.gz",
- "checksum": "523a84c849a91904740e497977955bbf13b72935ef55b0018475d27678860cbf"
+ "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v5-2023-11-06_00-17-30/threatintel-vuln-v5-2023-11-06_00-17-30.tar.gz",
+ "checksum": "8691bce0cca7891bdcde47ff9c2a7e1529b9e80a5fb797300268f2c412e86da4"
},
{
- "built": "2023-10-30T00:32:44.023537268Z",
+ "built": "2023-11-07T00:35:54.399286325Z",
"version": 5,
- "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v5-2023-10-30_00-17-05/threatintel-vuln-v5-2023-10-30_00-17-05.tar.gz",
- "checksum": "e946366ecbe42316fb6d654e197ce754f2b714a75f5c71dd08c0e46cf9dce0e5"
+ "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v5-2023-11-07_00-17-20/threatintel-vuln-v5-2023-11-07_00-17-20.tar.gz",
+ "checksum": "887e6c16de8a531f5b60450647de5e9771989ef1368723c4a67c4c81cddaaf44"
},
{
- "built": "2023-10-31T00:32:45.109015672Z",
+ "built": "2023-11-08T00:41:11.6710128Z",
"version": 5,
- "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v5-2023-10-31_00-16-53/threatintel-vuln-v5-2023-10-31_00-16-53.tar.gz",
- "checksum": "b7b5f2deea9c5ed299d83d8779c41f85737a3a9678992932941797a0df97ffd7"
+ "url": "https://threat-intel.deepfence.io/vulnerability-db/releases/download/threatintel-vuln-v5-2023-11-08_00-16-56/threatintel-vuln-v5-2023-11-08_00-16-56.tar.gz",
+ "checksum": "cfcfa3aee794a9934397bc83128b87185d59712740324cc9dd841ff5c62e3fd2"
}
]
}
diff --git a/golang_deepfence_sdk b/golang_deepfence_sdk
index 5506162b00..ba841a2c5b 160000
--- a/golang_deepfence_sdk
+++ b/golang_deepfence_sdk
@@ -1 +1 @@
-Subproject commit 5506162b00b7a44da20782337a414a18330954f2
+Subproject commit ba841a2c5b4fc6580afb2e64223188498b110d2f