Mirror of https://github.com/projectdiscovery/nuclei.git, synced 2025-12-29 22:23:02 +00:00

Commit 1b6ae44bb7: Merge branch 'dev' of https://github.com/projectdiscovery/nuclei into loading-performance-improvements-v2

.github/workflows/tests.yaml (vendored) | 8
@@ -59,9 +59,11 @@ jobs:
 working-directory: examples/simple/
 # - run: go run . # Temporarily disabled very flaky in github actions
 # working-directory: examples/advanced/
-- name: "with Speed Control"
-run: go run .
-working-directory: examples/with_speed_control/
+# TODO: FIX with ExecutionID (ref: https://github.com/projectdiscovery/nuclei/pull/6296)
+# - name: "with Speed Control"
+# run: go run .
+# working-directory: examples/with_speed_control/
 
 integration:
 name: "Integration tests"
@@ -1,5 +1,5 @@
 # Build
-FROM golang:1.23-alpine AS builder
+FROM golang:1.24-alpine AS builder
 
 RUN apk add build-base
 WORKDIR /app
@@ -42,8 +42,8 @@ func runFunctionalTests(debug bool) (error, bool) {
 return errors.Wrap(err, "could not open test cases"), true
 }
 defer func() {
 _ = file.Close()
 }()
 
 errored, failedTestCases := runTestCases(file, debug)
 
@@ -179,8 +179,8 @@ func (h *headlessFileUpload) Execute(filePath string) error {
 }
 
 defer func() {
 _ = file.Close()
 }()
 
 content, err := io.ReadAll(file)
 if err != nil {
@@ -238,8 +238,8 @@ func (h *headlessFileUploadNegative) Execute(filePath string) error {
 }
 
 defer func() {
 _ = file.Close()
 }()
 
 content, err := io.ReadAll(file)
 if err != nil {
@@ -948,8 +948,8 @@ func (h *httpRequestSelfContained) Execute(filePath string) error {
 _ = server.ListenAndServe()
 }()
 defer func() {
 _ = server.Close()
 }()
 
 results, err := testutils.RunNucleiTemplateAndGetResults(filePath, "", debug, "-esc")
 if err != nil {
@@ -986,8 +986,8 @@ func (h *httpRequestSelfContainedWithParams) Execute(filePath string) error {
 _ = server.ListenAndServe()
 }()
 defer func() {
 _ = server.Close()
 }()
 
 results, err := testutils.RunNucleiTemplateAndGetResults(filePath, "", debug, "-esc")
 if err != nil {
@@ -1021,8 +1021,8 @@ func (h *httpRequestSelfContainedFileInput) Execute(filePath string) error {
 _ = server.ListenAndServe()
 }()
 defer func() {
 _ = server.Close()
 }()
 
 // create temp file
 FileLoc, err := os.CreateTemp("", "self-contained-payload-*.txt")
@@ -1033,8 +1033,8 @@ func (h *httpRequestSelfContainedFileInput) Execute(filePath string) error {
 return errorutil.NewWithErr(err).Msgf("failed to write payload to temp file")
 }
 defer func() {
 _ = FileLoc.Close()
 }()
 
 results, err := testutils.RunNucleiTemplateAndGetResults(filePath, "", debug, "-V", "test="+FileLoc.Name(), "-esc")
 if err != nil {
@@ -4,6 +4,7 @@ import (
 "flag"
 "fmt"
 "os"
+"regexp"
 "runtime"
 "strings"
 
@@ -90,8 +91,8 @@ func main() {
 defer fuzzplayground.Cleanup()
 server := fuzzplayground.GetPlaygroundServer()
 defer func() {
 _ = server.Close()
 }()
 go func() {
 if err := server.Start("localhost:8082"); err != nil {
 if !strings.Contains(err.Error(), "Server closed") {
@@ -210,7 +211,7 @@ func execute(testCase testutils.TestCase, templatePath string) (string, error) {
 }
 
 func expectResultsCount(results []string, expectedNumbers ...int) error {
-results = filterHeadlessLogs(results)
+results = filterLines(results)
 match := sliceutil.Contains(expectedNumbers, len(results))
 if !match {
 return fmt.Errorf("incorrect number of results: %d (actual) vs %v (expected) \nResults:\n\t%s\n", len(results), expectedNumbers, strings.Join(results, "\n\t")) // nolint:all
@@ -224,6 +225,13 @@ func normalizeSplit(str string) []string {
 })
 }
 
+// filterLines applies all filtering functions to the results
+func filterLines(results []string) []string {
+results = filterHeadlessLogs(results)
+results = filterUnsignedTemplatesWarnings(results)
+return results
+}
+
 // if chromium is not installed go-rod installs it in .cache directory
 // this function filters out the logs from download and installation
 func filterHeadlessLogs(results []string) []string {
@@ -237,3 +245,16 @@ func filterHeadlessLogs(results []string) []string {
 }
 return filtered
 }
+
+// filterUnsignedTemplatesWarnings filters out warning messages about unsigned templates
+func filterUnsignedTemplatesWarnings(results []string) []string {
+filtered := []string{}
+unsignedTemplatesRegex := regexp.MustCompile(`Loading \d+ unsigned templates for scan\. Use with caution\.`)
+for _, result := range results {
+if unsignedTemplatesRegex.MatchString(result) {
+continue
+}
+filtered = append(filtered, result)
+}
+return filtered
+}
@@ -68,17 +68,21 @@ func executeNucleiAsLibrary(templatePath, templateURL string) ([]string, error)
 cache := hosterrorscache.New(30, hosterrorscache.DefaultMaxHostsCount, nil)
 defer cache.Close()
 
+defaultOpts := types.DefaultOptions()
+defaultOpts.ExecutionId = "test"
+
 mockProgress := &testutils.MockProgressClient{}
-reportingClient, err := reporting.New(&reporting.Options{}, "", false)
+reportingClient, err := reporting.New(&reporting.Options{ExecutionId: defaultOpts.ExecutionId}, "", false)
 if err != nil {
 return nil, err
 }
 defer reportingClient.Close()
 
-defaultOpts := types.DefaultOptions()
 _ = protocolstate.Init(defaultOpts)
 _ = protocolinit.Init(defaultOpts)
 
+defer protocolstate.Close(defaultOpts.ExecutionId)
+
 defaultOpts.Templates = goflags.StringSlice{templatePath}
 defaultOpts.ExcludeTags = config.ReadIgnoreFile().Tags
 
@@ -100,7 +104,7 @@ func executeNucleiAsLibrary(templatePath, templateURL string) ([]string, error)
 ratelimiter := ratelimit.New(context.Background(), 150, time.Second)
 defer ratelimiter.Stop()
 
-executerOpts := protocols.ExecutorOptions{
+executerOpts := &protocols.ExecutorOptions{
 Output: outputWriter,
 Options: defaultOpts,
 Progress: mockProgress,
@@ -116,7 +120,7 @@ func executeNucleiAsLibrary(templatePath, templateURL string) ([]string, error)
 engine := core.New(defaultOpts)
 engine.SetExecuterOptions(executerOpts)
 
-workflowLoader, err := parsers.NewLoader(&executerOpts)
+workflowLoader, err := parsers.NewLoader(executerOpts)
 if err != nil {
 log.Fatalf("Could not create workflow loader: %s\n", err)
 }
@@ -128,7 +132,7 @@ func executeNucleiAsLibrary(templatePath, templateURL string) ([]string, error)
 }
 store.Load()
 
-_ = engine.Execute(context.Background(), store.Templates(), provider.NewSimpleInputProviderWithUrls(templateURL))
+_ = engine.Execute(context.Background(), store.Templates(), provider.NewSimpleInputProviderWithUrls(defaultOpts.ExecutionId, templateURL))
 engine.WorkPool().Wait() // Wait for the scan to finish
 
 return results, nil
@@ -34,8 +34,8 @@ func (h *networkBasic) Execute(filePath string) error {
 
 ts := testutils.NewTCPServer(nil, defaultStaticPort, func(conn net.Conn) {
 defer func() {
 _ = conn.Close()
 }()
 
 data, err := reader.ConnReadNWithTimeout(conn, 4, time.Duration(5)*time.Second)
 if err != nil {
@@ -71,8 +71,8 @@ func (h *networkMultiStep) Execute(filePath string) error {
 
 ts := testutils.NewTCPServer(nil, defaultStaticPort, func(conn net.Conn) {
 defer func() {
 _ = conn.Close()
 }()
 
 data, err := reader.ConnReadNWithTimeout(conn, 5, time.Duration(5)*time.Second)
 if err != nil {
@@ -119,8 +119,8 @@ type networkRequestSelContained struct{}
 func (h *networkRequestSelContained) Execute(filePath string) error {
 ts := testutils.NewTCPServer(nil, defaultStaticPort, func(conn net.Conn) {
 defer func() {
 _ = conn.Close()
 }()
 
 _, _ = conn.Write([]byte("Authentication successful"))
 })
@@ -141,8 +141,8 @@ func (h *networkVariables) Execute(filePath string) error {
 
 ts := testutils.NewTCPServer(nil, defaultStaticPort, func(conn net.Conn) {
 defer func() {
 _ = conn.Close()
 }()
 
 data, err := reader.ConnReadNWithTimeout(conn, 4, time.Duration(5)*time.Second)
 if err != nil {
@@ -171,8 +171,8 @@ type networkPort struct{}
 func (n *networkPort) Execute(filePath string) error {
 ts := testutils.NewTCPServer(nil, 23846, func(conn net.Conn) {
 defer func() {
 _ = conn.Close()
 }()
 
 data, err := reader.ConnReadNWithTimeout(conn, 4, time.Duration(5)*time.Second)
 if err != nil {
@@ -206,8 +206,8 @@ func (n *networkPort) Execute(filePath string) error {
 // this is positive test case where we expect port to be overridden and 34567 to be used
 ts2 := testutils.NewTCPServer(nil, 34567, func(conn net.Conn) {
 defer func() {
 _ = conn.Close()
 }()
 
 data, err := reader.ConnReadNWithTimeout(conn, 4, time.Duration(5)*time.Second)
 if err != nil {
@@ -16,7 +16,7 @@ var profileLoaderTestcases = []TestCaseInfo{
 type profileLoaderByRelFile struct{}
 
 func (h *profileLoaderByRelFile) Execute(testName string) error {
-results, err := testutils.RunNucleiWithArgsAndGetResults(false, "-tl", "-tp", "cloud.yml")
+results, err := testutils.RunNucleiWithArgsAndGetResults(debug, "-tl", "-tp", "cloud.yml")
 if err != nil {
 return errorutil.NewWithErr(err).Msgf("failed to load template with id")
 }
@@ -29,7 +29,7 @@ func (h *profileLoaderByRelFile) Execute(testName string) error {
 type profileLoaderById struct{}
 
 func (h *profileLoaderById) Execute(testName string) error {
-results, err := testutils.RunNucleiWithArgsAndGetResults(false, "-tl", "-tp", "cloud")
+results, err := testutils.RunNucleiWithArgsAndGetResults(debug, "-tl", "-tp", "cloud")
 if err != nil {
 return errorutil.NewWithErr(err).Msgf("failed to load template with id")
 }
@@ -43,7 +43,7 @@ func (h *profileLoaderById) Execute(testName string) error {
 type customProfileLoader struct{}
 
 func (h *customProfileLoader) Execute(filepath string) error {
-results, err := testutils.RunNucleiWithArgsAndGetResults(false, "-tl", "-tp", filepath)
+results, err := testutils.RunNucleiWithArgsAndGetResults(debug, "-tl", "-tp", filepath)
 if err != nil {
 return errorutil.NewWithErr(err).Msgf("failed to load template with id")
 }
@@ -13,14 +13,15 @@ import (
 "strings"
 "time"
 
+"github.com/projectdiscovery/gologger"
 _pdcp "github.com/projectdiscovery/nuclei/v3/internal/pdcp"
 "github.com/projectdiscovery/utils/auth/pdcp"
 "github.com/projectdiscovery/utils/env"
 _ "github.com/projectdiscovery/utils/pprof"
 stringsutil "github.com/projectdiscovery/utils/strings"
+"github.com/rs/xid"
 
 "github.com/projectdiscovery/goflags"
-"github.com/projectdiscovery/gologger"
 "github.com/projectdiscovery/gologger/levels"
 "github.com/projectdiscovery/interactsh/pkg/client"
 "github.com/projectdiscovery/nuclei/v3/internal/runner"
@@ -52,16 +53,18 @@ var (
 )
 
 func main() {
+options.Logger = gologger.DefaultLogger
+
 // enables CLI specific configs mostly interactive behavior
 config.CurrentAppMode = config.AppModeCLI
 
 if err := runner.ConfigureOptions(); err != nil {
-gologger.Fatal().Msgf("Could not initialize options: %s\n", err)
+options.Logger.Fatal().Msgf("Could not initialize options: %s\n", err)
 }
 _ = readConfig()
 
 if options.ListDslSignatures {
-gologger.Info().Msgf("The available custom DSL functions are:")
+options.Logger.Info().Msgf("The available custom DSL functions are:")
 fmt.Println(dsl.GetPrintableDslFunctionSignatures(options.NoColor))
 return
 }
@@ -72,7 +75,7 @@ func main() {
 templates.UseOptionsForSigner(options)
 tsigner, err := signer.NewTemplateSigner(nil, nil) // will read from env , config or generate new keys
 if err != nil {
-gologger.Fatal().Msgf("couldn't initialize signer crypto engine: %s\n", err)
+options.Logger.Fatal().Msgf("couldn't initialize signer crypto engine: %s\n", err)
 }
 
 successCounter := 0
@@ -88,7 +91,7 @@ func main() {
 if err != templates.ErrNotATemplate {
 // skip warnings and errors as given items are not templates
 errorCounter++
-gologger.Error().Msgf("could not sign '%s': %s\n", iterItem, err)
+options.Logger.Error().Msgf("could not sign '%s': %s\n", iterItem, err)
 }
 } else {
 successCounter++
@@ -97,10 +100,10 @@ func main() {
 return nil
 })
 if err != nil {
-gologger.Error().Msgf("%s\n", err)
+options.Logger.Error().Msgf("%s\n", err)
 }
 }
-gologger.Info().Msgf("All templates signatures were elaborated success=%d failed=%d\n", successCounter, errorCounter)
+options.Logger.Info().Msgf("All templates signatures were elaborated success=%d failed=%d\n", successCounter, errorCounter)
 return
 }
 
@@ -111,7 +114,7 @@ func main() {
 createProfileFile := func(ext, profileType string) *os.File {
 f, err := os.Create(memProfile + ext)
 if err != nil {
-gologger.Fatal().Msgf("profile: could not create %s profile %q file: %v", profileType, f.Name(), err)
+options.Logger.Fatal().Msgf("profile: could not create %s profile %q file: %v", profileType, f.Name(), err)
 }
 return f
 }
@@ -125,18 +128,18 @@ func main() {
 
 // Start tracing
 if err := trace.Start(traceFile); err != nil {
-gologger.Fatal().Msgf("profile: could not start trace: %v", err)
+options.Logger.Fatal().Msgf("profile: could not start trace: %v", err)
 }
 
 // Start CPU profiling
 if err := pprof.StartCPUProfile(cpuProfileFile); err != nil {
-gologger.Fatal().Msgf("profile: could not start CPU profile: %v", err)
+options.Logger.Fatal().Msgf("profile: could not start CPU profile: %v", err)
 }
 
 defer func() {
 // Start heap memory snapshot
 if err := pprof.WriteHeapProfile(memProfileFile); err != nil {
-gologger.Fatal().Msgf("profile: could not write memory profile: %v", err)
+options.Logger.Fatal().Msgf("profile: could not write memory profile: %v", err)
 }
 
 pprof.StopCPUProfile()
@@ -146,24 +149,26 @@ func main() {
 
 runtime.MemProfileRate = oldMemProfileRate
 
-gologger.Info().Msgf("CPU profile saved at %q", cpuProfileFile.Name())
-gologger.Info().Msgf("Memory usage snapshot saved at %q", memProfileFile.Name())
-gologger.Info().Msgf("Traced at %q", traceFile.Name())
+options.Logger.Info().Msgf("CPU profile saved at %q", cpuProfileFile.Name())
+options.Logger.Info().Msgf("Memory usage snapshot saved at %q", memProfileFile.Name())
+options.Logger.Info().Msgf("Traced at %q", traceFile.Name())
 }()
 }
 
+options.ExecutionId = xid.New().String()
+
 runner.ParseOptions(options)
 
 if options.ScanUploadFile != "" {
 if err := runner.UploadResultsToCloud(options); err != nil {
-gologger.Fatal().Msgf("could not upload scan results to cloud dashboard: %s\n", err)
+options.Logger.Fatal().Msgf("could not upload scan results to cloud dashboard: %s\n", err)
 }
 return
 }
 
 nucleiRunner, err := runner.New(options)
 if err != nil {
-gologger.Fatal().Msgf("Could not create runner: %s\n", err)
+options.Logger.Fatal().Msgf("Could not create runner: %s\n", err)
 }
 if nucleiRunner == nil {
 return
@@ -176,10 +181,10 @@ func main() {
 stackMonitor.RegisterCallback(func(dumpID string) error {
 resumeFileName := fmt.Sprintf("crash-resume-file-%s.dump", dumpID)
 if options.EnableCloudUpload {
-gologger.Info().Msgf("Uploading scan results to cloud...")
+options.Logger.Info().Msgf("Uploading scan results to cloud...")
 }
 nucleiRunner.Close()
-gologger.Info().Msgf("Creating resume file: %s\n", resumeFileName)
+options.Logger.Info().Msgf("Creating resume file: %s\n", resumeFileName)
 err := nucleiRunner.SaveResumeConfig(resumeFileName)
 if err != nil {
 return errorutil.NewWithErr(err).Msgf("couldn't create crash resume file")
@@ -191,37 +196,35 @@ func main() {
 // Setup graceful exits
 resumeFileName := types.DefaultResumeFilePath()
 c := make(chan os.Signal, 1)
-defer close(c)
 signal.Notify(c, os.Interrupt)
 go func() {
-for range c {
-gologger.Info().Msgf("CTRL+C pressed: Exiting\n")
+<-c
+options.Logger.Info().Msgf("CTRL+C pressed: Exiting\n")
 if options.DASTServer {
-nucleiRunner.Close()
-os.Exit(1)
-}
-
-gologger.Info().Msgf("Attempting graceful shutdown...")
-if options.EnableCloudUpload {
-gologger.Info().Msgf("Uploading scan results to cloud...")
-}
 nucleiRunner.Close()
-if options.ShouldSaveResume() {
-gologger.Info().Msgf("Creating resume file: %s\n", resumeFileName)
-err := nucleiRunner.SaveResumeConfig(resumeFileName)
-if err != nil {
-gologger.Error().Msgf("Couldn't create resume file: %s\n", err)
-}
-}
 os.Exit(1)
 }
+
+options.Logger.Info().Msgf("Attempting graceful shutdown...")
+if options.EnableCloudUpload {
+options.Logger.Info().Msgf("Uploading scan results to cloud...")
+}
+nucleiRunner.Close()
+if options.ShouldSaveResume() {
+options.Logger.Info().Msgf("Creating resume file: %s\n", resumeFileName)
+err := nucleiRunner.SaveResumeConfig(resumeFileName)
+if err != nil {
+options.Logger.Error().Msgf("Couldn't create resume file: %s\n", err)
+}
+}
+os.Exit(1)
 }()
 
 if err := nucleiRunner.RunEnumeration(); err != nil {
 if options.Validate {
-gologger.Fatal().Msgf("Could not validate templates: %s\n", err)
+options.Logger.Fatal().Msgf("Could not validate templates: %s\n", err)
 } else {
-gologger.Fatal().Msgf("Could not run nuclei: %s\n", err)
+options.Logger.Fatal().Msgf("Could not run nuclei: %s\n", err)
 }
 }
 nucleiRunner.Close()
@@ -542,11 +545,11 @@ Additional documentation is available at: https://docs.nuclei.sh/getting-started
 h := &pdcp.PDCPCredHandler{}
 _, err := h.GetCreds()
 if err != nil {
-gologger.Fatal().Msg("To utilize the `-ai` flag, please configure your API key with the `-auth` flag or set the `PDCP_API_KEY` environment variable")
+options.Logger.Fatal().Msg("To utilize the `-ai` flag, please configure your API key with the `-auth` flag or set the `PDCP_API_KEY` environment variable")
 }
 }
 
-gologger.DefaultLogger.SetTimestamp(options.Timestamp, levels.LevelDebug)
+options.Logger.SetTimestamp(options.Timestamp, levels.LevelDebug)
 
 if options.VerboseVerbose {
 // hide release notes if silent mode is enabled
@@ -570,11 +573,11 @@ Additional documentation is available at: https://docs.nuclei.sh/getting-started
 }
 if cfgFile != "" {
 if !fileutil.FileExists(cfgFile) {
-gologger.Fatal().Msgf("given config file '%s' does not exist", cfgFile)
+options.Logger.Fatal().Msgf("given config file '%s' does not exist", cfgFile)
 }
 // merge config file with flags
 if err := flagSet.MergeConfigFile(cfgFile); err != nil {
-gologger.Fatal().Msgf("Could not read config: %s\n", err)
+options.Logger.Fatal().Msgf("Could not read config: %s\n", err)
 }
 }
 if options.NewTemplatesDirectory != "" {
@@ -587,7 +590,7 @@ Additional documentation is available at: https://docs.nuclei.sh/getting-started
 if tp := findProfilePathById(templateProfile, defaultProfilesPath); tp != "" {
 templateProfile = tp
 } else {
-gologger.Fatal().Msgf("'%s' is not a profile-id or profile path", templateProfile)
+options.Logger.Fatal().Msgf("'%s' is not a profile-id or profile path", templateProfile)
 }
 }
 if !filepath.IsAbs(templateProfile) {
@@ -602,17 +605,17 @@ Additional documentation is available at: https://docs.nuclei.sh/getting-started
 }
 }
 if !fileutil.FileExists(templateProfile) {
-gologger.Fatal().Msgf("given template profile file '%s' does not exist", templateProfile)
+options.Logger.Fatal().Msgf("given template profile file '%s' does not exist", templateProfile)
 }
 if err := flagSet.MergeConfigFile(templateProfile); err != nil {
-gologger.Fatal().Msgf("Could not read template profile: %s\n", err)
+options.Logger.Fatal().Msgf("Could not read template profile: %s\n", err)
 }
 }
 
 if len(options.SecretsFile) > 0 {
 for _, secretFile := range options.SecretsFile {
 if !fileutil.FileExists(secretFile) {
-gologger.Fatal().Msgf("given secrets file '%s' does not exist", options.SecretsFile)
+options.Logger.Fatal().Msgf("given secrets file '%s' does not exist", secretFile)
 }
 }
 }
@@ -638,25 +641,25 @@ func readFlagsConfig(flagset *goflags.FlagSet) {
 if err != nil {
 // something went wrong either dir is not readable or something else went wrong upstream in `goflags`
 // warn and exit in this case
-gologger.Warning().Msgf("Could not read config file: %s\n", err)
+options.Logger.Warning().Msgf("Could not read config file: %s\n", err)
 return
 }
 cfgFile := config.DefaultConfig.GetFlagsConfigFilePath()
 if !fileutil.FileExists(cfgFile) {
 if !fileutil.FileExists(defaultCfgFile) {
 // if default config does not exist, warn and exit
-gologger.Warning().Msgf("missing default config file : %s", defaultCfgFile)
+options.Logger.Warning().Msgf("missing default config file : %s", defaultCfgFile)
 return
 }
 // if does not exist copy it from the default config
 if err = fileutil.CopyFile(defaultCfgFile, cfgFile); err != nil {
-gologger.Warning().Msgf("Could not copy config file: %s\n", err)
+options.Logger.Warning().Msgf("Could not copy config file: %s\n", err)
 }
 return
 }
 // if config file exists, merge it with the default config
 if err = flagset.MergeConfigFile(cfgFile); err != nil {
-gologger.Warning().Msgf("failed to merge configfile with flags got: %s\n", err)
+options.Logger.Warning().Msgf("failed to merge configfile with flags got: %s\n", err)
 }
 }
 
@@ -667,29 +670,29 @@ func disableUpdatesCallback() {
 
 // printVersion prints the nuclei version and exits.
 func printVersion() {
-gologger.Info().Msgf("Nuclei Engine Version: %s", config.Version)
-gologger.Info().Msgf("Nuclei Config Directory: %s", config.DefaultConfig.GetConfigDir())
-gologger.Info().Msgf("Nuclei Cache Directory: %s", config.DefaultConfig.GetCacheDir()) // cache dir contains resume files
-gologger.Info().Msgf("PDCP Directory: %s", pdcp.PDCPDir)
+options.Logger.Info().Msgf("Nuclei Engine Version: %s", config.Version)
+options.Logger.Info().Msgf("Nuclei Config Directory: %s", config.DefaultConfig.GetConfigDir())
+options.Logger.Info().Msgf("Nuclei Cache Directory: %s", config.DefaultConfig.GetCacheDir()) // cache dir contains resume files
+options.Logger.Info().Msgf("PDCP Directory: %s", pdcp.PDCPDir)
 os.Exit(0)
 }
 
 // printTemplateVersion prints the nuclei template version and exits.
 func printTemplateVersion() {
 cfg := config.DefaultConfig
-gologger.Info().Msgf("Public nuclei-templates version: %s (%s)\n", cfg.TemplateVersion, cfg.TemplatesDirectory)
+options.Logger.Info().Msgf("Public nuclei-templates version: %s (%s)\n", cfg.TemplateVersion, cfg.TemplatesDirectory)
 
 if fileutil.FolderExists(cfg.CustomS3TemplatesDirectory) {
-gologger.Info().Msgf("Custom S3 templates location: %s\n", cfg.CustomS3TemplatesDirectory)
+options.Logger.Info().Msgf("Custom S3 templates location: %s\n", cfg.CustomS3TemplatesDirectory)
 }
 if fileutil.FolderExists(cfg.CustomGitHubTemplatesDirectory) {
-gologger.Info().Msgf("Custom GitHub templates location: %s ", cfg.CustomGitHubTemplatesDirectory)
+options.Logger.Info().Msgf("Custom GitHub templates location: %s ", cfg.CustomGitHubTemplatesDirectory)
 }
 if fileutil.FolderExists(cfg.CustomGitLabTemplatesDirectory) {
-gologger.Info().Msgf("Custom GitLab templates location: %s ", cfg.CustomGitLabTemplatesDirectory)
+options.Logger.Info().Msgf("Custom GitLab templates location: %s ", cfg.CustomGitLabTemplatesDirectory)
 }
 if fileutil.FolderExists(cfg.CustomAzureTemplatesDirectory) {
-gologger.Info().Msgf("Custom Azure templates location: %s ", cfg.CustomAzureTemplatesDirectory)
+options.Logger.Info().Msgf("Custom Azure templates location: %s ", cfg.CustomAzureTemplatesDirectory)
 }
 os.Exit(0)
 }
@@ -705,13 +708,13 @@ Following files will be deleted:
 Note: Make sure you have backup of your custom nuclei-templates before proceeding
 
 `, config.DefaultConfig.GetConfigDir(), config.DefaultConfig.TemplatesDirectory)
-gologger.Print().Msg(warning)
+options.Logger.Print().Msg(warning)
 reader := bufio.NewReader(os.Stdin)
 for {
 fmt.Print("Are you sure you want to continue? [y/n]: ")
 resp, err := reader.ReadString('\n')
 if err != nil {
-gologger.Fatal().Msgf("could not read response: %s", err)
+options.Logger.Fatal().Msgf("could not read response: %s", err)
 }
 resp = strings.TrimSpace(resp)
 if stringsutil.EqualFoldAny(resp, "y", "yes") {
@@ -724,13 +727,13 @@ Note: Make sure you have backup of your custom nuclei-templates before proceeding
 }
 err := os.RemoveAll(config.DefaultConfig.GetConfigDir())
 if err != nil {
-gologger.Fatal().Msgf("could not delete config dir: %s", err)
+options.Logger.Fatal().Msgf("could not delete config dir: %s", err)
 }
 err = os.RemoveAll(config.DefaultConfig.TemplatesDirectory)
 if err != nil {
-gologger.Fatal().Msgf("could not delete templates dir: %s", err)
+options.Logger.Fatal().Msgf("could not delete templates dir: %s", err)
 }
-gologger.Info().Msgf("Successfully deleted all nuclei configurations files and nuclei-templates")
+options.Logger.Info().Msgf("Successfully deleted all nuclei configurations files and nuclei-templates")
 os.Exit(0)
 }
 
@@ -750,7 +753,7 @@ func findProfilePathById(profileId, templatesDir string) string {
 return nil
 })
 if err != nil && err.Error() != "FOUND" {
-gologger.Error().Msgf("%s\n", err)
+options.Logger.Error().Msgf("%s\n", err)
 }
 return profilePath
 }
@@ -20,7 +20,6 @@ var (
 
 func TestMain(m *testing.M) {
 // Set up
-
 gologger.DefaultLogger.SetMaxLevel(levels.LevelSilent)
 _ = os.Setenv("DISABLE_STDOUT", "true")
 
@@ -93,6 +92,8 @@ func getDefaultOptions() *types.Options {
 LoadHelperFileFunction: types.DefaultOptions().LoadHelperFileFunction,
 // DialerKeepAlive: time.Duration(0),
 // DASTServerAddress: "localhost:9055",
+ExecutionId: "test",
+Logger: gologger.DefaultLogger,
 }
 }
 
@@ -146,8 +146,8 @@ func process(opts options) error {
 gologger.Fatal().Msgf("could not open error log file: %s\n", err)
 }
 defer func() {
 _ = errFile.Close()
 }()
 }
 
 templateCatalog := disk.NewCatalog(filepath.Dir(opts.input))
@@ -401,7 +401,7 @@ func parseAndAddMaxRequests(catalog catalog.Catalog, path, data string) (string,
 
 // parseTemplate parses a template and returns the template object
 func parseTemplate(catalog catalog.Catalog, templatePath string) (*templates.Template, error) {
-executorOpts := protocols.ExecutorOptions{
+executorOpts := &protocols.ExecutorOptions{
 Catalog: catalog,
 Options: defaultOpts,
 }
@@ -99,12 +99,12 @@ func main() {
 gologger.Info().Msgf("✓ Template signed & verified successfully")
 }
 
-func defaultExecutorOpts(templatePath string) protocols.ExecutorOptions {
+func defaultExecutorOpts(templatePath string) *protocols.ExecutorOptions {
 // use parsed options when initializing signer instead of default options
 options := types.DefaultOptions()
 templates.UseOptionsForSigner(options)
 catalog := disk.NewCatalog(filepath.Dir(templatePath))
-executerOpts := protocols.ExecutorOptions{
+executerOpts := &protocols.ExecutorOptions{
 Catalog: catalog,
 Options: options,
 TemplatePath: templatePath,
go.mod | 78
@@ -17,7 +17,7 @@ require (
 github.com/julienschmidt/httprouter v1.3.0
 github.com/logrusorgru/aurora v2.0.3+incompatible
 github.com/miekg/dns v1.1.66
-github.com/olekukonko/tablewriter v0.0.5
+github.com/olekukonko/tablewriter v1.0.8
 github.com/pkg/errors v0.9.1
 github.com/projectdiscovery/clistats v0.1.1
 github.com/projectdiscovery/fastdialer v0.4.1
@@ -25,7 +25,7 @@ require (
 github.com/projectdiscovery/interactsh v1.2.4
 github.com/projectdiscovery/rawhttp v0.1.90
 github.com/projectdiscovery/retryabledns v1.0.103
-github.com/projectdiscovery/retryablehttp-go v1.0.116
+github.com/projectdiscovery/retryablehttp-go v1.0.117
 github.com/projectdiscovery/yamldoc-go v1.0.6
 github.com/remeh/sizedwaitgroup v1.0.0
 github.com/rs/xid v1.6.0
@@ -50,7 +50,8 @@ require (
 github.com/DataDog/gostackparse v0.7.0
 github.com/Masterminds/semver/v3 v3.4.0
 github.com/Mzack9999/gcache v0.0.0-20230410081825-519e28eab057
-github.com/alecthomas/chroma v0.10.0
+github.com/Mzack9999/goja v0.0.0-20250507184235-e46100e9c697
+github.com/Mzack9999/goja_nodejs v0.0.0-20250507184139-66bcbf65c883
 github.com/alitto/pond v1.9.2
 github.com/antchfx/xmlquery v1.4.4
 github.com/antchfx/xpath v1.3.4
@@ -60,17 +61,14 @@ require (
 github.com/aws/aws-sdk-go-v2/credentials v1.17.70
 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.82
 github.com/aws/aws-sdk-go-v2/service/s3 v1.82.0
-github.com/bytedance/sonic v1.13.3
+github.com/bytedance/sonic v1.14.0
 github.com/cespare/xxhash v1.1.0
 github.com/charmbracelet/glamour v0.10.0
 github.com/clbanning/mxj/v2 v2.7.0
 github.com/ditashi/jsbeautifier-go v0.0.0-20141206144643-2520a8026a9c
 github.com/docker/go-units v0.5.0
-github.com/dop251/goja v0.0.0-20250624190929-4d26883d182a
-github.com/dop251/goja_nodejs v0.0.0-20250409162600-f7acab6894b0
 github.com/fatih/structs v1.1.0
 github.com/getkin/kin-openapi v0.132.0
-github.com/go-echarts/go-echarts/v2 v2.6.0
 github.com/go-git/go-git/v5 v5.16.2
 github.com/go-ldap/ldap/v3 v3.4.11
 github.com/go-pg/pg v8.0.7+incompatible
@@ -108,20 +106,17 @@ require (
 github.com/projectdiscovery/uncover v1.1.0
 github.com/projectdiscovery/useragent v0.0.101
 github.com/projectdiscovery/utils v0.4.21
-github.com/projectdiscovery/wappalyzergo v0.2.35
+github.com/projectdiscovery/wappalyzergo v0.2.36
 github.com/redis/go-redis/v9 v9.11.0
 github.com/seh-msft/burpxml v1.0.1
 github.com/shurcooL/graphql v0.0.0-20230722043721-ed46e5a46466
 github.com/stretchr/testify v1.10.0
 github.com/tarunKoyalwar/goleak v0.0.0-20240429141123-0efa90dbdcf9
-github.com/trivago/tgo v1.0.7
 github.com/yassinebenaid/godump v0.11.1
 github.com/zmap/zgrab2 v0.1.8
 gitlab.com/gitlab-org/api/client-go v0.130.1
 go.mongodb.org/mongo-driver v1.17.4
-golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
 golang.org/x/term v0.32.0
-golang.org/x/tools v0.34.0
 gopkg.in/yaml.v3 v3.0.1
 moul.io/http2curl v1.0.0
 )
@@ -174,7 +169,7 @@ require (
 github.com/bodgit/sevenzip v1.6.0 // indirect
 github.com/bodgit/windows v1.0.1 // indirect
 github.com/buger/jsonparser v1.1.1 // indirect
-github.com/bytedance/sonic/loader v0.2.4 // indirect
+github.com/bytedance/sonic/loader v0.3.0 // indirect
 github.com/caddyserver/certmagic v0.19.2 // indirect
 github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -194,14 +189,13 @@ require (
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 github.com/davidmz/go-pageant v1.0.2 // indirect
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
-github.com/dimchansky/utfbom v1.1.1 // indirect
 github.com/dlclark/regexp2 v1.11.5 // indirect
 github.com/docker/cli v27.4.1+incompatible // indirect
 github.com/docker/docker v27.1.1+incompatible // indirect
 github.com/docker/go-connections v0.5.0 // indirect
 github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
 github.com/emirpasic/gods v1.18.1 // indirect
-github.com/fatih/color v1.16.0 // indirect
+github.com/fatih/color v1.18.0 // indirect
 github.com/felixge/fgprof v0.9.5 // indirect
 github.com/free5gc/util v1.0.5-0.20230511064842-2e120956883b // indirect
 github.com/gabriel-vasile/mimetype v1.4.8 // indirect
@@ -219,12 +213,8 @@ require (
 github.com/go-playground/locales v0.14.1 // indirect
 github.com/go-playground/universal-translator v0.18.1 // indirect
 github.com/go-sourcemap/sourcemap v2.1.4+incompatible // indirect
-github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
-github.com/goburrow/cache v0.1.4 // indirect
-github.com/gobwas/httphead v0.1.0 // indirect
-github.com/gobwas/pool v0.2.1 // indirect
+github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
 github.com/gogo/protobuf v1.3.2 // indirect
-github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
 github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
 github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect
 github.com/golang-sql/sqlexp v0.1.0 // indirect
@@ -232,20 +222,19 @@ require (
 github.com/golang/snappy v0.0.4 // indirect
 github.com/google/certificate-transparency-go v1.1.4 // indirect
 github.com/google/go-github/v30 v30.1.0 // indirect
-github.com/google/go-querystring v1.1.0 // indirect
 github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 github.com/gorilla/css v1.0.1 // indirect
 github.com/hashicorp/errwrap v1.1.0 // indirect
 github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 github.com/hashicorp/go-multierror v1.1.1 // indirect
-github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
+github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
 github.com/hashicorp/go-uuid v1.0.3 // indirect
 github.com/hashicorp/go-version v1.7.0 // indirect
 github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 github.com/hbakhtiyor/strsim v0.0.0-20190107154042-4d2bbb273edf // indirect
 github.com/hdm/jarm-go v0.0.7 // indirect
-github.com/imdario/mergo v0.3.13 // indirect
+github.com/imdario/mergo v0.3.16 // indirect
 github.com/itchyny/timefmt-go v0.1.6 // indirect
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
 github.com/jcmturner/aescts/v2 v2.0.0 // indirect
@@ -290,6 +279,8 @@ require (
 github.com/nwaples/rardecode/v2 v2.1.0 // indirect
 github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect
 github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect
+github.com/olekukonko/errors v1.1.0 // indirect
+github.com/olekukonko/ll v0.0.9 // indirect
 github.com/opencontainers/go-digest v1.0.0 // indirect
 github.com/opencontainers/image-spec v1.1.0 // indirect
 github.com/opencontainers/runc v1.2.3 // indirect
@@ -303,13 +294,11 @@ require (
 github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
 github.com/projectdiscovery/asnmap v1.1.1 // indirect
 github.com/projectdiscovery/blackrock v0.0.1 // indirect
-github.com/projectdiscovery/cdncheck v1.1.15 // indirect
+github.com/projectdiscovery/cdncheck v1.1.26 // indirect
 github.com/projectdiscovery/freeport v0.0.7 // indirect
 github.com/projectdiscovery/ldapserver v1.0.2-0.20240219154113-dcc758ebc0cb // indirect
 github.com/projectdiscovery/machineid v0.0.0-20240226150047-2e2c51e35983 // indirect
 github.com/refraction-networking/utls v1.7.0 // indirect
-github.com/rivo/uniseg v0.4.7 // indirect
-github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d // indirect
 github.com/sashabaranov/go-openai v1.37.0 // indirect
 github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
 github.com/shirou/gopsutil v3.21.11+incompatible // indirect
@ -343,33 +332,52 @@ require (
|
|||||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
|
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
|
||||||
github.com/ysmood/fetchup v0.2.3 // indirect
|
github.com/ysmood/fetchup v0.2.3 // indirect
|
||||||
github.com/ysmood/goob v0.4.0 // indirect
|
|
||||||
github.com/ysmood/got v0.40.0 // indirect
|
github.com/ysmood/got v0.40.0 // indirect
|
||||||
github.com/ysmood/gson v0.7.3 // indirect
|
|
||||||
github.com/ysmood/leakless v0.9.0 // indirect
|
|
||||||
github.com/yuin/goldmark v1.7.8 // indirect
|
github.com/yuin/goldmark v1.7.8 // indirect
|
||||||
github.com/yuin/goldmark-emoji v1.0.5 // indirect
|
github.com/yuin/goldmark-emoji v1.0.5 // indirect
|
||||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
|
||||||
github.com/zcalusic/sysinfo v1.0.2 // indirect
|
github.com/zcalusic/sysinfo v1.0.2 // indirect
|
||||||
github.com/zeebo/blake3 v0.2.3 // indirect
|
github.com/zeebo/blake3 v0.2.3 // indirect
|
||||||
|
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
|
||||||
|
golang.org/x/arch v0.3.0 // indirect
|
||||||
|
golang.org/x/sync v0.15.0 // indirect
|
||||||
|
gopkg.in/djherbis/times.v1 v1.3.0 // indirect
|
||||||
|
mellium.im/sasl v0.3.2 // indirect
|
||||||
|
)
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/dimchansky/utfbom v1.1.1 // indirect
|
||||||
|
github.com/goburrow/cache v0.1.4 // indirect
|
||||||
|
github.com/gobwas/httphead v0.1.0 // indirect
|
||||||
|
github.com/gobwas/pool v0.2.1 // indirect
|
||||||
|
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
|
||||||
|
github.com/google/go-querystring v1.1.0 // indirect
|
||||||
|
github.com/rivo/uniseg v0.4.7 // indirect
|
||||||
|
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d // indirect
|
||||||
|
github.com/trivago/tgo v1.0.7
|
||||||
|
github.com/ysmood/goob v0.4.0 // indirect
|
||||||
|
github.com/ysmood/gson v0.7.3 // indirect
|
||||||
|
github.com/ysmood/leakless v0.9.0 // indirect
|
||||||
|
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||||
github.com/zmap/rc2 v0.0.0-20190804163417-abaa70531248 // indirect
|
github.com/zmap/rc2 v0.0.0-20190804163417-abaa70531248 // indirect
|
||||||
github.com/zmap/zcrypto v0.0.0-20240512203510-0fef58d9a9db // indirect
|
github.com/zmap/zcrypto v0.0.0-20240512203510-0fef58d9a9db // indirect
|
||||||
go.etcd.io/bbolt v1.3.10 // indirect
|
go.etcd.io/bbolt v1.3.10 // indirect
|
||||||
go.uber.org/zap v1.25.0 // indirect
|
go.uber.org/zap v1.25.0 // indirect
|
||||||
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
|
|
||||||
goftp.io/server/v2 v2.0.1 // indirect
|
goftp.io/server/v2 v2.0.1 // indirect
|
||||||
golang.org/x/arch v0.3.0 // indirect
|
|
||||||
golang.org/x/crypto v0.39.0 // indirect
|
golang.org/x/crypto v0.39.0 // indirect
|
||||||
|
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
|
||||||
golang.org/x/mod v0.25.0 // indirect
|
golang.org/x/mod v0.25.0 // indirect
|
||||||
golang.org/x/sync v0.15.0 // indirect
|
|
||||||
golang.org/x/sys v0.33.0 // indirect
|
golang.org/x/sys v0.33.0 // indirect
|
||||||
golang.org/x/time v0.11.0 // indirect
|
golang.org/x/time v0.11.0 // indirect
|
||||||
google.golang.org/protobuf v1.36.6 // indirect
|
golang.org/x/tools v0.34.0
|
||||||
|
google.golang.org/protobuf v1.35.1 // indirect
|
||||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect
|
||||||
gopkg.in/corvus-ch/zbase32.v1 v1.0.0 // indirect
|
gopkg.in/corvus-ch/zbase32.v1 v1.0.0 // indirect
|
||||||
gopkg.in/djherbis/times.v1 v1.3.0 // indirect
|
)
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/alecthomas/chroma v0.10.0
|
||||||
|
github.com/go-echarts/go-echarts/v2 v2.6.0
|
||||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||||
mellium.im/sasl v0.3.2 // indirect
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// https://go.dev/ref/mod#go-mod-file-retract
|
// https://go.dev/ref/mod#go-mod-file-retract
|
||||||
|
58 go.sum
@ -83,6 +83,10 @@ github.com/Mzack9999/gcache v0.0.0-20230410081825-519e28eab057 h1:KFac3SiGbId8ub
github.com/Mzack9999/gcache v0.0.0-20230410081825-519e28eab057/go.mod h1:iLB2pivrPICvLOuROKmlqURtFIEsoJZaMidQfCG1+D4=
github.com/Mzack9999/go-http-digest-auth-client v0.6.1-0.20220414142836-eb8883508809 h1:ZbFL+BDfBqegi+/Ssh7im5+aQfBRx6it+kHnC7jaDU8=
github.com/Mzack9999/go-http-digest-auth-client v0.6.1-0.20220414142836-eb8883508809/go.mod h1:upgc3Zs45jBDnBT4tVRgRcgm26ABpaP7MoTSdgysca4=
github.com/Mzack9999/goja v0.0.0-20250507184235-e46100e9c697 h1:54I+OF5vS4a/rxnUrN5J3hi0VEYKcrTlpc8JosDyP+c=
github.com/Mzack9999/goja v0.0.0-20250507184235-e46100e9c697/go.mod h1:yNqYRqxYkSROY1J+LX+A0tOSA/6soXQs5m8hZSqYBac=
github.com/Mzack9999/goja_nodejs v0.0.0-20250507184139-66bcbf65c883 h1:+Is1AS20q3naP+qJophNpxuvx1daFOx9C0kLIuI0GVk=
github.com/Mzack9999/goja_nodejs v0.0.0-20250507184139-66bcbf65c883/go.mod h1:K+FhM7iKGKtalkeXGEviafPPwyVjDv1a/ehomabLF2w=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
@ -212,11 +216,11 @@ github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bytedance/sonic v1.13.3 h1:MS8gmaH16Gtirygw7jV91pDCN33NyMrPbN7qiYhEsF0=
github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ=
github.com/bytedance/sonic v1.13.3/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4=
github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY=
github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/caddyserver/certmagic v0.19.2 h1:HZd1AKLx4592MalEGQS39DKs2ZOAJCEM/xYPMQ2/ui0=
github.com/caddyserver/certmagic v0.19.2/go.mod h1:fsL01NomQ6N+kE2j37ZCnig2MFosG+MIO4ztnmG/zz8=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
@ -299,10 +303,6 @@ github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dop251/goja v0.0.0-20250624190929-4d26883d182a h1:QIWJoaD2+zxUjN28l8zixmbuvtYqqcxj49Iwzw7mDpk=
github.com/dop251/goja v0.0.0-20250624190929-4d26883d182a/go.mod h1:MxLav0peU43GgvwVgNbLAj1s/bSGboKkhuULvq/7hx4=
github.com/dop251/goja_nodejs v0.0.0-20250409162600-f7acab6894b0 h1:fuHXpEVTTk7TilRdfGRLHpiTD6tnT0ihEowCfWjlFvw=
github.com/dop251/goja_nodejs v0.0.0-20250409162600-f7acab6894b0/go.mod h1:Tb7Xxye4LX7cT3i8YLvmPMGCV92IOi4CDZvm/V8ylc0=
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 h1:2tV76y6Q9BB+NEBasnqvs7e49aEBFI8ejC89PSnWH+4=
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
@ -315,8 +315,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
@ -397,8 +397,8 @@ github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI6
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/goburrow/cache v0.1.4 h1:As4KzO3hgmzPlnaMniZU9+VmoNYseUhuELbxy9mRBfw=
github.com/goburrow/cache v0.1.4/go.mod h1:cDFesZDnIlrHoNlMYqqMpCRawuXulgx+y7mXU8HZ+/c=
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
@ -522,8 +522,8 @@ github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB1
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
@ -543,8 +543,8 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E=
github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
github.com/invopop/yaml v0.3.1 h1:f0+ZpmhfBSS4MhG+4HYseMdJhoeeopbSKbq5Rpeelso=
@ -652,7 +652,6 @@ github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHP
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
@ -714,8 +713,12 @@ github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//J
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw=
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c=
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/errors v1.1.0 h1:RNuGIh15QdDenh+hNvKrJkmxxjV4hcS50Db478Ou5sM=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/olekukonko/errors v1.1.0/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y=
github.com/olekukonko/ll v0.0.9 h1:Y+1YqDfVkqMWuEQMclsF9HUR5+a82+dxJuL1HHSRpxI=
github.com/olekukonko/ll v0.0.9/go.mod h1:En+sEW0JNETl26+K8eZ6/W4UQ7CYSrrgg/EdIYT2H8g=
github.com/olekukonko/tablewriter v1.0.8 h1:f6wJzHg4QUtJdvrVPKco4QTrAylgaU0+b9br/lJxEiQ=
github.com/olekukonko/tablewriter v1.0.8/go.mod h1:H428M+HzoUXC6JU2Abj9IT9ooRmdq9CxuDmKMtrOCMs=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
@ -760,8 +763,8 @@ github.com/projectdiscovery/asnmap v1.1.1 h1:ImJiKIaACOT7HPx4Pabb5dksolzaFYsD1kI
github.com/projectdiscovery/asnmap v1.1.1/go.mod h1:QT7jt9nQanj+Ucjr9BqGr1Q2veCCKSAVyUzLXfEcQ60=
github.com/projectdiscovery/blackrock v0.0.1 h1:lHQqhaaEFjgf5WkuItbpeCZv2DUIE45k0VbGJyft6LQ=
github.com/projectdiscovery/blackrock v0.0.1/go.mod h1:ANUtjDfaVrqB453bzToU+YB4cUbvBRpLvEwoWIwlTss=
github.com/projectdiscovery/cdncheck v1.1.15 h1:rRs3LW2MP7V8QeONVRYce6RhDcWp83O+AWmt+QQ4mBM=
github.com/projectdiscovery/cdncheck v1.1.26 h1:R6JxzU3ptGNrzzvS261xHWssDt9GlOCWXkuet1huOyA=
github.com/projectdiscovery/cdncheck v1.1.15/go.mod h1:dFEGsG0qAJY0AaRr2N1BY0OtZiTxS4kYeT5+OkF8t1U=
github.com/projectdiscovery/cdncheck v1.1.26/go.mod h1:dFEGsG0qAJY0AaRr2N1BY0OtZiTxS4kYeT5+OkF8t1U=
github.com/projectdiscovery/clistats v0.1.1 h1:8mwbdbwTU4aT88TJvwIzTpiNeow3XnAB72JIg66c8wE=
github.com/projectdiscovery/clistats v0.1.1/go.mod h1:4LtTC9Oy//RiuT1+76MfTg8Hqs7FQp1JIGBM3nHK6a0=
github.com/projectdiscovery/dsl v0.5.0 h1:3HHY14FNmdwWXq3pi9dd8JjUHQzskZjLD/pZKVx5Vi4=
@ -808,8 +811,8 @@ github.com/projectdiscovery/rdap v0.9.0 h1:wPhHx5pQ2QI+WGhyNb2PjhTl0NtB39Nk7YFZ9
github.com/projectdiscovery/rdap v0.9.0/go.mod h1:zk4yrJFQ2Hy36Aqk+DvotYQxYAeALaCJ5ORySkff36Q=
github.com/projectdiscovery/retryabledns v1.0.103 h1:rPnoMTK+CXLbO8kT7ODtwbhyQGAUpJsqhVq8AAvu1bs=
github.com/projectdiscovery/retryabledns v1.0.103/go.mod h1:sfu91YrZkb8Ccvij8YDTV96cQt69IPqnfa+OEFUke1o=
github.com/projectdiscovery/retryablehttp-go v1.0.116 h1:yjgT5q6lGkZ7gkuuHe5wm2mmq9tE5t23PSk6sz3F6/E=
github.com/projectdiscovery/retryablehttp-go v1.0.117 h1:xU9H2ONb9iG25Sm4eCinDhb4kt/s542BomUZAx4CGEs=
github.com/projectdiscovery/retryablehttp-go v1.0.116/go.mod h1:GUMjLgc9hJtIzx34igabPtem98ewhq2xEG8TZmhefog=
github.com/projectdiscovery/retryablehttp-go v1.0.117/go.mod h1:pAQWFh6lg9Gmno5zrQxbfuAbc9OvIugl5P9kaoXztgM=
github.com/projectdiscovery/sarif v0.0.1 h1:C2Tyj0SGOKbCLgHrx83vaE6YkzXEVrMXYRGLkKCr/us=
github.com/projectdiscovery/sarif v0.0.1/go.mod h1:cEYlDu8amcPf6b9dSakcz2nNnJsoz4aR6peERwV+wuQ=
github.com/projectdiscovery/stringsutil v0.0.2 h1:uzmw3IVLJSMW1kEg8eCStG/cGbYYZAja8BH3LqqJXMA=
@ -822,8 +825,8 @@ github.com/projectdiscovery/useragent v0.0.101 h1:8A+XOJ/nIH+WqW8ogLxJ/psemGp8AT
github.com/projectdiscovery/useragent v0.0.101/go.mod h1:RGoRw1BQ/lJnhYMbMpEKjyAAgCaDCr/+GsULo5yEJ2I=
github.com/projectdiscovery/utils v0.4.21 h1:yAothTUSF6NwZ9yoC4iGe5gSBrovqKR9JwwW3msxk3Q=
github.com/projectdiscovery/utils v0.4.21/go.mod h1:HJuJFqjB6EmVaDl0ilFPKvLoMaX2GyE6Il2TqKXNs8I=
github.com/projectdiscovery/wappalyzergo v0.2.35 h1:4LN5Paa4l5Z5Q5XYwNlF0cRsx1ojYeY5EELEMpk8grw=
github.com/projectdiscovery/wappalyzergo v0.2.36 h1:g/E2gatdYcmLKk9R81vrkq4RdpACpYgN1fuyY3041eE=
github.com/projectdiscovery/wappalyzergo v0.2.35/go.mod h1:L4P6SZuaEgEE2eXbpf4OnSGxjWj9vn6xM15SD78niLA=
github.com/projectdiscovery/wappalyzergo v0.2.36/go.mod h1:L4P6SZuaEgEE2eXbpf4OnSGxjWj9vn6xM15SD78niLA=
github.com/projectdiscovery/yamldoc-go v1.0.6 h1:GCEdIRlQjDux28xTXKszM7n3jlMf152d5nqVpVoetas=
github.com/projectdiscovery/yamldoc-go v1.0.6/go.mod h1:R5lWrNzP+7Oyn77NDVPnBsxx2/FyQZBBkIAaSaCQFxw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@ -1462,8 +1465,8 @@ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1491,7 +1494,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
@ -55,10 +55,11 @@ type UploadWriter struct {
scanName string
counter atomic.Int32
TeamID string
Logger *gologger.Logger
}

// NewUploadWriter creates a new upload writer
func NewUploadWriter(ctx context.Context, creds *pdcpauth.PDCPCredentials) (*UploadWriter, error) {
func NewUploadWriter(ctx context.Context, logger *gologger.Logger, creds *pdcpauth.PDCPCredentials) (*UploadWriter, error) {
if creds == nil {
return nil, fmt.Errorf("no credentials provided")
}
@ -66,6 +67,7 @@ func NewUploadWriter(ctx context.Context, creds *pdcpauth.PDCPCredentials) (*Upl
creds: creds,
done: make(chan struct{}, 1),
TeamID: NoneTeamID,
Logger: logger,
}
var err error
reader, writer := io.Pipe()
@ -128,8 +130,8 @@ func (u *UploadWriter) autoCommit(ctx context.Context, r *io.PipeReader) {
// continuously read from the reader and send to channel
go func() {
defer func() {
_ = r.Close()
}()
defer close(ch)
for {
data, err := reader.ReadString('\n')
@ -147,9 +149,9 @@ func (u *UploadWriter) autoCommit(ctx context.Context, r *io.PipeReader) {
close(u.done)
// if no scanid is generated no results were uploaded
if u.scanID == "" {
gologger.Verbose().Msgf("Scan results upload to cloud skipped, no results found to upload")
u.Logger.Verbose().Msgf("Scan results upload to cloud skipped, no results found to upload")
} else {
gologger.Info().Msgf("%v Scan results uploaded to cloud, you can view scan results at %v", u.counter.Load(), getScanDashBoardURL(u.scanID, u.TeamID))
u.Logger.Info().Msgf("%v Scan results uploaded to cloud, you can view scan results at %v", u.counter.Load(), getScanDashBoardURL(u.scanID, u.TeamID))
}
}()
// temporary buffer to store the results
@ -162,7 +164,7 @@ func (u *UploadWriter) autoCommit(ctx context.Context, r *io.PipeReader) {
// flush before exit
if buff.Len() > 0 {
if err := u.uploadChunk(buff); err != nil {
gologger.Error().Msgf("Failed to upload scan results on cloud: %v", err)
u.Logger.Error().Msgf("Failed to upload scan results on cloud: %v", err)
}
}
return
@ -170,14 +172,14 @@ func (u *UploadWriter) autoCommit(ctx context.Context, r *io.PipeReader) {
// flush the buffer
if buff.Len() > 0 {
if err := u.uploadChunk(buff); err != nil {
gologger.Error().Msgf("Failed to upload scan results on cloud: %v", err)
u.Logger.Error().Msgf("Failed to upload scan results on cloud: %v", err)
}
}
case line, ok := <-ch:
if !ok {
if buff.Len() > 0 {
if err := u.uploadChunk(buff); err != nil {
gologger.Error().Msgf("Failed to upload scan results on cloud: %v", err)
u.Logger.Error().Msgf("Failed to upload scan results on cloud: %v", err)
}
}
return
@ -185,7 +187,7 @@ func (u *UploadWriter) autoCommit(ctx context.Context, r *io.PipeReader) {
if buff.Len()+len(line) > MaxChunkSize {
// flush existing buffer
if err := u.uploadChunk(buff); err != nil {
gologger.Error().Msgf("Failed to upload scan results on cloud: %v", err)
u.Logger.Error().Msgf("Failed to upload scan results on cloud: %v", err)
}
} else {
buff.WriteString(line)
@ -202,7 +204,7 @@ func (u *UploadWriter) uploadChunk(buff *bytes.Buffer) error {
// if successful, reset the buffer
buff.Reset()
// log in verbose mode
gologger.Warning().Msgf("Uploaded results chunk, you can view scan results at %v", getScanDashBoardURL(u.scanID, u.TeamID))
u.Logger.Warning().Msgf("Uploaded results chunk, you can view scan results at %v", getScanDashBoardURL(u.scanID, u.TeamID))
return nil
}

@ -216,8 +218,8 @@ func (u *UploadWriter) upload(data []byte) error {
return errorutil.NewWithErr(err).Msgf("could not upload results")
}
defer func() {
_ = resp.Body.Close()
}()
bin, err := io.ReadAll(resp.Body)
if err != nil {
return errorutil.NewWithErr(err).Msgf("could not get id from response")
@ -260,7 +262,7 @@ func (u *UploadWriter) getRequest(bin []byte) (*retryablehttp.Request, error) {
if u.scanName != "" && req.Path == uploadEndpoint {
req.Params.Add("name", u.scanName)
}
req.URL.Update()
req.Update()

req.Header.Set(pdcpauth.ApiKeyHeaderName, u.creds.APIKey)
if u.TeamID != NoneTeamID && u.TeamID != "" {
@ -2,11 +2,11 @@ package runner

import (
"context"
"fmt"
"sync/atomic"
"time"

"github.com/pkg/errors"
"github.com/projectdiscovery/gologger"
"github.com/projectdiscovery/hmap/store/hybrid"
"github.com/projectdiscovery/httpx/common/httpx"
"github.com/projectdiscovery/nuclei/v3/pkg/input/provider"
@ -28,7 +28,7 @@ func (r *Runner) initializeTemplatesHTTPInput() (*hybrid.HybridMap, error) {
// currently http probing for input mode types is not supported
return hm, nil
}
gologger.Info().Msgf("Running httpx on input host")
r.Logger.Info().Msgf("Running httpx on input host")

httpxOptions := httpx.DefaultOptions
if r.options.AliveHttpProxy != "" {
@ -38,7 +38,13 @@ func (r *Runner) initializeTemplatesHTTPInput() (*hybrid.HybridMap, error) {
}
httpxOptions.RetryMax = r.options.Retries
httpxOptions.Timeout = time.Duration(r.options.Timeout) * time.Second
httpxOptions.NetworkPolicy = protocolstate.NetworkPolicy
dialers := protocolstate.GetDialersWithId(r.options.ExecutionId)
if dialers == nil {
return nil, fmt.Errorf("dialers not initialized for %s", r.options.ExecutionId)
}

httpxOptions.NetworkPolicy = dialers.NetworkPolicy
httpxClient, err := httpx.New(&httpxOptions)
if err != nil {
return nil, errors.Wrap(err, "could not create httpx client")
@ -57,7 +63,7 @@ func (r *Runner) initializeTemplatesHTTPInput() (*hybrid.HybridMap, error) {

if r.options.ProbeConcurrency > 0 && swg.Size != r.options.ProbeConcurrency {
if err := swg.Resize(context.Background(), r.options.ProbeConcurrency); err != nil {
gologger.Error().Msgf("Could not resize workpool: %s\n", err)
r.Logger.Error().Msgf("Could not resize workpool: %s\n", err)
}
}

@ -74,6 +80,6 @@ func (r *Runner) initializeTemplatesHTTPInput() (*hybrid.HybridMap, error) {
})
swg.Wait()

gologger.Info().Msgf("Found %d URL from httpx", count.Load())
r.Logger.Info().Msgf("Found %d URL from httpx", count.Load())
return hm, nil
}
@ -22,12 +22,12 @@ import (

type AuthLazyFetchOptions struct {
TemplateStore *loader.Store
ExecOpts protocols.ExecutorOptions
ExecOpts *protocols.ExecutorOptions
OnError func(error)
}

// GetAuthTmplStore create new loader for loading auth templates
func GetAuthTmplStore(opts types.Options, catalog catalog.Catalog, execOpts protocols.ExecutorOptions) (*loader.Store, error) {
func GetAuthTmplStore(opts *types.Options, catalog catalog.Catalog, execOpts *protocols.ExecutorOptions) (*loader.Store, error) {
tmpls := []string{}
for _, file := range opts.SecretsFile {
data, err := authx.GetTemplatePathsFromSecretFile(file)
@ -54,7 +54,7 @@ func GetAuthTmplStore(opts types.Options, catalog catalog.Catalog, execOpts prot
opts.Protocols = nil
opts.ExcludeProtocols = nil
opts.IncludeConditions = nil
cfg := loader.NewConfig(&opts, catalog, execOpts)
cfg := loader.NewConfig(opts, catalog, execOpts)
cfg.StoreId = loader.AuthStoreId
store, err := loader.New(cfg)
if err != nil {
@ -31,7 +31,6 @@ import (
"github.com/projectdiscovery/nuclei/v3/pkg/utils/yaml"
fileutil "github.com/projectdiscovery/utils/file"
"github.com/projectdiscovery/utils/generic"
logutil "github.com/projectdiscovery/utils/log"
stringsutil "github.com/projectdiscovery/utils/strings"
)

@ -73,17 +72,17 @@ func ParseOptions(options *types.Options) {
vardump.Limit = options.VarDumpLimit
}
if options.ShowActions {
gologger.Info().Msgf("Showing available headless actions: ")
options.Logger.Info().Msgf("Showing available headless actions: ")
for action := range engine.ActionStringToAction {
gologger.Print().Msgf("\t%s", action)
options.Logger.Print().Msgf("\t%s", action)
}
os.Exit(0)
}

defaultProfilesPath := filepath.Join(config.DefaultConfig.GetTemplateDir(), "profiles")
if options.ListTemplateProfiles {
gologger.Print().Msgf(
options.Logger.Print().Msgf(
"\nListing available %v nuclei template profiles for %v",
"Listing available %v nuclei template profiles for %v",
config.DefaultConfig.TemplateVersion,
config.DefaultConfig.TemplatesDirectory,
)
@ -95,23 +94,23 @@ func ParseOptions(options *types.Options) {
return nil
}
if profileRelPath, err := filepath.Rel(templatesRootDir, iterItem); err == nil {
gologger.Print().Msgf("%s (%s)\n", profileRelPath, strings.TrimSuffix(filepath.Base(iterItem), ext))
options.Logger.Print().Msgf("%s (%s)\n", profileRelPath, strings.TrimSuffix(filepath.Base(iterItem), ext))
}
return nil
})
if err != nil {
gologger.Error().Msgf("%s\n", err)
options.Logger.Error().Msgf("%s\n", err)
}
os.Exit(0)
}
if options.StoreResponseDir != DefaultDumpTrafficOutputFolder && !options.StoreResponse {
gologger.Debug().Msgf("Store response directory specified, enabling \"store-resp\" flag automatically\n")
options.Logger.Debug().Msgf("Store response directory specified, enabling \"store-resp\" flag automatically\n")
options.StoreResponse = true
}
// Validate the options passed by the user and if any
// invalid options have been used, exit.
if err := ValidateOptions(options); err != nil {
gologger.Fatal().Msgf("Program exiting: %s\n", err)
options.Logger.Fatal().Msgf("Program exiting: %s\n", err)
}

// Load the resolvers if user asked for them
@ -119,7 +118,7 @@ func ParseOptions(options *types.Options) {

err := protocolinit.Init(options)
if err != nil {
gologger.Fatal().Msgf("Could not initialize protocols: %s\n", err)
options.Logger.Fatal().Msgf("Could not initialize protocols: %s\n", err)
}

// Set GitHub token in env variable. runner.getGHClientWithToken() reads token from env
@ -170,7 +169,7 @@ func ValidateOptions(options *types.Options) error {
return err
}
if options.Validate {
validateTemplatePaths(config.DefaultConfig.TemplatesDirectory, options.Templates, options.Workflows)
validateTemplatePaths(options.Logger, config.DefaultConfig.TemplatesDirectory, options.Templates, options.Workflows)
}
if options.DAST {
if err := validateDASTOptions(options); err != nil {
@ -183,7 +182,7 @@ func ValidateOptions(options *types.Options) error {
if generic.EqualsAny("", options.ClientCertFile, options.ClientKeyFile, options.ClientCAFile) {
return errors.New("if a client certification option is provided, then all three must be provided")
}
validateCertificatePaths(options.ClientCertFile, options.ClientKeyFile, options.ClientCAFile)
validateCertificatePaths(options.Logger, options.ClientCertFile, options.ClientKeyFile, options.ClientCAFile)
}
// Verify AWS secrets are passed if a S3 template bucket is passed
if options.AwsBucketName != "" && options.UpdateTemplates && !options.AwsTemplateDisableDownload {
@ -306,8 +305,8 @@ func createReportingOptions(options *types.Options) (*reporting.Options, error)
return nil, errors.Wrap(err, "could not open reporting config file")
}
defer func() {
_ = file.Close()
}()

if err := yaml.DecodeAndValidate(file, reportingOptions); err != nil {
return nil, errors.Wrap(err, "could not parse reporting config file")
@ -345,32 +344,33 @@ func createReportingOptions(options *types.Options) (*reporting.Options, error)
}

reportingOptions.OmitRaw = options.OmitRawRequests
reportingOptions.ExecutionId = options.ExecutionId
return reportingOptions, nil
}

// configureOutput configures the output logging levels to be displayed on the screen
func configureOutput(options *types.Options) {
if options.NoColor {
gologger.DefaultLogger.SetFormatter(formatter.NewCLI(true))
options.Logger.SetFormatter(formatter.NewCLI(true))
}
// If the user desires verbose output, show verbose output
if options.Debug || options.DebugRequests || options.DebugResponse {
gologger.DefaultLogger.SetMaxLevel(levels.LevelDebug)
options.Logger.SetMaxLevel(levels.LevelDebug)
}
// Debug takes precedence before verbose
// because debug is a lower logging level.
if options.Verbose || options.Validate {
gologger.DefaultLogger.SetMaxLevel(levels.LevelVerbose)
options.Logger.SetMaxLevel(levels.LevelVerbose)
}
if options.NoColor {
gologger.DefaultLogger.SetFormatter(formatter.NewCLI(true))
options.Logger.SetFormatter(formatter.NewCLI(true))
}
if options.Silent {
gologger.DefaultLogger.SetMaxLevel(levels.LevelSilent)
options.Logger.SetMaxLevel(levels.LevelSilent)
}

// disable standard logger (ref: https://github.com/golang/go/issues/19895)
logutil.DisableDefaultLogger()
// logutil.DisableDefaultLogger()
}

// loadResolvers loads resolvers from both user-provided flags and file
@ -381,11 +381,11 @@ func loadResolvers(options *types.Options) {

file, err := os.Open(options.ResolversFile)
if err != nil {
gologger.Fatal().Msgf("Could not open resolvers file: %s\n", err)
options.Logger.Fatal().Msgf("Could not open resolvers file: %s\n", err)
}
defer func() {
_ = file.Close()
}()

scanner := bufio.NewScanner(file)
for scanner.Scan() {
@ -401,7 +401,7 @@ func loadResolvers(options *types.Options) {
}
}

func validateTemplatePaths(templatesDirectory string, templatePaths, workflowPaths []string) {
func validateTemplatePaths(logger *gologger.Logger, templatesDirectory string, templatePaths, workflowPaths []string) {
allGivenTemplatePaths := append(templatePaths, workflowPaths...)
for _, templatePath := range allGivenTemplatePaths {
if templatesDirectory != templatePath && filepath.IsAbs(templatePath) {
@ -409,7 +409,7 @@ func validateTemplatePaths(templatesDirectory string, templatePaths, workflowPat
if err == nil && fileInfo.IsDir() {
relativizedPath, err2 := filepath.Rel(templatesDirectory, templatePath)
if err2 != nil || (len(relativizedPath) >= 2 && relativizedPath[:2] == "..") {
gologger.Warning().Msgf("The given path (%s) is outside the default template directory path (%s)! "+
logger.Warning().Msgf("The given path (%s) is outside the default template directory path (%s)! "+
"Referenced sub-templates with relative paths in workflows will be resolved against the default template directory.", templatePath, templatesDirectory)
break
}
@ -418,12 +418,12 @@ func validateTemplatePaths(templatesDirectory string, templatePaths, workflowPat
}
}

func validateCertificatePaths(certificatePaths ...string) {
func validateCertificatePaths(logger *gologger.Logger, certificatePaths ...string) {
for _, certificatePath := range certificatePaths {
if !fileutil.FileExists(certificatePath) {
// The provided path to the PEM certificate does not exist for the client authentication. As this is
// required for successful authentication, log and return an error
gologger.Fatal().Msgf("The given path (%s) to the certificate does not exist!", certificatePath)
logger.Fatal().Msgf("The given path (%s) to the certificate does not exist!", certificatePath)
break
}
}
@ -450,7 +450,7 @@ func readEnvInputVars(options *types.Options) {
// Attempt to convert the repo ID to an integer
repoIDInt, err := strconv.Atoi(repoID)
if err != nil {
gologger.Warning().Msgf("Invalid GitLab template repository ID: %s", repoID)
options.Logger.Warning().Msgf("Invalid GitLab template repository ID: %s", repoID)
continue
}

@ -7,7 +7,6 @@ import (
"os"
"strings"

"github.com/projectdiscovery/gologger"
"github.com/projectdiscovery/nuclei/v3/pkg/types"
errorutil "github.com/projectdiscovery/utils/errors"
fileutil "github.com/projectdiscovery/utils/file"
@ -31,8 +30,8 @@ func loadProxyServers(options *types.Options) error {
return fmt.Errorf("could not open proxy file: %w", err)
}
defer func() {
_ = file.Close()
}()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
proxy := scanner.Text()
@ -58,11 +57,11 @@ func loadProxyServers(options *types.Options) error {
}
switch proxyURL.Scheme {
case proxyutils.HTTP, proxyutils.HTTPS:
gologger.Verbose().Msgf("Using %s as proxy server", proxyURL.String())
options.Logger.Verbose().Msgf("Using %s as proxy server", proxyURL.String())
options.AliveHttpProxy = proxyURL.String()
case proxyutils.SOCKS5:
options.AliveSocksProxy = proxyURL.String()
gologger.Verbose().Msgf("Using %s as socket proxy server", proxyURL.String())
options.Logger.Verbose().Msgf("Using %s as socket proxy server", proxyURL.String())
}
return nil
}
@ -10,6 +10,7 @@ import (
"sync/atomic"
"time"

"github.com/projectdiscovery/gologger"
"github.com/projectdiscovery/nuclei/v3/internal/pdcp"
"github.com/projectdiscovery/nuclei/v3/internal/server"
"github.com/projectdiscovery/nuclei/v3/pkg/authprovider"
@ -32,7 +33,6 @@ import (
"github.com/pkg/errors"
"github.com/projectdiscovery/ratelimit"

"github.com/projectdiscovery/gologger"
"github.com/projectdiscovery/nuclei/v3/internal/colorizer"
"github.com/projectdiscovery/nuclei/v3/internal/httpapi"
"github.com/projectdiscovery/nuclei/v3/pkg/catalog"
@ -95,6 +95,7 @@ type Runner struct {
inputProvider provider.InputProvider
fuzzFrequencyCache *frequency.Tracker
httpStats *outputstats.Tracker
Logger *gologger.Logger

//general purpose temporary directory
tmpDir string
@ -108,10 +109,11 @@ type Runner struct {
func New(options *types.Options) (*Runner, error) {
runner := &Runner{
options: options,
Logger: options.Logger,
}

if options.HealthCheck {
gologger.Print().Msgf("%s\n", DoHealthCheck(options))
runner.Logger.Print().Msgf("%s\n", DoHealthCheck(options))
os.Exit(0)
}

@ -119,14 +121,14 @@ func New(options *types.Options) (*Runner, error) {
if config.DefaultConfig.CanCheckForUpdates() {
if err := installer.NucleiVersionCheck(); err != nil {
if options.Verbose || options.Debug {
gologger.Error().Msgf("nuclei version check failed got: %s\n", err)
runner.Logger.Error().Msgf("nuclei version check failed got: %s\n", err)
}
}

// check for custom template updates and update if available
ctm, err := customtemplates.NewCustomTemplatesManager(options)
if err != nil {
gologger.Error().Label("custom-templates").Msgf("Failed to create custom templates manager: %s\n", err)
runner.Logger.Error().Label("custom-templates").Msgf("Failed to create custom templates manager: %s\n", err)
}

// Check for template updates and update if available.
@ -136,15 +138,15 @@ func New(options *types.Options) (*Runner, error) {
DisablePublicTemplates: options.PublicTemplateDisableDownload,
}
if err := tm.FreshInstallIfNotExists(); err != nil {
gologger.Warning().Msgf("failed to install nuclei templates: %s\n", err)
runner.Logger.Warning().Msgf("failed to install nuclei templates: %s\n", err)
}
if err := tm.UpdateIfOutdated(); err != nil {
gologger.Warning().Msgf("failed to update nuclei templates: %s\n", err)
runner.Logger.Warning().Msgf("failed to update nuclei templates: %s\n", err)
}

||||||
if config.DefaultConfig.NeedsIgnoreFileUpdate() {
|
if config.DefaultConfig.NeedsIgnoreFileUpdate() {
|
||||||
if err := installer.UpdateIgnoreFile(); err != nil {
|
if err := installer.UpdateIgnoreFile(); err != nil {
|
||||||
gologger.Warning().Msgf("failed to update nuclei ignore file: %s\n", err)
|
runner.Logger.Warning().Msgf("failed to update nuclei ignore file: %s\n", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -152,7 +154,7 @@ func New(options *types.Options) (*Runner, error) {
|
|||||||
// we automatically check for updates unless explicitly disabled
|
// we automatically check for updates unless explicitly disabled
|
||||||
// this print statement is only to inform the user that there are no updates
|
// this print statement is only to inform the user that there are no updates
|
||||||
if !config.DefaultConfig.NeedsTemplateUpdate() {
|
if !config.DefaultConfig.NeedsTemplateUpdate() {
|
||||||
gologger.Info().Msgf("No new updates found for nuclei templates")
|
runner.Logger.Info().Msgf("No new updates found for nuclei templates")
|
||||||
}
|
}
|
||||||
// manually trigger update of custom templates
|
// manually trigger update of custom templates
|
||||||
if ctm != nil {
|
if ctm != nil {
|
||||||
@ -161,20 +163,25 @@ func New(options *types.Options) (*Runner, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
parser := templates.NewParser()
|
if op, ok := options.Parser.(*templates.Parser); ok {
|
||||||
|
// Enable passing in an existing parser instance
|
||||||
if options.Validate {
|
// This uses a type assertion to avoid an import loop
|
||||||
parser.ShouldValidate = true
|
runner.parser = op
|
||||||
|
} else {
|
||||||
|
parser := templates.NewParser()
|
||||||
|
if options.Validate {
|
||||||
|
parser.ShouldValidate = true
|
||||||
|
}
|
||||||
|
// TODO: refactor to pass options reference globally without cycles
|
||||||
|
parser.NoStrictSyntax = options.NoStrictSyntax
|
||||||
|
runner.parser = parser
|
||||||
}
|
}
|
||||||
// TODO: refactor to pass options reference globally without cycles
|
|
||||||
parser.NoStrictSyntax = options.NoStrictSyntax
|
|
||||||
runner.parser = parser
|
|
||||||
|
|
||||||
yaml.StrictSyntax = !options.NoStrictSyntax
|
yaml.StrictSyntax = !options.NoStrictSyntax
|
||||||
|
|
||||||
if options.Headless {
|
if options.Headless {
|
||||||
if engine.MustDisableSandbox() {
|
if engine.MustDisableSandbox() {
|
||||||
gologger.Warning().Msgf("The current platform and privileged user will run the browser without sandbox\n")
|
runner.Logger.Warning().Msgf("The current platform and privileged user will run the browser without sandbox\n")
|
||||||
}
|
}
|
||||||
browser, err := engine.New(options)
|
browser, err := engine.New(options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
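The parser hand-off at the top of this hunk lets callers inject an already-populated template parser through options.Parser (typed as an interface so the runner and templates packages avoid an import loop), and New() type-asserts it back. A rough sketch of that hand-off, assuming code inside the nuclei module where internal/runner is importable and most other options are left at their defaults:

package main

import (
	"github.com/projectdiscovery/nuclei/v3/internal/runner"
	"github.com/projectdiscovery/nuclei/v3/pkg/templates"
	"github.com/projectdiscovery/nuclei/v3/pkg/types"
)

func main() {
	opts := types.DefaultOptions()
	opts.Parser = templates.NewParser() // warm cache, reused instead of re-parsing per runner
	r, err := runner.New(opts)          // New() type-asserts options.Parser back to *templates.Parser
	if err != nil {
		panic(err)
	}
	defer r.Close()
}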
@ -226,11 +233,11 @@ func New(options *types.Options) (*Runner, error) {
|
|||||||
|
|
||||||
if options.HttpApiEndpoint != "" {
|
if options.HttpApiEndpoint != "" {
|
||||||
apiServer := httpapi.New(options.HttpApiEndpoint, options)
|
apiServer := httpapi.New(options.HttpApiEndpoint, options)
|
||||||
gologger.Info().Msgf("Listening api endpoint on: %s", options.HttpApiEndpoint)
|
runner.Logger.Info().Msgf("Listening api endpoint on: %s", options.HttpApiEndpoint)
|
||||||
runner.httpApiEndpoint = apiServer
|
runner.httpApiEndpoint = apiServer
|
||||||
go func() {
|
go func() {
|
||||||
if err := apiServer.Start(); err != nil {
|
if err := apiServer.Start(); err != nil {
|
||||||
gologger.Error().Msgf("Failed to start API server: %s", err)
|
runner.Logger.Error().Msgf("Failed to start API server: %s", err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
@ -284,7 +291,7 @@ func New(options *types.Options) (*Runner, error) {
|
|||||||
// create the resume configuration structure
|
// create the resume configuration structure
|
||||||
resumeCfg := types.NewResumeCfg()
|
resumeCfg := types.NewResumeCfg()
|
||||||
if runner.options.ShouldLoadResume() {
|
if runner.options.ShouldLoadResume() {
|
||||||
gologger.Info().Msg("Resuming from save checkpoint")
|
runner.Logger.Info().Msg("Resuming from save checkpoint")
|
||||||
file, err := os.ReadFile(runner.options.Resume)
|
file, err := os.ReadFile(runner.options.Resume)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -326,6 +333,7 @@ func New(options *types.Options) (*Runner, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
opts := interactsh.DefaultOptions(runner.output, runner.issuesClient, runner.progress)
|
opts := interactsh.DefaultOptions(runner.output, runner.issuesClient, runner.progress)
|
||||||
|
opts.Logger = runner.Logger
|
||||||
opts.Debug = runner.options.Debug
|
opts.Debug = runner.options.Debug
|
||||||
opts.NoColor = runner.options.NoColor
|
opts.NoColor = runner.options.NoColor
|
||||||
if options.InteractshURL != "" {
|
if options.InteractshURL != "" {
|
||||||
@ -355,13 +363,13 @@ func New(options *types.Options) (*Runner, error) {
|
|||||||
}
|
}
|
||||||
interactshClient, err := interactsh.New(opts)
|
interactshClient, err := interactsh.New(opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
gologger.Error().Msgf("Could not create interactsh client: %s", err)
|
runner.Logger.Error().Msgf("Could not create interactsh client: %s", err)
|
||||||
} else {
|
} else {
|
||||||
runner.interactsh = interactshClient
|
runner.interactsh = interactshClient
|
||||||
}
|
}
|
||||||
|
|
||||||
if options.RateLimitMinute > 0 {
|
if options.RateLimitMinute > 0 {
|
||||||
gologger.Print().Msgf("[%v] %v", aurora.BrightYellow("WRN"), "rate limit per minute is deprecated - use rate-limit-duration")
|
runner.Logger.Print().Msgf("[%v] %v", aurora.BrightYellow("WRN"), "rate limit per minute is deprecated - use rate-limit-duration")
|
||||||
options.RateLimit = options.RateLimitMinute
|
options.RateLimit = options.RateLimitMinute
|
||||||
options.RateLimitDuration = time.Minute
|
options.RateLimitDuration = time.Minute
|
||||||
}
|
}
|
||||||
@ -382,7 +390,7 @@ func New(options *types.Options) (*Runner, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// runStandardEnumeration runs standard enumeration
|
// runStandardEnumeration runs standard enumeration
|
||||||
func (r *Runner) runStandardEnumeration(executerOpts protocols.ExecutorOptions, store *loader.Store, engine *core.Engine) (*atomic.Bool, error) {
|
func (r *Runner) runStandardEnumeration(executerOpts *protocols.ExecutorOptions, store *loader.Store, engine *core.Engine) (*atomic.Bool, error) {
|
||||||
if r.options.AutomaticScan {
|
if r.options.AutomaticScan {
|
||||||
return r.executeSmartWorkflowInput(executerOpts, store, engine)
|
return r.executeSmartWorkflowInput(executerOpts, store, engine)
|
||||||
}
|
}
|
||||||
@ -413,7 +421,7 @@ func (r *Runner) Close() {
|
|||||||
if r.inputProvider != nil {
|
if r.inputProvider != nil {
|
||||||
r.inputProvider.Close()
|
r.inputProvider.Close()
|
||||||
}
|
}
|
||||||
protocolinit.Close()
|
protocolinit.Close(r.options.ExecutionId)
|
||||||
if r.pprofServer != nil {
|
if r.pprofServer != nil {
|
||||||
r.pprofServer.Stop()
|
r.pprofServer.Stop()
|
||||||
}
|
}
|
||||||
@ -440,22 +448,21 @@ func (r *Runner) setupPDCPUpload(writer output.Writer) output.Writer {
|
|||||||
r.options.EnableCloudUpload = true
|
r.options.EnableCloudUpload = true
|
||||||
}
|
}
|
||||||
if !r.options.EnableCloudUpload && !EnableCloudUpload {
|
if !r.options.EnableCloudUpload && !EnableCloudUpload {
|
||||||
r.pdcpUploadErrMsg = fmt.Sprintf("[%v] Scan results upload to cloud is disabled.", r.colorizer.BrightYellow("WRN"))
|
r.pdcpUploadErrMsg = "Scan results upload to cloud is disabled."
|
||||||
return writer
|
return writer
|
||||||
}
|
}
|
||||||
color := aurora.NewAurora(!r.options.NoColor)
|
|
||||||
h := &pdcpauth.PDCPCredHandler{}
|
h := &pdcpauth.PDCPCredHandler{}
|
||||||
creds, err := h.GetCreds()
|
creds, err := h.GetCreds()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err != pdcpauth.ErrNoCreds && !HideAutoSaveMsg {
|
if err != pdcpauth.ErrNoCreds && !HideAutoSaveMsg {
|
||||||
gologger.Verbose().Msgf("Could not get credentials for cloud upload: %s\n", err)
|
r.Logger.Verbose().Msgf("Could not get credentials for cloud upload: %s\n", err)
|
||||||
}
|
}
|
||||||
r.pdcpUploadErrMsg = fmt.Sprintf("[%v] To view results on Cloud Dashboard, Configure API key from %v", color.BrightYellow("WRN"), pdcpauth.DashBoardURL)
|
r.pdcpUploadErrMsg = fmt.Sprintf("To view results on Cloud Dashboard, configure API key from %v", pdcpauth.DashBoardURL)
|
||||||
return writer
|
return writer
|
||||||
}
|
}
|
||||||
uploadWriter, err := pdcp.NewUploadWriter(context.Background(), creds)
|
uploadWriter, err := pdcp.NewUploadWriter(context.Background(), r.Logger, creds)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
r.pdcpUploadErrMsg = fmt.Sprintf("[%v] PDCP (%v) Auto-Save Failed: %s\n", color.BrightYellow("WRN"), pdcpauth.DashBoardURL, err)
|
r.pdcpUploadErrMsg = fmt.Sprintf("PDCP (%v) Auto-Save Failed: %s\n", pdcpauth.DashBoardURL, err)
|
||||||
return writer
|
return writer
|
||||||
}
|
}
|
||||||
if r.options.ScanID != "" {
|
if r.options.ScanID != "" {
|
||||||
@ -491,6 +498,7 @@ func (r *Runner) RunEnumeration() error {
|
|||||||
Parser: r.parser,
|
Parser: r.parser,
|
||||||
TemporaryDirectory: r.tmpDir,
|
TemporaryDirectory: r.tmpDir,
|
||||||
FuzzStatsDB: r.fuzzStats,
|
FuzzStatsDB: r.fuzzStats,
|
||||||
|
Logger: r.Logger,
|
||||||
}
|
}
|
||||||
dastServer, err := server.New(&server.Options{
|
dastServer, err := server.New(&server.Options{
|
||||||
Address: r.options.DASTServerAddress,
|
Address: r.options.DASTServerAddress,
|
||||||
@ -532,7 +540,7 @@ func (r *Runner) RunEnumeration() error {
|
|||||||
|
|
||||||
// Create the executor options which will be used throughout the execution
|
// Create the executor options which will be used throughout the execution
|
||||||
// stage by the nuclei engine modules.
|
// stage by the nuclei engine modules.
|
||||||
executorOpts := protocols.ExecutorOptions{
|
executorOpts := &protocols.ExecutorOptions{
|
||||||
Output: r.output,
|
Output: r.output,
|
||||||
Options: r.options,
|
Options: r.options,
|
||||||
Progress: r.progress,
|
Progress: r.progress,
|
||||||
@ -550,6 +558,8 @@ func (r *Runner) RunEnumeration() error {
|
|||||||
Parser: r.parser,
|
Parser: r.parser,
|
||||||
FuzzParamsFrequency: fuzzFreqCache,
|
FuzzParamsFrequency: fuzzFreqCache,
|
||||||
GlobalMatchers: globalmatchers.New(),
|
GlobalMatchers: globalmatchers.New(),
|
||||||
|
DoNotCache: r.options.DoNotCacheTemplates,
|
||||||
|
Logger: r.Logger,
|
||||||
}
|
}
|
||||||
|
|
||||||
if config.DefaultConfig.IsDebugArgEnabled(config.DebugExportURLPattern) {
|
if config.DefaultConfig.IsDebugArgEnabled(config.DebugExportURLPattern) {
|
||||||
@ -558,7 +568,7 @@ func (r *Runner) RunEnumeration() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(r.options.SecretsFile) > 0 && !r.options.Validate {
|
if len(r.options.SecretsFile) > 0 && !r.options.Validate {
|
||||||
authTmplStore, err := GetAuthTmplStore(*r.options, r.catalog, executorOpts)
|
authTmplStore, err := GetAuthTmplStore(r.options, r.catalog, executorOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "failed to load dynamic auth templates")
|
return errors.Wrap(err, "failed to load dynamic auth templates")
|
||||||
}
|
}
|
||||||
@ -578,8 +588,8 @@ func (r *Runner) RunEnumeration() error {
|
|||||||
if r.options.ShouldUseHostError() {
|
if r.options.ShouldUseHostError() {
|
||||||
maxHostError := r.options.MaxHostError
|
maxHostError := r.options.MaxHostError
|
||||||
if r.options.TemplateThreads > maxHostError {
|
if r.options.TemplateThreads > maxHostError {
|
||||||
gologger.Print().Msgf("[%v] The concurrency value is higher than max-host-error", r.colorizer.BrightYellow("WRN"))
|
r.Logger.Print().Msgf("[%v] The concurrency value is higher than max-host-error", r.colorizer.BrightYellow("WRN"))
|
||||||
gologger.Info().Msgf("Adjusting max-host-error to the concurrency value: %d", r.options.TemplateThreads)
|
r.Logger.Info().Msgf("Adjusting max-host-error to the concurrency value: %d", r.options.TemplateThreads)
|
||||||
|
|
||||||
maxHostError = r.options.TemplateThreads
|
maxHostError = r.options.TemplateThreads
|
||||||
}
|
}
|
||||||
@ -594,7 +604,7 @@ func (r *Runner) RunEnumeration() error {
|
|||||||
executorEngine := core.New(r.options)
|
executorEngine := core.New(r.options)
|
||||||
executorEngine.SetExecuterOptions(executorOpts)
|
executorEngine.SetExecuterOptions(executorOpts)
|
||||||
|
|
||||||
workflowLoader, err := parsers.NewLoader(&executorOpts)
|
workflowLoader, err := parsers.NewLoader(executorOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "Could not create loader.")
|
return errors.Wrap(err, "Could not create loader.")
|
||||||
}
|
}
|
||||||
@ -633,7 +643,7 @@ func (r *Runner) RunEnumeration() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if stats.GetValue(templates.SyntaxErrorStats) == 0 && stats.GetValue(templates.SyntaxWarningStats) == 0 && stats.GetValue(templates.RuntimeWarningsStats) == 0 {
|
if stats.GetValue(templates.SyntaxErrorStats) == 0 && stats.GetValue(templates.SyntaxWarningStats) == 0 && stats.GetValue(templates.RuntimeWarningsStats) == 0 {
|
||||||
gologger.Info().Msgf("All templates validated successfully\n")
|
r.Logger.Info().Msgf("All templates validated successfully")
|
||||||
} else {
|
} else {
|
||||||
return errors.New("encountered errors while performing template validation")
|
return errors.New("encountered errors while performing template validation")
|
||||||
}
|
}
|
||||||
@ -655,7 +665,7 @@ func (r *Runner) RunEnumeration() error {
|
|||||||
}
|
}
|
||||||
ret := uncover.GetUncoverTargetsFromMetadata(context.TODO(), store.Templates(), r.options.UncoverField, uncoverOpts)
|
ret := uncover.GetUncoverTargetsFromMetadata(context.TODO(), store.Templates(), r.options.UncoverField, uncoverOpts)
|
||||||
for host := range ret {
|
for host := range ret {
|
||||||
_ = r.inputProvider.SetWithExclusions(host)
|
_ = r.inputProvider.SetWithExclusions(r.options.ExecutionId, host)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// display execution info like version , templates used etc
|
// display execution info like version , templates used etc
|
||||||
@ -663,7 +673,7 @@ func (r *Runner) RunEnumeration() error {
|
|||||||
|
|
||||||
// prefetch secrets if enabled
|
// prefetch secrets if enabled
|
||||||
if executorOpts.AuthProvider != nil && r.options.PreFetchSecrets {
|
if executorOpts.AuthProvider != nil && r.options.PreFetchSecrets {
|
||||||
gologger.Info().Msgf("Pre-fetching secrets from authprovider[s]")
|
r.Logger.Info().Msgf("Pre-fetching secrets from authprovider[s]")
|
||||||
if err := executorOpts.AuthProvider.PreFetchSecrets(); err != nil {
|
if err := executorOpts.AuthProvider.PreFetchSecrets(); err != nil {
|
||||||
return errors.Wrap(err, "could not pre-fetch secrets")
|
return errors.Wrap(err, "could not pre-fetch secrets")
|
||||||
}
|
}
|
||||||
@ -697,7 +707,7 @@ func (r *Runner) RunEnumeration() error {
|
|||||||
if r.dastServer != nil {
|
if r.dastServer != nil {
|
||||||
go func() {
|
go func() {
|
||||||
if err := r.dastServer.Start(); err != nil {
|
if err := r.dastServer.Start(); err != nil {
|
||||||
gologger.Error().Msgf("could not start dast server: %v", err)
|
r.Logger.Error().Msgf("could not start dast server: %v", err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
@ -731,10 +741,10 @@ func (r *Runner) RunEnumeration() error {
|
|||||||
// todo: error propagation without canonical straight error check is required by cloud?
|
// todo: error propagation without canonical straight error check is required by cloud?
|
||||||
// use safe dereferencing to avoid potential panics in case of previous unchecked errors
|
// use safe dereferencing to avoid potential panics in case of previous unchecked errors
|
||||||
if v := ptrutil.Safe(results); !v.Load() {
|
if v := ptrutil.Safe(results); !v.Load() {
|
||||||
gologger.Info().Msgf("Scan completed in %s. No results found.", shortDur(timeTaken))
|
r.Logger.Info().Msgf("Scan completed in %s. No results found.", shortDur(timeTaken))
|
||||||
} else {
|
} else {
|
||||||
matchCount := r.output.ResultCount()
|
matchCount := r.output.ResultCount()
|
||||||
gologger.Info().Msgf("Scan completed in %s. %d matches found.", shortDur(timeTaken), matchCount)
|
r.Logger.Info().Msgf("Scan completed in %s. %d matches found.", shortDur(timeTaken), matchCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
// check if a passive scan was requested but no target was provided
|
// check if a passive scan was requested but no target was provided
|
||||||
@ -775,7 +785,7 @@ func (r *Runner) isInputNonHTTP() bool {
|
|||||||
return nonURLInput
|
return nonURLInput
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Runner) executeSmartWorkflowInput(executorOpts protocols.ExecutorOptions, store *loader.Store, engine *core.Engine) (*atomic.Bool, error) {
|
func (r *Runner) executeSmartWorkflowInput(executorOpts *protocols.ExecutorOptions, store *loader.Store, engine *core.Engine) (*atomic.Bool, error) {
|
||||||
r.progress.Init(r.inputProvider.Count(), 0, 0)
|
r.progress.Init(r.inputProvider.Count(), 0, 0)
|
||||||
|
|
||||||
service, err := automaticscan.New(automaticscan.Options{
|
service, err := automaticscan.New(automaticscan.Options{
|
||||||
@ -843,7 +853,7 @@ func (r *Runner) displayExecutionInfo(store *loader.Store) {
|
|||||||
if tmplCount == 0 && workflowCount == 0 {
|
if tmplCount == 0 && workflowCount == 0 {
|
||||||
// if dast flag is used print explicit warning
|
// if dast flag is used print explicit warning
|
||||||
if r.options.DAST {
|
if r.options.DAST {
|
||||||
gologger.DefaultLogger.Print().Msgf("[%v] No DAST templates found", aurora.BrightYellow("WRN"))
|
r.Logger.Print().Msgf("[%v] No DAST templates found", aurora.BrightYellow("WRN"))
|
||||||
}
|
}
|
||||||
stats.ForceDisplayWarning(templates.SkippedCodeTmplTamperedStats)
|
stats.ForceDisplayWarning(templates.SkippedCodeTmplTamperedStats)
|
||||||
} else {
|
} else {
|
||||||
@ -867,34 +877,34 @@ func (r *Runner) displayExecutionInfo(store *loader.Store) {
|
|||||||
gologger.Info().Msg(versionInfo(cfg.TemplateVersion, cfg.LatestNucleiTemplatesVersion, "nuclei-templates"))
|
gologger.Info().Msg(versionInfo(cfg.TemplateVersion, cfg.LatestNucleiTemplatesVersion, "nuclei-templates"))
|
||||||
if !HideAutoSaveMsg {
|
if !HideAutoSaveMsg {
|
||||||
if r.pdcpUploadErrMsg != "" {
|
if r.pdcpUploadErrMsg != "" {
|
||||||
gologger.Print().Msgf("%s", r.pdcpUploadErrMsg)
|
r.Logger.Warning().Msgf("%s", r.pdcpUploadErrMsg)
|
||||||
} else {
|
} else {
|
||||||
gologger.Info().Msgf("To view results on cloud dashboard, visit %v/scans upon scan completion.", pdcpauth.DashBoardURL)
|
r.Logger.Info().Msgf("To view results on cloud dashboard, visit %v/scans upon scan completion.", pdcpauth.DashBoardURL)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if tmplCount > 0 || workflowCount > 0 {
|
if tmplCount > 0 || workflowCount > 0 {
|
||||||
if len(store.Templates()) > 0 {
|
if len(store.Templates()) > 0 {
|
||||||
gologger.Info().Msgf("New templates added in latest release: %d", len(config.DefaultConfig.GetNewAdditions()))
|
r.Logger.Info().Msgf("New templates added in latest release: %d", len(config.DefaultConfig.GetNewAdditions()))
|
||||||
gologger.Info().Msgf("Templates loaded for current scan: %d", len(store.Templates()))
|
r.Logger.Info().Msgf("Templates loaded for current scan: %d", len(store.Templates()))
|
||||||
}
|
}
|
||||||
if len(store.Workflows()) > 0 {
|
if len(store.Workflows()) > 0 {
|
||||||
gologger.Info().Msgf("Workflows loaded for current scan: %d", len(store.Workflows()))
|
r.Logger.Info().Msgf("Workflows loaded for current scan: %d", len(store.Workflows()))
|
||||||
}
|
}
|
||||||
for k, v := range templates.SignatureStats {
|
for k, v := range templates.SignatureStats {
|
||||||
value := v.Load()
|
value := v.Load()
|
||||||
if value > 0 {
|
if value > 0 {
|
||||||
if k == templates.Unsigned && !r.options.Silent && !config.DefaultConfig.HideTemplateSigWarning {
|
if k == templates.Unsigned && !r.options.Silent && !config.DefaultConfig.HideTemplateSigWarning {
|
||||||
gologger.Print().Msgf("[%v] Loading %d unsigned templates for scan. Use with caution.", r.colorizer.BrightYellow("WRN"), value)
|
r.Logger.Print().Msgf("[%v] Loading %d unsigned templates for scan. Use with caution.", r.colorizer.BrightYellow("WRN"), value)
|
||||||
} else {
|
} else {
|
||||||
gologger.Info().Msgf("Executing %d signed templates from %s", value, k)
|
r.Logger.Info().Msgf("Executing %d signed templates from %s", value, k)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if r.inputProvider.Count() > 0 {
|
if r.inputProvider.Count() > 0 {
|
||||||
gologger.Info().Msgf("Targets loaded for current scan: %d", r.inputProvider.Count())
|
r.Logger.Info().Msgf("Targets loaded for current scan: %d", r.inputProvider.Count())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -921,7 +931,7 @@ func UploadResultsToCloud(options *types.Options) error {
|
|||||||
return errors.Wrap(err, "could not get credentials for cloud upload")
|
return errors.Wrap(err, "could not get credentials for cloud upload")
|
||||||
}
|
}
|
||||||
ctx := context.TODO()
|
ctx := context.TODO()
|
||||||
uploadWriter, err := pdcp.NewUploadWriter(ctx, creds)
|
uploadWriter, err := pdcp.NewUploadWriter(ctx, options.Logger, creds)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "could not create upload writer")
|
return errors.Wrap(err, "could not create upload writer")
|
||||||
}
|
}
|
||||||
@ -941,20 +951,20 @@ func UploadResultsToCloud(options *types.Options) error {
|
|||||||
return errors.Wrap(err, "could not open scan upload file")
|
return errors.Wrap(err, "could not open scan upload file")
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
_ = file.Close()
|
_ = file.Close()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
gologger.Info().Msgf("Uploading scan results to cloud dashboard from %s", options.ScanUploadFile)
|
options.Logger.Info().Msgf("Uploading scan results to cloud dashboard from %s", options.ScanUploadFile)
|
||||||
dec := json.NewDecoder(file)
|
dec := json.NewDecoder(file)
|
||||||
for dec.More() {
|
for dec.More() {
|
||||||
var r output.ResultEvent
|
var r output.ResultEvent
|
||||||
err := dec.Decode(&r)
|
err := dec.Decode(&r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
gologger.Warning().Msgf("Could not decode jsonl: %s\n", err)
|
options.Logger.Warning().Msgf("Could not decode jsonl: %s\n", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err = uploadWriter.Write(&r); err != nil {
|
if err = uploadWriter.Write(&r); err != nil {
|
||||||
gologger.Warning().Msgf("[%s] failed to upload: %s\n", r.TemplateID, err)
|
options.Logger.Warning().Msgf("[%s] failed to upload: %s\n", r.TemplateID, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
uploadWriter.Close()
|
uploadWriter.Close()
|
||||||
|
|||||||
@ -12,7 +12,6 @@ import (
|
|||||||
"github.com/projectdiscovery/nuclei/v3/pkg/catalog/config"
|
"github.com/projectdiscovery/nuclei/v3/pkg/catalog/config"
|
||||||
"github.com/projectdiscovery/nuclei/v3/pkg/catalog/loader"
|
"github.com/projectdiscovery/nuclei/v3/pkg/catalog/loader"
|
||||||
|
|
||||||
"github.com/projectdiscovery/gologger"
|
|
||||||
"github.com/projectdiscovery/nuclei/v3/pkg/templates"
|
"github.com/projectdiscovery/nuclei/v3/pkg/templates"
|
||||||
"github.com/projectdiscovery/nuclei/v3/pkg/types"
|
"github.com/projectdiscovery/nuclei/v3/pkg/types"
|
||||||
)
|
)
|
||||||
@ -25,7 +24,7 @@ func (r *Runner) logAvailableTemplate(tplPath string) {
|
|||||||
panic("not a template")
|
panic("not a template")
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
gologger.Error().Msgf("Could not parse file '%s': %s\n", tplPath, err)
|
r.Logger.Error().Msgf("Could not parse file '%s': %s\n", tplPath, err)
|
||||||
} else {
|
} else {
|
||||||
r.verboseTemplate(tpl)
|
r.verboseTemplate(tpl)
|
||||||
}
|
}
|
||||||
@ -33,14 +32,14 @@ func (r *Runner) logAvailableTemplate(tplPath string) {
|
|||||||
|
|
||||||
// log available templates for verbose (-vv)
|
// log available templates for verbose (-vv)
|
||||||
func (r *Runner) verboseTemplate(tpl *templates.Template) {
|
func (r *Runner) verboseTemplate(tpl *templates.Template) {
|
||||||
gologger.Print().Msgf("%s\n", templates.TemplateLogMessage(tpl.ID,
|
r.Logger.Print().Msgf("%s\n", templates.TemplateLogMessage(tpl.ID,
|
||||||
types.ToString(tpl.Info.Name),
|
types.ToString(tpl.Info.Name),
|
||||||
tpl.Info.Authors.ToSlice(),
|
tpl.Info.Authors.ToSlice(),
|
||||||
tpl.Info.SeverityHolder.Severity))
|
tpl.Info.SeverityHolder.Severity))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Runner) listAvailableStoreTemplates(store *loader.Store) {
|
func (r *Runner) listAvailableStoreTemplates(store *loader.Store) {
|
||||||
gologger.Print().Msgf(
|
r.Logger.Print().Msgf(
|
||||||
"\nListing available %v nuclei templates for %v",
|
"\nListing available %v nuclei templates for %v",
|
||||||
config.DefaultConfig.TemplateVersion,
|
config.DefaultConfig.TemplateVersion,
|
||||||
config.DefaultConfig.TemplatesDirectory,
|
config.DefaultConfig.TemplatesDirectory,
|
||||||
@ -52,20 +51,20 @@ func (r *Runner) listAvailableStoreTemplates(store *loader.Store) {
|
|||||||
path := tpl.Path
|
path := tpl.Path
|
||||||
tplBody, err := store.ReadTemplateFromURI(path, true)
|
tplBody, err := store.ReadTemplateFromURI(path, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
gologger.Error().Msgf("Could not read the template %s: %s", path, err)
|
r.Logger.Error().Msgf("Could not read the template %s: %s", path, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if colorize {
|
if colorize {
|
||||||
path = aurora.Cyan(tpl.Path).String()
|
path = aurora.Cyan(tpl.Path).String()
|
||||||
tplBody, err = r.highlightTemplate(&tplBody)
|
tplBody, err = r.highlightTemplate(&tplBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
gologger.Error().Msgf("Could not highlight the template %s: %s", tpl.Path, err)
|
r.Logger.Error().Msgf("Could not highlight the template %s: %s", tpl.Path, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
gologger.Silent().Msgf("Template: %s\n\n%s", path, tplBody)
|
r.Logger.Print().Msgf("Template: %s\n\n%s", path, tplBody)
|
||||||
} else {
|
} else {
|
||||||
gologger.Silent().Msgf("%s\n", strings.TrimPrefix(tpl.Path, config.DefaultConfig.TemplatesDirectory+string(filepath.Separator)))
|
r.Logger.Print().Msgf("%s\n", strings.TrimPrefix(tpl.Path, config.DefaultConfig.TemplatesDirectory+string(filepath.Separator)))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
r.verboseTemplate(tpl)
|
r.verboseTemplate(tpl)
|
||||||
@ -74,7 +73,7 @@ func (r *Runner) listAvailableStoreTemplates(store *loader.Store) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *Runner) listAvailableStoreTags(store *loader.Store) {
|
func (r *Runner) listAvailableStoreTags(store *loader.Store) {
|
||||||
gologger.Print().Msgf(
|
r.Logger.Print().Msgf(
|
||||||
"\nListing available %v nuclei tags for %v",
|
"\nListing available %v nuclei tags for %v",
|
||||||
config.DefaultConfig.TemplateVersion,
|
config.DefaultConfig.TemplateVersion,
|
||||||
config.DefaultConfig.TemplatesDirectory,
|
config.DefaultConfig.TemplatesDirectory,
|
||||||
@ -100,9 +99,9 @@ func (r *Runner) listAvailableStoreTags(store *loader.Store) {
|
|||||||
for _, tag := range tagsList {
|
for _, tag := range tagsList {
|
||||||
if r.options.JSONL {
|
if r.options.JSONL {
|
||||||
marshalled, _ := jsoniter.Marshal(tag)
|
marshalled, _ := jsoniter.Marshal(tag)
|
||||||
gologger.Silent().Msgf("%s\n", string(marshalled))
|
r.Logger.Debug().Msgf("%s", string(marshalled))
|
||||||
} else {
|
} else {
|
||||||
gologger.Silent().Msgf("%s (%d)\n", tag.Key, tag.Value)
|
r.Logger.Debug().Msgf("%s (%d)", tag.Key, tag.Value)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -41,7 +41,7 @@ type nucleiExecutor struct {
|
|||||||
engine *core.Engine
|
engine *core.Engine
|
||||||
store *loader.Store
|
store *loader.Store
|
||||||
options *NucleiExecutorOptions
|
options *NucleiExecutorOptions
|
||||||
executorOpts protocols.ExecutorOptions
|
executorOpts *protocols.ExecutorOptions
|
||||||
}
|
}
|
||||||
|
|
||||||
type NucleiExecutorOptions struct {
|
type NucleiExecutorOptions struct {
|
||||||
@ -58,6 +58,7 @@ type NucleiExecutorOptions struct {
|
|||||||
Colorizer aurora.Aurora
|
Colorizer aurora.Aurora
|
||||||
Parser parser.Parser
|
Parser parser.Parser
|
||||||
TemporaryDirectory string
|
TemporaryDirectory string
|
||||||
|
Logger *gologger.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func newNucleiExecutor(opts *NucleiExecutorOptions) (*nucleiExecutor, error) {
|
func newNucleiExecutor(opts *NucleiExecutorOptions) (*nucleiExecutor, error) {
|
||||||
@ -66,7 +67,7 @@ func newNucleiExecutor(opts *NucleiExecutorOptions) (*nucleiExecutor, error) {
|
|||||||
|
|
||||||
// Create the executor options which will be used throughout the execution
|
// Create the executor options which will be used throughout the execution
|
||||||
// stage by the nuclei engine modules.
|
// stage by the nuclei engine modules.
|
||||||
executorOpts := protocols.ExecutorOptions{
|
executorOpts := &protocols.ExecutorOptions{
|
||||||
Output: opts.Output,
|
Output: opts.Output,
|
||||||
Options: opts.Options,
|
Options: opts.Options,
|
||||||
Progress: opts.Progress,
|
Progress: opts.Progress,
|
||||||
@ -85,6 +86,7 @@ func newNucleiExecutor(opts *NucleiExecutorOptions) (*nucleiExecutor, error) {
|
|||||||
FuzzParamsFrequency: fuzzFreqCache,
|
FuzzParamsFrequency: fuzzFreqCache,
|
||||||
GlobalMatchers: globalmatchers.New(),
|
GlobalMatchers: globalmatchers.New(),
|
||||||
FuzzStatsDB: opts.FuzzStatsDB,
|
FuzzStatsDB: opts.FuzzStatsDB,
|
||||||
|
Logger: opts.Logger,
|
||||||
}
|
}
|
||||||
|
|
||||||
if opts.Options.ShouldUseHostError() {
|
if opts.Options.ShouldUseHostError() {
|
||||||
@ -93,7 +95,7 @@ func newNucleiExecutor(opts *NucleiExecutorOptions) (*nucleiExecutor, error) {
|
|||||||
maxHostError = 100 // auto adjust for fuzzings
|
maxHostError = 100 // auto adjust for fuzzings
|
||||||
}
|
}
|
||||||
if opts.Options.TemplateThreads > maxHostError {
|
if opts.Options.TemplateThreads > maxHostError {
|
||||||
gologger.Info().Msgf("Adjusting max-host-error to the concurrency value: %d", opts.Options.TemplateThreads)
|
opts.Logger.Info().Msgf("Adjusting max-host-error to the concurrency value: %d", opts.Options.TemplateThreads)
|
||||||
|
|
||||||
maxHostError = opts.Options.TemplateThreads
|
maxHostError = opts.Options.TemplateThreads
|
||||||
}
|
}
|
||||||
@ -107,7 +109,7 @@ func newNucleiExecutor(opts *NucleiExecutorOptions) (*nucleiExecutor, error) {
|
|||||||
executorEngine := core.New(opts.Options)
|
executorEngine := core.New(opts.Options)
|
||||||
executorEngine.SetExecuterOptions(executorOpts)
|
executorEngine.SetExecuterOptions(executorOpts)
|
||||||
|
|
||||||
workflowLoader, err := parsers.NewLoader(&executorOpts)
|
workflowLoader, err := parsers.NewLoader(executorOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "Could not create loader options.")
|
return nil, errors.Wrap(err, "Could not create loader options.")
|
||||||
}
|
}
|
||||||
|
|||||||
@ -112,7 +112,7 @@ func New(options *Options) (*DASTServer, error) {
|
|||||||
func NewStatsServer(fuzzStatsDB *stats.Tracker) (*DASTServer, error) {
|
func NewStatsServer(fuzzStatsDB *stats.Tracker) (*DASTServer, error) {
|
||||||
server := &DASTServer{
|
server := &DASTServer{
|
||||||
nucleiExecutor: &nucleiExecutor{
|
nucleiExecutor: &nucleiExecutor{
|
||||||
executorOpts: protocols.ExecutorOptions{
|
executorOpts: &protocols.ExecutorOptions{
|
||||||
FuzzStatsDB: fuzzStatsDB,
|
FuzzStatsDB: fuzzStatsDB,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|||||||
@ -19,6 +19,7 @@ import (
|
|||||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/utils/vardump"
|
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/utils/vardump"
|
||||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/headless/engine"
|
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/headless/engine"
|
||||||
"github.com/projectdiscovery/nuclei/v3/pkg/templates/types"
|
"github.com/projectdiscovery/nuclei/v3/pkg/templates/types"
|
||||||
|
pkgtypes "github.com/projectdiscovery/nuclei/v3/pkg/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TemplateSources contains template sources
|
// TemplateSources contains template sources
|
||||||
@ -205,7 +206,7 @@ func EnableHeadlessWithOpts(hopts *HeadlessOpts) NucleiSDKOptions {
|
|||||||
e.opts.UseInstalledChrome = hopts.UseChrome
|
e.opts.UseInstalledChrome = hopts.UseChrome
|
||||||
}
|
}
|
||||||
if engine.MustDisableSandbox() {
|
if engine.MustDisableSandbox() {
|
||||||
gologger.Warning().Msgf("The current platform and privileged user will run the browser without sandbox\n")
|
e.Logger.Warning().Msgf("The current platform and privileged user will run the browser without sandbox")
|
||||||
}
|
}
|
||||||
browser, err := engine.New(e.opts)
|
browser, err := engine.New(e.opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -296,8 +297,8 @@ func WithNetworkConfig(opts NetworkConfig) NucleiSDKOptions {
|
|||||||
if e.opts.ShouldUseHostError() {
|
if e.opts.ShouldUseHostError() {
|
||||||
maxHostError := opts.MaxHostError
|
maxHostError := opts.MaxHostError
|
||||||
if e.opts.TemplateThreads > maxHostError {
|
if e.opts.TemplateThreads > maxHostError {
|
||||||
gologger.Print().Msgf("[%v] The concurrency value is higher than max-host-error", e.executerOpts.Colorizer.BrightYellow("WRN"))
|
e.Logger.Warning().Msg("The concurrency value is higher than max-host-error")
|
||||||
gologger.Info().Msgf("Adjusting max-host-error to the concurrency value: %d", e.opts.TemplateThreads)
|
e.Logger.Info().Msgf("Adjusting max-host-error to the concurrency value: %d", e.opts.TemplateThreads)
|
||||||
maxHostError = e.opts.TemplateThreads
|
maxHostError = e.opts.TemplateThreads
|
||||||
e.opts.MaxHostError = maxHostError
|
e.opts.MaxHostError = maxHostError
|
||||||
}
|
}
|
||||||
@ -419,6 +420,14 @@ func EnableGlobalMatchersTemplates() NucleiSDKOptions {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DisableTemplateCache disables template caching
|
||||||
|
func DisableTemplateCache() NucleiSDKOptions {
|
||||||
|
return func(e *NucleiEngine) error {
|
||||||
|
e.opts.DoNotCacheTemplates = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// EnableFileTemplates allows loading/executing file protocol templates
|
// EnableFileTemplates allows loading/executing file protocol templates
|
||||||
func EnableFileTemplates() NucleiSDKOptions {
|
func EnableFileTemplates() NucleiSDKOptions {
|
||||||
return func(e *NucleiEngine) error {
|
return func(e *NucleiEngine) error {
|
||||||
@ -527,3 +536,25 @@ func WithResumeFile(file string) NucleiSDKOptions {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithLogger allows setting a shared gologger instance
|
||||||
|
func WithLogger(logger *gologger.Logger) NucleiSDKOptions {
|
||||||
|
return func(e *NucleiEngine) error {
|
||||||
|
e.Logger = logger
|
||||||
|
if e.opts != nil {
|
||||||
|
e.opts.Logger = logger
|
||||||
|
}
|
||||||
|
if e.executerOpts != nil {
|
||||||
|
e.executerOpts.Logger = logger
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithOptions sets all options at once
|
||||||
|
func WithOptions(opts *pkgtypes.Options) NucleiSDKOptions {
|
||||||
|
return func(e *NucleiEngine) error {
|
||||||
|
e.opts = opts
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
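The SDK options added above (DisableTemplateCache, WithLogger, WithOptions) are plain functional options for the engine constructors. A minimal usage sketch for the first two, assuming the public SDK package under lib/ (imported here as nuclei); the target is a placeholder and error handling is abbreviated:

package main

import (
	"github.com/projectdiscovery/gologger"
	nuclei "github.com/projectdiscovery/nuclei/v3/lib"
	"github.com/projectdiscovery/nuclei/v3/pkg/output"
)

func main() {
	engine, err := nuclei.NewNucleiEngine(
		nuclei.WithLogger(gologger.DefaultLogger), // share one logger instance with the engine
		nuclei.DisableTemplateCache(),             // sets DoNotCacheTemplates on the options
	)
	if err != nil {
		panic(err)
	}
	defer engine.Close()

	engine.LoadTargets([]string{"http://example.com"}, false) // placeholder target
	_ = engine.ExecuteWithCallback(func(event *output.ResultEvent) {
		gologger.DefaultLogger.Info().Msgf("%s matched %s", event.TemplateID, event.Matched)
	})
}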
|
|||||||
17
lib/multi.go
17
lib/multi.go
@ -14,6 +14,7 @@ import (
|
|||||||
"github.com/projectdiscovery/nuclei/v3/pkg/types"
|
"github.com/projectdiscovery/nuclei/v3/pkg/types"
|
||||||
"github.com/projectdiscovery/ratelimit"
|
"github.com/projectdiscovery/ratelimit"
|
||||||
errorutil "github.com/projectdiscovery/utils/errors"
|
errorutil "github.com/projectdiscovery/utils/errors"
|
||||||
|
"github.com/rs/xid"
|
||||||
)
|
)
|
||||||
|
|
||||||
// unsafeOptions are those nuclei objects/instances/types
|
// unsafeOptions are those nuclei objects/instances/types
|
||||||
@ -21,14 +22,14 @@ import (
|
|||||||
// hence they are ephemeral and are created on every ExecuteNucleiWithOpts invocation
|
// hence they are ephemeral and are created on every ExecuteNucleiWithOpts invocation
|
||||||
// in ThreadSafeNucleiEngine
|
// in ThreadSafeNucleiEngine
|
||||||
type unsafeOptions struct {
|
type unsafeOptions struct {
|
||||||
executerOpts protocols.ExecutorOptions
|
executerOpts *protocols.ExecutorOptions
|
||||||
engine *core.Engine
|
engine *core.Engine
|
||||||
}
|
}
|
||||||
|
|
||||||
// createEphemeralObjects creates ephemeral nuclei objects/instances/types
|
// createEphemeralObjects creates ephemeral nuclei objects/instances/types
|
||||||
func createEphemeralObjects(ctx context.Context, base *NucleiEngine, opts *types.Options) (*unsafeOptions, error) {
|
func createEphemeralObjects(ctx context.Context, base *NucleiEngine, opts *types.Options) (*unsafeOptions, error) {
|
||||||
u := &unsafeOptions{}
|
u := &unsafeOptions{}
|
||||||
u.executerOpts = protocols.ExecutorOptions{
|
u.executerOpts = &protocols.ExecutorOptions{
|
||||||
Output: base.customWriter,
|
Output: base.customWriter,
|
||||||
Options: opts,
|
Options: opts,
|
||||||
Progress: base.customProgress,
|
Progress: base.customProgress,
|
||||||
@ -88,9 +89,11 @@ type ThreadSafeNucleiEngine struct {
|
|||||||
// whose methods are thread-safe and can be used concurrently
|
// whose methods are thread-safe and can be used concurrently
|
||||||
// Note: Non-thread-safe methods start with Global prefix
|
// Note: Non-thread-safe methods start with Global prefix
|
||||||
func NewThreadSafeNucleiEngineCtx(ctx context.Context, opts ...NucleiSDKOptions) (*ThreadSafeNucleiEngine, error) {
|
func NewThreadSafeNucleiEngineCtx(ctx context.Context, opts ...NucleiSDKOptions) (*ThreadSafeNucleiEngine, error) {
|
||||||
|
defaultOptions := types.DefaultOptions()
|
||||||
|
defaultOptions.ExecutionId = xid.New().String()
|
||||||
// default options
|
// default options
|
||||||
e := &NucleiEngine{
|
e := &NucleiEngine{
|
||||||
opts: types.DefaultOptions(),
|
opts: defaultOptions,
|
||||||
mode: threadSafe,
|
mode: threadSafe,
|
||||||
}
|
}
|
||||||
for _, option := range opts {
|
for _, option := range opts {
|
||||||
@ -125,8 +128,8 @@ func (e *ThreadSafeNucleiEngine) GlobalResultCallback(callback func(event *outpu
|
|||||||
// by invoking this method with different options and targets
|
// by invoking this method with different options and targets
|
||||||
// Note: Not all options are thread-safe. this method will throw error if you try to use non-thread-safe options
|
// Note: Not all options are thread-safe. this method will throw error if you try to use non-thread-safe options
|
||||||
func (e *ThreadSafeNucleiEngine) ExecuteNucleiWithOptsCtx(ctx context.Context, targets []string, opts ...NucleiSDKOptions) error {
|
func (e *ThreadSafeNucleiEngine) ExecuteNucleiWithOptsCtx(ctx context.Context, targets []string, opts ...NucleiSDKOptions) error {
|
||||||
baseOpts := *e.eng.opts
|
baseOpts := e.eng.opts.Copy()
|
||||||
tmpEngine := &NucleiEngine{opts: &baseOpts, mode: threadSafe}
|
tmpEngine := &NucleiEngine{opts: baseOpts, mode: threadSafe}
|
||||||
for _, option := range opts {
|
for _, option := range opts {
|
||||||
if err := option(tmpEngine); err != nil {
|
if err := option(tmpEngine); err != nil {
|
||||||
return err
|
return err
|
||||||
@ -142,7 +145,7 @@ func (e *ThreadSafeNucleiEngine) ExecuteNucleiWithOptsCtx(ctx context.Context, t
|
|||||||
defer closeEphemeralObjects(unsafeOpts)
|
defer closeEphemeralObjects(unsafeOpts)
|
||||||
|
|
||||||
// load templates
|
// load templates
|
||||||
workflowLoader, err := workflow.NewLoader(&unsafeOpts.executerOpts)
|
workflowLoader, err := workflow.NewLoader(unsafeOpts.executerOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errorutil.New("Could not create workflow loader: %s\n", err)
|
return errorutil.New("Could not create workflow loader: %s\n", err)
|
||||||
}
|
}
|
||||||
@ -154,7 +157,7 @@ func (e *ThreadSafeNucleiEngine) ExecuteNucleiWithOptsCtx(ctx context.Context, t
|
|||||||
}
|
}
|
||||||
store.Load()
|
store.Load()
|
||||||
|
|
||||||
inputProvider := provider.NewSimpleInputProviderWithUrls(targets...)
|
inputProvider := provider.NewSimpleInputProviderWithUrls(e.eng.opts.ExecutionId, targets...)
|
||||||
|
|
||||||
if len(store.Templates()) == 0 && len(store.Workflows()) == 0 {
|
if len(store.Templates()) == 0 && len(store.Workflows()) == 0 {
|
||||||
return ErrNoTemplatesAvailable
|
return ErrNoTemplatesAvailable
|
||||||
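Because ExecuteNucleiWithOptsCtx now copies the base options and every engine carries its own ExecutionId, one thread-safe engine can serve concurrent scans without per-call options leaking between goroutines. A sketch under those assumptions; the targets and tag filter are placeholders, and WithTemplateFilters is an existing SDK option not shown in this diff:

package main

import (
	"context"
	"sync"

	nuclei "github.com/projectdiscovery/nuclei/v3/lib"
)

func main() {
	ctx := context.Background()
	engine, err := nuclei.NewThreadSafeNucleiEngineCtx(ctx)
	if err != nil {
		panic(err)
	}
	defer engine.Close()

	var wg sync.WaitGroup
	for _, target := range []string{"http://scanme.sh", "http://example.com"} {
		wg.Add(1)
		go func(t string) {
			defer wg.Done()
			// each invocation gets its own copy of the base options
			_ = engine.ExecuteNucleiWithOptsCtx(ctx, []string{t},
				nuclei.WithTemplateFilters(nuclei.TemplateFilters{Tags: []string{"tech"}}))
		}(target)
	}
	wg.Wait()
}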
|
|||||||
72
lib/sdk.go
72
lib/sdk.go
@ -5,7 +5,9 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/projectdiscovery/gologger"
|
||||||
"github.com/projectdiscovery/nuclei/v3/pkg/authprovider"
|
"github.com/projectdiscovery/nuclei/v3/pkg/authprovider"
|
||||||
"github.com/projectdiscovery/nuclei/v3/pkg/catalog"
|
"github.com/projectdiscovery/nuclei/v3/pkg/catalog"
|
||||||
"github.com/projectdiscovery/nuclei/v3/pkg/catalog/loader"
|
"github.com/projectdiscovery/nuclei/v3/pkg/catalog/loader"
|
||||||
@ -27,6 +29,7 @@ import (
|
|||||||
"github.com/projectdiscovery/ratelimit"
|
"github.com/projectdiscovery/ratelimit"
|
||||||
"github.com/projectdiscovery/retryablehttp-go"
|
"github.com/projectdiscovery/retryablehttp-go"
|
||||||
errorutil "github.com/projectdiscovery/utils/errors"
|
errorutil "github.com/projectdiscovery/utils/errors"
|
||||||
|
"github.com/rs/xid"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NucleiSDKOptions contains options for nuclei SDK
|
// NucleiSDKOptions contains options for nuclei SDK
|
||||||
@ -64,6 +67,7 @@ type NucleiEngine struct {
|
|||||||
templatesLoaded bool
|
templatesLoaded bool
|
||||||
|
|
||||||
// unexported core fields
|
// unexported core fields
|
||||||
|
ctx context.Context
|
||||||
interactshClient *interactsh.Client
|
interactshClient *interactsh.Client
|
||||||
catalog catalog.Catalog
|
catalog catalog.Catalog
|
||||||
rateLimiter *ratelimit.Limiter
|
rateLimiter *ratelimit.Limiter
|
||||||
@ -84,12 +88,15 @@ type NucleiEngine struct {
|
|||||||
customWriter output.Writer
|
customWriter output.Writer
|
||||||
customProgress progress.Progress
|
customProgress progress.Progress
|
||||||
rc reporting.Client
|
rc reporting.Client
|
||||||
executerOpts protocols.ExecutorOptions
|
executerOpts *protocols.ExecutorOptions
|
||||||
|
|
||||||
|
// Logger instance for the engine
|
||||||
|
Logger *gologger.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadAllTemplates loads all nuclei template based on given options
|
// LoadAllTemplates loads all nuclei template based on given options
|
||||||
func (e *NucleiEngine) LoadAllTemplates() error {
|
func (e *NucleiEngine) LoadAllTemplates() error {
|
||||||
workflowLoader, err := workflow.NewLoader(&e.executerOpts)
|
workflowLoader, err := workflow.NewLoader(e.executerOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errorutil.New("Could not create workflow loader: %s\n", err)
|
return errorutil.New("Could not create workflow loader: %s\n", err)
|
||||||
}
|
}
|
||||||
@ -124,9 +131,9 @@ func (e *NucleiEngine) GetWorkflows() []*templates.Template {
|
|||||||
func (e *NucleiEngine) LoadTargets(targets []string, probeNonHttp bool) {
|
func (e *NucleiEngine) LoadTargets(targets []string, probeNonHttp bool) {
|
||||||
for _, target := range targets {
|
for _, target := range targets {
|
||||||
if probeNonHttp {
|
if probeNonHttp {
|
||||||
_ = e.inputProvider.SetWithProbe(target, e.httpxClient)
|
_ = e.inputProvider.SetWithProbe(e.opts.ExecutionId, target, e.httpxClient)
|
||||||
} else {
|
} else {
|
||||||
e.inputProvider.Set(target)
|
e.inputProvider.Set(e.opts.ExecutionId, target)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -136,9 +143,9 @@ func (e *NucleiEngine) LoadTargetsFromReader(reader io.Reader, probeNonHttp bool
|
|||||||
buff := bufio.NewScanner(reader)
|
buff := bufio.NewScanner(reader)
|
||||||
for buff.Scan() {
|
for buff.Scan() {
|
||||||
if probeNonHttp {
|
if probeNonHttp {
|
||||||
_ = e.inputProvider.SetWithProbe(buff.Text(), e.httpxClient)
|
_ = e.inputProvider.SetWithProbe(e.opts.ExecutionId, buff.Text(), e.httpxClient)
|
||||||
} else {
|
} else {
|
||||||
e.inputProvider.Set(buff.Text())
|
e.inputProvider.Set(e.opts.ExecutionId, buff.Text())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -161,7 +168,7 @@ func (e *NucleiEngine) LoadTargetsWithHttpData(filePath string, filemode string)
|
|||||||
|
|
||||||
// GetExecuterOptions returns the nuclei executor options
|
// GetExecuterOptions returns the nuclei executor options
|
||||||
func (e *NucleiEngine) GetExecuterOptions() *protocols.ExecutorOptions {
|
func (e *NucleiEngine) GetExecuterOptions() *protocols.ExecutorOptions {
|
||||||
return &e.executerOpts
|
return e.executerOpts
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParseTemplate parses a template from given data
|
// ParseTemplate parses a template from given data
|
||||||
@ -229,7 +236,7 @@ func (e *NucleiEngine) closeInternal() {
|
|||||||
// Close all resources used by nuclei engine
|
// Close all resources used by nuclei engine
|
||||||
func (e *NucleiEngine) Close() {
|
func (e *NucleiEngine) Close() {
|
||||||
e.closeInternal()
|
e.closeInternal()
|
||||||
protocolinit.Close()
|
protocolinit.Close(e.opts.ExecutionId)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExecuteCallbackWithCtx executes templates on targets and calls callback on each result(only if results are found)
|
// ExecuteCallbackWithCtx executes templates on targets and calls callback on each result(only if results are found)
|
||||||
@ -246,9 +253,9 @@ func (e *NucleiEngine) ExecuteCallbackWithCtx(ctx context.Context, callback ...f
|
|||||||
}
|
}
|
||||||
|
|
||||||
filtered := []func(event *output.ResultEvent){}
|
filtered := []func(event *output.ResultEvent){}
|
||||||
for _, callback := range callback {
|
for _, cb := range callback {
|
||||||
if callback != nil {
|
if cb != nil {
|
||||||
filtered = append(filtered, callback)
|
filtered = append(filtered, cb)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
e.resultCallbacks = append(e.resultCallbacks, filtered...)
|
e.resultCallbacks = append(e.resultCallbacks, filtered...)
|
||||||
@ -258,15 +265,32 @@ func (e *NucleiEngine) ExecuteCallbackWithCtx(ctx context.Context, callback ...f
|
|||||||
return ErrNoTemplatesAvailable
|
return ErrNoTemplatesAvailable
|
||||||
}
|
}
|
||||||
|
|
||||||
_ = e.engine.ExecuteScanWithOpts(ctx, templatesAndWorkflows, e.inputProvider, false)
|
var wg sync.WaitGroup
|
||||||
defer e.engine.WorkPool().Wait()
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
_ = e.engine.ExecuteScanWithOpts(ctx, templatesAndWorkflows, e.inputProvider, false)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// wait for context to be cancelled
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
<-wait(&wg) // wait for scan to finish
|
||||||
|
return ctx.Err()
|
||||||
|
case <-wait(&wg):
|
||||||
|
// scan finished
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExecuteWithCallback is same as ExecuteCallbackWithCtx but with default context
|
// ExecuteWithCallback is same as ExecuteCallbackWithCtx but with default context
|
||||||
// Note this is deprecated and will be removed in future major release
|
// Note this is deprecated and will be removed in future major release
|
||||||
func (e *NucleiEngine) ExecuteWithCallback(callback ...func(event *output.ResultEvent)) error {
|
func (e *NucleiEngine) ExecuteWithCallback(callback ...func(event *output.ResultEvent)) error {
|
||||||
return e.ExecuteCallbackWithCtx(context.Background(), callback...)
|
ctx := context.Background()
|
||||||
|
if e.ctx != nil {
|
||||||
|
ctx = e.ctx
|
||||||
|
}
|
||||||
|
return e.ExecuteCallbackWithCtx(ctx, callback...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Options return nuclei Type Options
|
// Options return nuclei Type Options
|
||||||
@ -287,9 +311,12 @@ func (e *NucleiEngine) Store() *loader.Store {
|
|||||||
// NewNucleiEngineCtx creates a new nuclei engine instance with given context
|
// NewNucleiEngineCtx creates a new nuclei engine instance with given context
|
||||||
func NewNucleiEngineCtx(ctx context.Context, options ...NucleiSDKOptions) (*NucleiEngine, error) {
|
func NewNucleiEngineCtx(ctx context.Context, options ...NucleiSDKOptions) (*NucleiEngine, error) {
|
||||||
// default options
|
// default options
|
||||||
|
defaultOptions := types.DefaultOptions()
|
||||||
|
defaultOptions.ExecutionId = xid.New().String()
|
||||||
e := &NucleiEngine{
|
e := &NucleiEngine{
|
||||||
opts: types.DefaultOptions(),
|
opts: defaultOptions,
|
||||||
mode: singleInstance,
|
mode: singleInstance,
|
||||||
|
ctx: ctx,
|
||||||
}
|
}
|
||||||
for _, option := range options {
|
for _, option := range options {
|
||||||
if err := option(e); err != nil {
|
if err := option(e); err != nil {
|
||||||
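NewNucleiEngineCtx (and its thread-safe counterpart) now stamp a unique ExecutionId onto the default options. Callers that build types.Options by hand and bypass these constructors would presumably need to do the same so per-execution protocol state can be initialised and released by id, as the Close changes in this diff do. A sketch using only identifiers that appear here:

package main

import (
	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolinit"
	"github.com/projectdiscovery/nuclei/v3/pkg/types"
	"github.com/rs/xid"
)

func main() {
	opts := types.DefaultOptions()
	opts.ExecutionId = xid.New().String()      // unique per execution, as the SDK constructors above do
	_ = protocolinit.Init(opts)                // per-execution protocol state
	defer protocolinit.Close(opts.ExecutionId) // released by id, mirroring Runner.Close / NucleiEngine.Close
}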
@ -306,3 +333,18 @@ func NewNucleiEngineCtx(ctx context.Context, options ...NucleiSDKOptions) (*Nucl
|
|||||||
func NewNucleiEngine(options ...NucleiSDKOptions) (*NucleiEngine, error) {
|
func NewNucleiEngine(options ...NucleiSDKOptions) (*NucleiEngine, error) {
|
||||||
return NewNucleiEngineCtx(context.Background(), options...)
|
return NewNucleiEngineCtx(context.Background(), options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetParser returns the template parser with cache
|
||||||
|
func (e *NucleiEngine) GetParser() *templates.Parser {
|
||||||
|
return e.parser
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait for a waitgroup to finish
|
||||||
|
func wait(wg *sync.WaitGroup) <-chan struct{} {
|
||||||
|
ch := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
defer close(ch)
|
||||||
|
wg.Wait()
|
||||||
|
}()
|
||||||
|
return ch
|
||||||
|
}
|
||||||
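With the change above, ExecuteCallbackWithCtx runs the scan in a goroutine and returns ctx.Err() as soon as the context is done, instead of blocking on the engine work pool. A sketch of the resulting cancellation behaviour; the target and timeout are placeholders:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	nuclei "github.com/projectdiscovery/nuclei/v3/lib"
	"github.com/projectdiscovery/nuclei/v3/pkg/output"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	engine, err := nuclei.NewNucleiEngineCtx(ctx)
	if err != nil {
		panic(err)
	}
	defer engine.Close()

	engine.LoadTargets([]string{"http://scanme.sh"}, false) // placeholder target
	err = engine.ExecuteCallbackWithCtx(ctx, func(event *output.ResultEvent) {
		fmt.Println(event.TemplateID, event.Matched)
	})
	if errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("scan stopped by the context timeout")
	}
}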
|
@@ -8,6 +8,7 @@ import (
     "time"
 
     "github.com/projectdiscovery/nuclei/v3/pkg/input"
+    "github.com/projectdiscovery/nuclei/v3/pkg/reporting"
 
     "github.com/logrusorgru/aurora"
     "github.com/pkg/errors"
@@ -29,7 +30,6 @@ import (
     "github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolinit"
     "github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
     "github.com/projectdiscovery/nuclei/v3/pkg/protocols/http/httpclientpool"
-    "github.com/projectdiscovery/nuclei/v3/pkg/reporting"
     "github.com/projectdiscovery/nuclei/v3/pkg/templates"
     "github.com/projectdiscovery/nuclei/v3/pkg/testutils"
     "github.com/projectdiscovery/nuclei/v3/pkg/types"
@@ -37,8 +37,6 @@ import (
     "github.com/projectdiscovery/ratelimit"
 )
 
-var sharedInit *sync.Once
-
 // applyRequiredDefaults to options
 func (e *NucleiEngine) applyRequiredDefaults(ctx context.Context) {
     mockoutput := testutils.NewMockOutputWriter(e.opts.OmitTemplate)
@@ -98,27 +96,39 @@ func (e *NucleiEngine) applyRequiredDefaults(ctx context.Context) {
 
 // init
 func (e *NucleiEngine) init(ctx context.Context) error {
+    // Set a default logger if one isn't provided in the options
+    if e.opts.Logger != nil {
+        e.Logger = e.opts.Logger
+    } else {
+        e.opts.Logger = &gologger.Logger{}
+    }
+    e.Logger = e.opts.Logger
+
     if e.opts.Verbose {
-        gologger.DefaultLogger.SetMaxLevel(levels.LevelVerbose)
+        e.Logger.SetMaxLevel(levels.LevelVerbose)
     } else if e.opts.Debug {
-        gologger.DefaultLogger.SetMaxLevel(levels.LevelDebug)
+        e.Logger.SetMaxLevel(levels.LevelDebug)
     } else if e.opts.Silent {
-        gologger.DefaultLogger.SetMaxLevel(levels.LevelSilent)
+        e.Logger.SetMaxLevel(levels.LevelSilent)
     }
 
     if err := runner.ValidateOptions(e.opts); err != nil {
         return err
     }
 
-    e.parser = templates.NewParser()
-    if sharedInit == nil || protocolstate.ShouldInit() {
-        sharedInit = &sync.Once{}
+    if e.opts.Parser != nil {
+        if op, ok := e.opts.Parser.(*templates.Parser); ok {
+            e.parser = op
+        }
     }
 
-    sharedInit.Do(func() {
+    if e.parser == nil {
+        e.parser = templates.NewParser()
+    }
+
+    if protocolstate.ShouldInit(e.opts.ExecutionId) {
         _ = protocolinit.Init(e.opts)
-    })
+    }
 
     if e.opts.ProxyInternal && e.opts.AliveHttpProxy != "" || e.opts.AliveSocksProxy != "" {
         httpclient, err := httpclientpool.Get(e.opts, &httpclientpool.Configuration{})
@@ -160,7 +170,7 @@ func (e *NucleiEngine) init(ctx context.Context) error {
         e.catalog = disk.NewCatalog(config.DefaultConfig.TemplatesDirectory)
     }
 
-    e.executerOpts = protocols.ExecutorOptions{
+    e.executerOpts = &protocols.ExecutorOptions{
         Output:   e.customWriter,
         Options:  e.opts,
         Progress: e.customProgress,
@@ -173,12 +183,13 @@ func (e *NucleiEngine) init(ctx context.Context) error {
         Browser:     e.browserInstance,
         Parser:      e.parser,
         InputHelper: input.NewHelper(),
+        Logger:      e.opts.Logger,
     }
     if e.opts.ShouldUseHostError() && e.hostErrCache != nil {
         e.executerOpts.HostErrorsCache = e.hostErrCache
     }
     if len(e.opts.SecretsFile) > 0 {
-        authTmplStore, err := runner.GetAuthTmplStore(*e.opts, e.catalog, e.executerOpts)
+        authTmplStore, err := runner.GetAuthTmplStore(e.opts, e.catalog, e.executerOpts)
         if err != nil {
             return errors.Wrap(err, "failed to load dynamic auth templates")
         }
@@ -220,6 +231,25 @@ func (e *NucleiEngine) init(ctx context.Context) error {
         }
     }
 
+    // Handle the case where the user passed an existing parser that we can use as a cache
+    if e.opts.Parser != nil {
+        if cachedParser, ok := e.opts.Parser.(*templates.Parser); ok {
+            e.parser = cachedParser
+            e.opts.Parser = cachedParser
+            e.executerOpts.Parser = cachedParser
+            e.executerOpts.Options.Parser = cachedParser
+        }
+    }
+
+    // Create a new parser if necessary
+    if e.parser == nil {
+        op := templates.NewParser()
+        e.parser = op
+        e.opts.Parser = op
+        e.executerOpts.Parser = op
+        e.executerOpts.Options.Parser = op
+    }
+
     e.engine = core.New(e.opts)
     e.engine.SetExecuterOptions(e.executerOpts)
37 lib/sdk_test.go Normal file
@@ -0,0 +1,37 @@
+package nuclei_test
+
+import (
+    "context"
+    "log"
+    "testing"
+    "time"
+
+    nuclei "github.com/projectdiscovery/nuclei/v3/lib"
+    "github.com/stretchr/testify/require"
+)
+
+func TestContextCancelNucleiEngine(t *testing.T) {
+    // create nuclei engine with options
+    ctx, cancel := context.WithCancel(context.Background())
+    ne, err := nuclei.NewNucleiEngineCtx(ctx,
+        nuclei.WithTemplateFilters(nuclei.TemplateFilters{Tags: []string{"oast"}}),
+        nuclei.EnableStatsWithOpts(nuclei.StatsOptions{MetricServerPort: 0}),
+    )
+    require.NoError(t, err, "could not create nuclei engine")
+
+    go func() {
+        time.Sleep(time.Second * 2)
+        cancel()
+        log.Println("Test: context cancelled")
+    }()
+
+    // load targets and optionally probe non http/https targets
+    ne.LoadTargets([]string{"http://honey.scanme.sh"}, false)
+    // when callback is nil it nuclei will print JSON output to stdout
+    err = ne.ExecuteWithCallback(nil)
+    if err != nil {
+        // we expect a context cancellation error
+        require.ErrorIs(t, err, context.Canceled, "was expecting context cancellation error")
+    }
+    defer ne.Close()
+}
@@ -2,6 +2,7 @@ package config
 
 import (
     "os"
+    "runtime/debug"
 
     "github.com/projectdiscovery/gologger"
     "gopkg.in/yaml.v2"
@@ -17,7 +18,7 @@ type IgnoreFile struct {
 func ReadIgnoreFile() IgnoreFile {
     file, err := os.Open(DefaultConfig.GetIgnoreFilePath())
     if err != nil {
-        gologger.Error().Msgf("Could not read nuclei-ignore file: %s\n", err)
+        gologger.Error().Msgf("Could not read nuclei-ignore file: %s\n%s\n", err, string(debug.Stack()))
         return IgnoreFile{}
     }
     defer func() {
@@ -4,13 +4,12 @@ import (
     "bytes"
     "crypto/md5"
     "fmt"
-    "log"
     "os"
     "path/filepath"
     "slices"
     "strings"
+    "sync"
 
-    "github.com/projectdiscovery/goflags"
     "github.com/projectdiscovery/gologger"
     "github.com/projectdiscovery/nuclei/v3/pkg/utils/json"
     "github.com/projectdiscovery/utils/env"
@@ -42,15 +41,18 @@ type Config struct {
     // local cache of nuclei version check endpoint
     // these fields are only update during nuclei version check
     // TODO: move these fields to a separate unexported struct as they are not meant to be used directly
     LatestNucleiVersion          string `json:"nuclei-latest-version"`
     LatestNucleiTemplatesVersion string `json:"nuclei-templates-latest-version"`
     LatestNucleiIgnoreHash       string `json:"nuclei-latest-ignore-hash,omitempty"`
+    Logger                       *gologger.Logger `json:"-"` // logger
 
     // internal / unexported fields
     disableUpdates bool     `json:"-"` // disable updates both version check and template updates
     homeDir        string   `json:"-"` // User Home Directory
     configDir      string   `json:"-"` // Nuclei Global Config Directory
     debugArgs      []string `json:"-"` // debug args
+
+    m sync.Mutex
 }
 
 // IsCustomTemplate determines whether a given template is custom-built or part of the official Nuclei templates.
@@ -105,21 +107,29 @@ func (c *Config) GetTemplateDir() string {
 
 // DisableUpdateCheck disables update check and template updates
 func (c *Config) DisableUpdateCheck() {
+    c.m.Lock()
+    defer c.m.Unlock()
     c.disableUpdates = true
 }
 
 // CanCheckForUpdates returns true if update check is enabled
 func (c *Config) CanCheckForUpdates() bool {
+    c.m.Lock()
+    defer c.m.Unlock()
     return !c.disableUpdates
 }
 
 // NeedsTemplateUpdate returns true if template installation/update is required
 func (c *Config) NeedsTemplateUpdate() bool {
+    c.m.Lock()
+    defer c.m.Unlock()
     return !c.disableUpdates && (c.TemplateVersion == "" || IsOutdatedVersion(c.TemplateVersion, c.LatestNucleiTemplatesVersion) || !fileutil.FolderExists(c.TemplatesDirectory))
 }
 
 // NeedsIgnoreFileUpdate returns true if Ignore file hash is different (aka ignore file is outdated)
 func (c *Config) NeedsIgnoreFileUpdate() bool {
+    c.m.Lock()
+    defer c.m.Unlock()
     return c.NucleiIgnoreHash == "" || c.NucleiIgnoreHash != c.LatestNucleiIgnoreHash
 }
 
@@ -211,7 +221,7 @@ func (c *Config) GetCacheDir() string {
 func (c *Config) SetConfigDir(dir string) {
     c.configDir = dir
     if err := c.createConfigDirIfNotExists(); err != nil {
-        gologger.Fatal().Msgf("Could not create nuclei config directory at %s: %s", c.configDir, err)
+        c.Logger.Fatal().Msgf("Could not create nuclei config directory at %s: %s", c.configDir, err)
     }
 
     // if folder already exists read config or create new
@@ -219,7 +229,7 @@ func (c *Config) SetConfigDir(dir string) {
         // create new config
         applyDefaultConfig()
         if err2 := c.WriteTemplatesConfig(); err2 != nil {
-            gologger.Fatal().Msgf("Could not create nuclei config file at %s: %s", c.getTemplatesConfigFilePath(), err2)
+            c.Logger.Fatal().Msgf("Could not create nuclei config file at %s: %s", c.getTemplatesConfigFilePath(), err2)
         }
     }
 
@@ -319,14 +329,14 @@ func (c *Config) createConfigDirIfNotExists() error {
 // to the current config directory
 func (c *Config) copyIgnoreFile() {
     if err := c.createConfigDirIfNotExists(); err != nil {
-        gologger.Error().Msgf("Could not create nuclei config directory at %s: %s", c.configDir, err)
+        c.Logger.Error().Msgf("Could not create nuclei config directory at %s: %s", c.configDir, err)
         return
     }
     ignoreFilePath := c.GetIgnoreFilePath()
     if !fileutil.FileExists(ignoreFilePath) {
         // copy ignore file from default config directory
         if err := fileutil.CopyFile(filepath.Join(folderutil.AppConfigDirOrDefault(FallbackConfigFolderName, BinaryName), NucleiIgnoreFileName), ignoreFilePath); err != nil {
-            gologger.Error().Msgf("Could not copy nuclei ignore file at %s: %s", ignoreFilePath, err)
+            c.Logger.Error().Msgf("Could not copy nuclei ignore file at %s: %s", ignoreFilePath, err)
         }
     }
 }
@@ -367,9 +377,6 @@ func (c *Config) parseDebugArgs(data string) {
 }
 
 func init() {
-    // first attempt to migrate all files from old config directory to new config directory
-    goflags.AttemptConfigMigration() // regardless how many times this is called it will only migrate once based on condition
-
     ConfigDir := folderutil.AppConfigDirOrDefault(FallbackConfigFolderName, BinaryName)
 
     if cfgDir := os.Getenv(NucleiConfigDirEnv); cfgDir != "" {
@@ -385,6 +392,7 @@ func init() {
     DefaultConfig = &Config{
         homeDir:   folderutil.HomeDirOrDefault(""),
         configDir: ConfigDir,
+        Logger:    gologger.DefaultLogger,
     }
 
     // when enabled will log events in more verbosity than -v or -debug
@@ -406,9 +414,7 @@ func init() {
             gologger.Error().Msgf("failed to write config file at %s got: %s", DefaultConfig.getTemplatesConfigFilePath(), err)
         }
     }
-    // attempt to migrate resume files
-    // this also happens once regardless of how many times this is called
-    migrateResumeFiles()
     // Loads/updates paths of custom templates
     // Note: custom templates paths should not be updated in config file
     // and even if it is changed we don't follow it since it is not expected behavior
@@ -423,61 +429,3 @@ func applyDefaultConfig() {
     // updates all necessary paths
     DefaultConfig.SetTemplatesDir(DefaultConfig.TemplatesDirectory)
 }
-
-func migrateResumeFiles() {
-    // attempt to migrate old resume files to new directory structure
-    // after migration has been done in goflags
-    oldResumeDir := DefaultConfig.GetConfigDir()
-    // migrate old resume file to new directory structure
-    if !fileutil.FileOrFolderExists(DefaultConfig.GetCacheDir()) && fileutil.FileOrFolderExists(oldResumeDir) {
-        // this means new cache dir doesn't exist, so we need to migrate
-        // first check if old resume file exists if not then no need to migrate
-        exists := false
-        files, err := os.ReadDir(oldResumeDir)
-        if err != nil {
-            // log silently
-            log.Printf("could not read old resume dir: %s\n", err)
-            return
-        }
-        for _, file := range files {
-            if strings.HasSuffix(file.Name(), ".cfg") {
-                exists = true
-                break
-            }
-        }
-        if !exists {
-            // no need to migrate
-            return
-        }
-
-        // create new cache dir
-        err = os.MkdirAll(DefaultConfig.GetCacheDir(), os.ModePerm)
-        if err != nil {
-            // log silently
-            log.Printf("could not create new cache dir: %s\n", err)
-            return
-        }
-        err = filepath.WalkDir(oldResumeDir, func(path string, d os.DirEntry, err error) error {
-            if err != nil {
-                return err
-            }
-            if d.IsDir() {
-                return nil
-            }
-            if !strings.HasSuffix(path, ".cfg") {
-                return nil
-            }
-            err = os.Rename(path, filepath.Join(DefaultConfig.GetCacheDir(), filepath.Base(path)))
-            if err != nil {
-                return err
-            }
-            return nil
-        })
-        if err != nil {
-            // log silently
-            log.Printf("could not migrate old resume files: %s\n", err)
-            return
-        }
-    }
-}
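The hunks above wrap the update-check flag in a mutex so that concurrent engine instances can toggle and query it safely. A standalone sketch of that locking pattern, using stand-in names rather than the real Config type:

    package main

    import (
        "fmt"
        "sync"
    )

    // Config mirrors the guarded-flag pattern added above: a mutex protects the
    // unexported field so concurrent callers never race on it.
    type Config struct {
        m              sync.Mutex
        disableUpdates bool
    }

    func (c *Config) DisableUpdateCheck() {
        c.m.Lock()
        defer c.m.Unlock()
        c.disableUpdates = true
    }

    func (c *Config) CanCheckForUpdates() bool {
        c.m.Lock()
        defer c.m.Unlock()
        return !c.disableUpdates
    }

    func main() {
        c := &Config{}
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                c.DisableUpdateCheck()
            }()
        }
        wg.Wait()
        fmt.Println("updates enabled:", c.CanCheckForUpdates()) // false
    }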
@@ -7,7 +7,6 @@ import (
     "path/filepath"
     "strings"
 
-    "github.com/projectdiscovery/gologger"
     "github.com/projectdiscovery/nuclei/v3/pkg/templates/extensions"
     fileutil "github.com/projectdiscovery/utils/file"
     stringsutil "github.com/projectdiscovery/utils/strings"
@@ -98,7 +97,7 @@ func GetNucleiTemplatesIndex() (map[string]string, error) {
             return index, nil
         }
         }
-        gologger.Error().Msgf("failed to read index file creating new one: %v", err)
+        DefaultConfig.Logger.Error().Msgf("failed to read index file creating new one: %v", err)
     }
 
     ignoreDirs := DefaultConfig.GetAllCustomTemplateDirs()
@@ -109,7 +108,7 @@ func GetNucleiTemplatesIndex() (map[string]string, error) {
     }
     err := filepath.WalkDir(DefaultConfig.TemplatesDirectory, func(path string, d os.DirEntry, err error) error {
         if err != nil {
-            gologger.Verbose().Msgf("failed to walk path=%v err=%v", path, err)
+            DefaultConfig.Logger.Verbose().Msgf("failed to walk path=%v err=%v", path, err)
             return nil
         }
         if d.IsDir() || !IsTemplate(path) || stringsutil.ContainsAny(path, ignoreDirs...) {
@@ -118,7 +117,7 @@ func GetNucleiTemplatesIndex() (map[string]string, error) {
         // get template id from file
         id, err := getTemplateID(path)
         if err != nil || id == "" {
-            gologger.Verbose().Msgf("failed to get template id from file=%v got id=%v err=%v", path, id, err)
+            DefaultConfig.Logger.Verbose().Msgf("failed to get template id from file=%v got id=%v err=%v", path, id, err)
            return nil
        }
        index[id] = path
@@ -8,7 +8,6 @@ import (
 
     "github.com/logrusorgru/aurora"
     "github.com/pkg/errors"
-    "github.com/projectdiscovery/gologger"
     "github.com/projectdiscovery/nuclei/v3/pkg/catalog/config"
     stringsutil "github.com/projectdiscovery/utils/strings"
     updateutils "github.com/projectdiscovery/utils/update"
@@ -84,7 +83,7 @@ func (c *DiskCatalog) GetTemplatePath(target string) ([]string, error) {
         absPath = BackwardsCompatiblePaths(c.templatesDirectory, target)
         if absPath != target && strings.TrimPrefix(absPath, c.templatesDirectory+string(filepath.Separator)) != target {
             if config.DefaultConfig.LogAllEvents {
-                gologger.DefaultLogger.Print().Msgf("[%v] requested Template path %s is deprecated, please update to %s\n", aurora.Yellow("WRN").String(), target, absPath)
+                config.DefaultConfig.Logger.Print().Msgf("[%v] requested Template path %s is deprecated, please update to %s\n", aurora.Yellow("WRN").String(), target, absPath)
             }
             deprecatedPathsCounter++
         }
@@ -302,6 +301,6 @@ func PrintDeprecatedPathsMsgIfApplicable(isSilent bool) {
         return
     }
     if deprecatedPathsCounter > 0 && !isSilent {
-        gologger.Print().Msgf("[%v] Found %v template[s] loaded with deprecated paths, update before v3 for continued support.\n", aurora.Yellow("WRN").String(), deprecatedPathsCounter)
+        config.DefaultConfig.Logger.Print().Msgf("[%v] Found %v template[s] loaded with deprecated paths, update before v3 for continued support.\n", aurora.Yellow("WRN").String(), deprecatedPathsCounter)
     }
 }
@@ -10,7 +10,6 @@ import (
     "strings"
 
     "github.com/alecthomas/chroma/quick"
-    "github.com/projectdiscovery/gologger"
     "github.com/projectdiscovery/nuclei/v3/pkg/catalog/config"
     "github.com/projectdiscovery/nuclei/v3/pkg/types"
     "github.com/projectdiscovery/retryablehttp-go"
@@ -57,8 +56,8 @@ func getAIGeneratedTemplates(prompt string, options *types.Options) ([]string, e
         return nil, errorutil.New("Failed to generate template: %v", err)
     }
 
-    gologger.Info().Msgf("Generated template available at: https://cloud.projectdiscovery.io/templates/%s", templateID)
-    gologger.Info().Msgf("Generated template path: %s", templateFile)
+    options.Logger.Info().Msgf("Generated template available at: https://cloud.projectdiscovery.io/templates/%s", templateID)
+    options.Logger.Info().Msgf("Generated template path: %s", templateFile)
 
     // Check if we should display the template
     // This happens when:
@@ -76,7 +75,7 @@ func getAIGeneratedTemplates(prompt string, options *types.Options) ([]string, e
             template = buf.String()
         }
     }
-    gologger.Silent().Msgf("\n%s", template)
+    options.Logger.Debug().Msgf("\n%s", template)
     // FIXME:
     // we should not be exiting the program here
     // but we need to find a better way to handle this
@@ -8,8 +8,6 @@ import (
     "sort"
     "strings"
 
-    syncutil "github.com/projectdiscovery/utils/sync"
-
     "github.com/logrusorgru/aurora"
     "github.com/pkg/errors"
     "github.com/projectdiscovery/gologger"
@@ -19,6 +17,7 @@ import (
     "github.com/projectdiscovery/nuclei/v3/pkg/keys"
     "github.com/projectdiscovery/nuclei/v3/pkg/model/types/severity"
     "github.com/projectdiscovery/nuclei/v3/pkg/protocols"
+    "github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
     "github.com/projectdiscovery/nuclei/v3/pkg/templates"
     templateTypes "github.com/projectdiscovery/nuclei/v3/pkg/templates/types"
     "github.com/projectdiscovery/nuclei/v3/pkg/types"
@@ -28,7 +27,9 @@ import (
     errorutil "github.com/projectdiscovery/utils/errors"
     sliceutil "github.com/projectdiscovery/utils/slice"
     stringsutil "github.com/projectdiscovery/utils/strings"
+    syncutil "github.com/projectdiscovery/utils/sync"
     urlutil "github.com/projectdiscovery/utils/url"
+    "github.com/rs/xid"
 )
 
 const (
@@ -66,7 +67,8 @@ type Config struct {
     IncludeConditions []string
 
     Catalog         catalog.Catalog
-    ExecutorOptions protocols.ExecutorOptions
+    ExecutorOptions *protocols.ExecutorOptions
+    Logger          *gologger.Logger
 }
 
 // Store is a storage for loaded nuclei templates
@@ -83,13 +85,15 @@ type Store struct {
 
     preprocessor templates.Preprocessor
 
+    logger *gologger.Logger
+
     // NotFoundCallback is called for each not found template
     // This overrides error handling for not found templates
     NotFoundCallback func(template string) bool
 }
 
 // NewConfig returns a new loader config
-func NewConfig(options *types.Options, catalog catalog.Catalog, executerOpts protocols.ExecutorOptions) *Config {
+func NewConfig(options *types.Options, catalog catalog.Catalog, executerOpts *protocols.ExecutorOptions) *Config {
     loaderConfig := Config{
         Templates: options.Templates,
         Workflows: options.Workflows,
@@ -112,6 +116,7 @@ func NewConfig(options *types.Options, catalog catalog.Catalog, executerOpts pro
         Catalog:          catalog,
         ExecutorOptions:  executerOpts,
         AITemplatePrompt: options.AITemplatePrompt,
+        Logger:           options.Logger,
     }
     loaderConfig.RemoteTemplateDomainList = append(loaderConfig.RemoteTemplateDomainList, TrustedTemplateDomains...)
     return &loaderConfig
@@ -146,6 +151,7 @@ func New(cfg *Config) (*Store, error) {
         }, cfg.Catalog),
         finalTemplates: cfg.Templates,
         finalWorkflows: cfg.Workflows,
+        logger:         cfg.Logger,
     }
 
     // Do a check to see if we have URLs in templates flag, if so
@@ -296,11 +302,11 @@ func (store *Store) LoadTemplatesOnlyMetadata() error {
             if strings.Contains(err.Error(), templates.ErrExcluded.Error()) {
                 stats.Increment(templates.TemplatesExcludedStats)
                 if config.DefaultConfig.LogAllEvents {
-                    gologger.Print().Msgf("[%v] %v\n", aurora.Yellow("WRN").String(), err.Error())
+                    store.logger.Print().Msgf("[%v] %v\n", aurora.Yellow("WRN").String(), err.Error())
                 }
                 continue
             }
-            gologger.Warning().Msg(err.Error())
+            store.logger.Warning().Msg(err.Error())
         }
     }
     parserItem, ok := store.config.ExecutorOptions.Parser.(*templates.Parser)
@@ -359,15 +365,13 @@ func (store *Store) ValidateTemplates() error {
 
 func (store *Store) areWorkflowsValid(filteredWorkflowPaths map[string]struct{}) bool {
     return store.areWorkflowOrTemplatesValid(filteredWorkflowPaths, true, func(templatePath string, tagFilter *templates.TagFilter) (bool, error) {
-        return false, nil
-        // return store.config.ExecutorOptions.Parser.LoadWorkflow(templatePath, store.config.Catalog)
+        return store.config.ExecutorOptions.Parser.LoadWorkflow(templatePath, store.config.Catalog)
     })
 }
 
 func (store *Store) areTemplatesValid(filteredTemplatePaths map[string]struct{}) bool {
     return store.areWorkflowOrTemplatesValid(filteredTemplatePaths, false, func(templatePath string, tagFilter *templates.TagFilter) (bool, error) {
-        return false, nil
-        // return store.config.ExecutorOptions.Parser.LoadTemplate(templatePath, store.tagFilter, nil, store.config.Catalog)
+        return store.config.ExecutorOptions.Parser.LoadTemplate(templatePath, store.tagFilter, nil, store.config.Catalog)
     })
 }
 
@@ -376,7 +380,7 @@ func (store *Store) areWorkflowOrTemplatesValid(filteredTemplatePaths map[string
 
     for templatePath := range filteredTemplatePaths {
         if _, err := load(templatePath, store.tagFilter); err != nil {
-            if isParsingError("Error occurred loading template %s: %s\n", templatePath, err) {
+            if isParsingError(store, "Error occurred loading template %s: %s\n", templatePath, err) {
                 areTemplatesValid = false
                 continue
             }
@@ -384,7 +388,7 @@ func (store *Store) areWorkflowOrTemplatesValid(filteredTemplatePaths map[string
 
         template, err := templates.Parse(templatePath, store.preprocessor, store.config.ExecutorOptions)
         if err != nil {
-            if isParsingError("Error occurred parsing template %s: %s\n", templatePath, err) {
+            if isParsingError(store, "Error occurred parsing template %s: %s\n", templatePath, err) {
                 areTemplatesValid = false
                 continue
             }
@@ -409,7 +413,7 @@ func (store *Store) areWorkflowOrTemplatesValid(filteredTemplatePaths map[string
                 // TODO: until https://github.com/projectdiscovery/nuclei-templates/issues/11324 is deployed
                 // disable strict validation to allow GH actions to run
                 // areTemplatesValid = false
-                gologger.Warning().Msgf("Found duplicate template ID during validation '%s' => '%s': %s\n", templatePath, existingTemplatePath, template.ID)
+                store.logger.Warning().Msgf("Found duplicate template ID during validation '%s' => '%s': %s\n", templatePath, existingTemplatePath, template.ID)
             }
             if !isWorkflow && len(template.Workflows) > 0 {
                 continue
@@ -432,7 +436,7 @@ func areWorkflowTemplatesValid(store *Store, workflows []*workflows.WorkflowTemp
         }
         _, err := store.config.Catalog.GetTemplatePath(workflow.Template)
         if err != nil {
-            if isParsingError("Error occurred loading template %s: %s\n", workflow.Template, err) {
+            if isParsingError(store, "Error occurred loading template %s: %s\n", workflow.Template, err) {
                 return false
             }
         }
@@ -440,14 +444,14 @@ func areWorkflowTemplatesValid(store *Store, workflows []*workflows.WorkflowTemp
     return true
 }
 
-func isParsingError(message string, template string, err error) bool {
+func isParsingError(store *Store, message string, template string, err error) bool {
     if errors.Is(err, templates.ErrExcluded) {
         return false
     }
     if errors.Is(err, templates.ErrCreateTemplateExecutor) {
         return false
     }
-    gologger.Error().Msgf(message, template, err)
+    store.logger.Error().Msgf(message, template, err)
     return true
 }
 
@@ -466,12 +470,12 @@ func (store *Store) LoadWorkflows(workflowsList []string) []*templates.Template
     for workflowPath := range workflowPathMap {
         loaded, err := store.config.ExecutorOptions.Parser.LoadWorkflow(workflowPath, store.config.Catalog)
         if err != nil {
-            gologger.Warning().Msgf("Could not load workflow %s: %s\n", workflowPath, err)
+            store.logger.Warning().Msgf("Could not load workflow %s: %s\n", workflowPath, err)
         }
         if loaded {
             parsed, err := templates.Parse(workflowPath, store.preprocessor, store.config.ExecutorOptions)
             if err != nil {
-                gologger.Warning().Msgf("Could not parse workflow %s: %s\n", workflowPath, err)
+                store.logger.Warning().Msgf("Could not parse workflow %s: %s\n", workflowPath, err)
             } else if parsed != nil {
                 loadedWorkflows = append(loadedWorkflows, parsed)
             }
@@ -503,12 +507,19 @@ func (store *Store) LoadTemplatesWithTags(templatesList, tags []string) []*templ
         }
     }
 
-    // Use adaptive wait-group to cap concurrent loaders and auto-scale with demand.
-    maxConcurrency := store.config.ExecutorOptions.Options.TemplateThreads
-    if maxConcurrency <= 0 {
-        maxConcurrency = 25
+    wgLoadTemplates, errWg := syncutil.New(syncutil.WithSize(50))
+    if errWg != nil {
+        panic("could not create wait group")
+    }
+
+    if store.config.ExecutorOptions.Options.ExecutionId == "" {
+        store.config.ExecutorOptions.Options.ExecutionId = xid.New().String()
+    }
+
+    dialers := protocolstate.GetDialersWithId(store.config.ExecutorOptions.Options.ExecutionId)
+    if dialers == nil {
+        panic("dialers with executionId " + store.config.ExecutorOptions.Options.ExecutionId + " not found")
     }
-    wgLoadTemplates, _ := syncutil.New(syncutil.WithSize(maxConcurrency))
 
     for templatePath := range templatePathMap {
         wgLoadTemplates.Add()
@@ -523,7 +534,7 @@ func (store *Store) LoadTemplatesWithTags(templatesList, tags []string) []*templ
                 if !errors.Is(err, templates.ErrIncompatibleWithOfflineMatching) {
                     stats.Increment(templates.RuntimeWarningsStats)
                 }
-                gologger.Warning().Msgf("Could not parse template %s: %s\n", templatePath, err)
+                store.logger.Warning().Msgf("Could not parse template %s: %s\n", templatePath, err)
             } else if parsed != nil {
                 if !parsed.Verified && store.config.ExecutorOptions.Options.DisableUnsignedTemplates {
                     // skip unverified templates when prompted to
@@ -558,13 +569,13 @@ func (store *Store) LoadTemplatesWithTags(templatesList, tags []string) []*templ
                     // donot include headless template in final list if headless flag is not set
                     stats.Increment(templates.ExcludedHeadlessTmplStats)
                     if config.DefaultConfig.LogAllEvents {
-                        gologger.Print().Msgf("[%v] Headless flag is required for headless template '%s'.\n", aurora.Yellow("WRN").String(), templatePath)
+                        store.logger.Print().Msgf("[%v] Headless flag is required for headless template '%s'.\n", aurora.Yellow("WRN").String(), templatePath)
                     }
                 } else if len(parsed.RequestsCode) > 0 && !store.config.ExecutorOptions.Options.EnableCodeTemplates {
                     // donot include 'Code' protocol custom template in final list if code flag is not set
                     stats.Increment(templates.ExcludedCodeTmplStats)
                     if config.DefaultConfig.LogAllEvents {
-                        gologger.Print().Msgf("[%v] Code flag is required for code protocol template '%s'.\n", aurora.Yellow("WRN").String(), templatePath)
+                        store.logger.Print().Msgf("[%v] Code flag is required for code protocol template '%s'.\n", aurora.Yellow("WRN").String(), templatePath)
                     }
                 } else if len(parsed.RequestsCode) > 0 && !parsed.Verified && len(parsed.Workflows) == 0 {
                     // donot include unverified 'Code' protocol custom template in final list
@@ -572,12 +583,12 @@ func (store *Store) LoadTemplatesWithTags(templatesList, tags []string) []*templ
                     // these will be skipped so increment skip counter
                     stats.Increment(templates.SkippedUnsignedStats)
                     if config.DefaultConfig.LogAllEvents {
-                        gologger.Print().Msgf("[%v] Tampered/Unsigned template at %v.\n", aurora.Yellow("WRN").String(), templatePath)
+                        store.logger.Print().Msgf("[%v] Tampered/Unsigned template at %v.\n", aurora.Yellow("WRN").String(), templatePath)
                     }
                 } else if parsed.IsFuzzing() && !store.config.ExecutorOptions.Options.DAST {
                     stats.Increment(templates.ExludedDastTmplStats)
                     if config.DefaultConfig.LogAllEvents {
-                        gologger.Print().Msgf("[%v] -dast flag is required for DAST template '%s'.\n", aurora.Yellow("WRN").String(), templatePath)
+                        store.logger.Print().Msgf("[%v] -dast flag is required for DAST template '%s'.\n", aurora.Yellow("WRN").String(), templatePath)
                     }
                 } else {
                     loadTemplate(parsed)
@@ -588,11 +599,11 @@ func (store *Store) LoadTemplatesWithTags(templatesList, tags []string) []*templ
                 if strings.Contains(err.Error(), templates.ErrExcluded.Error()) {
                     stats.Increment(templates.TemplatesExcludedStats)
                     if config.DefaultConfig.LogAllEvents {
-                        gologger.Print().Msgf("[%v] %v\n", aurora.Yellow("WRN").String(), err.Error())
+                        store.logger.Print().Msgf("[%v] %v\n", aurora.Yellow("WRN").String(), err.Error())
                     }
                     return
                 }
-                gologger.Warning().Msg(err.Error())
+                store.logger.Warning().Msg(err.Error())
             }
         }(templatePath)
     }
@@ -648,7 +659,7 @@ func workflowContainsProtocol(workflow []*workflows.WorkflowTemplate) bool {
 func (s *Store) logErroredTemplates(erred map[string]error) {
     for template, err := range erred {
         if s.NotFoundCallback == nil || !s.NotFoundCallback(template) {
-            gologger.Error().Msgf("Could not find template '%s': %s", template, err)
+            s.logger.Error().Msgf("Could not find template '%s': %s", template, err)
         }
     }
 }
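The loader hunk above derives an execution id with xid and resolves per-execution dialers through protocolstate instead of relying on package-level globals. The sketch below imitates that lookup with a stand-in registry; dialerRegistry, Dialers and getDialersWithID are hypothetical helpers, only xid.New().String() is the same call the loader uses:

    package main

    import (
        "fmt"
        "sync"

        "github.com/rs/xid"
    )

    // Dialers stands in for the per-execution network state that nuclei keys
    // by execution id.
    type Dialers struct{}

    // dialerRegistry maps executionID -> *Dialers, mimicking the per-execution
    // lookup the loader performs before parsing templates.
    var dialerRegistry sync.Map

    func getDialersWithID(id string) *Dialers {
        if v, ok := dialerRegistry.Load(id); ok {
            return v.(*Dialers)
        }
        return nil
    }

    func main() {
        executionID := xid.New().String() // same id generation as in the diff
        dialerRegistry.Store(executionID, &Dialers{})

        if d := getDialersWithID(executionID); d == nil {
            panic("dialers with executionId " + executionID + " not found")
        }
        fmt.Println("dialers resolved for execution", executionID)
    }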
@@ -5,13 +5,16 @@ import (
     "fmt"
     "net/url"
     "strings"
+    "sync"
 
     "github.com/pkg/errors"
 
     "github.com/projectdiscovery/nuclei/v3/pkg/templates/extensions"
     "github.com/projectdiscovery/nuclei/v3/pkg/utils"
     "github.com/projectdiscovery/retryablehttp-go"
+    sliceutil "github.com/projectdiscovery/utils/slice"
     stringsutil "github.com/projectdiscovery/utils/strings"
+    syncutil "github.com/projectdiscovery/utils/sync"
 )
 
 type ContentType string
@@ -28,67 +31,73 @@ type RemoteContent struct {
 }
 
 func getRemoteTemplatesAndWorkflows(templateURLs, workflowURLs, remoteTemplateDomainList []string) ([]string, []string, error) {
-    remoteContentChannel := make(chan RemoteContent)
+    var (
+        err   error
+        muErr sync.Mutex
+    )
+    remoteTemplateList := sliceutil.NewSyncSlice[string]()
+    remoteWorkFlowList := sliceutil.NewSyncSlice[string]()
 
-    for _, templateURL := range templateURLs {
-        go getRemoteContent(templateURL, remoteTemplateDomainList, remoteContentChannel, Template)
-    }
-    for _, workflowURL := range workflowURLs {
-        go getRemoteContent(workflowURL, remoteTemplateDomainList, remoteContentChannel, Workflow)
+    awg, errAwg := syncutil.New(syncutil.WithSize(50))
+    if errAwg != nil {
+        return nil, nil, errAwg
     }
 
-    var remoteTemplateList []string
-    var remoteWorkFlowList []string
-    var err error
-    for i := 0; i < (len(templateURLs) + len(workflowURLs)); i++ {
-        remoteContent := <-remoteContentChannel
+    loadItem := func(URL string, contentType ContentType) {
+        defer awg.Done()
+
+        remoteContent := getRemoteContent(URL, remoteTemplateDomainList, contentType)
         if remoteContent.Error != nil {
+            muErr.Lock()
             if err != nil {
                 err = errors.New(remoteContent.Error.Error() + ": " + err.Error())
             } else {
                 err = remoteContent.Error
             }
+            muErr.Unlock()
         } else {
             switch remoteContent.Type {
             case Template:
-                remoteTemplateList = append(remoteTemplateList, remoteContent.Content...)
+                remoteTemplateList.Append(remoteContent.Content...)
             case Workflow:
-                remoteWorkFlowList = append(remoteWorkFlowList, remoteContent.Content...)
+                remoteWorkFlowList.Append(remoteContent.Content...)
             }
         }
     }
-    return remoteTemplateList, remoteWorkFlowList, err
+
+    for _, templateURL := range templateURLs {
+        awg.Add()
+        go loadItem(templateURL, Template)
+    }
+    for _, workflowURL := range workflowURLs {
+        awg.Add()
+        go loadItem(workflowURL, Workflow)
+    }
+
+    awg.Wait()
+
+    return remoteTemplateList.Slice, remoteWorkFlowList.Slice, err
 }
 
-func getRemoteContent(URL string, remoteTemplateDomainList []string, remoteContentChannel chan<- RemoteContent, contentType ContentType) {
+func getRemoteContent(URL string, remoteTemplateDomainList []string, contentType ContentType) RemoteContent {
     if err := validateRemoteTemplateURL(URL, remoteTemplateDomainList); err != nil {
-        remoteContentChannel <- RemoteContent{
-            Error: err,
-        }
-        return
+        return RemoteContent{Error: err}
     }
     if strings.HasPrefix(URL, "http") && stringsutil.HasSuffixAny(URL, extensions.YAML) {
-        remoteContentChannel <- RemoteContent{
+        return RemoteContent{
             Content: []string{URL},
             Type:    contentType,
         }
-        return
     }
     response, err := retryablehttp.DefaultClient().Get(URL)
     if err != nil {
-        remoteContentChannel <- RemoteContent{
-            Error: err,
-        }
-        return
+        return RemoteContent{Error: err}
     }
     defer func() {
         _ = response.Body.Close()
     }()
     if response.StatusCode < 200 || response.StatusCode > 299 {
-        remoteContentChannel <- RemoteContent{
-            Error: fmt.Errorf("get \"%s\": unexpect status %d", URL, response.StatusCode),
-        }
-        return
+        return RemoteContent{Error: fmt.Errorf("get \"%s\": unexpect status %d", URL, response.StatusCode)}
     }
 
     scanner := bufio.NewScanner(response.Body)
@@ -100,23 +109,17 @@ func getRemoteContent(URL string, remoteTemplateDomainList []string, remoteConte
         }
         if utils.IsURL(text) {
             if err := validateRemoteTemplateURL(text, remoteTemplateDomainList); err != nil {
-                remoteContentChannel <- RemoteContent{
-                    Error: err,
-                }
-                return
+                return RemoteContent{Error: err}
             }
         }
         templateList = append(templateList, text)
     }
 
     if err := scanner.Err(); err != nil {
-        remoteContentChannel <- RemoteContent{
-            Error: errors.Wrap(err, "get \"%s\""),
-        }
-        return
+        return RemoteContent{Error: errors.Wrap(err, "get \"%s\"")}
     }
 
-    remoteContentChannel <- RemoteContent{
+    return RemoteContent{
         Content: templateList,
         Type:    contentType,
     }
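The rewrite above replaces the unbuffered result channel with projectdiscovery's adaptive wait group and a concurrency-safe slice. A compact sketch of that fan-out pattern, reusing the same utils calls that appear in the diff; the URLs are placeholders and no real fetching happens:

    package main

    import (
        "fmt"

        sliceutil "github.com/projectdiscovery/utils/slice"
        syncutil "github.com/projectdiscovery/utils/sync"
    )

    func main() {
        // Bounded fan-out: the adaptive wait group caps in-flight goroutines at
        // 50, and the sync slice collects results without a channel.
        results := sliceutil.NewSyncSlice[string]()

        awg, err := syncutil.New(syncutil.WithSize(50))
        if err != nil {
            panic(err)
        }

        urls := []string{"https://example.com/a.yaml", "https://example.com/b.yaml"}
        for _, u := range urls {
            awg.Add()
            go func(u string) {
                defer awg.Done()
                results.Append(u) // a real loader would fetch and validate here
            }(u)
        }
        awg.Wait()

        fmt.Println(len(results.Slice), "items collected")
    }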
@@ -1,6 +1,7 @@
 package core
 
 import (
+    "github.com/projectdiscovery/gologger"
     "github.com/projectdiscovery/nuclei/v3/pkg/output"
     "github.com/projectdiscovery/nuclei/v3/pkg/protocols"
     "github.com/projectdiscovery/nuclei/v3/pkg/types"
@@ -17,14 +18,16 @@ import (
 type Engine struct {
     workPool     *WorkPool
     options      *types.Options
-    executerOpts protocols.ExecutorOptions
+    executerOpts *protocols.ExecutorOptions
     Callback     func(*output.ResultEvent) // Executed on results
+    Logger       *gologger.Logger
 }
 
 // New returns a new Engine instance
 func New(options *types.Options) *Engine {
     engine := &Engine{
         options: options,
+        Logger:  options.Logger,
     }
     engine.workPool = engine.GetWorkPool()
     return engine
@@ -47,12 +50,12 @@ func (e *Engine) GetWorkPool() *WorkPool {
 
 // SetExecuterOptions sets the executer options for the engine. This is required
 // before using the engine to perform any execution.
-func (e *Engine) SetExecuterOptions(options protocols.ExecutorOptions) {
+func (e *Engine) SetExecuterOptions(options *protocols.ExecutorOptions) {
     e.executerOpts = options
 }
 
 // ExecuterOptions returns protocols.ExecutorOptions for nuclei engine.
-func (e *Engine) ExecuterOptions() protocols.ExecutorOptions {
+func (e *Engine) ExecuterOptions() *protocols.ExecutorOptions {
     return e.executerOpts
 }
 
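The engine now stores *protocols.ExecutorOptions instead of a value copy, so later changes to the shared options (parser, logger, execution id) stay visible to everything that holds them. A tiny illustration of that difference, using stand-in types rather than the real ones:

    package main

    import "fmt"

    // Options stands in for protocols.ExecutorOptions.
    type Options struct{ Parser string }

    // Engine keeps a pointer, as the real engine does after this change.
    type Engine struct{ opts *Options }

    func main() {
        opts := &Options{}
        e := &Engine{opts: opts}

        // Mutating the shared options after construction is observed by the
        // engine; with a value copy the engine would still see the old state.
        opts.Parser = "cached-parser"
        fmt.Println(e.opts.Parser) // cached-parser
    }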
@@ -5,7 +5,6 @@ import (
     "sync"
     "sync/atomic"
 
-    "github.com/projectdiscovery/gologger"
     "github.com/projectdiscovery/nuclei/v3/pkg/input/provider"
     "github.com/projectdiscovery/nuclei/v3/pkg/output"
     "github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/contextargs"
@@ -50,7 +49,7 @@ func (e *Engine) ExecuteScanWithOpts(ctx context.Context, templatesList []*templ
     totalReqAfterClustering := getRequestCount(finalTemplates) * int(target.Count())
 
     if !noCluster && totalReqAfterClustering < totalReqBeforeCluster {
-        gologger.Info().Msgf("Templates clustered: %d (Reduced %d Requests)", clusterCount, totalReqBeforeCluster-totalReqAfterClustering)
+        e.Logger.Info().Msgf("Templates clustered: %d (Reduced %d Requests)", clusterCount, totalReqBeforeCluster-totalReqAfterClustering)
     }
 
     // 0 matches means no templates were found in the directory
@@ -6,7 +6,6 @@ import (
     "sync/atomic"
     "time"
 
-    "github.com/projectdiscovery/gologger"
     "github.com/projectdiscovery/nuclei/v3/pkg/input/provider"
     "github.com/projectdiscovery/nuclei/v3/pkg/output"
     "github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/contextargs"
@@ -40,7 +39,7 @@ func (e *Engine) executeAllSelfContained(ctx context.Context, alltemplates []*te
                 match, err = template.Executer.Execute(ctx)
             }
             if err != nil {
-                gologger.Warning().Msgf("[%s] Could not execute step (self-contained): %s\n", e.executerOpts.Colorizer.BrightBlue(template.ID), err)
+                e.options.Logger.Warning().Msgf("[%s] Could not execute step (self-contained): %s\n", e.executerOpts.Colorizer.BrightBlue(template.ID), err)
             }
             results.CompareAndSwap(false, match)
         }(v)
@@ -90,13 +89,13 @@ func (e *Engine) executeTemplateWithTargets(ctx context.Context, template *templ
             // skips indexes lower than the minimum in-flight at interruption time
             var skip bool
             if resumeFromInfo.Completed { // the template was completed
-                gologger.Debug().Msgf("[%s] Skipping \"%s\": Resume - Template already completed\n", template.ID, scannedValue.Input)
+                e.options.Logger.Debug().Msgf("[%s] Skipping \"%s\": Resume - Template already completed", template.ID, scannedValue.Input)
                 skip = true
             } else if index < resumeFromInfo.SkipUnder { // index lower than the sliding window (bulk-size)
-                gologger.Debug().Msgf("[%s] Skipping \"%s\": Resume - Target already processed\n", template.ID, scannedValue.Input)
+                e.options.Logger.Debug().Msgf("[%s] Skipping \"%s\": Resume - Target already processed", template.ID, scannedValue.Input)
                 skip = true
             } else if _, isInFlight := resumeFromInfo.InFlight[index]; isInFlight { // the target wasn't completed successfully
-                gologger.Debug().Msgf("[%s] Repeating \"%s\": Resume - Target wasn't completed\n", template.ID, scannedValue.Input)
+                e.options.Logger.Debug().Msgf("[%s] Repeating \"%s\": Resume - Target wasn't completed", template.ID, scannedValue.Input)
                 // skip is already false, but leaving it here for clarity
                 skip = false
             } else if index > resumeFromInfo.DoAbove { // index above the sliding window (bulk-size)
@@ -158,7 +157,7 @@ func (e *Engine) executeTemplateWithTargets(ctx context.Context, template *templ
                 }
             }
             if err != nil {
-                gologger.Warning().Msgf("[%s] Could not execute step on %s: %s\n", e.executerOpts.Colorizer.BrightBlue(template.ID), value.Input, err)
+                e.options.Logger.Warning().Msgf("[%s] Could not execute step on %s: %s\n", e.executerOpts.Colorizer.BrightBlue(template.ID), value.Input, err)
             }
             results.CompareAndSwap(false, match)
         }(index, skip, scannedValue)
@@ -224,7 +223,7 @@ func (e *Engine) executeTemplatesOnTarget(ctx context.Context, alltemplates []*t
                 }
             }
             if err != nil {
-                gologger.Warning().Msgf("[%s] Could not execute step on %s: %s\n", e.executerOpts.Colorizer.BrightBlue(template.ID), value.Input, err)
+                e.options.Logger.Warning().Msgf("[%s] Could not execute step on %s: %s\n", e.executerOpts.Colorizer.BrightBlue(template.ID), value.Input, err)
             }
             results.CompareAndSwap(false, match)
         }(tpl, target, sg)
21 pkg/external/customtemplates/github_test.go vendored
@@ -1,23 +1,25 @@
 package customtemplates
 
 import (
+	"bytes"
 	"context"
 	"path/filepath"
+	"strings"
 	"testing"
 
 	"github.com/projectdiscovery/gologger"
+	"github.com/projectdiscovery/gologger/levels"
 	"github.com/projectdiscovery/nuclei/v3/pkg/catalog/config"
 	"github.com/projectdiscovery/nuclei/v3/pkg/testutils"
-	osutils "github.com/projectdiscovery/utils/os"
+	"github.com/projectdiscovery/nuclei/v3/pkg/utils"
 	"github.com/stretchr/testify/require"
 )
 
 func TestDownloadCustomTemplatesFromGitHub(t *testing.T) {
-	if osutils.IsOSX() {
-		t.Skip("skipping on macos due to unknown failure (works locally)")
-	}
-
-	gologger.DefaultLogger.SetWriter(&testutils.NoopWriter{})
+	// Capture output to check for rate limit errors
+	outputBuffer := &bytes.Buffer{}
+	gologger.DefaultLogger.SetWriter(&utils.CaptureWriter{Buffer: outputBuffer})
+	gologger.DefaultLogger.SetMaxLevel(levels.LevelDebug)
 
 	templatesDirectory := t.TempDir()
 	config.DefaultConfig.SetTemplatesDir(templatesDirectory)
@@ -29,5 +31,12 @@ func TestDownloadCustomTemplatesFromGitHub(t *testing.T) {
 	require.Nil(t, err, "could not create custom templates manager")
 
 	ctm.Download(context.Background())
 
+	// Check if output contains rate limit error and skip test if so
+	output := outputBuffer.String()
+	if strings.Contains(output, "API rate limit exceeded") {
+		t.Skip("GitHub API rate limit exceeded, skipping test")
+	}
+
 	require.DirExists(t, filepath.Join(templatesDirectory, "github", "projectdiscovery", "nuclei-templates-test"), "cloned directory does not exists")
 }
@@ -61,7 +61,6 @@ func checkTimingDependency(
 
 	var requestsSent []requestsSentMetadata
 	for requestsLeft > 0 {
-
 		isCorrelationPossible, delayRecieved, err := sendRequestAndTestConfidence(regression, highSleepTimeSeconds, requestSender, baselineDelay)
 		if err != nil {
 			return false, "", err
@@ -143,8 +143,8 @@ func (m *MultiPartForm) Decode(data string) (KV, error) {
 		return KV{}, err
 	}
 	defer func() {
 		_ = file.Close()
 	}()
 
 	buffer := new(bytes.Buffer)
 	if _, err := buffer.ReadFrom(file); err != nil {
@@ -2,6 +2,7 @@ package openapi
 
 import (
 	"fmt"
+	"maps"
 	"slices"
 
 	"github.com/getkin/kin-openapi/openapi3"
@@ -162,9 +163,7 @@ func openAPIExample(schema *openapi3.Schema, cache map[*openapi3.Schema]*cachedS
 			return nil, ErrNoExample
 		}
 
-		for k, v := range value {
-			example[k] = v
-		}
+		maps.Copy(example, value)
 	}
 	return example, nil
 }
@@ -115,17 +115,17 @@ func (i *HttpInputProvider) Iterate(callback func(value *contextargs.MetaInput)
 
 // Set adds item to input provider
 // No-op for this provider
-func (i *HttpInputProvider) Set(value string) {}
+func (i *HttpInputProvider) Set(_ string, value string) {}
 
 // SetWithProbe adds item to input provider with http probing
 // No-op for this provider
-func (i *HttpInputProvider) SetWithProbe(value string, probe types.InputLivenessProbe) error {
+func (i *HttpInputProvider) SetWithProbe(_ string, value string, probe types.InputLivenessProbe) error {
 	return nil
 }
 
 // SetWithExclusions adds item to input provider if it doesn't match any of the exclusions
 // No-op for this provider
-func (i *HttpInputProvider) SetWithExclusions(value string) error {
+func (i *HttpInputProvider) SetWithExclusions(_ string, value string) error {
 	return nil
 }
 
@@ -59,11 +59,11 @@ type InputProvider interface {
 	// Iterate over all inputs in order
 	Iterate(callback func(value *contextargs.MetaInput) bool)
 	// Set adds item to input provider
-	Set(value string)
+	Set(executionId string, value string)
 	// SetWithProbe adds item to input provider with http probing
-	SetWithProbe(value string, probe types.InputLivenessProbe) error
+	SetWithProbe(executionId string, value string, probe types.InputLivenessProbe) error
 	// SetWithExclusions adds item to input provider if it doesn't match any of the exclusions
-	SetWithExclusions(value string) error
+	SetWithExclusions(executionId string, value string) error
 	// InputType returns the type of input provider
 	InputType() string
 	// Close the input provider and cleanup any resources
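Note (not part of the diff): a minimal, self-contained sketch of the executionId-first call shape introduced by the interface change above. The provider type here is hypothetical; only the parameter order mirrors the new Set signatures.

package main

import (
	"fmt"
	"strings"
)

// toyProvider is a hypothetical stand-in that mirrors the new
// executionId-first signature of InputProvider.Set.
type toyProvider struct {
	inputs map[string][]string // targets keyed by execution id
}

// Set trims and stores a target under the given execution id.
func (t *toyProvider) Set(executionId string, value string) {
	value = strings.TrimSpace(value)
	if value == "" {
		return
	}
	t.inputs[executionId] = append(t.inputs[executionId], value)
}

func main() {
	p := &toyProvider{inputs: map[string][]string{}}
	p.Set("scan-1", "https://example.com")
	p.Set("scan-2", "https://scanme.sh")
	fmt.Println(p.inputs["scan-1"], p.inputs["scan-2"])
}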
@@ -139,7 +139,7 @@ func (i *ListInputProvider) Iterate(callback func(value *contextargs.MetaInput)
 }
 
 // Set normalizes and stores passed input values
-func (i *ListInputProvider) Set(value string) {
+func (i *ListInputProvider) Set(executionId string, value string) {
 	URL := strings.TrimSpace(value)
 	if URL == "" {
 		return
@@ -169,7 +169,12 @@ func (i *ListInputProvider) Set(value string) {
 
 	if i.ipOptions.ScanAllIPs {
 		// scan all ips
-		dnsData, err := protocolstate.Dialer.GetDNSData(urlx.Hostname())
+		dialers := protocolstate.GetDialersWithId(executionId)
+		if dialers == nil {
+			panic("dialers with executionId " + executionId + " not found")
+		}
+
+		dnsData, err := dialers.Fastdialer.GetDNSData(urlx.Hostname())
 		if err == nil {
 			if (len(dnsData.A) + len(dnsData.AAAA)) > 0 {
 				var ips []string
@@ -201,7 +206,12 @@ func (i *ListInputProvider) Set(value string) {
 	ips := []string{}
 	// only scan the target but ipv6 if it has one
 	if i.ipOptions.IPV6 {
-		dnsData, err := protocolstate.Dialer.GetDNSData(urlx.Hostname())
+		dialers := protocolstate.GetDialersWithId(executionId)
+		if dialers == nil {
+			panic("dialers with executionId " + executionId + " not found")
+		}
+
+		dnsData, err := dialers.Fastdialer.GetDNSData(urlx.Hostname())
 		if err == nil && len(dnsData.AAAA) > 0 {
 			// pick/ prefer 1st
 			ips = append(ips, dnsData.AAAA[0])
@@ -228,17 +238,17 @@ func (i *ListInputProvider) Set(value string) {
 }
 
 // SetWithProbe only sets the input if it is live
-func (i *ListInputProvider) SetWithProbe(value string, probe providerTypes.InputLivenessProbe) error {
+func (i *ListInputProvider) SetWithProbe(executionId string, value string, probe providerTypes.InputLivenessProbe) error {
 	probedValue, err := probe.ProbeURL(value)
 	if err != nil {
 		return err
 	}
-	i.Set(probedValue)
+	i.Set(executionId, probedValue)
 	return nil
 }
 
 // SetWithExclusions normalizes and stores passed input values if not excluded
-func (i *ListInputProvider) SetWithExclusions(value string) error {
+func (i *ListInputProvider) SetWithExclusions(executionId string, value string) error {
 	URL := strings.TrimSpace(value)
 	if URL == "" {
 		return nil
@@ -247,7 +257,7 @@ func (i *ListInputProvider) SetWithExclusions(value string) error {
 		i.skippedCount++
 		return nil
 	}
-	i.Set(URL)
+	i.Set(executionId, URL)
 	return nil
 }
 
@@ -273,18 +283,20 @@ func (i *ListInputProvider) initializeInputSources(opts *Options) error {
 			switch {
 			case iputil.IsCIDR(target):
 				ips := expand.CIDR(target)
-				i.addTargets(ips)
+				i.addTargets(options.ExecutionId, ips)
 			case asn.IsASN(target):
 				ips := expand.ASN(target)
-				i.addTargets(ips)
+				i.addTargets(options.ExecutionId, ips)
 			default:
-				i.Set(target)
+				i.Set(options.ExecutionId, target)
 			}
 		}
 
 	// Handle stdin
 	if options.Stdin {
-		i.scanInputFromReader(readerutil.TimeoutReader{Reader: os.Stdin, Timeout: time.Duration(options.InputReadTimeout)})
+		i.scanInputFromReader(
+			options.ExecutionId,
+			readerutil.TimeoutReader{Reader: os.Stdin, Timeout: time.Duration(options.InputReadTimeout)})
 	}
 
 	// Handle target file
@@ -297,7 +309,7 @@ func (i *ListInputProvider) initializeInputSources(opts *Options) error {
 		}
 		}
 		if input != nil {
-			i.scanInputFromReader(input)
+			i.scanInputFromReader(options.ExecutionId, input)
 			_ = input.Close()
 		}
 	}
@@ -317,7 +329,7 @@ func (i *ListInputProvider) initializeInputSources(opts *Options) error {
 			return err
 		}
 		for c := range ch {
-			i.Set(c)
+			i.Set(options.ExecutionId, c)
 		}
 	}
 
@@ -331,7 +343,7 @@ func (i *ListInputProvider) initializeInputSources(opts *Options) error {
 			ips := expand.ASN(target)
 			i.removeTargets(ips)
 		default:
-			i.Del(target)
+			i.Del(options.ExecutionId, target)
 		}
 	}
 }
@@ -340,19 +352,19 @@ func (i *ListInputProvider) initializeInputSources(opts *Options) error {
 }
 
 // scanInputFromReader scans a line of input from reader and passes it for storage
-func (i *ListInputProvider) scanInputFromReader(reader io.Reader) {
+func (i *ListInputProvider) scanInputFromReader(executionId string, reader io.Reader) {
 	scanner := bufio.NewScanner(reader)
 	for scanner.Scan() {
 		item := scanner.Text()
 		switch {
 		case iputil.IsCIDR(item):
 			ips := expand.CIDR(item)
-			i.addTargets(ips)
+			i.addTargets(executionId, ips)
 		case asn.IsASN(item):
 			ips := expand.ASN(item)
-			i.addTargets(ips)
+			i.addTargets(executionId, ips)
 		default:
-			i.Set(item)
+			i.Set(executionId, item)
 		}
 	}
 }
@@ -371,7 +383,7 @@ func (i *ListInputProvider) isExcluded(URL string) bool {
 	return exists
 }
 
-func (i *ListInputProvider) Del(value string) {
+func (i *ListInputProvider) Del(executionId string, value string) {
 	URL := strings.TrimSpace(value)
 	if URL == "" {
 		return
@@ -401,7 +413,12 @@ func (i *ListInputProvider) Del(value string) {
 
 	if i.ipOptions.ScanAllIPs {
 		// scan all ips
-		dnsData, err := protocolstate.Dialer.GetDNSData(urlx.Hostname())
+		dialers := protocolstate.GetDialersWithId(executionId)
+		if dialers == nil {
+			panic("dialers with executionId " + executionId + " not found")
+		}
+
+		dnsData, err := dialers.Fastdialer.GetDNSData(urlx.Hostname())
 		if err == nil {
 			if (len(dnsData.A) + len(dnsData.AAAA)) > 0 {
 				var ips []string
@@ -433,7 +450,12 @@ func (i *ListInputProvider) Del(value string) {
 	ips := []string{}
 	// only scan the target but ipv6 if it has one
 	if i.ipOptions.IPV6 {
-		dnsData, err := protocolstate.Dialer.GetDNSData(urlx.Hostname())
+		dialers := protocolstate.GetDialersWithId(executionId)
+		if dialers == nil {
+			panic("dialers with executionId " + executionId + " not found")
+		}
+
+		dnsData, err := dialers.Fastdialer.GetDNSData(urlx.Hostname())
 		if err == nil && len(dnsData.AAAA) > 0 {
 			// pick/ prefer 1st
 			ips = append(ips, dnsData.AAAA[0])
@@ -519,9 +541,9 @@ func (i *ListInputProvider) setHostMapStream(data string) {
 	}
 }
 
-func (i *ListInputProvider) addTargets(targets []string) {
+func (i *ListInputProvider) addTargets(executionId string, targets []string) {
 	for _, target := range targets {
-		i.Set(target)
+		i.Set(executionId, target)
 	}
 }
 
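Note (not part of the diff): the protocolstate.GetDialersWithId lookups above imply a per-execution registry of dialers. The sketch below is an assumed illustration of that pattern only, not the actual protocolstate implementation; the nil check and panic in the diff correspond to a missing registration.

package main

import (
	"fmt"
	"sync"
)

// Dialers is a placeholder for per-execution network state
// (the real struct would hold a fastdialer instance, network policy, etc.).
type Dialers struct {
	Name string
}

var (
	mu       sync.RWMutex
	registry = map[string]*Dialers{}
)

// RegisterDialers stores dialers under an execution id.
func RegisterDialers(executionId string, d *Dialers) {
	mu.Lock()
	defer mu.Unlock()
	registry[executionId] = d
}

// GetDialersWithId returns nil when nothing was registered,
// which is the condition the diff guards (and panics) on.
func GetDialersWithId(executionId string) *Dialers {
	mu.RLock()
	defer mu.RUnlock()
	return registry[executionId]
}

func main() {
	RegisterDialers("scan-1", &Dialers{Name: "fastdialer"})
	fmt.Println(GetDialersWithId("scan-1"), GetDialersWithId("missing"))
}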
@@ -36,7 +36,7 @@ func Test_expandCIDR(t *testing.T) {
 		input := &ListInputProvider{hostMap: hm}
 
 		ips := expand.CIDR(tt.cidr)
-		input.addTargets(ips)
+		input.addTargets("", ips)
 		// scan
 		got := []string{}
 		input.hostMap.Scan(func(k, _ []byte) error {
@@ -137,7 +137,7 @@ func Test_scanallips_normalizeStoreInputValue(t *testing.T) {
 			},
 		}
 
-		input.Set(tt.hostname)
+		input.Set("", tt.hostname)
 		// scan
 		got := []string{}
 		input.hostMap.Scan(func(k, v []byte) error {
@@ -180,7 +180,7 @@ func Test_expandASNInputValue(t *testing.T) {
 		input := &ListInputProvider{hostMap: hm}
 		// get the IP addresses for ASN number
 		ips := expand.ASN(tt.asn)
-		input.addTargets(ips)
+		input.addTargets("", ips)
 		// scan the hmap
 		got := []string{}
 		input.hostMap.Scan(func(k, v []byte) error {
@@ -19,10 +19,10 @@ func NewSimpleInputProvider() *SimpleInputProvider {
 }
 
 // NewSimpleInputProviderWithUrls creates a new simple input provider with the given urls
-func NewSimpleInputProviderWithUrls(urls ...string) *SimpleInputProvider {
+func NewSimpleInputProviderWithUrls(executionId string, urls ...string) *SimpleInputProvider {
 	provider := NewSimpleInputProvider()
 	for _, url := range urls {
-		provider.Set(url)
+		provider.Set(executionId, url)
 	}
 	return provider
 }
@@ -42,14 +42,14 @@ func (s *SimpleInputProvider) Iterate(callback func(value *contextargs.MetaInput
 }
 
 // Set adds an item to the input provider
-func (s *SimpleInputProvider) Set(value string) {
+func (s *SimpleInputProvider) Set(_ string, value string) {
 	metaInput := contextargs.NewMetaInput()
 	metaInput.Input = value
 	s.Inputs = append(s.Inputs, metaInput)
 }
 
 // SetWithProbe adds an item to the input provider with HTTP probing
-func (s *SimpleInputProvider) SetWithProbe(value string, probe types.InputLivenessProbe) error {
+func (s *SimpleInputProvider) SetWithProbe(_ string, value string, probe types.InputLivenessProbe) error {
 	probedValue, err := probe.ProbeURL(value)
 	if err != nil {
 		return err
@@ -61,7 +61,7 @@ func (s *SimpleInputProvider) SetWithProbe(value string, probe types.InputLivene
 }
 
 // SetWithExclusions adds an item to the input provider if it doesn't match any of the exclusions
-func (s *SimpleInputProvider) SetWithExclusions(value string) error {
+func (s *SimpleInputProvider) SetWithExclusions(_ string, value string) error {
 	metaInput := contextargs.NewMetaInput()
 	metaInput.Input = value
 	s.Inputs = append(s.Inputs, metaInput)
@@ -53,11 +53,14 @@ func (t *templateUpdateResults) String() string {
 		},
 	}
 	table := tablewriter.NewWriter(&buff)
-	table.SetHeader([]string{"Total", "Added", "Modified", "Removed"})
+	table.Header([]string{"Total", "Added", "Modified", "Removed"})
 	for _, v := range data {
-		table.Append(v)
+		_ = table.Append(v)
 	}
-	table.Render()
+	_ = table.Render()
+	defer func() {
+		_ = table.Close()
+	}()
 	return buff.String()
 }
 
@@ -5,7 +5,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/dop251/goja"
+	"github.com/Mzack9999/goja"
 	"github.com/kitabisa/go-ci"
 
 	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/generators"
@@ -32,6 +32,9 @@ func New() *Compiler {
 
 // ExecuteOptions provides options for executing a script.
 type ExecuteOptions struct {
+	// ExecutionId is the id of the execution
+	ExecutionId string
+
 	// Callback can be used to register new runtime helper functions
 	// ex: export etc
 	Callback func(runtime *goja.Runtime) error
@@ -1,6 +1,8 @@
 package compiler
 
 import (
+	"sync"
+
 	"github.com/projectdiscovery/nuclei/v3/pkg/types"
 )
 
@@ -9,10 +11,13 @@ import (
 var (
 	PoolingJsVmConcurrency  = 100
 	NonPoolingVMConcurrency = 20
+	m                       sync.Mutex
 )
 
 // Init initializes the javascript protocol
 func Init(opts *types.Options) error {
+	m.Lock()
+	defer m.Unlock()
+
 	if opts.JsConcurrency < 100 {
 		// 100 is reasonable default
@@ -3,7 +3,7 @@ package compiler
 import (
 	"sync"
 
-	"github.com/dop251/goja"
+	"github.com/Mzack9999/goja"
 	syncutil "github.com/projectdiscovery/utils/sync"
 )
 
@@ -7,9 +7,9 @@ import (
 	"reflect"
 	"sync"
 
-	"github.com/dop251/goja"
-	"github.com/dop251/goja_nodejs/console"
-	"github.com/dop251/goja_nodejs/require"
+	"github.com/Mzack9999/goja"
+	"github.com/Mzack9999/goja_nodejs/console"
+	"github.com/Mzack9999/goja_nodejs/require"
 	"github.com/kitabisa/go-ci"
 	"github.com/projectdiscovery/gologger"
 	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libbytes"
@@ -84,6 +84,7 @@ func executeWithRuntime(runtime *goja.Runtime, p *goja.Program, args *ExecuteArg
 		if opts != nil && opts.Cleanup != nil {
 			opts.Cleanup(runtime)
 		}
+		runtime.RemoveContextValue("executionId")
 	}()
 
 	// TODO(dwisiswant0): remove this once we get the RCA.
@@ -108,8 +109,11 @@ func executeWithRuntime(runtime *goja.Runtime, p *goja.Program, args *ExecuteArg
 		if err := opts.Callback(runtime); err != nil {
 			return nil, err
 		}
 
 	}
 
+	// inject execution id and context
+	runtime.SetContextValue("executionId", opts.ExecutionId)
+
 	// execute the script
 	return runtime.RunProgram(p)
 }
@@ -92,8 +92,8 @@ func (d *TemplateData) WriteMarkdownIndexTemplate(outputDirectory string) error
 		return errors.Wrap(err, "could not create markdown index template")
 	}
 	defer func() {
 		_ = output.Close()
 	}()
 
 	buffer := &bytes.Buffer{}
 	_, _ = buffer.WriteString("# Index\n\n")
@@ -5,7 +5,7 @@ package {{.PackageName}}
 import (
 	{{$pkgName}} "{{.PackagePath}}"
 
-	"github.com/dop251/goja"
+	"github.com/Mzack9999/goja"
 	"github.com/projectdiscovery/nuclei/v3/pkg/js/gojs"
 )
 

(The same single-line import swap, "github.com/dop251/goja" to "github.com/Mzack9999/goja", is repeated in the generated bindings for the bytes, fs, goconsole, ikev2, kerberos, ldap, mssql, mysql, net, oracle, pop3, postgres, rdp, redis, rsync, smb, smtp, ssh, structs, telnet, vnc, and global packages; each of those hunks touches only that import line inside the package's import block.)
@@ -9,7 +9,7 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/dop251/goja"
+	"github.com/Mzack9999/goja"
 	"github.com/logrusorgru/aurora"
 	"github.com/projectdiscovery/gologger"
 	"github.com/projectdiscovery/nuclei/v3/pkg/js/gojs"
@@ -113,8 +113,7 @@ func initBuiltInFunc(runtime *goja.Runtime) {
 			"isPortOpen(host string, port string, [timeout int]) bool",
 		},
 		Description: "isPortOpen checks if given TCP port is open on host. timeout is optional and defaults to 5 seconds",
-		FuncDecl: func(host string, port string, timeout ...int) (bool, error) {
-			ctx := context.Background()
+		FuncDecl: func(ctx context.Context, host string, port string, timeout ...int) (bool, error) {
 			if len(timeout) > 0 {
 				var cancel context.CancelFunc
 				ctx, cancel = context.WithTimeout(ctx, time.Duration(timeout[0])*time.Second)
@@ -123,7 +122,14 @@ func initBuiltInFunc(runtime *goja.Runtime) {
 			if host == "" || port == "" {
 				return false, errkit.New("isPortOpen: host or port is empty")
 			}
-			conn, err := protocolstate.Dialer.Dial(ctx, "tcp", net.JoinHostPort(host, port))
+
+			executionId := ctx.Value("executionId").(string)
+			dialer := protocolstate.GetDialersWithId(executionId)
+			if dialer == nil {
+				panic("dialers with executionId " + executionId + " not found")
+			}
+
+			conn, err := dialer.Fastdialer.Dial(ctx, "tcp", net.JoinHostPort(host, port))
 			if err != nil {
 				return false, err
 			}
@@ -138,8 +144,7 @@ func initBuiltInFunc(runtime *goja.Runtime) {
 			"isUDPPortOpen(host string, port string, [timeout int]) bool",
 		},
 		Description: "isUDPPortOpen checks if the given UDP port is open on the host. Timeout is optional and defaults to 5 seconds.",
-		FuncDecl: func(host string, port string, timeout ...int) (bool, error) {
-			ctx := context.Background()
+		FuncDecl: func(ctx context.Context, host string, port string, timeout ...int) (bool, error) {
 			if len(timeout) > 0 {
 				var cancel context.CancelFunc
 				ctx, cancel = context.WithTimeout(ctx, time.Duration(timeout[0])*time.Second)
@@ -148,7 +153,14 @@ func initBuiltInFunc(runtime *goja.Runtime) {
 			if host == "" || port == "" {
 				return false, errkit.New("isPortOpen: host or port is empty")
 			}
-			conn, err := protocolstate.Dialer.Dial(ctx, "udp", net.JoinHostPort(host, port))
+
+			executionId := ctx.Value("executionId").(string)
+			dialer := protocolstate.GetDialersWithId(executionId)
+			if dialer == nil {
+				panic("dialers with executionId " + executionId + " not found")
+			}
+
+			conn, err := dialer.Fastdialer.Dial(ctx, "udp", net.JoinHostPort(host, port))
 			if err != nil {
 				return false, err
 			}
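Note (not part of the diff): the bare ctx.Value("executionId").(string) assertion used in the built-ins above panics if the key is missing or holds a non-string. A standalone sketch of the round trip with the safer comma-ok form, assuming the id is stored under the plain string key used throughout this change:

package main

import (
	"context"
	"fmt"
)

func portCheck(ctx context.Context, host, port string) error {
	// comma-ok avoids a panic when the key is missing or mistyped
	executionId, ok := ctx.Value("executionId").(string)
	if !ok {
		return fmt.Errorf("no executionId in context")
	}
	fmt.Printf("dialing %s:%s for execution %s\n", host, port, executionId)
	return nil
}

func main() {
	//nolint // plain string key kept for parity with the diff, not best practice
	ctx := context.WithValue(context.Background(), "executionId", "scan-1")
	_ = portCheck(ctx, "127.0.0.1", "80")
	_ = portCheck(context.Background(), "127.0.0.1", "80") // returns an error instead of panicking
}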
@@ -3,9 +3,9 @@ package global
 import (
 	"testing"
 
-	"github.com/dop251/goja"
-	"github.com/dop251/goja_nodejs/console"
-	"github.com/dop251/goja_nodejs/require"
+	"github.com/Mzack9999/goja"
+	"github.com/Mzack9999/goja_nodejs/console"
+	"github.com/Mzack9999/goja_nodejs/require"
 )
 
 func TestScriptsRuntime(t *testing.T) {
@@ -1,10 +1,13 @@
 package gojs
 
 import (
+	"context"
+	"maps"
+	"reflect"
 	"sync"
 
-	"github.com/dop251/goja"
-	"github.com/dop251/goja_nodejs/require"
+	"github.com/Mzack9999/goja"
+	"github.com/Mzack9999/goja_nodejs/require"
 	"github.com/projectdiscovery/nuclei/v3/pkg/js/utils"
 )
 
@@ -47,17 +50,65 @@ func (p *GojaModule) Name() string {
 	return p.name
 }
 
-func (p *GojaModule) Set(objects Objects) Module {
-	for k, v := range objects {
-		p.sets[k] = v
-	}
+// wrapModuleFunc wraps a Go function with context injection for modules
+// nolint
+func wrapModuleFunc(runtime *goja.Runtime, fn interface{}) interface{} {
+	fnType := reflect.TypeOf(fn)
+	if fnType.Kind() != reflect.Func {
+		return fn
+	}
+
+	// Only wrap if first parameter is context.Context
+	if fnType.NumIn() == 0 || fnType.In(0) != reflect.TypeOf((*context.Context)(nil)).Elem() {
+		return fn // Return original function unchanged if it doesn't have context.Context as first arg
+	}
+
+	// Create input and output type slices
+	inTypes := make([]reflect.Type, fnType.NumIn())
+	for i := 0; i < fnType.NumIn(); i++ {
+		inTypes[i] = fnType.In(i)
+	}
+	outTypes := make([]reflect.Type, fnType.NumOut())
+	for i := 0; i < fnType.NumOut(); i++ {
+		outTypes[i] = fnType.Out(i)
+	}
+
+	// Create a new function with same signature
+	newFnType := reflect.FuncOf(inTypes, outTypes, fnType.IsVariadic())
+	newFn := reflect.MakeFunc(newFnType, func(args []reflect.Value) []reflect.Value {
+		// Get context from runtime
+		var ctx context.Context
+		if ctxVal := runtime.Get("context"); ctxVal != nil {
+			if ctxObj, ok := ctxVal.Export().(context.Context); ok {
+				ctx = ctxObj
+			}
+		}
+		if ctx == nil {
+			ctx = context.Background()
+		}
+
+		// Add execution ID to context if available
+		if execID := runtime.Get("executionId"); execID != nil {
+			//nolint
+			ctx = context.WithValue(ctx, "executionId", execID.String())
+		}
+
+		// Replace first argument (context) with our context
+		args[0] = reflect.ValueOf(ctx)
+
+		// Call original function with modified arguments
+		return reflect.ValueOf(fn).Call(args)
+	})
+
+	return newFn.Interface()
+}
+
+func (p *GojaModule) Set(objects Objects) Module {
+	maps.Copy(p.sets, objects)
 	return p
 }
 
 func (p *GojaModule) Require(runtime *goja.Runtime, module *goja.Object) {
 
 	o := module.Get("exports").(*goja.Object)
 
 	for k, v := range p.sets {
@@ -1,7 +1,10 @@
 package gojs
 
 import (
-	"github.com/dop251/goja"
+	"context"
+	"reflect"
+
+	"github.com/Mzack9999/goja"
 	errorutil "github.com/projectdiscovery/utils/errors"
 )
 
@@ -22,6 +25,58 @@ func (f *FuncOpts) valid() bool {
 	return f.Name != "" && f.FuncDecl != nil && len(f.Signatures) > 0 && f.Description != ""
 }
 
+// wrapWithContext wraps a Go function with context injection
+// nolint
+func wrapWithContext(runtime *goja.Runtime, fn interface{}) interface{} {
+	fnType := reflect.TypeOf(fn)
+	if fnType.Kind() != reflect.Func {
+		return fn
+	}
+
+	// Only wrap if first parameter is context.Context
+	if fnType.NumIn() == 0 || fnType.In(0) != reflect.TypeOf((*context.Context)(nil)).Elem() {
+		return fn // Return original function unchanged if it doesn't have context.Context as first arg
+	}
+
+	// Create input and output type slices
+	inTypes := make([]reflect.Type, fnType.NumIn())
+	for i := 0; i < fnType.NumIn(); i++ {
+		inTypes[i] = fnType.In(i)
+	}
+	outTypes := make([]reflect.Type, fnType.NumOut())
+	for i := 0; i < fnType.NumOut(); i++ {
+		outTypes[i] = fnType.Out(i)
+	}
+
+	// Create a new function with same signature
+	newFnType := reflect.FuncOf(inTypes, outTypes, fnType.IsVariadic())
+	newFn := reflect.MakeFunc(newFnType, func(args []reflect.Value) []reflect.Value {
+		// Get context from runtime
+		var ctx context.Context
+		if ctxVal := runtime.Get("context"); ctxVal != nil {
+			if ctxObj, ok := ctxVal.Export().(context.Context); ok {
+				ctx = ctxObj
+			}
+		}
+		if ctx == nil {
+			ctx = context.Background()
+		}
+
+		// Add execution ID to context if available
+		if execID := runtime.Get("executionId"); execID != nil {
+			ctx = context.WithValue(ctx, "executionId", execID.String())
+		}
+
+		// Replace first argument (context) with our context
+		args[0] = reflect.ValueOf(ctx)
+
+		// Call original function with modified arguments
+		return reflect.ValueOf(fn).Call(args)
+	})
+
+	return newFn.Interface()
+}
+
 // RegisterFunc registers a function with given name, signatures and description
 func RegisterFuncWithSignature(runtime *goja.Runtime, opts FuncOpts) error {
 	if runtime == nil {
@@ -30,5 +85,8 @@ func RegisterFuncWithSignature(runtime *goja.Runtime, opts FuncOpts) error {
 	if !opts.valid() {
 		return ErrInvalidFuncOpts.Msgf("name: %s, signatures: %v, description: %s", opts.Name, opts.Signatures, opts.Description)
 	}
-	return runtime.Set(opts.Name, opts.FuncDecl)
+
+	// Wrap the function with context injection
+	// wrappedFn := wrapWithContext(runtime, opts.FuncDecl)
+	return runtime.Set(opts.Name, opts.FuncDecl /* wrappedFn */)
 }
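Note (not part of the diff): the reflect.MakeFunc technique behind wrapWithContext/wrapModuleFunc, reduced to a stdlib-only sketch. A function whose first parameter is context.Context is rebuilt so the caller's first argument gets replaced with an injected context; names here are illustrative only.

package main

import (
	"context"
	"fmt"
	"reflect"
)

// injectContext rebuilds fn so its leading context.Context argument is
// always replaced with ctx, mirroring the wrapper shown in this diff.
func injectContext(ctx context.Context, fn interface{}) interface{} {
	fnType := reflect.TypeOf(fn)
	ctxType := reflect.TypeOf((*context.Context)(nil)).Elem()
	if fnType.Kind() != reflect.Func || fnType.NumIn() == 0 || fnType.In(0) != ctxType {
		return fn // leave functions without a leading context untouched
	}
	wrapped := reflect.MakeFunc(fnType, func(args []reflect.Value) []reflect.Value {
		args[0] = reflect.ValueOf(ctx) // swap in the injected context
		return reflect.ValueOf(fn).Call(args)
	})
	return wrapped.Interface()
}

func main() {
	greet := func(ctx context.Context, name string) string {
		id, _ := ctx.Value("executionId").(string)
		return fmt.Sprintf("hello %s (execution %s)", name, id)
	}
	//nolint // string key kept for parity with the diff
	ctx := context.WithValue(context.Background(), "executionId", "scan-1")
	wrapped := injectContext(ctx, greet).(func(context.Context, string) string)
	fmt.Println(wrapped(context.TODO(), "nuclei")) // caller's context is ignored; scan-1 is injected
}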
@@ -3,7 +3,7 @@ package bytes
 import (
 	"encoding/hex"
 
-	"github.com/dop251/goja"
+	"github.com/Mzack9999/goja"
 	"github.com/projectdiscovery/nuclei/v3/pkg/js/libs/structs"
 	"github.com/projectdiscovery/nuclei/v3/pkg/js/utils"
 )
 
@@ -1,6 +1,7 @@ package fs
 package fs
 
 import (
+	"context"
 	"os"
 
 	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
@@ -27,8 +28,9 @@ import (
 // // when no itemType is provided, it will return both files and directories
 // const items = fs.ListDir('/tmp');
 // ```
-func ListDir(path string, itemType string) ([]string, error) {
-	finalPath, err := protocolstate.NormalizePath(path)
+func ListDir(ctx context.Context, path string, itemType string) ([]string, error) {
+	executionId := ctx.Value("executionId").(string)
+	finalPath, err := protocolstate.NormalizePathWithExecutionId(executionId, path)
 	if err != nil {
 		return nil, err
 	}
@@ -57,8 +59,9 @@ func ListDir(path string, itemType string) ([]string, error) {
 // // here permitted directories are $HOME/nuclei-templates/*
 // const content = fs.ReadFile('helpers/usernames.txt');
 // ```
-func ReadFile(path string) ([]byte, error) {
-	finalPath, err := protocolstate.NormalizePath(path)
+func ReadFile(ctx context.Context, path string) ([]byte, error) {
+	executionId := ctx.Value("executionId").(string)
+	finalPath, err := protocolstate.NormalizePathWithExecutionId(executionId, path)
 	if err != nil {
 		return nil, err
 	}
@@ -74,8 +77,8 @@ func ReadFile(path string) ([]byte, error) {
 // // here permitted directories are $HOME/nuclei-templates/*
 // const content = fs.ReadFileAsString('helpers/usernames.txt');
 // ```
-func ReadFileAsString(path string) (string, error) {
-	bin, err := ReadFile(path)
+func ReadFileAsString(ctx context.Context, path string) (string, error) {
+	bin, err := ReadFile(ctx, path)
 	if err != nil {
 		return "", err
 	}
@@ -91,14 +94,14 @@ func ReadFileAsString(path string) (string, error) {
 // const contents = fs.ReadFilesFromDir('helpers/ssh-keys');
 // log(contents);
 // ```
-func ReadFilesFromDir(dir string) ([]string, error) {
-	files, err := ListDir(dir, "file")
+func ReadFilesFromDir(ctx context.Context, dir string) ([]string, error) {
+	files, err := ListDir(ctx, dir, "file")
 	if err != nil {
 		return nil, err
 	}
 	var results []string
 	for _, file := range files {
-		content, err := ReadFileAsString(dir + "/" + file)
+		content, err := ReadFileAsString(ctx, dir+"/"+file)
 		if err != nil {
 			return nil, err
 		}
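Note (not part of the diff): a usage sketch for the reworked fs helpers above. The caller is now responsible for putting the execution id on the context before invoking them; the import path comes from the generated bindings in this change set, and whether the read actually succeeds still depends on protocolstate being initialized for that id, so treat this as illustrative only.

package main

import (
	"context"
	"fmt"

	fs "github.com/projectdiscovery/nuclei/v3/pkg/js/libs/fs"
)

func main() {
	//nolint // plain string key matches what the wrapped JS runtime injects
	ctx := context.WithValue(context.Background(), "executionId", "scan-1")

	// ReadFile now resolves the sandbox/allow-list via the execution id on ctx.
	data, err := fs.ReadFile(ctx, "helpers/usernames.txt")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(data))
}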
@@ -1,7 +1,7 @@ package goconsole
 package goconsole
 
 import (
-	"github.com/dop251/goja_nodejs/console"
+	"github.com/Mzack9999/goja_nodejs/console"
 	"github.com/projectdiscovery/gologger"
 )
 
@@ -3,7 +3,7 @@ package kerberos
 import (
 	"strings"
 
-	"github.com/dop251/goja"
+	"github.com/Mzack9999/goja"
 	kclient "github.com/jcmturner/gokrb5/v8/client"
 	kconfig "github.com/jcmturner/gokrb5/v8/config"
 	"github.com/jcmturner/gokrb5/v8/iana/errorcode"
@@ -109,7 +109,8 @@ func NewKerberosClient(call goja.ConstructorCall, runtime *goja.Runtime) *goja.O
 
 	if controller != "" {
 		// validate controller hostport
-		if !protocolstate.IsHostAllowed(controller) {
+		executionId := c.nj.ExecutionId()
+		if !protocolstate.IsHostAllowed(executionId, controller) {
 			c.nj.Throw("domain controller address blacklisted by network policy")
 		}
 
@@ -246,16 +247,18 @@ func (c *Client) GetServiceTicket(User, Pass, SPN string) (TGS, error) {
 	c.nj.Require(Pass != "", "Pass cannot be empty")
 	c.nj.Require(SPN != "", "SPN cannot be empty")
 
+	executionId := c.nj.ExecutionId()
+
 	if len(c.Krb5Config.Realms) > 0 {
 		// this means dc address was given
 		for _, r := range c.Krb5Config.Realms {
 			for _, kdc := range r.KDC {
-				if !protocolstate.IsHostAllowed(kdc) {
+				if !protocolstate.IsHostAllowed(executionId, kdc) {
 					c.nj.Throw("KDC address %v blacklisted by network policy", kdc)
 				}
 			}
 			for _, kpasswd := range r.KPasswdServer {
-				if !protocolstate.IsHostAllowed(kpasswd) {
+				if !protocolstate.IsHostAllowed(executionId, kpasswd) {
 					c.nj.Throw("Kpasswd address %v blacklisted by network policy", kpasswd)
 				}
 			}
@@ -265,7 +268,7 @@ func (c *Client) GetServiceTicket(User, Pass, SPN string) (TGS, error) {
 	// and check if they are allowed by network policy
 	_, kdcs, _ := c.Krb5Config.GetKDCs(c.Realm, true)
 	for _, v := range kdcs {
-		if !protocolstate.IsHostAllowed(v) {
+		if !protocolstate.IsHostAllowed(executionId, v) {
 			c.nj.Throw("KDC address %v blacklisted by network policy", v)
 		}
 	}
@@ -68,6 +68,12 @@ func sendToKDCTcp(kclient *Client, msg string) ([]byte, error) {
 	kclient.nj.HandleError(err, "error getting KDCs")
 	kclient.nj.Require(len(kdcs) > 0, "no KDCs found")
 
+	executionId := kclient.nj.ExecutionId()
+	dialers := protocolstate.GetDialersWithId(executionId)
+	if dialers == nil {
+		return nil, fmt.Errorf("dialers not initialized for %s", executionId)
+	}
+
 	var errs []string
 	for i := 1; i <= len(kdcs); i++ {
 		host, port, err := net.SplitHostPort(kdcs[i])

@@ -75,14 +81,14 @@ func sendToKDCTcp(kclient *Client, msg string) ([]byte, error) {
 			// use that ip address instead of realm/domain for resolving
 			host = kclient.config.ip
 		}
-		tcpConn, err := protocolstate.Dialer.Dial(context.TODO(), "tcp", net.JoinHostPort(host, port))
+		tcpConn, err := dialers.Fastdialer.Dial(context.TODO(), "tcp", net.JoinHostPort(host, port))
 		if err != nil {
 			errs = append(errs, fmt.Sprintf("error establishing connection to %s: %v", kdcs[i], err))
 			continue
 		}
 		defer func() {
 			_ = tcpConn.Close()
 		}()
 		_ = tcpConn.SetDeadline(time.Now().Add(time.Duration(kclient.config.timeout) * time.Second)) //read and write deadline
 		rb, err := sendTCP(tcpConn.(*net.TCPConn), []byte(msg))
 		if err != nil {

@@ -103,6 +109,11 @@ func sendToKDCUdp(kclient *Client, msg string) ([]byte, error) {
 	kclient.nj.HandleError(err, "error getting KDCs")
 	kclient.nj.Require(len(kdcs) > 0, "no KDCs found")
 
+	executionId := kclient.nj.ExecutionId()
+	dialers := protocolstate.GetDialersWithId(executionId)
+	if dialers == nil {
+		return nil, fmt.Errorf("dialers not initialized for %s", executionId)
+	}
 	var errs []string
 	for i := 1; i <= len(kdcs); i++ {
 		host, port, err := net.SplitHostPort(kdcs[i])

@@ -110,14 +121,14 @@ func sendToKDCUdp(kclient *Client, msg string) ([]byte, error) {
 			// use that ip address instead of realm/domain for resolving
 			host = kclient.config.ip
 		}
-		udpConn, err := protocolstate.Dialer.Dial(context.TODO(), "udp", net.JoinHostPort(host, port))
+		udpConn, err := dialers.Fastdialer.Dial(context.TODO(), "udp", net.JoinHostPort(host, port))
 		if err != nil {
 			errs = append(errs, fmt.Sprintf("error establishing connection to %s: %v", kdcs[i], err))
 			continue
 		}
 		defer func() {
 			_ = udpConn.Close()
 		}()
 		_ = udpConn.SetDeadline(time.Now().Add(time.Duration(kclient.config.timeout) * time.Second)) //read and write deadline
 		rb, err := sendUDP(udpConn.(*net.UDPConn), []byte(msg))
 		if err != nil {

@@ -137,8 +148,8 @@ func sendToKDCUdp(kclient *Client, msg string) ([]byte, error) {
 func sendUDP(conn *net.UDPConn, b []byte) ([]byte, error) {
 	var r []byte
 	defer func() {
 		_ = conn.Close()
 	}()
 	_, err := conn.Write(b)
 	if err != nil {
 		return r, fmt.Errorf("error sending to (%s): %v", conn.RemoteAddr().String(), err)

@@ -158,8 +169,8 @@ func sendUDP(conn *net.UDPConn, b []byte) ([]byte, error) {
 // sendTCP sends bytes to connection over TCP.
 func sendTCP(conn *net.TCPConn, b []byte) ([]byte, error) {
 	defer func() {
 		_ = conn.Close()
 	}()
 	var r []byte
 	// RFC 4120 7.2.2 specifies the first 4 bytes indicate the length of the message in big endian order.
 	hb := make([]byte, 4)
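The hunks above all apply the same refactor: instead of dialing through the process-wide protocolstate.Dialer singleton, each call now resolves the dialer bundle registered for its execution ID and fails fast when that bundle is missing. A minimal sketch of the pattern follows; the helper name dialKDC and its parameters are hypothetical, while GetDialersWithId and Fastdialer.Dial are the calls taken verbatim from the diff.

// Sketch only: the per-execution dialer lookup used throughout this change.
// dialKDC and its parameters are hypothetical; GetDialersWithId and
// Fastdialer.Dial mirror the calls shown in the hunks above.
package example

import (
	"context"
	"fmt"
	"net"

	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
)

func dialKDC(ctx context.Context, executionId, host, port string) (net.Conn, error) {
	// Every running scan registers its own dialers under its execution ID,
	// replacing the old global protocolstate.Dialer.
	dialers := protocolstate.GetDialersWithId(executionId)
	if dialers == nil {
		return nil, fmt.Errorf("dialers not initialized for %s", executionId)
	}
	// Fastdialer applies the network policy and DNS handling configured for this execution.
	return dialers.Fastdialer.Dial(ctx, "tcp", net.JoinHostPort(host, port))
}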
@@ -8,7 +8,7 @@ import (
 	"net/url"
 	"strings"
 
-	"github.com/dop251/goja"
+	"github.com/Mzack9999/goja"
 	"github.com/go-ldap/ldap/v3"
 	"github.com/projectdiscovery/nuclei/v3/pkg/js/utils"
 	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"

@@ -86,12 +86,18 @@ func NewClient(call goja.ConstructorCall, runtime *goja.Runtime) *goja.Object {
 	u, err := url.Parse(ldapUrl)
 	c.nj.HandleError(err, "invalid ldap url supported schemas are ldap://, ldaps://, ldapi://, and cldap://")
 
+	executionId := c.nj.ExecutionId()
+	dialers := protocolstate.GetDialersWithId(executionId)
+	if dialers == nil {
+		panic("dialers with executionId " + executionId + " not found")
+	}
+
 	var conn net.Conn
 	if u.Scheme == "ldapi" {
 		if u.Path == "" || u.Path == "/" {
 			u.Path = "/var/run/slapd/ldapi"
 		}
-		conn, err = protocolstate.Dialer.Dial(context.TODO(), "unix", u.Path)
+		conn, err = dialers.Fastdialer.Dial(context.TODO(), "unix", u.Path)
 		c.nj.HandleError(err, "failed to connect to ldap server")
 	} else {
 		host, port, err := net.SplitHostPort(u.Host)

@@ -110,12 +116,12 @@ func NewClient(call goja.ConstructorCall, runtime *goja.Runtime) *goja.Object {
 			if port == "" {
 				port = ldap.DefaultLdapPort
 			}
-			conn, err = protocolstate.Dialer.Dial(context.TODO(), "udp", net.JoinHostPort(host, port))
+			conn, err = dialers.Fastdialer.Dial(context.TODO(), "udp", net.JoinHostPort(host, port))
 		case "ldap":
 			if port == "" {
 				port = ldap.DefaultLdapPort
 			}
-			conn, err = protocolstate.Dialer.Dial(context.TODO(), "tcp", net.JoinHostPort(host, port))
+			conn, err = dialers.Fastdialer.Dial(context.TODO(), "tcp", net.JoinHostPort(host, port))
 		case "ldaps":
 			if port == "" {
 				port = ldap.DefaultLdapsPort

@@ -124,7 +130,7 @@ func NewClient(call goja.ConstructorCall, runtime *goja.Runtime) *goja.Object {
 			if c.cfg.ServerName != "" {
 				serverName = c.cfg.ServerName
 			}
-			conn, err = protocolstate.Dialer.DialTLSWithConfig(context.TODO(), "tcp", net.JoinHostPort(host, port),
+			conn, err = dialers.Fastdialer.DialTLSWithConfig(context.TODO(), "tcp", net.JoinHostPort(host, port),
 				&tls.Config{InsecureSkipVerify: true, MinVersion: tls.VersionTLS10, ServerName: serverName})
 		default:
 			err = fmt.Errorf("unsupported ldap url schema %v", u.Scheme)
@@ -10,11 +10,11 @@ import (
 	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
 )
 
-func memoizedconnect(host string, port int, username string, password string, dbName string) (bool, error) {
+func memoizedconnect(executionId string, host string, port int, username string, password string, dbName string) (bool, error) {
 	hash := "connect" + ":" + fmt.Sprint(host) + ":" + fmt.Sprint(port) + ":" + fmt.Sprint(username) + ":" + fmt.Sprint(password) + ":" + fmt.Sprint(dbName)
 
 	v, err, _ := protocolstate.Memoizer.Do(hash, func() (interface{}, error) {
-		return connect(host, port, username, password, dbName)
+		return connect(executionId, host, port, username, password, dbName)
 	})
 	if err != nil {
 		return false, err

@@ -26,11 +26,11 @@ func memoizedconnect(host string, port int, username string, password string, dbName string) (bool, error) {
 	return false, errors.New("could not convert cached result")
 }
 
-func memoizedisMssql(host string, port int) (bool, error) {
+func memoizedisMssql(executionId string, host string, port int) (bool, error) {
 	hash := "isMssql" + ":" + fmt.Sprint(host) + ":" + fmt.Sprint(port)
 
 	v, err, _ := protocolstate.Memoizer.Do(hash, func() (interface{}, error) {
-		return isMssql(host, port)
+		return isMssql(executionId, host, port)
 	})
 	if err != nil {
 		return false, err
@@ -36,8 +36,9 @@ type (
 // const client = new mssql.MSSQLClient;
 // const connected = client.Connect('acme.com', 1433, 'username', 'password');
 // ```
-func (c *MSSQLClient) Connect(host string, port int, username, password string) (bool, error) {
-	return memoizedconnect(host, port, username, password, "master")
+func (c *MSSQLClient) Connect(ctx context.Context, host string, port int, username, password string) (bool, error) {
+	executionId := ctx.Value("executionId").(string)
+	return memoizedconnect(executionId, host, port, username, password, "master")
 }
 
 // ConnectWithDB connects to MS SQL database using given credentials and database name.

@@ -50,16 +51,17 @@ func (c *MSSQLClient) Connect(host string, port int, username, password string) (bool, error) {
 // const client = new mssql.MSSQLClient;
 // const connected = client.ConnectWithDB('acme.com', 1433, 'username', 'password', 'master');
 // ```
-func (c *MSSQLClient) ConnectWithDB(host string, port int, username, password, dbName string) (bool, error) {
-	return memoizedconnect(host, port, username, password, dbName)
+func (c *MSSQLClient) ConnectWithDB(ctx context.Context, host string, port int, username, password, dbName string) (bool, error) {
+	executionId := ctx.Value("executionId").(string)
+	return memoizedconnect(executionId, host, port, username, password, dbName)
 }
 
 // @memo
-func connect(host string, port int, username string, password string, dbName string) (bool, error) {
+func connect(executionId string, host string, port int, username string, password string, dbName string) (bool, error) {
 	if host == "" || port <= 0 {
 		return false, fmt.Errorf("invalid host or port")
 	}
-	if !protocolstate.IsHostAllowed(host) {
+	if !protocolstate.IsHostAllowed(executionId, host) {
 		// host is not valid according to network policy
 		return false, protocolstate.ErrHostDenied.Msgf(host)
 	}

@@ -77,8 +79,8 @@ func connect(host string, port int, username string, password string, dbName string) (bool, error) {
 		return false, err
 	}
 	defer func() {
 		_ = db.Close()
 	}()
 
 	_, err = db.Exec("select 1")
 	if err != nil {

@@ -107,24 +109,30 @@ func connect(host string, port int, username string, password string, dbName string) (bool, error) {
 // const mssql = require('nuclei/mssql');
 // const isMssql = mssql.IsMssql('acme.com', 1433);
 // ```
-func (c *MSSQLClient) IsMssql(host string, port int) (bool, error) {
-	return memoizedisMssql(host, port)
+func (c *MSSQLClient) IsMssql(ctx context.Context, host string, port int) (bool, error) {
+	executionId := ctx.Value("executionId").(string)
+	return memoizedisMssql(executionId, host, port)
 }
 
 // @memo
-func isMssql(host string, port int) (bool, error) {
-	if !protocolstate.IsHostAllowed(host) {
+func isMssql(executionId string, host string, port int) (bool, error) {
+	if !protocolstate.IsHostAllowed(executionId, host) {
 		// host is not valid according to network policy
 		return false, protocolstate.ErrHostDenied.Msgf(host)
 	}
 
-	conn, err := protocolstate.Dialer.Dial(context.TODO(), "tcp", net.JoinHostPort(host, fmt.Sprintf("%d", port)))
+	dialer := protocolstate.GetDialersWithId(executionId)
+	if dialer == nil {
+		return false, fmt.Errorf("dialers not initialized for %s", executionId)
+	}
+
+	conn, err := dialer.Fastdialer.Dial(context.TODO(), "tcp", net.JoinHostPort(host, fmt.Sprintf("%d", port)))
 	if err != nil {
 		return false, err
 	}
 	defer func() {
 		_ = conn.Close()
 	}()
 
 	data, check, err := mssql.DetectMSSQL(conn, 5*time.Second)
 	if check && err != nil {

@@ -147,18 +155,19 @@ func isMssql(host string, port int) (bool, error) {
 // const result = client.ExecuteQuery('acme.com', 1433, 'username', 'password', 'master', 'SELECT @@version');
 // log(to_json(result));
 // ```
-func (c *MSSQLClient) ExecuteQuery(host string, port int, username, password, dbName, query string) (*utils.SQLResult, error) {
+func (c *MSSQLClient) ExecuteQuery(ctx context.Context, host string, port int, username, password, dbName, query string) (*utils.SQLResult, error) {
+	executionId := ctx.Value("executionId").(string)
 	if host == "" || port <= 0 {
 		return nil, fmt.Errorf("invalid host or port")
 	}
-	if !protocolstate.IsHostAllowed(host) {
+	if !protocolstate.IsHostAllowed(executionId, host) {
 		// host is not valid according to network policy
 		return nil, protocolstate.ErrHostDenied.Msgf(host)
 	}
 
 	target := net.JoinHostPort(host, fmt.Sprintf("%d", port))
 
-	ok, err := c.IsMssql(host, port)
+	ok, err := c.IsMssql(ctx, host, port)
 	if err != nil {
 		return nil, err
 	}

@@ -177,8 +186,8 @@ func (c *MSSQLClient) ExecuteQuery(host string, port int, username, password, dbName, query string) (*utils.SQLResult, error) {
 		return nil, err
 	}
 	defer func() {
 		_ = db.Close()
 	}()
 
 	db.SetMaxOpenConns(1)
 	db.SetMaxIdleConns(0)
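The exported client methods (Connect, ConnectWithDB, IsMssql, ExecuteQuery, and their MySQL and Postgres counterparts below) now take a context.Context as their first argument and recover the execution ID from the "executionId" context value before calling the memoized helpers. The sketch below shows how a caller would thread that value; the key string "executionId" and the type assertion come from the diff, while the surrounding setup and the sample ID are illustrative only.

// Sketch only: how an execution ID reaches the reworked methods via context.
// The way nuclei's JS runtime builds this context is not part of this diff;
// the caller-side setup and the sample ID below are illustrative.
package example

import (
	"context"
	"fmt"
)

func executionIdFrom(ctx context.Context) (string, error) {
	// The diff uses an unchecked assertion (ctx.Value("executionId").(string)),
	// which panics if the value is missing; this variant checks it instead.
	id, ok := ctx.Value("executionId").(string)
	if !ok {
		return "", fmt.Errorf("executionId missing from context")
	}
	return id, nil
}

func main() {
	ctx := context.WithValue(context.Background(), "executionId", "scan-1234") // sample ID
	id, err := executionIdFrom(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("dispatching with execution id:", id)
}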
@@ -8,11 +8,11 @@ import (
 	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
 )
 
-func memoizedisMySQL(host string, port int) (bool, error) {
+func memoizedisMySQL(executionId string, host string, port int) (bool, error) {
 	hash := "isMySQL" + ":" + fmt.Sprint(host) + ":" + fmt.Sprint(port)
 
 	v, err, _ := protocolstate.Memoizer.Do(hash, func() (interface{}, error) {
-		return isMySQL(host, port)
+		return isMySQL(executionId, host, port)
 	})
 	if err != nil {
 		return false, err

@@ -24,11 +24,11 @@ func memoizedisMySQL(host string, port int) (bool, error) {
 	return false, errors.New("could not convert cached result")
 }
 
-func memoizedfingerprintMySQL(host string, port int) (MySQLInfo, error) {
+func memoizedfingerprintMySQL(executionId string, host string, port int) (MySQLInfo, error) {
 	hash := "fingerprintMySQL" + ":" + fmt.Sprint(host) + ":" + fmt.Sprint(port)
 
 	v, err, _ := protocolstate.Memoizer.Do(hash, func() (interface{}, error) {
-		return fingerprintMySQL(host, port)
+		return fingerprintMySQL(executionId, host, port)
 	})
 	if err != nil {
 		return MySQLInfo{}, err
@@ -35,24 +35,30 @@ type (
 // const mysql = require('nuclei/mysql');
 // const isMySQL = mysql.IsMySQL('acme.com', 3306);
 // ```
-func (c *MySQLClient) IsMySQL(host string, port int) (bool, error) {
+func (c *MySQLClient) IsMySQL(ctx context.Context, host string, port int) (bool, error) {
+	executionId := ctx.Value("executionId").(string)
 	// todo: why this is exposed? Service fingerprint should be automatic
-	return memoizedisMySQL(host, port)
+	return memoizedisMySQL(executionId, host, port)
 }
 
 // @memo
-func isMySQL(host string, port int) (bool, error) {
-	if !protocolstate.IsHostAllowed(host) {
+func isMySQL(executionId string, host string, port int) (bool, error) {
+	if !protocolstate.IsHostAllowed(executionId, host) {
 		// host is not valid according to network policy
 		return false, protocolstate.ErrHostDenied.Msgf(host)
 	}
-	conn, err := protocolstate.Dialer.Dial(context.TODO(), "tcp", net.JoinHostPort(host, fmt.Sprintf("%d", port)))
+	dialer := protocolstate.GetDialersWithId(executionId)
+	if dialer == nil {
+		return false, fmt.Errorf("dialers not initialized for %s", executionId)
+	}
+
+	conn, err := dialer.Fastdialer.Dial(context.TODO(), "tcp", net.JoinHostPort(host, fmt.Sprintf("%d", port)))
 	if err != nil {
 		return false, err
 	}
 	defer func() {
 		_ = conn.Close()
 	}()
 
 	plugin := &mysqlplugin.MYSQLPlugin{}
 	service, err := plugin.Run(conn, 5*time.Second, plugins.Target{Host: host})

@@ -75,14 +81,15 @@ func isMySQL(host string, port int) (bool, error) {
 // const client = new mysql.MySQLClient;
 // const connected = client.Connect('acme.com', 3306, 'username', 'password');
 // ```
-func (c *MySQLClient) Connect(host string, port int, username, password string) (bool, error) {
-	if !protocolstate.IsHostAllowed(host) {
+func (c *MySQLClient) Connect(ctx context.Context, host string, port int, username, password string) (bool, error) {
+	executionId := ctx.Value("executionId").(string)
+	if !protocolstate.IsHostAllowed(executionId, host) {
 		// host is not valid according to network policy
 		return false, protocolstate.ErrHostDenied.Msgf(host)
 	}
 
 	// executing queries implies the remote mysql service
-	ok, err := c.IsMySQL(host, port)
+	ok, err := c.IsMySQL(ctx, host, port)
 	if err != nil {
 		return false, err
 	}

@@ -127,24 +134,30 @@ type (
 // const info = mysql.FingerprintMySQL('acme.com', 3306);
 // log(to_json(info));
 // ```
-func (c *MySQLClient) FingerprintMySQL(host string, port int) (MySQLInfo, error) {
-	return memoizedfingerprintMySQL(host, port)
+func (c *MySQLClient) FingerprintMySQL(ctx context.Context, host string, port int) (MySQLInfo, error) {
+	executionId := ctx.Value("executionId").(string)
+	return memoizedfingerprintMySQL(executionId, host, port)
 }
 
 // @memo
-func fingerprintMySQL(host string, port int) (MySQLInfo, error) {
+func fingerprintMySQL(executionId string, host string, port int) (MySQLInfo, error) {
 	info := MySQLInfo{}
-	if !protocolstate.IsHostAllowed(host) {
+	if !protocolstate.IsHostAllowed(executionId, host) {
 		// host is not valid according to network policy
 		return info, protocolstate.ErrHostDenied.Msgf(host)
 	}
-	conn, err := protocolstate.Dialer.Dial(context.TODO(), "tcp", net.JoinHostPort(host, fmt.Sprintf("%d", port)))
+	dialer := protocolstate.GetDialersWithId(executionId)
+	if dialer == nil {
+		return MySQLInfo{}, fmt.Errorf("dialers not initialized for %s", executionId)
+	}
+
+	conn, err := dialer.Fastdialer.Dial(context.TODO(), "tcp", net.JoinHostPort(host, fmt.Sprintf("%d", port)))
 	if err != nil {
 		return info, err
 	}
 	defer func() {
 		_ = conn.Close()
 	}()
 
 	plugin := &mysqlplugin.MYSQLPlugin{}
 	service, err := plugin.Run(conn, 5*time.Second, plugins.Target{Host: host})

@@ -192,14 +205,15 @@ func (c *MySQLClient) ConnectWithDSN(dsn string) (bool, error) {
 // const result = mysql.ExecuteQueryWithOpts(options, 'SELECT * FROM users');
 // log(to_json(result));
 // ```
-func (c *MySQLClient) ExecuteQueryWithOpts(opts MySQLOptions, query string) (*utils.SQLResult, error) {
-	if !protocolstate.IsHostAllowed(opts.Host) {
+func (c *MySQLClient) ExecuteQueryWithOpts(ctx context.Context, opts MySQLOptions, query string) (*utils.SQLResult, error) {
+	executionId := ctx.Value("executionId").(string)
+	if !protocolstate.IsHostAllowed(executionId, opts.Host) {
 		// host is not valid according to network policy
 		return nil, protocolstate.ErrHostDenied.Msgf(opts.Host)
 	}
 
 	// executing queries implies the remote mysql service
-	ok, err := c.IsMySQL(opts.Host, opts.Port)
+	ok, err := c.IsMySQL(ctx, opts.Host, opts.Port)
 	if err != nil {
 		return nil, err
 	}

@@ -217,8 +231,8 @@ func (c *MySQLClient) ExecuteQueryWithOpts(opts MySQLOptions, query string) (*utils.SQLResult, error) {
 		return nil, err
 	}
 	defer func() {
 		_ = db.Close()
 	}()
 	db.SetMaxOpenConns(1)
 	db.SetMaxIdleConns(0)
 

@@ -246,9 +260,9 @@ func (c *MySQLClient) ExecuteQueryWithOpts(opts MySQLOptions, query string) (*utils.SQLResult, error) {
 // const result = mysql.ExecuteQuery('acme.com', 3306, 'username', 'password', 'SELECT * FROM users');
 // log(to_json(result));
 // ```
-func (c *MySQLClient) ExecuteQuery(host string, port int, username, password, query string) (*utils.SQLResult, error) {
+func (c *MySQLClient) ExecuteQuery(ctx context.Context, host string, port int, username, password, query string) (*utils.SQLResult, error) {
 	// executing queries implies the remote mysql service
-	ok, err := c.IsMySQL(host, port)
+	ok, err := c.IsMySQL(ctx, host, port)
 	if err != nil {
 		return nil, err
 	}

@@ -256,7 +270,7 @@ func (c *MySQLClient) ExecuteQuery(host string, port int, username, password, query string) (*utils.SQLResult, error) {
 		return nil, fmt.Errorf("not a mysql service")
 	}
 
-	return c.ExecuteQueryWithOpts(MySQLOptions{
+	return c.ExecuteQueryWithOpts(ctx, MySQLOptions{
 		Host: host,
 		Port: port,
 		Protocol: "tcp",

@@ -273,8 +287,8 @@ func (c *MySQLClient) ExecuteQuery(host string, port int, username, password, query string) (*utils.SQLResult, error) {
 // const result = mysql.ExecuteQueryOnDB('acme.com', 3306, 'username', 'password', 'dbname', 'SELECT * FROM users');
 // log(to_json(result));
 // ```
-func (c *MySQLClient) ExecuteQueryOnDB(host string, port int, username, password, dbname, query string) (*utils.SQLResult, error) {
-	return c.ExecuteQueryWithOpts(MySQLOptions{
+func (c *MySQLClient) ExecuteQueryOnDB(ctx context.Context, host string, port int, username, password, dbname, query string) (*utils.SQLResult, error) {
+	return c.ExecuteQueryWithOpts(ctx, MySQLOptions{
 		Host: host,
 		Port: port,
 		Protocol: "tcp",
@@ -78,8 +78,8 @@ func connectWithDSN(dsn string) (bool, error) {
 		return false, err
 	}
 	defer func() {
 		_ = db.Close()
 	}()
 	db.SetMaxOpenConns(1)
 	db.SetMaxIdleConns(0)
 
@@ -25,8 +25,13 @@ var (
 // const net = require('nuclei/net');
 // const conn = net.Open('tcp', 'acme.com:80');
 // ```
-func Open(protocol, address string) (*NetConn, error) {
-	conn, err := protocolstate.Dialer.Dial(context.TODO(), protocol, address)
+func Open(ctx context.Context, protocol, address string) (*NetConn, error) {
+	executionId := ctx.Value("executionId").(string)
+	dialer := protocolstate.GetDialersWithId(executionId)
+	if dialer == nil {
+		return nil, fmt.Errorf("dialers not initialized for %s", executionId)
+	}
+	conn, err := dialer.Fastdialer.Dial(ctx, protocol, address)
 	if err != nil {
 		return nil, err
 	}

@@ -40,7 +45,7 @@ func Open(protocol, address string) (*NetConn, error) {
 // const net = require('nuclei/net');
 // const conn = net.OpenTLS('tcp', 'acme.com:443');
 // ```
-func OpenTLS(protocol, address string) (*NetConn, error) {
+func OpenTLS(ctx context.Context, protocol, address string) (*NetConn, error) {
 	config := &tls.Config{InsecureSkipVerify: true, MinVersion: tls.VersionTLS10}
 	host, _, _ := net.SplitHostPort(address)
 	if host != "" {

@@ -48,7 +53,13 @@ func OpenTLS(protocol, address string) (*NetConn, error) {
 		c.ServerName = host
 		config = c
 	}
-	conn, err := protocolstate.Dialer.DialTLSWithConfig(context.TODO(), protocol, address, config)
+	executionId := ctx.Value("executionId").(string)
+	dialer := protocolstate.GetDialersWithId(executionId)
+	if dialer == nil {
+		return nil, fmt.Errorf("dialers not initialized for %s", executionId)
+	}
+
+	conn, err := dialer.Fastdialer.DialTLSWithConfig(ctx, protocol, address, config)
 	if err != nil {
 		return nil, err
 	}
@@ -8,11 +8,11 @@ import (
 	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
 )
 
-func memoizedisOracle(host string, port int) (IsOracleResponse, error) {
+func memoizedisOracle(executionId string, host string, port int) (IsOracleResponse, error) {
 	hash := "isOracle" + ":" + fmt.Sprint(host) + ":" + fmt.Sprint(port)
 
 	v, err, _ := protocolstate.Memoizer.Do(hash, func() (interface{}, error) {
-		return isOracle(host, port)
+		return isOracle(executionId, host, port)
 	})
 	if err != nil {
 		return IsOracleResponse{}, err

@@ -2,6 +2,7 @@ package oracle
 
 import (
 	"context"
+	"fmt"
 	"net"
 	"strconv"
 	"time"

@@ -32,16 +33,22 @@ type (
 // const isOracle = oracle.IsOracle('acme.com', 1521);
 // log(toJSON(isOracle));
 // ```
-func IsOracle(host string, port int) (IsOracleResponse, error) {
-	return memoizedisOracle(host, port)
+func IsOracle(ctx context.Context, host string, port int) (IsOracleResponse, error) {
+	executionId := ctx.Value("executionId").(string)
+	return memoizedisOracle(executionId, host, port)
 }
 
 // @memo
-func isOracle(host string, port int) (IsOracleResponse, error) {
+func isOracle(executionId string, host string, port int) (IsOracleResponse, error) {
 	resp := IsOracleResponse{}
 
+	dialer := protocolstate.GetDialersWithId(executionId)
+	if dialer == nil {
+		return IsOracleResponse{}, fmt.Errorf("dialers not initialized for %s", executionId)
+	}
+
 	timeout := 5 * time.Second
-	conn, err := protocolstate.Dialer.Dial(context.TODO(), "tcp", net.JoinHostPort(host, strconv.Itoa(port)))
+	conn, err := dialer.Fastdialer.Dial(context.TODO(), "tcp", net.JoinHostPort(host, strconv.Itoa(port)))
 	if err != nil {
 		return resp, err
 	}
@@ -8,11 +8,11 @@ import (
 	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
 )
 
-func memoizedisPoP3(host string, port int) (IsPOP3Response, error) {
+func memoizedisPoP3(executionId string, host string, port int) (IsPOP3Response, error) {
 	hash := "isPoP3" + ":" + fmt.Sprint(host) + ":" + fmt.Sprint(port)
 
 	v, err, _ := protocolstate.Memoizer.Do(hash, func() (interface{}, error) {
-		return isPoP3(host, port)
+		return isPoP3(executionId, host, port)
 	})
 	if err != nil {
 		return IsPOP3Response{}, err

@@ -2,6 +2,7 @@ package pop3
 
 import (
 	"context"
+	"fmt"
 	"net"
 	"strconv"
 	"time"

@@ -33,16 +34,22 @@ type (
 // const isPOP3 = pop3.IsPOP3('acme.com', 110);
 // log(toJSON(isPOP3));
 // ```
-func IsPOP3(host string, port int) (IsPOP3Response, error) {
-	return memoizedisPoP3(host, port)
+func IsPOP3(ctx context.Context, host string, port int) (IsPOP3Response, error) {
+	executionId := ctx.Value("executionId").(string)
+	return memoizedisPoP3(executionId, host, port)
 }
 
 // @memo
-func isPoP3(host string, port int) (IsPOP3Response, error) {
+func isPoP3(executionId string, host string, port int) (IsPOP3Response, error) {
 	resp := IsPOP3Response{}
 
+	dialer := protocolstate.GetDialersWithId(executionId)
+	if dialer == nil {
+		return IsPOP3Response{}, fmt.Errorf("dialers not initialized for %s", executionId)
+	}
+
 	timeout := 5 * time.Second
-	conn, err := protocolstate.Dialer.Dial(context.TODO(), "tcp", net.JoinHostPort(host, strconv.Itoa(port)))
+	conn, err := dialer.Fastdialer.Dial(context.TODO(), "tcp", net.JoinHostPort(host, strconv.Itoa(port)))
 	if err != nil {
 		return resp, err
 	}
@@ -12,11 +12,11 @@ import (
 	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
 )
 
-func memoizedisPostgres(host string, port int) (bool, error) {
+func memoizedisPostgres(executionId string, host string, port int) (bool, error) {
 	hash := "isPostgres" + ":" + fmt.Sprint(host) + ":" + fmt.Sprint(port)
 
 	v, err, _ := protocolstate.Memoizer.Do(hash, func() (interface{}, error) {
-		return isPostgres(host, port)
+		return isPostgres(executionId, host, port)
 	})
 	if err != nil {
 		return false, err

@@ -28,11 +28,11 @@ func memoizedisPostgres(host string, port int) (bool, error) {
 	return false, errors.New("could not convert cached result")
 }
 
-func memoizedexecuteQuery(host string, port int, username string, password string, dbName string, query string) (*utils.SQLResult, error) {
+func memoizedexecuteQuery(executionId string, host string, port int, username string, password string, dbName string, query string) (*utils.SQLResult, error) {
 	hash := "executeQuery" + ":" + fmt.Sprint(host) + ":" + fmt.Sprint(port) + ":" + fmt.Sprint(username) + ":" + fmt.Sprint(password) + ":" + fmt.Sprint(dbName) + ":" + fmt.Sprint(query)
 
 	v, err, _ := protocolstate.Memoizer.Do(hash, func() (interface{}, error) {
-		return executeQuery(host, port, username, password, dbName, query)
+		return executeQuery(executionId, host, port, username, password, dbName, query)
 	})
 	if err != nil {
 		return nil, err

@@ -44,11 +44,11 @@ func memoizedexecuteQuery(host string, port int, username string, password string, dbName string, query string) (*utils.SQLResult, error) {
 	return nil, errors.New("could not convert cached result")
 }
 
-func memoizedconnect(host string, port int, username string, password string, dbName string) (bool, error) {
+func memoizedconnect(executionId string, host string, port int, username string, password string, dbName string) (bool, error) {
 	hash := "connect" + ":" + fmt.Sprint(host) + ":" + fmt.Sprint(port) + ":" + fmt.Sprint(username) + ":" + fmt.Sprint(password) + ":" + fmt.Sprint(dbName)
 
 	v, err, _ := protocolstate.Memoizer.Do(hash, func() (interface{}, error) {
-		return connect(host, port, username, password, dbName)
+		return connect(executionId, host, port, username, password, dbName)
 	})
 	if err != nil {
 		return false, err
@@ -36,22 +36,28 @@ type (
 // const postgres = require('nuclei/postgres');
 // const isPostgres = postgres.IsPostgres('acme.com', 5432);
 // ```
-func (c *PGClient) IsPostgres(host string, port int) (bool, error) {
+func (c *PGClient) IsPostgres(ctx context.Context, host string, port int) (bool, error) {
+	executionId := ctx.Value("executionId").(string)
 	// todo: why this is exposed? Service fingerprint should be automatic
-	return memoizedisPostgres(host, port)
+	return memoizedisPostgres(executionId, host, port)
 }
 
 // @memo
-func isPostgres(host string, port int) (bool, error) {
+func isPostgres(executionId string, host string, port int) (bool, error) {
 	timeout := 10 * time.Second
 
-	conn, err := protocolstate.Dialer.Dial(context.TODO(), "tcp", fmt.Sprintf("%s:%d", host, port))
+	dialer := protocolstate.GetDialersWithId(executionId)
+	if dialer == nil {
+		return false, fmt.Errorf("dialers not initialized for %s", executionId)
+	}
+
+	conn, err := dialer.Fastdialer.Dial(context.TODO(), "tcp", fmt.Sprintf("%s:%d", host, port))
 	if err != nil {
 		return false, err
 	}
 	defer func() {
 		_ = conn.Close()
 	}()
 
 	_ = conn.SetDeadline(time.Now().Add(timeout))
 

@@ -76,15 +82,16 @@ func isPostgres(host string, port int) (bool, error) {
 // const client = new postgres.PGClient;
 // const connected = client.Connect('acme.com', 5432, 'username', 'password');
 // ```
-func (c *PGClient) Connect(host string, port int, username, password string) (bool, error) {
-	ok, err := c.IsPostgres(host, port)
+func (c *PGClient) Connect(ctx context.Context, host string, port int, username, password string) (bool, error) {
+	ok, err := c.IsPostgres(ctx, host, port)
 	if err != nil {
 		return false, err
 	}
 	if !ok {
 		return false, fmt.Errorf("not a postgres service")
 	}
-	return memoizedconnect(host, port, username, password, "postgres")
+	executionId := ctx.Value("executionId").(string)
+	return memoizedconnect(executionId, host, port, username, password, "postgres")
 }
 
 // ExecuteQuery connects to Postgres database using given credentials and database name.

@@ -97,8 +104,8 @@ func (c *PGClient) Connect(host string, port int, username, password string) (bool, error) {
 // const result = client.ExecuteQuery('acme.com', 5432, 'username', 'password', 'dbname', 'select * from users');
 // log(to_json(result));
 // ```
-func (c *PGClient) ExecuteQuery(host string, port int, username, password, dbName, query string) (*utils.SQLResult, error) {
-	ok, err := c.IsPostgres(host, port)
+func (c *PGClient) ExecuteQuery(ctx context.Context, host string, port int, username, password, dbName, query string) (*utils.SQLResult, error) {
+	ok, err := c.IsPostgres(ctx, host, port)
 	if err != nil {
 		return nil, err
 	}

@@ -106,26 +113,28 @@ func (c *PGClient) ExecuteQuery(host string, port int, username, password, dbName, query string) (*utils.SQLResult, error) {
 		return nil, fmt.Errorf("not a postgres service")
 	}
 
-	return memoizedexecuteQuery(host, port, username, password, dbName, query)
+	executionId := ctx.Value("executionId").(string)
+
+	return memoizedexecuteQuery(executionId, host, port, username, password, dbName, query)
 }
 
 // @memo
-func executeQuery(host string, port int, username string, password string, dbName string, query string) (*utils.SQLResult, error) {
-	if !protocolstate.IsHostAllowed(host) {
+func executeQuery(executionId string, host string, port int, username string, password string, dbName string, query string) (*utils.SQLResult, error) {
+	if !protocolstate.IsHostAllowed(executionId, host) {
 		// host is not valid according to network policy
 		return nil, protocolstate.ErrHostDenied.Msgf(host)
 	}
 
 	target := net.JoinHostPort(host, fmt.Sprintf("%d", port))
 
-	connStr := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable", username, password, target, dbName)
+	connStr := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable&executionId=%s", username, password, target, dbName, executionId)
 	db, err := sql.Open(pgwrap.PGWrapDriver, connStr)
 	if err != nil {
 		return nil, err
 	}
 	defer func() {
 		_ = db.Close()
 	}()
 
 	rows, err := db.Query(query)
 	if err != nil {

@@ -148,8 +157,8 @@ func executeQuery(host string, port int, username string, password string, dbName string, query string) (*utils.SQLResult, error) {
 // const client = new postgres.PGClient;
 // const connected = client.ConnectWithDB('acme.com', 5432, 'username', 'password', 'dbname');
 // ```
-func (c *PGClient) ConnectWithDB(host string, port int, username, password, dbName string) (bool, error) {
-	ok, err := c.IsPostgres(host, port)
+func (c *PGClient) ConnectWithDB(ctx context.Context, host string, port int, username, password, dbName string) (bool, error) {
+	ok, err := c.IsPostgres(ctx, host, port)
 	if err != nil {
 		return false, err
 	}

@@ -157,16 +166,18 @@ func (c *PGClient) ConnectWithDB(host string, port int, username, password, dbName string) (bool, error) {
 		return false, fmt.Errorf("not a postgres service")
 	}
 
-	return memoizedconnect(host, port, username, password, dbName)
+	executionId := ctx.Value("executionId").(string)
+
+	return memoizedconnect(executionId, host, port, username, password, dbName)
 }
 
 // @memo
-func connect(host string, port int, username string, password string, dbName string) (bool, error) {
+func connect(executionId string, host string, port int, username string, password string, dbName string) (bool, error) {
 	if host == "" || port <= 0 {
 		return false, fmt.Errorf("invalid host or port")
 	}
 
-	if !protocolstate.IsHostAllowed(host) {
+	if !protocolstate.IsHostAllowed(executionId, host) {
 		// host is not valid according to network policy
 		return false, protocolstate.ErrHostDenied.Msgf(host)
 	}

@@ -176,19 +187,24 @@ func connect(host string, port int, username string, password string, dbName string) (bool, error) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
+	dialer := protocolstate.GetDialersWithId(executionId)
+	if dialer == nil {
+		return false, fmt.Errorf("dialers not initialized for %s", executionId)
+	}
+
 	db := pg.Connect(&pg.Options{
 		Addr: target,
 		User: username,
 		Password: password,
 		Database: dbName,
 		Dialer: func(network, addr string) (net.Conn, error) {
-			return protocolstate.Dialer.Dial(context.Background(), network, addr)
+			return dialer.Fastdialer.Dial(context.Background(), network, addr)
 		},
 		IdleCheckFrequency: -1,
 	}).WithContext(ctx).WithTimeout(10 * time.Second)
 	defer func() {
 		_ = db.Close()
 	}()
 
 	_, err := db.Exec("select 1")
 	if err != nil {
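Unlike the other clients, the Postgres query path opens connections through database/sql, so the execution ID cannot simply be passed down as an argument; the diff instead appends it to the DSN consumed by pgwrap.PGWrapDriver, presumably so the wrapping driver can resolve the matching per-execution dialer (the driver side is not shown in this diff). A small sketch of that DSN construction, with placeholder values:

// Sketch only: rebuilds the DSN format introduced above. All values are
// placeholders; only the query-parameter approach itself comes from the diff.
package example

import (
	"fmt"
	"net"
)

func buildPostgresDSN(executionId, host string, port int, username, password, dbName string) string {
	target := net.JoinHostPort(host, fmt.Sprintf("%d", port))
	// The execution ID rides along as a query parameter because the registered
	// driver only ever sees the connection string.
	return fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable&executionId=%s",
		username, password, target, dbName, executionId)
}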
@@ -8,11 +8,11 @@ import (
 	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
 )
 
-func memoizedisRDP(host string, port int) (IsRDPResponse, error) {
+func memoizedisRDP(executionId string, host string, port int) (IsRDPResponse, error) {
 	hash := "isRDP" + ":" + fmt.Sprint(host) + ":" + fmt.Sprint(port)
 
 	v, err, _ := protocolstate.Memoizer.Do(hash, func() (interface{}, error) {
-		return isRDP(host, port)
+		return isRDP(executionId, host, port)
 	})
 	if err != nil {
 		return IsRDPResponse{}, err

@@ -24,11 +24,11 @@ func memoizedisRDP(host string, port int) (IsRDPResponse, error) {
 	return IsRDPResponse{}, errors.New("could not convert cached result")
 }
 
-func memoizedcheckRDPAuth(host string, port int) (CheckRDPAuthResponse, error) {
+func memoizedcheckRDPAuth(executionId string, host string, port int) (CheckRDPAuthResponse, error) {
 	hash := "checkRDPAuth" + ":" + fmt.Sprint(host) + ":" + fmt.Sprint(port)
 
 	v, err, _ := protocolstate.Memoizer.Do(hash, func() (interface{}, error) {
-		return checkRDPAuth(host, port)
+		return checkRDPAuth(executionId, host, port)
 	})
 	if err != nil {
 		return CheckRDPAuthResponse{}, err
Some files were not shown because too many files have changed in this diff.