nuclei/v2/pkg/executer/executer_http.go

package executer

import (
	"bytes"
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/cookiejar"
	"net/http/httputil"
	"net/url"
	"os"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/projectdiscovery/fastdialer/fastdialer"
	"github.com/projectdiscovery/gologger"
	"github.com/projectdiscovery/nuclei/v2/internal/bufwriter"
	"github.com/projectdiscovery/nuclei/v2/internal/progress"
	"github.com/projectdiscovery/nuclei/v2/internal/tracelog"
	"github.com/projectdiscovery/nuclei/v2/pkg/colorizer"
	"github.com/projectdiscovery/nuclei/v2/pkg/generators"
	"github.com/projectdiscovery/nuclei/v2/pkg/matchers"
	projetctfile "github.com/projectdiscovery/nuclei/v2/pkg/projectfile"
	"github.com/projectdiscovery/nuclei/v2/pkg/requests"
	"github.com/projectdiscovery/nuclei/v2/pkg/templates"
	"github.com/projectdiscovery/rawhttp"
	"github.com/projectdiscovery/retryablehttp-go"
	"github.com/remeh/sizedwaitgroup"
	"go.uber.org/ratelimit"
	"golang.org/x/net/proxy"
)
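
// Named constants reused across the executer; two and ten exist mainly to
// avoid bare magic numbers in the code below.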
const (
	two                   = 2
	ten                   = 10
	defaultMaxWorkers     = 150
	defaultMaxHistorydata = 150
)

// HTTPExecuter is a client for performing HTTP requests
// for a template.
type HTTPExecuter struct {
	pf               *projetctfile.ProjectFile
	customHeaders    requests.CustomHeaders
	colorizer        colorizer.NucleiColorizer
	httpClient       *retryablehttp.Client
	rawHTTPClient    *rawhttp.Client
	template         *templates.Template
	bulkHTTPRequest  *requests.BulkHTTPRequest
	writer           *bufwriter.Writer
	CookieJar        *cookiejar.Jar
	traceLog         tracelog.Log
	decolorizer      *regexp.Regexp
	coloredOutput    bool
	debug            bool
	Results          bool
	jsonOutput       bool
	jsonRequest      bool
	noMeta           bool
	stopAtFirstMatch bool
	ratelimiter      ratelimit.Limiter
}

// HTTPOptions contains configuration options for the HTTP executer.
type HTTPOptions struct {
	CustomHeaders    requests.CustomHeaders
	ProxyURL         string
	ProxySocksURL    string
	Template         *templates.Template
	BulkHTTPRequest  *requests.BulkHTTPRequest
	Writer           *bufwriter.Writer
	Timeout          int
	Retries          int
	CookieJar        *cookiejar.Jar
	Colorizer        *colorizer.NucleiColorizer
	Decolorizer      *regexp.Regexp
	TraceLog         tracelog.Log
	Debug            bool
	JSON             bool
	JSONRequests     bool
	NoMeta           bool
	CookieReuse      bool
	ColoredOutput    bool
	StopAtFirstMatch bool
	PF               *projetctfile.ProjectFile
	RateLimiter      ratelimit.Limiter
	Dialer           *fastdialer.Dialer
}

// NewHTTPExecuter creates a new HTTP executer from a template
// and an HTTP request query.
func NewHTTPExecuter(options *HTTPOptions) (*HTTPExecuter, error) {
	var (
		proxyURL *url.URL
		err      error
	)

	if options.ProxyURL != "" {
		proxyURL, err = url.Parse(options.ProxyURL)
	}

	if err != nil {
		return nil, err
	}

	// Create the HTTP Client
	client := makeHTTPClient(proxyURL, options)
	// nolint:bodyclose // false positive, there is no body to close yet
	client.CheckRetry = retryablehttp.HostSprayRetryPolicy()

	if options.CookieJar != nil {
		client.HTTPClient.Jar = options.CookieJar
	} else if options.CookieReuse {
		jar, err := cookiejar.New(nil)
		if err != nil {
			return nil, err
		}
		client.HTTPClient.Jar = jar
	}

	// initialize the raw http client
	rawClient := rawhttp.NewClient(rawhttp.DefaultOptions)

	executer := &HTTPExecuter{
		debug:            options.Debug,
		jsonOutput:       options.JSON,
		jsonRequest:      options.JSONRequests,
		noMeta:           options.NoMeta,
		httpClient:       client,
		rawHTTPClient:    rawClient,
		traceLog:         options.TraceLog,
		template:         options.Template,
		bulkHTTPRequest:  options.BulkHTTPRequest,
		writer:           options.Writer,
		customHeaders:    options.CustomHeaders,
		CookieJar:        options.CookieJar,
		coloredOutput:    options.ColoredOutput,
		colorizer:        *options.Colorizer,
		decolorizer:      options.Decolorizer,
		stopAtFirstMatch: options.StopAtFirstMatch,
		pf:               options.PF,
		ratelimiter:      options.RateLimiter,
	}

	return executer, nil
}
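
// A minimal construction sketch (illustrative only): template, bulkRequest,
// writer, rateLimiter, dialer, clr and traceLog are assumed to come from the
// caller's own setup (e.g. the dialer from fastdialer). Note that Colorizer,
// TraceLog, Dialer and BulkHTTPRequest must be non-nil, since the executer
// and makeHTTPClient dereference them.
//
//	executer, err := NewHTTPExecuter(&HTTPOptions{
//		Template:        template,
//		BulkHTTPRequest: bulkRequest,
//		Writer:          writer,
//		Timeout:         5,
//		Retries:         1,
//		RateLimiter:     rateLimiter,
//		Dialer:          dialer,
//		Colorizer:       clr,
//		TraceLog:        traceLog,
//	})
//	if err != nil {
//		return nil, err
//	}
//	result := executer.ExecuteHTTP(p, "https://example.com")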
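
// ExecuteRaceRequest sends a burst of identical requests concurrently,
// attempting to land them as close together as possible in order to
// trigger race conditions on the target.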
func (e *HTTPExecuter) ExecuteRaceRequest(reqURL string) *Result {
	result := &Result{
		Matches:     make(map[string]interface{}),
		Extractions: make(map[string]interface{}),
	}

	dynamicvalues := make(map[string]interface{})

	// verify if the URL is already being processed
	if e.bulkHTTPRequest.HasGenerator(reqURL) {
		return result
	}

	e.bulkHTTPRequest.CreateGenerator(reqURL)

	// Workers that keep enqueuing new requests
	maxWorkers := e.bulkHTTPRequest.RaceNumberRequests
	swg := sizedwaitgroup.New(maxWorkers)
	for i := 0; i < e.bulkHTTPRequest.RaceNumberRequests; i++ {
		swg.Add()

		// base request
		request, err := e.bulkHTTPRequest.MakeHTTPRequest(reqURL, dynamicvalues, e.bulkHTTPRequest.Current(reqURL))
		if err != nil {
			result.Error = err
			return result
		}

		go func(httpRequest *requests.HTTPRequest) {
			defer swg.Done()

			// If the request was built correctly then execute it
			// (err is shadowed locally so concurrent workers don't race on it)
			if err := e.handleHTTP(reqURL, httpRequest, dynamicvalues, result, ""); err != nil {
				result.Error = errors.Wrap(err, "could not handle http request")
			}
		}(request)
	}
	swg.Wait()

	return result
}
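
// ExecuteParallelHTTP executes the template's requests against the URL with
// a pool of worker goroutines sized by the template's thread count, honoring
// the global rate limiter for each request.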
func (e *HTTPExecuter) ExecuteParallelHTTP(p *progress.Progress, reqURL string) *Result {
	result := &Result{
		Matches:     make(map[string]interface{}),
		Extractions: make(map[string]interface{}),
	}

	dynamicvalues := make(map[string]interface{})

	// verify if the URL is already being processed
	if e.bulkHTTPRequest.HasGenerator(reqURL) {
		return result
	}

	remaining := e.bulkHTTPRequest.GetRequestCount()
	e.bulkHTTPRequest.CreateGenerator(reqURL)

	// Workers that keep enqueuing new requests
	maxWorkers := e.bulkHTTPRequest.Threads
	swg := sizedwaitgroup.New(maxWorkers)
	for e.bulkHTTPRequest.Next(reqURL) && !result.Done {
		request, err := e.bulkHTTPRequest.MakeHTTPRequest(reqURL, dynamicvalues, e.bulkHTTPRequest.Current(reqURL))
		if err != nil {
			result.Error = err
			p.Drop(remaining)
		} else {
			swg.Add()
			go func(httpRequest *requests.HTTPRequest) {
				defer swg.Done()

				e.ratelimiter.Take()

				// If the request was built correctly then execute it
				// (err is shadowed locally so concurrent workers don't race on it)
				if err := e.handleHTTP(reqURL, httpRequest, dynamicvalues, result, ""); err != nil {
					e.traceLog.Request(e.template.ID, reqURL, "http", err)
					result.Error = errors.Wrap(err, "could not handle http request")
					p.Drop(remaining)
				} else {
					e.traceLog.Request(e.template.ID, reqURL, "http", nil)
				}
			}(request)
		}
		p.Update()
		e.bulkHTTPRequest.Increment(reqURL)
	}
	swg.Wait()

	return result
}
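
// ExecuteTurboHTTP executes the template's requests over a raw HTTP
// pipeline, reusing a small set of connections and keeping their request
// queues full; pipelined requests intentionally bypass the rate limiter.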
func (e *HTTPExecuter) ExecuteTurboHTTP(reqURL string) *Result {
	result := &Result{
		Matches:     make(map[string]interface{}),
		Extractions: make(map[string]interface{}),
	}

	dynamicvalues := make(map[string]interface{})

	// verify if the URL is already being processed
	if e.bulkHTTPRequest.HasGenerator(reqURL) {
		return result
	}

	e.bulkHTTPRequest.CreateGenerator(reqURL)

	// need to extract the target from the url
	URL, err := url.Parse(reqURL)
	if err != nil {
		return result
	}

	pipeOptions := rawhttp.DefaultPipelineOptions
	pipeOptions.Host = URL.Host
	pipeOptions.MaxConnections = 1
	if e.bulkHTTPRequest.PipelineConcurrentConnections > 0 {
		pipeOptions.MaxConnections = e.bulkHTTPRequest.PipelineConcurrentConnections
	}
	if e.bulkHTTPRequest.PipelineRequestsPerConnection > 0 {
		pipeOptions.MaxPendingRequests = e.bulkHTTPRequest.PipelineRequestsPerConnection
	}
	pipeclient := rawhttp.NewPipelineClient(pipeOptions)

	// defaultMaxWorkers should be a sufficient value to keep queues always full
	maxWorkers := defaultMaxWorkers
	// in case the queue is bigger, increase the workers
	if pipeOptions.MaxPendingRequests > maxWorkers {
		maxWorkers = pipeOptions.MaxPendingRequests
	}
	swg := sizedwaitgroup.New(maxWorkers)
	for e.bulkHTTPRequest.Next(reqURL) && !result.Done {
		request, err := e.bulkHTTPRequest.MakeHTTPRequest(reqURL, dynamicvalues, e.bulkHTTPRequest.Current(reqURL))
		if err != nil {
			result.Error = err
		} else {
			swg.Add()
			go func(httpRequest *requests.HTTPRequest) {
				defer swg.Done()

				// HTTP pipelining ignores the rate limit
				// If the request was built correctly then execute it
				httpRequest.Pipeline = true
				httpRequest.PipelineClient = pipeclient
				// err is shadowed locally so concurrent workers don't race on it
				if err := e.handleHTTP(reqURL, httpRequest, dynamicvalues, result, ""); err != nil {
					e.traceLog.Request(e.template.ID, reqURL, "http", err)
					result.Error = errors.Wrap(err, "could not handle http request")
				} else {
					e.traceLog.Request(e.template.ID, reqURL, "http", nil)
				}
				httpRequest.PipelineClient = nil
			}(request)
		}

		e.bulkHTTPRequest.Increment(reqURL)
	}
	swg.Wait()

	return result
}

// ExecuteHTTP executes the HTTP request on a URL
func (e *HTTPExecuter) ExecuteHTTP(p *progress.Progress, reqURL string) *Result {
	// verify if pipelining was requested
	if e.bulkHTTPRequest.Pipeline {
		return e.ExecuteTurboHTTP(reqURL)
	}

	// verify if a basic race condition was requested
	if e.bulkHTTPRequest.Race && e.bulkHTTPRequest.RaceNumberRequests > 0 {
		return e.ExecuteRaceRequest(reqURL)
	}

	// verify if parallel elaboration was requested
	if e.bulkHTTPRequest.Threads > 0 {
		return e.ExecuteParallelHTTP(p, reqURL)
	}

	var requestNumber int

	result := &Result{
		Matches:     make(map[string]interface{}),
		Extractions: make(map[string]interface{}),
		historyData: make(map[string]interface{}),
	}

	dynamicvalues := make(map[string]interface{})

	// verify if the URL is already being processed
	if e.bulkHTTPRequest.HasGenerator(reqURL) {
		return result
	}

	remaining := e.bulkHTTPRequest.GetRequestCount()
	e.bulkHTTPRequest.CreateGenerator(reqURL)

	for e.bulkHTTPRequest.Next(reqURL) && !result.Done {
		requestNumber++

		httpRequest, err := e.bulkHTTPRequest.MakeHTTPRequest(reqURL, dynamicvalues, e.bulkHTTPRequest.Current(reqURL))
		if err != nil {
			result.Error = err
			p.Drop(remaining)
		} else {
			e.ratelimiter.Take()

			// If the request was built correctly then execute it
			format := "%s_" + strconv.Itoa(requestNumber)
			err = e.handleHTTP(reqURL, httpRequest, dynamicvalues, result, format)
			if err != nil {
				result.Error = errors.Wrap(err, "could not handle http request")
				p.Drop(remaining)
				e.traceLog.Request(e.template.ID, reqURL, "http", err)
			} else {
				e.traceLog.Request(e.template.ID, reqURL, "http", nil)
			}
		}
		p.Update()

		// Check if we have to stop processing at the first valid result
		if e.stopAtFirstMatch && result.GotResults {
			p.Drop(remaining)
			break
		}

		// always move forward with requests
		e.bulkHTTPRequest.Increment(reqURL)
		remaining--
	}
	gologger.Verbosef("Sent for [%s] to %s\n", "http-request", e.template.ID, reqURL)

	return result
}
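
// handleHTTP performs a single built request (via the pipeline client, the
// raw client, or retryablehttp), then runs matchers and extractors against
// the response and records any results. format is the key format used to
// store per-request history data for DSL matchers.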
func (e *HTTPExecuter) handleHTTP(reqURL string, request *requests.HTTPRequest, dynamicvalues map[string]interface{}, result *Result, format string) error {
	e.setCustomHeaders(request)

	var (
		resp          *http.Response
		err           error
		dumpedRequest []byte
		fromcache     bool
	)

	if e.debug || e.pf != nil {
		dumpedRequest, err = requests.Dump(request, reqURL)
		if err != nil {
			return err
		}
	}

	if e.debug {
		gologger.Infof("Dumped HTTP request for %s (%s)\n\n", reqURL, e.template.ID)
		fmt.Fprintf(os.Stderr, "%s", string(dumpedRequest))
	}

	timeStart := time.Now()

	if request.Pipeline {
		resp, err = request.PipelineClient.DoRaw(request.RawRequest.Method, reqURL, request.RawRequest.Path, requests.ExpandMapValues(request.RawRequest.Headers), ioutil.NopCloser(strings.NewReader(request.RawRequest.Data)))
		if err != nil {
			if resp != nil {
				resp.Body.Close()
			}
			e.traceLog.Request(e.template.ID, reqURL, "http", err)
			return err
		}
		e.traceLog.Request(e.template.ID, reqURL, "http", nil)
	} else if request.Unsafe {
		// rawhttp
		// burp uses "\r\n" as the new-line character
		request.RawRequest.Data = strings.ReplaceAll(request.RawRequest.Data, "\n", "\r\n")
		options := e.rawHTTPClient.Options
		options.AutomaticContentLength = request.AutomaticContentLengthHeader
		options.AutomaticHostHeader = request.AutomaticHostHeader
		options.FollowRedirects = request.FollowRedirects
		resp, err = e.rawHTTPClient.DoRawWithOptions(request.RawRequest.Method, reqURL, request.RawRequest.Path, requests.ExpandMapValues(request.RawRequest.Headers), ioutil.NopCloser(strings.NewReader(request.RawRequest.Data)), options)
		if err != nil {
			if resp != nil {
				resp.Body.Close()
			}
			e.traceLog.Request(e.template.ID, reqURL, "http", err)
			return err
		}
		e.traceLog.Request(e.template.ID, reqURL, "http", nil)
	} else {
		// if nuclei-project is available check if the request was already sent previously
		if e.pf != nil {
			// if unavailable fail silently
			fromcache = true
			// nolint:bodyclose // false positive, the response is generated at runtime
			resp, err = e.pf.Get(dumpedRequest)
			if err != nil {
				fromcache = false
			}
		}

		// retryablehttp
		if resp == nil {
			resp, err = e.httpClient.Do(request.Request)
			if err != nil {
				if resp != nil {
					resp.Body.Close()
				}
				e.traceLog.Request(e.template.ID, reqURL, "http", err)
				return err
			}
			e.traceLog.Request(e.template.ID, reqURL, "http", nil)
		}
	}

	duration := time.Since(timeStart)

	// Dump response - Step 1 - Decompression not yet handled
	var dumpedResponse []byte
	if e.debug {
		var dumpErr error
		dumpedResponse, dumpErr = httputil.DumpResponse(resp, true)
		if dumpErr != nil {
			return errors.Wrap(dumpErr, "could not dump http response")
		}
	}

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		_, copyErr := io.Copy(ioutil.Discard, resp.Body)
		if copyErr != nil {
			resp.Body.Close()
			return copyErr
		}

		resp.Body.Close()

		return errors.Wrap(err, "could not read http body")
	}

	resp.Body.Close()

	// net/http doesn't automatically decompress the response body if an
	// encoding has been specified by the user in the request,
	// so in that case we have to do it manually
	dataOrig := data
	data, err = requests.HandleDecompression(request, data)
	if err != nil {
		return errors.Wrap(err, "could not decompress http body")
	}

	// Dump response - Step 2 - replace the compressed body with the
	// decompressed one (or with itself, a no-op)
	if e.debug {
		dumpedResponse = bytes.ReplaceAll(dumpedResponse, dataOrig, data)
		gologger.Infof("Dumped HTTP response for %s (%s)\n\n", reqURL, e.template.ID)
		fmt.Fprintf(os.Stderr, "%s\n", string(dumpedResponse))
	}

	// if nuclei-project is enabled store the response if not previously done
	if e.pf != nil && !fromcache {
		err := e.pf.Set(dumpedRequest, resp, data)
		if err != nil {
			return errors.Wrap(err, "could not store in project file")
		}
	}

	// Convert the response body from []byte to string with zero copy
	body := unsafeToString(data)

	headers := headersToString(resp.Header)

	// store the DSL matcher data for internal purposes,
	// hard-capped at defaultMaxHistorydata items
	if len(result.historyData) < defaultMaxHistorydata {
		result.Lock()
		result.historyData = generators.MergeMaps(result.historyData, matchers.HTTPToMap(resp, body, headers, duration, format))
		result.Unlock()
	}

	matcherCondition := e.bulkHTTPRequest.GetMatchersCondition()
	for _, matcher := range e.bulkHTTPRequest.Matchers {
		// Check if the matcher matched
		if !matcher.Match(resp, body, headers, duration, result.historyData) {
			// If the condition is AND and we haven't matched, try the next request.
			if matcherCondition == matchers.ANDCondition {
				return nil
			}
		} else {
			// If the matcher has matched, and it's an OR,
			// write the first output then move to the next matcher.
			if matcherCondition == matchers.ORCondition {
				result.Lock()
				result.Matches[matcher.Name] = nil
				// probably redundant but ensures we snapshot current payload values when matchers are valid
				result.Meta = request.Meta
				result.GotResults = true
				result.Unlock()
				e.writeOutputHTTP(request, resp, body, matcher, nil, result.Meta)
			}
		}
	}

	// All matchers have successfully completed, so now start with the
	// next task, which is extraction of input from matchers.
	var extractorResults, outputExtractorResults []string

	for _, extractor := range e.bulkHTTPRequest.Extractors {
		for match := range extractor.Extract(resp, body, headers) {
			if _, ok := dynamicvalues[extractor.Name]; !ok {
				dynamicvalues[extractor.Name] = match
			}

			extractorResults = append(extractorResults, match)

			if !extractor.Internal {
				outputExtractorResults = append(outputExtractorResults, match)
			}
		}
		// probably redundant but ensures we snapshot current payload values when extractors are valid
		result.Lock()
		result.Meta = request.Meta
		result.Extractions[extractor.Name] = extractorResults
		result.Unlock()
	}

	// Write a final string of output if matcher type is
	// AND or if we have extractors for the mechanism too.
	if len(outputExtractorResults) > 0 || matcherCondition == matchers.ANDCondition {
		e.writeOutputHTTP(request, resp, body, nil, outputExtractorResults, result.Meta)

		result.Lock()
		result.GotResults = true
		result.Unlock()
	}

	return nil
}

// Close closes the http executer for a template.
func (e *HTTPExecuter) Close() {}

// makeHTTPClient creates an HTTP client
func makeHTTPClient(proxyURL *url.URL, options *HTTPOptions) *retryablehttp.Client {
	// Multiple hosts
	retryablehttpOptions := retryablehttp.DefaultOptionsSpraying
	disableKeepAlives := true
	maxIdleConns := 0
	maxConnsPerHost := 0
	maxIdleConnsPerHost := -1

	if options.BulkHTTPRequest.Threads > 0 {
		// Single host
		retryablehttpOptions = retryablehttp.DefaultOptionsSingle
		disableKeepAlives = false
		maxIdleConnsPerHost = 500
		maxConnsPerHost = 500
	}

	retryablehttpOptions.RetryWaitMax = 10 * time.Second
	retryablehttpOptions.RetryMax = options.Retries
	followRedirects := options.BulkHTTPRequest.Redirects
	maxRedirects := options.BulkHTTPRequest.MaxRedirects

	transport := &http.Transport{
		DialContext:         options.Dialer.Dial,
		MaxIdleConns:        maxIdleConns,
		MaxIdleConnsPerHost: maxIdleConnsPerHost,
		MaxConnsPerHost:     maxConnsPerHost,
		TLSClientConfig: &tls.Config{
			Renegotiation:      tls.RenegotiateOnceAsClient,
			InsecureSkipVerify: true,
		},
		DisableKeepAlives: disableKeepAlives,
	}

	// Attempt to overwrite the dial function with the socks proxied version
	if options.ProxySocksURL != "" {
		var proxyAuth *proxy.Auth

		socksURL, err := url.Parse(options.ProxySocksURL)
		if err == nil {
			proxyAuth = &proxy.Auth{}
			proxyAuth.User = socksURL.User.Username()
			proxyAuth.Password, _ = socksURL.User.Password()

			dialer, err := proxy.SOCKS5("tcp", fmt.Sprintf("%s:%s", socksURL.Hostname(), socksURL.Port()), proxyAuth, proxy.Direct)
			if err == nil {
				// the type assertion is guarded by the error check above so
				// a failed SOCKS5 setup can't panic on a nil dialer
				dc := dialer.(interface {
					DialContext(ctx context.Context, network, addr string) (net.Conn, error)
				})
				transport.DialContext = dc.DialContext
			}
		}
	}

	if proxyURL != nil {
		transport.Proxy = http.ProxyURL(proxyURL)
	}

	return retryablehttp.NewWithHTTPClient(&http.Client{
		Transport:     transport,
		Timeout:       time.Duration(options.Timeout) * time.Second,
		CheckRedirect: makeCheckRedirectFunc(followRedirects, maxRedirects),
	}, retryablehttpOptions)
}
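
// checkRedirectFunc is the signature expected by http.Client.CheckRedirect.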
type checkRedirectFunc func(_ *http.Request, requests []*http.Request) error
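
// makeCheckRedirectFunc builds the redirect policy: don't follow at all,
// follow up to maxRedirects hops, or fall back to a default cap of ten
// when maxRedirects is zero.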
func makeCheckRedirectFunc(followRedirects bool, maxRedirects int) checkRedirectFunc {
	return func(_ *http.Request, requests []*http.Request) error {
		if !followRedirects {
			return http.ErrUseLastResponse
		}

		if maxRedirects == 0 {
			if len(requests) > ten {
				return http.ErrUseLastResponse
			}

			return nil
		}

		if len(requests) > maxRedirects {
			return http.ErrUseLastResponse
		}

		return nil
	}
}
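
// setCustomHeaders applies the user-supplied "Name: value" headers to the
// outgoing request, handling both rawhttp and retryablehttp requests.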
func (e *HTTPExecuter) setCustomHeaders(r *requests.HTTPRequest) {
	for _, customHeader := range e.customHeaders {
		// This should be pre-computed somewhere and done only once
		tokens := strings.SplitN(customHeader, ":", two)
		// if it's an invalid header skip it
		if len(tokens) < two {
			continue
		}

		headerName, headerValue := tokens[0], strings.Join(tokens[1:], "")
		if r.RawRequest != nil {
			// rawhttp
			r.RawRequest.Headers[headerName] = headerValue
		} else {
			// retryablehttp
			headerName = strings.TrimSpace(headerName)
			headerValue = strings.TrimSpace(headerValue)
			r.Request.Header[headerName] = []string{headerValue}
		}
	}
}
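
// Result holds the outcome of executing a template request against a target:
// matcher hits, extracted values, per-request history data for DSL matchers,
// and any error encountered along the way.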
type Result struct {
	sync.Mutex
	GotResults  bool
	Done        bool
	Meta        map[string]interface{}
	Matches     map[string]interface{}
	Extractions map[string]interface{}
	historyData map[string]interface{}
	Error       error
}