package engine

import (
	"fmt"
	"net/http"
	"net/http/httputil"
	"strings"

	"github.com/go-rod/rod"
	"github.com/go-rod/rod/lib/proto"
	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
)

// routingRuleHandler handles proxy rules for actions related to request/response modification
func (p *Page) routingRuleHandler(httpClient *http.Client) func(ctx *rod.Hijack) {
	return func(ctx *rod.Hijack) {
		// browsers usually don't use chunked transfer encoding, so we set the content-length explicitly
		ctx.Request.Req().ContentLength = int64(len(ctx.Request.Body()))
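		// apply request-phase rules to the outgoing request before it is sent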
		for _, rule := range p.rules {
			if rule.Part != "request" {
				continue
			}
			switch rule.Action {
			case ActionSetMethod:
				rule.Do(func() {
					ctx.Request.Req().Method = rule.Args["method"]
				})
			case ActionAddHeader:
				ctx.Request.Req().Header.Add(rule.Args["key"], rule.Args["value"])
			case ActionSetHeader:
				ctx.Request.Req().Header.Set(rule.Args["key"], rule.Args["value"])
			case ActionDeleteHeader:
				ctx.Request.Req().Header.Del(rule.Args["key"])
			case ActionSetBody:
				body := rule.Args["body"]
				ctx.Request.Req().ContentLength = int64(len(body))
				ctx.Request.SetBody(body)
			}
		}
		// each http request is performed via the native go http client,
		// so we first inject the shared cookies
		if !p.options.DisableCookie {
			if cookies := p.ctx.CookieJar.Cookies(ctx.Request.URL()); len(cookies) > 0 {
				httpClient.Jar.SetCookies(ctx.Request.URL(), cookies)
			}
		}
		// perform the request
		_ = ctx.LoadResponse(httpClient, true)
		if !p.options.DisableCookie {
			// retrieve the updated cookies from the native http client and inject them into the shared cookie jar,
			// keeping the existing ones when no new cookies are present
			if cookies := httpClient.Jar.Cookies(ctx.Request.URL()); len(cookies) > 0 {
				p.ctx.CookieJar.SetCookies(ctx.Request.URL(), cookies)
			}
		}
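		// apply response-phase rules to the hijacked response before it is returned to the browser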
		for _, rule := range p.rules {
			if rule.Part != "response" {
				continue
			}
			switch rule.Action {
			case ActionAddHeader:
				ctx.Response.Headers().Add(rule.Args["key"], rule.Args["value"])
			case ActionSetHeader:
				ctx.Response.Headers().Set(rule.Args["key"], rule.Args["value"])
			case ActionDeleteHeader:
				ctx.Response.Headers().Del(rule.Args["key"])
			case ActionSetBody:
				body := rule.Args["body"]
				ctx.Response.Headers().Set("Content-Length", fmt.Sprintf("%d", len(body)))
				ctx.Response.SetBody(body)
			}
		}
		// store history
		req := ctx.Request.Req()
		var rawReq string
		if raw, err := httputil.DumpRequestOut(req, true); err == nil {
			rawReq = string(raw)
		}
		// attempt to rebuild the raw response
		var rawResp strings.Builder
		respPayloads := ctx.Response.Payload()
		if respPayloads != nil {
			rawResp.WriteString(fmt.Sprintf("HTTP/1.1 %d %s\n", respPayloads.ResponseCode, respPayloads.ResponsePhrase))
			for _, header := range respPayloads.ResponseHeaders {
				rawResp.WriteString(header.Name + ": " + header.Value + "\n")
			}
			rawResp.WriteString("\n")
			rawResp.WriteString(ctx.Response.Body())
		}
		// record the request/response pair in the page history
		historyData := HistoryData{
			RawRequest:  rawReq,
			RawResponse: rawResp.String(),
		}
		p.addToHistory(historyData)
	}
}
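
// Illustrative sketch, not part of the original flow: the handler returned by
// routingRuleHandler is expected to be installed on a go-rod hijack router,
// roughly as below; `p`, `httpClient`, and the catch-all pattern are assumptions
// made for the example.
//
//	router := p.page.HijackRequests()
//	_ = router.Add("*", "", p.routingRuleHandler(httpClient))
//	go router.Run()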

// routingRuleHandlerNative handles native proxy rules
func (p *Page) routingRuleHandlerNative(e *proto.FetchRequestPaused) error {
	// ValidateNFailRequest checks whether local file access and local network access
	// are enabled; if they are not, it fails requests that don't match the rules
	if err := protocolstate.ValidateNFailRequest(p.options.Options, p.page, e); err != nil {
		return err
	}
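	// read the response body, headers and status code from the paused fetch event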
	body, _ := FetchGetResponseBody(p.page, e)
	headers := make(map[string][]string)
	for _, h := range e.ResponseHeaders {
		headers[h.Name] = []string{h.Value}
	}
	var statusCode int
	if e.ResponseStatusCode != nil {
		statusCode = *e.ResponseStatusCode
	}
	// attempt to rebuild the raw request
	var rawReq strings.Builder
	rawReq.WriteString(fmt.Sprintf("%s %s %s\n", e.Request.Method, e.Request.URL, "HTTP/1.1"))
	for _, header := range e.Request.Headers {
		rawReq.WriteString(fmt.Sprintf("%s\n", header.String()))
	}
	if e.Request.HasPostData {
		rawReq.WriteString(fmt.Sprintf("\n%s\n", e.Request.PostData))
	}
	// attempt to rebuild the raw response
	var rawResp strings.Builder
	rawResp.WriteString(fmt.Sprintf("HTTP/1.1 %d %s\n", statusCode, e.ResponseStatusText))
	for _, header := range e.ResponseHeaders {
		rawResp.WriteString(header.Name + ": " + header.Value + "\n")
	}
	rawResp.WriteString("\n")
	rawResp.Write(body)
	// record the request/response pair in the page history
	historyData := HistoryData{
		RawRequest:  rawReq.String(),
		RawResponse: rawResp.String(),
	}
	p.addToHistory(historyData)
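	// resume the paused request so the browser continues loading normally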
	return FetchContinueRequest(p.page, e)
}