Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/go.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,5 +17,5 @@ jobs:
with:
args: sh -c "go get -t -v ./...;
gofmt -w -s . && git diff --exit-code;
go tool vet .;
go vet .;
go test -v -race ./..."
46 changes: 41 additions & 5 deletions httpcache.go
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ func varyMatches(cachedResp *http.Response, req *http.Request) bool {
// If there is a stale Response, then any validators it contains will be set on the new request
// to give the server a chance to respond with NotModified. If this happens, then the cached Response
// will be returned.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { // skipcq: GO-R1005
cacheKey := cacheKey(req)
cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
var cachedResp *http.Response
Expand Down Expand Up @@ -291,7 +291,7 @@ var clock timer = &realClock{}
//
// Because this is only a private cache, 'public' and 'private' in cache-control aren't
// significant. Similarly, s-maxage isn't used.
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) { // skipcq: GO-R1005
respCacheControl := parseCacheControl(respHeaders)
reqCacheControl := parseCacheControl(reqHeaders)
if _, ok := reqCacheControl["no-cache"]; ok {
Expand Down Expand Up @@ -435,7 +435,7 @@ func getEndToEndHeaders(respHeaders http.Header) []string {
hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
}
}
endToEndHeaders := []string{}
var endToEndHeaders []string
for respHeader := range respHeaders {
if _, ok := hopByHopHeaders[respHeader]; !ok {
endToEndHeaders = append(endToEndHeaders, respHeader)
Expand Down Expand Up @@ -527,22 +527,58 @@ type cachingReadCloser struct {
OnEOF func(io.Reader)

buf bytes.Buffer // buf stores a copy of the content of R.

cached bool
readed bool
}

// Read reads the next len(p) bytes from R or until R is drained. The
// return value n is the number of bytes read. If R has no data to
// return, err is io.EOF and OnEOF is called with a full copy of what
// has been read so far.
func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
r.readed = true
n, err = r.R.Read(p)
r.buf.Write(p[:n])
if err == io.EOF || n < len(p) {
r.OnEOF(bytes.NewReader(r.buf.Bytes()))
// we only get an io.EOF if we have a Content-Length (event with
// Transfer-Encoding: chunked we might not get an EOF error marking
// the end). Also inn the very weird case that
// none of those are provided, we can only know that we have
// readed the content, because something was been read and
// close was called.
if err == io.EOF {
r.cacheIt()
}
return n, err
}

// cacheIt hands the buffered body to OnEOF exactly once. Repeated calls
// are no-ops, and nothing is cached when the body was never read at all
// (an untouched body is assumed to be unused).
func (r *cachingReadCloser) cacheIt() {
	if r.cached {
		return
	}
	r.cached = true
	// Only replay the buffer if at least one Read was attempted.
	if r.readed {
		r.OnEOF(bytes.NewReader(r.buf.Bytes()))
	}
}

// Close caches whatever has been read so far and then closes the
// underlying body.
//
// When neither 'Content-Length' nor 'Transfer-Encoding: chunked' is
// provided, we may never learn that the body was fully read. For
// example, a JSON decoder reads a body only to the end of a valid JSON
// value (and does not read beyond it), so a body of `{"k":"v"}foo`
// with trailing garbage would never reach EOF and would not be cached
// from Read. Since the connection is closed at some point, we assume
// the values read so far are the correct ones for the response.
//
// The remaining risk is a body that was never read, or only partially
// read :/
func (r *cachingReadCloser) Close() error {
	r.cacheIt()
	return r.R.Close()
}

Expand Down
Loading