Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 1 addition & 3 deletions cache/redis_cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,6 @@ const statsTimeout = 500 * time.Millisecond
// result from redis in a temporary file before sending it to the http response
const minTTLForRedisStreamingReader = 15 * time.Second

// tmpDir temporary path to store ongoing queries results
const tmpDir = "/tmp"
const redisTmpFilePrefix = "chproxyRedisTmp"

func newRedisCache(client redis.UniversalClient, cfg config.Cache) *redisCache {
Expand Down Expand Up @@ -155,7 +153,7 @@ func (r *redisCache) readResultsAboveLimit(offset int, stringKey string, metadat
// nb: it would be better to retry the flow if such a failure happened but this requires a huge refactoring of proxy.go

if ttl <= minTTLForRedisStreamingReader {
fileStream, err := newFileWriterReader(tmpDir)
fileStream, err := newFileWriterReader(os.TempDir())
if err != nil {
return nil, err
}
Expand Down
6 changes: 3 additions & 3 deletions cache/redis_cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -236,7 +236,7 @@ func TestSmallTTLOnBigPayloadAreCacheWithFile(t *testing.T) {

// simulate a value that is almost expired
redis.SetTTL(key.String(), 2*time.Second)
nbFileCacheBeforeGet, err := countFilesWithPrefix(tmpDir, redisTmpFilePrefix)
nbFileCacheBeforeGet, err := countFilesWithPrefix(os.TempDir(), redisTmpFilePrefix)
if err != nil {
t.Fatalf("could not read directory %s", err)
}
Expand All @@ -245,7 +245,7 @@ func TestSmallTTLOnBigPayloadAreCacheWithFile(t *testing.T) {
if err != nil {
t.Fatalf("expected cached to have the value")
}
nbFileCacheAfterGet, err := countFilesWithPrefix(tmpDir, redisTmpFilePrefix)
nbFileCacheAfterGet, err := countFilesWithPrefix(os.TempDir(), redisTmpFilePrefix)
if err != nil {
t.Fatalf("could not read directory %s", err)
}
Expand All @@ -261,7 +261,7 @@ func TestSmallTTLOnBigPayloadAreCacheWithFile(t *testing.T) {
t.Fatalf("got a value different than the expected one len(value)=%d vs len(expectedValue)=%d", len(string(cachedValue)), len(expectedValue))
}
cachedData.Data.Close()
nbFileCacheAfterClose, err := countFilesWithPrefix(tmpDir, redisTmpFilePrefix)
nbFileCacheAfterClose, err := countFilesWithPrefix(os.TempDir(), redisTmpFilePrefix)
if err != nil {
t.Fatalf("could not read directory %s", err)
}
Expand Down
6 changes: 2 additions & 4 deletions proxy.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
"net/http"
"net/http/httputil"
"net/url"
"os"
"strconv"
"strings"
"sync"
Expand All @@ -22,9 +23,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
)

// tmpDir temporary path to store ongoing queries results
const tmpDir = "/tmp"

// failedTransactionPrefix prefix added to the failed reason for concurrent queries registry
const failedTransactionPrefix = "[concurrent query failed]"

Expand Down Expand Up @@ -417,7 +415,7 @@ func (rp *reverseProxy) serveFromCache(s *scope, srw *statResponseWriter, req *h

// The response wasn't found in the cache.
// Request it from clickhouse.
tmpFileRespWriter, err := cache.NewTmpFileResponseWriter(srw, tmpDir)
tmpFileRespWriter, err := cache.NewTmpFileResponseWriter(srw, os.TempDir())
if err != nil {
err = fmt.Errorf("%s: %w; query: %q", s, err, q)
respondWith(srw, err, http.StatusInternalServerError)
Expand Down