Mirror of https://github.com/zedeus/nitter.git, synced 2024-12-22 23:25:35 +00:00
Use frosty instead of msgpack, compress everything
This commit is contained in:
parent a8553db66e
commit 908da72ba9
@@ -19,9 +19,9 @@ requires "sass"
 requires "markdown#head"
 requires "https://github.com/zedeus/redis#head"
 requires "redpool#head"
-requires "msgpack4nim >= 0.3.1"
 requires "packedjson"
 requires "snappy#head"
+requires "https://github.com/disruptek/frosty#0.0.6"
 
 # Tasks
 
@@ -1,6 +1,5 @@
 import asyncdispatch, times, strutils, tables
-import redis, redpool, msgpack4nim
-export redpool, msgpack4nim
+import redis, redpool, frosty, snappy
 
 import types, api
 
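The import hunk above completes the dependency swap: msgpack4nim (and its re-export) is gone, and the cache module now pulls in frosty for binary serialization plus snappy for compression. A minimal, self-contained sketch of the round trip the rest of the diff adopts, using only the calls that appear below (frosty's freeze/thaw, snappy's compress/uncompress); the Item type is invented for illustration, nitter applies the same pattern to Profile, List and PhotoRail:

import frosty, snappy

type
  Item = object
    name: string
    count: int

proc dump(x: Item): string =
  ## what the cache procs below now store in Redis
  compress(freeze(x))

proc load(s: string): Item =
  ## what the getCached procs below now do with the value read back;
  ## thaw fills the caller-provided (here: the implicit result) variable in place
  if s.len > 0:
    uncompress(s).thaw(result)

when isMainModule:
  let blob = dump(Item(name: "example", count: 3))
  doAssert load(blob).name == "example"

Unlike msgpack4nim's pack/unpack, thaw writes into an existing var, which is why the to(s, typ) template removed further down has no direct replacement.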
@@ -16,20 +15,26 @@ proc setCacheTimes*(cfg: Config) =
   rssCacheTime = cfg.rssCacheTime * 60
   listCacheTime = cfg.listCacheTime * 60
 
+proc migrate*(key, match: string) {.async.} =
+  pool.withAcquire(r):
+    let hasKey = await r.get(key)
+    if hasKey == redisNil:
+      let list = await r.scan(newCursor(0), match, 100000)
+      r.startPipelining()
+      for item in list:
+        if item != "p:":
+          discard await r.del(item)
+      await r.setk(key, "true")
+      discard await r.flushPipeline()
+
 proc initRedisPool*(cfg: Config) {.async.} =
   try:
     pool = await newRedisPool(cfg.redisConns, maxConns=cfg.redisMaxConns,
                               host=cfg.redisHost, port=cfg.redisPort)
 
-    pool.withAcquire(r):
-      let snappyRss = await r.get("snappyRss")
-      if snappyRss == redisNil:
-        let list = await r.scan(newCursor(0), "rss:*", 10000)
-        r.startPipelining()
-        for rss in list:
-          discard await r.del(rss)
-        discard await r.flushPipeline()
-        await r.setk("snappyRss", "true")
+    await migrate("snappyRss", "rss:*")
+    await migrate("frosty", "*")
+
   except OSError:
     echo "Failed to connect to Redis."
     quit(1)
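The new migrate proc is a one-shot cache flush guarded by a marker key: if the marker is absent it scans for keys matching the pattern, deletes them in a pipeline (sparing the "p:" username-to-id hash), writes the marker, and never runs again. initRedisPool now just calls it twice, once for the earlier snappy RSS change and once for this commit, which drops every value still serialized with msgpack. A later format change would presumably add one more call; a sketch assuming it sits in the same module (the marker name is invented, not part of this commit):

proc flushForNextFormat*() {.async.} =
  # "frostyV2" is a hypothetical marker key; the "*" pattern drops everything
  # except the "p:" hash, exactly once
  await migrate("frostyV2", "*")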
@@ -37,12 +42,6 @@ proc initRedisPool*(cfg: Config) {.async.} =
 template toKey(p: Profile): string = "p:" & toLower(p.username)
 template toKey(l: List): string = toLower("l:" & l.username & '/' & l.name)
 
-template to(s: string; typ: typedesc): untyped =
-  var res: typ
-  if s.len > 0:
-    s.unpack(res)
-  res
-
 proc get(query: string): Future[string] {.async.} =
   pool.withAcquire(r):
     result = await r.get(query)
@@ -52,16 +51,16 @@ proc setex(key: string; time: int; data: string) {.async.} =
     discard await r.setex(key, time, data)
 
 proc cache*(data: List) {.async.} =
-  await setex(data.toKey, listCacheTime, data.pack)
+  await setex(data.toKey, listCacheTime, compress(freeze(data)))
 
 proc cache*(data: PhotoRail; id: string) {.async.} =
-  await setex("pr:" & id, baseCacheTime, data.pack)
+  await setex("pr:" & id, baseCacheTime, compress(freeze(data)))
 
 proc cache*(data: Profile) {.async.} =
   if data.username.len == 0: return
   pool.withAcquire(r):
     r.startPipelining()
-    discard await r.setex(data.toKey, baseCacheTime, pack(data))
+    discard await r.setex(data.toKey, baseCacheTime, compress(freeze(data)))
     discard await r.hset("p:", toLower(data.username), data.id)
     discard await r.flushPipeline()
 
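With the hunk above, every cached value, whether profile, photo rail or list, is written as a snappy-compressed frosty blob instead of a msgpack string; that is the "compress everything" part of the commit title. A quick, self-contained way to eyeball what the extra compress step buys for a given value (sizes depend entirely on the data; the sample seq is invented):

import frosty, snappy

# Compare the frozen and compressed sizes of a throwaway, highly repetitive value.
let sample = newSeq[string](100)   # 100 empty strings
let frozen = freeze(sample)
echo "frozen:     ", frozen.len, " bytes"
echo "compressed: ", compress(frozen).len, " bytes"

Real Profile or List payloads compress less dramatically, but snappy trades a small amount of CPU for noticeably smaller values in Redis.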
@@ -88,7 +87,7 @@ proc getProfileId*(username: string): Future[string] {.async.} =
 proc getCachedProfile*(username: string; fetch=true): Future[Profile] {.async.} =
   let prof = await get("p:" & toLower(username))
   if prof != redisNil:
-    result = prof.to(Profile)
+    uncompress(prof).thaw(result)
   elif fetch:
     result = await getProfile(username)
 
@@ -96,7 +95,7 @@ proc getCachedPhotoRail*(id: string): Future[PhotoRail] {.async.} =
   if id.len == 0: return
   let rail = await get("pr:" & toLower(id))
   if rail != redisNil:
-    result = rail.to(PhotoRail)
+    uncompress(rail).thaw(result)
   else:
     result = await getPhotoRail(id)
     await cache(result, id)
@@ -106,7 +105,7 @@ proc getCachedList*(username=""; name=""; id=""): Future[List] {.async.} =
              else: await get(toLower("l:" & username & '/' & name))
 
   if list != redisNil:
-    result = list.to(List)
+    uncompress(list).thaw(result)
   else:
     if id.len > 0:
       result = await getGraphListById(id)