replace zxq.co/ripple/hanayo

This commit is contained in:
Alicia
2019-02-23 13:29:15 +00:00
commit c3d206c173
5871 changed files with 1353715 additions and 0 deletions

3
vendor/github.com/osuripple/cheesegull/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,3 @@
/cheesegull
/data/*
!/data/.gitkeep

7
vendor/github.com/osuripple/cheesegull/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,7 @@
language: go
go:
- 1.9
script:
- go build -v
after_success:
- test -n "$TRAVIS_TAG" && curl -sL https://git.io/goreleaser | bash

57
vendor/github.com/osuripple/cheesegull/Gopkg.lock generated vendored Normal file
View File

@@ -0,0 +1,57 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/alecthomas/kingpin"
packages = ["."]
revision = "1087e65c9441605df944fb12c33f0fe7072d18ca"
version = "v2.2.5"
[[projects]]
branch = "master"
name = "github.com/alecthomas/template"
packages = [".","parse"]
revision = "a0175ee3bccc567396460bf5acd36800cb10c49c"
[[projects]]
branch = "master"
name = "github.com/alecthomas/units"
packages = ["."]
revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
[[projects]]
name = "github.com/certifi/gocertifi"
packages = ["."]
revision = "3fd9e1adb12b72d2f3f82191d49be9b93c69f67c"
version = "2017.07.27"
[[projects]]
branch = "master"
name = "github.com/getsentry/raven-go"
packages = ["."]
revision = "1452f6376ddb15c546b6d7567e1d9518765391b5"
[[projects]]
name = "github.com/go-sql-driver/mysql"
packages = ["."]
revision = "a0583e0143b1624142adab07e0e97fe106d99561"
version = "v1.3"
[[projects]]
name = "github.com/julienschmidt/httprouter"
packages = ["."]
revision = "8c199fb6259ffc1af525cc3ad52ee60ba8359669"
version = "v1.1"
[[projects]]
name = "github.com/thehowl/go-osuapi"
packages = ["."]
revision = "b918f5da725805d82655d6c402eb3fc0517c072f"
version = "1.1.2"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "783272a0d9e62c2046fb5a2b5a02f3f58f307a7a2d9be21ff74b3627281f1c89"
solver-name = "gps-cdcl"
solver-version = 1

42
vendor/github.com/osuripple/cheesegull/Gopkg.toml generated vendored Normal file
View File

@@ -0,0 +1,42 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
name = "github.com/alecthomas/kingpin"
version = "2.2.5"
[[constraint]]
name = "github.com/go-sql-driver/mysql"
version = "1.3.0"
[[constraint]]
name = "github.com/thehowl/go-osuapi"
version = "1.1.0"
[[constraint]]
name = "github.com/julienschmidt/httprouter"
version = "1.1.0"
[[constraint]]
branch = "master"
name = "github.com/getsentry/raven-go"

18
vendor/github.com/osuripple/cheesegull/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,18 @@
Copyright (c) 2016-2017 Morgan Bazalgette
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

77
vendor/github.com/osuripple/cheesegull/README.md generated vendored Normal file
View File

@@ -0,0 +1,77 @@
<p align="center"><img src="https://y.zxq.co/jobeei.png"></p>
# CheeseGull [![Build Status](https://travis-ci.org/osuripple/cheesegull.svg?branch=master)](https://travis-ci.org/osuripple/cheesegull)
CheeseGull creates an unofficial "slave" database of the official osu! beatmap
database, trying to keep every beatmap up to date as much as possible, as well
as a cache middleman for osz files.
The main purpose for this is, as you can see from the owner of the repository,
for running a sort of "beatmap mirror" for Ripple's osu! direct. We originally
used an actual osu! beatmap mirror which had all of the beatmaps on osu!
downloaded, but it ended up taking wayyyy too much space, and the cheapest
server we could find that had at least 2 TB of HDD had an upload speed of
30mbit - as you can guess, this meant that for the Ripple users who didn't have
a third world connection the download speed was pretty poor.
CheeseGull tries to hold a replica of the osu! database as updated as possible.
Though of course, not having any way to see the latest updated beatmaps, or have
a system for subscribing to updates to beatmaps, or anything else which could
help us identify what has been updated recently makes it very hard to keep
an updated copy at all times (Takeaway: the osu! API is completely shit). In
order to do this, CheeseGull updates WIP, Pending or Qualified beatmaps when
at least 30 minutes have passed since the time they were checked, whereas for
all other beatmaps (including Graveyard, Ranked, Approved, etc) at least 4 days
must have passed. This is not a problem for ranked/approved (it's highly
unlikely for a ranked beatmap to ever change state, and Graveyard beatmaps
are rarely resurrected, so there's that).
Beatmap downloads are also provided by going at `/d/<id>`. In case the beatmap
is not stored in the local cache, the beatmap will be downloaded on-the-fly
(this assumes the machine's internet connection is fast enough to download a
beatmap before an HTTP timeout happens). In case the beatmap is already in the
cache, then well, as you can imagine, it is served straight from there. Oh, yes,
multiple people downloading a not-yet-cached beatmap at the same time is a case we
handle. Or should be able to handle, at least.
## [API docs](http://docs.ripple.moe/docs/cheesegull/cheesegull-api)
## Getting Started
You can find binaries of the latest release
[here.](https://github.com/osuripple/cheesegull/releases/latest)
If you want to compile from source, if you have Go installed it should only be
a `go get github.com/osuripple/cheesegull` away.
The only requirements at the moment are a MySQL server and an osu! account.
Check out `cheesegull --help` to see how you can set them up for cheesegull to
work properly.
## Contributing
No strict contribution guide at the moment. Just fork, clone, then
```sh
git checkout -b your-new-feature
code . # make changes
git add .
git commit -m "Added thing"
git push origin your-new-feature
```
Go to the GitHub website, and create a pull request.
## Sphinx set-up
If you want to test search using Sphinx, you will need to set it up.
[Here is the sphinx.conf used in production, you probably only need to change lines 23-35](https://gist.github.com/thehowl/3dc046e2a0ab93fa1ffe5f0eca085905)
(No, we're not using ElasticSearch. Search is meant to be fast and not take too
much memory. Any Java solution can thus be tossed away since it does not suit
these basic two requirements.)
## License
Seeing as this project is not meant exclusively for usage by Ripple, the license,
unlike most other Ripple projects, is [MIT](LICENSE).

144
vendor/github.com/osuripple/cheesegull/api/api.go generated vendored Normal file
View File

@@ -0,0 +1,144 @@
// Package api contains the general framework for writing handlers in the
// CheeseGull API.
package api
import (
"database/sql"
"encoding/json"
"errors"
"log"
"net/http"
"os"
"runtime/debug"
"time"
raven "github.com/getsentry/raven-go"
"github.com/julienschmidt/httprouter"
"github.com/osuripple/cheesegull/downloader"
"github.com/osuripple/cheesegull/housekeeper"
)
// Context is the information that is passed to all request handlers in relation
// to the request, and how to answer it.
type Context struct {
	Request  *http.Request      // the incoming HTTP request
	DB       *sql.DB            // metadata database
	SearchDB *sql.DB            // fulltext search database (SphinxQL; see cheesegull.go)
	House    *housekeeper.House // on-disk beatmap cache
	DLClient *downloader.Client // logged-in osu! website client used for downloads
	writer   http.ResponseWriter
	params   httprouter.Params
}

// Write writes content to the response body.
func (c *Context) Write(b []byte) (int, error) {
	return c.writer.Write(b)
}

// ReadHeader reads a header from the request.
func (c *Context) ReadHeader(s string) string {
	return c.Request.Header.Get(s)
}

// WriteHeader sets a header in the response.
func (c *Context) WriteHeader(key, value string) {
	c.writer.Header().Set(key, value)
}

// Code sets the response's code.
func (c *Context) Code(i int) {
	c.writer.WriteHeader(i)
}

// Param retrieves a parameter in the URL's path.
func (c *Context) Param(s string) string {
	return c.params.ByName(s)
}

// WriteJSON writes JSON to the response, setting the Content-Type header
// and the status code before encoding v into the body.
func (c *Context) WriteJSON(code int, v interface{}) error {
	c.WriteHeader("Content-Type", "application/json; charset=utf-8")
	c.Code(code)
	return json.NewEncoder(c.writer).Encode(v)
}

// envSentryDSN enables Sentry error reporting when set in the environment.
var envSentryDSN = os.Getenv("SENTRY_DSN")

// Err attempts to log an error to Sentry, as well as stdout.
// A nil error is a no-op.
func (c *Context) Err(err error) {
	if err == nil {
		return
	}
	if envSentryDSN != "" {
		raven.CaptureError(err, nil, raven.NewHttp(c.Request))
	}
	log.Println(err)
}

// handlerPath associates an HTTP method and path pattern with a handler.
type handlerPath struct {
	method, path string
	f            func(c *Context)
}

// handlers collects routes registered through GET/POST (typically from
// package init functions) until CreateHandler builds the router.
var handlers []handlerPath

// GET registers a handler for a GET request.
func GET(path string, f func(c *Context)) {
	handlers = append(handlers, handlerPath{"GET", path, f})
}

// POST registers a handler for a POST request.
func POST(path string, f func(c *Context)) {
	handlers = append(handlers, handlerPath{"POST", path, f})
}
// CreateHandler creates a new http.Handler using the handlers registered
// through GET and POST.
func CreateHandler(db, searchDB *sql.DB, house *housekeeper.House, dlc *downloader.Client) http.Handler {
	r := httprouter.New()
	for _, h := range handlers {
		// Create local copy that we know won't change as the loop proceeds.
		h := h
		r.Handle(h.method, h.path, func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
			start := time.Now()
			ctx := &Context{
				Request:  r,
				DB:       db,
				SearchDB: searchDB,
				House:    house,
				DLClient: dlc,
				writer:   w,
				params:   p,
			}
			// Recover from panics in handlers: report the error (to Sentry
			// when configured) and dump a stack trace instead of letting
			// the panic kill the process.
			defer func() {
				err := recover()
				if err == nil {
					return
				}
				switch err := err.(type) {
				case error:
					ctx.Err(err)
				case stringer:
					ctx.Err(errors.New(err.String()))
				case string:
					ctx.Err(errors.New(err))
				default:
					log.Println("PANIC", err)
				}
				debug.PrintStack()
			}()
			h.f(ctx)
			// Request log line: duration, method, path.
			log.Printf("[R] %-10s %-4s %s\n",
				time.Since(start).String(),
				r.Method,
				r.URL.Path,
			)
		})
	}
	return r
}

// stringer is the subset of fmt.Stringer used to stringify panic values.
type stringer interface {
	String() string
}

View File

@@ -0,0 +1,133 @@
// Package download handles the API call to download an osu! beatmap set.
package download
import (
"fmt"
"io"
"log"
"strconv"
"time"
"github.com/osuripple/cheesegull/api"
"github.com/osuripple/cheesegull/downloader"
"github.com/osuripple/cheesegull/housekeeper"
"github.com/osuripple/cheesegull/models"
)
// errorMessage writes a plain-text error response with the given HTTP
// status code.
func errorMessage(c *api.Context, code int, err string) {
	c.WriteHeader("Content-Type", "text/plain; charset=utf-8")
	c.Code(code)
	c.Write([]byte(err))
}

// existsQueryKey reports whether s is present as a key in the request's
// query string, regardless of its value.
func existsQueryKey(c *api.Context, s string) bool {
	_, ok := c.Request.URL.Query()[s]
	return ok
}
// Download is the handler for a request to download a beatmap
// set as an .osz, serving it from the local cache and downloading it from
// the osu! website on a cache miss.
func Download(c *api.Context) {
	// get the beatmap ID
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		errorMessage(c, 400, "Malformed ID")
		return
	}
	// fetch beatmap set and make sure it exists.
	set, err := models.FetchSet(c.DB, id, false)
	if err != nil {
		c.Err(err)
		errorMessage(c, 500, "Could not fetch set")
		return
	}
	if set == nil {
		errorMessage(c, 404, "Set not found")
		return
	}
	// use novideo only when we are requested to get a beatmap having a video
	// and novideo is in the request
	noVideo := set.HasVideo && existsQueryKey(c, "novideo")
	// AcquireBeatmap tells us whether this request is responsible for
	// downloading the file (shouldDownload) or another request already
	// did/is doing it — presumably how concurrent downloads of the same
	// uncached map are coordinated; confirm in housekeeper.
	cbm, shouldDownload := c.House.AcquireBeatmap(&housekeeper.CachedBeatmap{
		ID:         id,
		NoVideo:    noVideo,
		LastUpdate: set.LastUpdate,
	})
	if shouldDownload {
		err := downloadBeatmap(c.DLClient, cbm, c.House)
		if err != nil {
			c.Err(err)
			errorMessage(c, 500, "Internal error")
			return
		}
	} else {
		// Someone else is responsible: wait until the file is available.
		cbm.MustBeDownloaded()
	}
	// Record the access time (NOTE(review): likely feeds cache eviction —
	// confirm against housekeeper's cleaner).
	cbm.SetLastRequested(time.Now())
	// A zero file size means the download produced nothing.
	if cbm.FileSize() == 0 {
		errorMessage(c, 504, "The beatmap could not be downloaded (probably got deleted from the osu! website)")
		return
	}
	f, err := cbm.File()
	if err != nil {
		c.Err(err)
		errorMessage(c, 500, "Internal error")
		return
	}
	defer f.Close()
	// Stream the cached .osz to the client with download headers.
	c.WriteHeader("Content-Type", "application/octet-stream")
	c.WriteHeader("Content-Disposition", fmt.Sprintf("attachment; filename=%q", fmt.Sprintf("%d %s - %s.osz", set.ID, set.Artist, set.Title)))
	c.WriteHeader("Content-Length", strconv.FormatUint(uint64(cbm.FileSize()), 10))
	c.Code(200)
	_, err = io.Copy(c, f)
	if err != nil {
		c.Err(err)
	}
}
// downloadBeatmap downloads b from the osu! website into b's cache file,
// and always notifies the housekeeper on exit via DownloadCompleted (even
// on failure, so the final file size — possibly 0 — is recorded).
func downloadBeatmap(c *downloader.Client, b *housekeeper.CachedBeatmap, house *housekeeper.House) error {
	log.Println("[⬇️]", b.String())
	var fileSize uint64
	defer func() {
		// We need to wrap this inside a function because this way the arguments
		// to DownloadCompleted are actually evaluated during the defer call.
		b.DownloadCompleted(fileSize, house)
	}()
	// Start downloading.
	r, err := c.Download(b.ID, b.NoVideo)
	if err != nil {
		if err == downloader.ErrNoRedirect {
			// Beatmap unavailable on the website: not an error for the
			// caller; fileSize stays 0, which Download reports as a 504.
			return nil
		}
		return err
	}
	defer r.Close()
	// open the file we will write the beatmap into
	f, err := b.CreateFile()
	if err != nil {
		return err
	}
	defer f.Close()
	fSizeRaw, err := io.Copy(f, r)
	// Record the size before the error check, so partial copies are counted.
	fileSize = uint64(fSizeRaw)
	if err != nil {
		return err
	}
	return nil
}

func init() {
	// Register the beatmap set download route.
	api.GET("/d/:id", Download)
}

21
vendor/github.com/osuripple/cheesegull/api/index.go generated vendored Normal file
View File

@@ -0,0 +1,21 @@
package api

import (
	"expvar"
)

// index is the landing-page handler, returning a short plain-text banner.
func index(c *Context) {
	c.WriteHeader("Content-Type", "text/plain; charset=utf-8")
	c.Write([]byte("CheeseGull v2.x Woo\nFor more information: https://github.com/osuripple/cheesegull"))
}

// _evh serves the process's expvar runtime metrics.
var _evh = expvar.Handler()

// expvarHandler adapts the expvar handler to the Context-based API.
func expvarHandler(c *Context) {
	_evh.ServeHTTP(c.writer, c.Request)
}

func init() {
	GET("/", index)
	GET("/expvar", expvarHandler)
}

View File

@@ -0,0 +1,121 @@
// Package metadata handles API request that search for metadata regarding osu!
// beatmaps.
package metadata
import (
"strconv"
"strings"
"github.com/osuripple/cheesegull/api"
"github.com/osuripple/cheesegull/models"
)
// Beatmap handles requests to retrieve single beatmaps.
func Beatmap(c *api.Context) {
	// Accept both "/b/123" and "/b/123.json".
	id, _ := strconv.Atoi(strings.TrimSuffix(c.Param("id"), ".json"))
	if id == 0 {
		// 0 covers both an explicit 0 and an unparseable ID.
		c.WriteJSON(404, nil)
		return
	}
	bms, err := models.FetchBeatmaps(c.DB, id)
	if err != nil {
		c.Err(err)
		c.WriteJSON(500, nil)
		return
	}
	if len(bms) == 0 {
		c.WriteJSON(404, nil)
		return
	}
	c.WriteJSON(200, bms[0])
}

// Set handles requests to retrieve single beatmap sets.
func Set(c *api.Context) {
	id, _ := strconv.Atoi(strings.TrimSuffix(c.Param("id"), ".json"))
	if id == 0 {
		c.WriteJSON(404, nil)
		return
	}
	// Third argument presumably toggles fetching the set's child beatmaps
	// (Download passes false) — confirm against models.FetchSet.
	set, err := models.FetchSet(c.DB, id, true)
	if err != nil {
		c.Err(err)
		c.WriteJSON(500, nil)
		return
	}
	if set == nil {
		c.WriteJSON(404, nil)
		return
	}
	c.WriteJSON(200, set)
}
// mustInt converts s to an int, yielding 0 when s is not a valid integer.
func mustInt(s string) int {
	n, _ := strconv.Atoi(s)
	return n
}

// mustPositive clamps negative values to zero.
func mustPositive(i int) int {
	if i >= 0 {
		return i
	}
	return 0
}

// intWithBounds clamps i into [min, max], substituting def when i is 0.
func intWithBounds(i, min, max, def int) int {
	switch {
	case i == 0:
		return def
	case i < min:
		return min
	case i > max:
		return max
	}
	return i
}

// sIntWithBounds parses strs as integers, dropping any entry that is not a
// valid number or falls outside [min, max].
func sIntWithBounds(strs []string, min, max int) []int {
	out := make([]int, 0, len(strs))
	for _, raw := range strs {
		n, err := strconv.Atoi(raw)
		if err != nil || n < min || n > max {
			continue
		}
		out = append(out, n)
	}
	return out
}
// Search does a search on the sets available in the database.
func Search(c *api.Context) {
	query := c.Request.URL.Query()
	sets, err := models.SearchSets(c.DB, c.SearchDB, models.SearchOptions{
		// status: ranked statuses, accepted range -2..4.
		Status: sIntWithBounds(query["status"], -2, 4),
		Query:  query.Get("query"),
		// mode: game modes, accepted range 0..3.
		Mode: sIntWithBounds(query["mode"], 0, 3),
		// amount: result count, clamped to 1..100, defaulting to 50.
		Amount: intWithBounds(mustInt(query.Get("amount")), 1, 100, 50),
		Offset: mustPositive(mustInt(query.Get("offset"))),
	})
	if err != nil {
		c.Err(err)
		c.WriteJSON(500, nil)
		return
	}
	c.WriteJSON(200, sets)
}

func init() {
	// Single beatmap/set routes are exposed both with and without the /api
	// prefix; search only under /api.
	api.GET("/api/b/:id", Beatmap)
	api.GET("/b/:id", Beatmap)
	api.GET("/api/s/:id", Set)
	api.GET("/s/:id", Set)
	api.GET("/api/search", Search)
}

103
vendor/github.com/osuripple/cheesegull/cheesegull.go generated vendored Normal file
View File

@@ -0,0 +1,103 @@
package main
import (
"database/sql"
"fmt"
"net/http"
"os"
"strings"
"time"
"github.com/alecthomas/kingpin"
_ "github.com/go-sql-driver/mysql"
osuapi "github.com/thehowl/go-osuapi"
"github.com/osuripple/cheesegull/api"
"github.com/osuripple/cheesegull/dbmirror"
"github.com/osuripple/cheesegull/downloader"
"github.com/osuripple/cheesegull/housekeeper"
"github.com/osuripple/cheesegull/models"
// Components of the API we want to use
_ "github.com/osuripple/cheesegull/api/download"
_ "github.com/osuripple/cheesegull/api/metadata"
)
// searchDSNDocs is the help text for the --search-dsn flag.
// (Fixes the "definetely" typo in the user-facing help output.)
const searchDSNDocs = `"DSN to use for fulltext searches. ` +
	`This should be a SphinxQL server. Follow the format of the MySQL DSN. ` +
	`This can be the same as MYSQL_DSN, and cheesegull will still run ` +
	`successfully, however what happens when search is tried is undefined ` +
	`behaviour and you should definitely bother to set it up (follow the README).`
// Command-line flags (each also settable through the listed environment
// variable) configuring cheesegull.
var (
	osuAPIKey   = kingpin.Flag("api-key", "osu! API key").Short('k').Envar("OSU_API_KEY").String()
	osuUsername = kingpin.Flag("osu-username", "osu! username (for downloading and fetching whether a beatmap has a video)").Short('u').Envar("OSU_USERNAME").String()
	osuPassword = kingpin.Flag("osu-password", "osu! password (for downloading and fetching whether a beatmap has a video)").Short('p').Envar("OSU_PASSWORD").String()
	mysqlDSN    = kingpin.Flag("mysql-dsn", "DSN of MySQL").Short('m').Default("root@/cheesegull").Envar("MYSQL_DSN").String()
	searchDSN   = kingpin.Flag("search-dsn", searchDSNDocs).Default("root@tcp(127.0.0.1:9306)/cheesegull").Envar("SEARCH_DSN").String()
	httpAddr    = kingpin.Flag("http-addr", "Address on which to take HTTP requests.").Short('a').Default("127.0.0.1:62011").String()
	maxDisk     = kingpin.Flag("max-disk", "Maximum number of GB used by beatmap cache.").Default("10").Envar("MAXIMUM_DISK").Float64()
)
// addTimeParsing appends the parseTime and multiStatements options to a
// MySQL DSN, joining with '?' or '&' depending on whether the DSN already
// carries a query string.
func addTimeParsing(dsn string) string {
	if strings.Contains(dsn, "?") {
		return dsn + "&parseTime=true&multiStatements=true"
	}
	return dsn + "?parseTime=true&multiStatements=true"
}
// main wires together all cheesegull components: osu! API client, website
// downloader, MySQL, SphinxQL search, the on-disk cache housekeeper, the
// background mirroring goroutines, and finally the HTTP API server.
func main() {
	kingpin.Parse()
	fmt.Println("CheeseGull", Version)
	// set up osuapi client
	c := osuapi.NewClient(*osuAPIKey)
	// set up downloader (logs into the osu! website; needed for .osz
	// downloads and HasVideo checks)
	d, err := downloader.LogIn(*osuUsername, *osuPassword)
	if err != nil {
		fmt.Println("Can't log in into osu!:", err)
		os.Exit(1)
	}
	dbmirror.SetHasVideo(d.HasVideo)
	// set up mysql
	db, err := sql.Open("mysql", addTimeParsing(*mysqlDSN))
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	// set up search (SphinxQL speaks the MySQL protocol, hence the mysql driver)
	db2, err := sql.Open("mysql", *searchDSN)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	// set up housekeeper (on-disk beatmap cache), restoring its persisted
	// state and bounding its size by the --max-disk flag (GB → bytes).
	house := housekeeper.New()
	err = house.LoadState()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	house.MaxSize = uint64(float64(1024*1024*1024) * (*maxDisk))
	house.StartCleaner()
	// run mysql migrations
	// NOTE(review): a migration failure is only logged, not fatal — confirm
	// this is intentional.
	err = models.RunMigrations(db)
	if err != nil {
		fmt.Println("Error running migrations", err)
	}
	// start running components of cheesegull
	go dbmirror.StartSetUpdater(c, db)
	go dbmirror.DiscoverEvery(c, db, time.Hour*6, time.Second*20)
	// create request handler and serve forever; ListenAndServe only returns
	// on error, which panic surfaces.
	panic(http.ListenAndServe(*httpAddr, api.CreateHandler(db, db2, house, d)))
}

0
vendor/github.com/osuripple/cheesegull/data/.gitkeep generated vendored Normal file
View File

View File

@@ -0,0 +1,177 @@
// Package dbmirror is a package to create a database which is almost exactly
// the same as osu!'s beatmap database.
package dbmirror
import (
"database/sql"
"log"
"os"
"time"
raven "github.com/getsentry/raven-go"
"github.com/osuripple/cheesegull/models"
osuapi "github.com/thehowl/go-osuapi"
)
const (
	// NewBatchEvery is the amount of time that will elapse between one batch
	// of requests and another.
	NewBatchEvery = time.Minute
	// PerBatch is the amount of requests and updates every batch contains.
	PerBatch = 100
	// SetUpdaterWorkers is the number of goroutines which should take care of
	// new batches. Keep in mind that this will be the number of maximum
	// concurrent connections to the osu! API.
	SetUpdaterWorkers = PerBatch / 20
)

// hasVideo checks whether a beatmap set has a video.
// It is injected via SetHasVideo (main passes downloader.Client.HasVideo).
var hasVideo func(set int) (bool, error)

// SetHasVideo sets the hasVideo function to the one passed.
// A nil argument is ignored, keeping the previously set function.
func SetHasVideo(f func(int) (bool, error)) {
	if f == nil {
		return
	}
	hasVideo = f
}
// createChildrenBeatmaps converts osu! API beatmaps into their cheesegull
// model equivalents.
func createChildrenBeatmaps(bms []osuapi.Beatmap) []models.Beatmap {
	cgBms := make([]models.Beatmap, len(bms))
	for idx, bm := range bms {
		cgBms[idx] = models.Beatmap{
			ID:               bm.BeatmapID,
			ParentSetID:      bm.BeatmapSetID,
			DiffName:         bm.DiffName,
			FileMD5:          bm.FileMD5,
			Mode:             int(bm.Mode),
			BPM:              bm.BPM,
			AR:               float32(bm.ApproachRate),
			OD:               float32(bm.OverallDifficulty),
			CS:               float32(bm.CircleSize),
			HP:               float32(bm.HPDrain),
			TotalLength:      bm.TotalLength,
			HitLength:        bm.HitLength,
			Playcount:        bm.Playcount,
			Passcount:        bm.Passcount,
			MaxCombo:         bm.MaxCombo,
			DifficultyRating: bm.DifficultyRating,
		}
	}
	return cgBms
}

// setFromOsuAPIBeatmap builds a models.Set from the set-level fields of a
// single osu! API beatmap (callers pass the set's first beatmap).
// LastChecked is stamped with the current time.
func setFromOsuAPIBeatmap(b osuapi.Beatmap) models.Set {
	return models.Set{
		ID:           b.BeatmapSetID,
		RankedStatus: int(b.Approved),
		ApprovedDate: time.Time(b.ApprovedDate),
		LastUpdate:   time.Time(b.LastUpdate),
		LastChecked:  time.Now(),
		Artist:       b.Artist,
		Title:        b.Title,
		Creator:      b.Creator,
		Source:       b.Source,
		Tags:         b.Tags,
		Genre:        int(b.Genre),
		Language:     int(b.Language),
		Favourites:   b.FavouriteCount,
	}
}
// updateSet brings the database information for the given set up to date,
// fetching fresh data from the osu! API. If the API reports the set as no
// longer existing, the set is deleted from the database as well.
func updateSet(c *osuapi.Client, db *sql.DB, set models.Set) error {
	var (
		err error
		bms []osuapi.Beatmap
	)
	// Retry transient osu! API failures up to 5 times.
	for i := 0; i < 5; i++ {
		bms, err = c.GetBeatmaps(osuapi.GetBeatmapsOpts{
			BeatmapSetID: set.ID,
		})
		if err == nil {
			break
		}
	}
	// Bug fix: previously a persistent API error fell through to the
	// len(bms) == 0 branch below (the old in-loop `if i >= 5` check was
	// unreachable, as i never exceeds 4 inside the loop), wrongly deleting
	// a live set from the database. Bail out instead.
	if err != nil {
		return err
	}
	if len(bms) == 0 {
		// set has been deleted from osu!, so we do the same thing
		return models.DeleteSet(db, set.ID)
	}
	// create the new set based on the information we can obtain from the
	// first beatmap's information
	var x = bms[0]
	updated := !time.Time(x.LastUpdate).Equal(set.LastUpdate)
	set = setFromOsuAPIBeatmap(x)
	set.ChildrenBeatmaps = createChildrenBeatmaps(bms)
	if updated {
		// if it has been updated, video might have been added or removed
		// so we need to check for it
		set.HasVideo, err = hasVideo(x.BeatmapSetID)
		if err != nil {
			return err
		}
	}
	return models.CreateSet(db, set)
}
// By making the buffer the same size of the batch, we can be sure that all
// sets from the previous batch will have completed by the time we finish
// pushing all the beatmaps to the queue.
var setQueue = make(chan models.Set, PerBatch)

// setUpdater is a function to be run as a goroutine, that receives sets
// from setQueue and brings the information in the database up-to-date for that
// set.
func setUpdater(c *osuapi.Client, db *sql.DB) {
	for set := range setQueue {
		err := updateSet(c, db, set)
		if err != nil {
			// Log and keep going: one failed set must not stop the worker.
			logError(err)
		}
	}
}

// StartSetUpdater does batch updates for the beatmaps in the database,
// employing goroutines to fetch the data from the osu! API and then write it to
// the database. It loops forever and is meant to be run as a goroutine.
func StartSetUpdater(c *osuapi.Client, db *sql.DB) {
	// Spawn the worker pool consuming setQueue.
	for i := 0; i < SetUpdaterWorkers; i++ {
		go setUpdater(c, db)
	}
	for {
		sets, err := models.FetchSetsForBatchUpdate(db, PerBatch)
		if err != nil {
			logError(err)
			time.Sleep(NewBatchEvery)
			continue
		}
		// Feed the batch to the workers; blocks once the queue is full.
		for _, set := range sets {
			setQueue <- set
		}
		if len(sets) > 0 {
			log.Printf("[U] Updating sets, oldest LastChecked %v, newest %v, total length %d",
				sets[0].LastChecked,
				sets[len(sets)-1].LastChecked,
				len(sets),
			)
		}
		time.Sleep(NewBatchEvery)
	}
}
// envSentryDSN enables Sentry error reporting when set in the environment.
var envSentryDSN = os.Getenv("SENTRY_DSN")

// logError attempts to log an error to Sentry, as well as stdout.
// A nil error is a no-op.
func logError(err error) {
	if err == nil {
		return
	}
	if envSentryDSN != "" {
		raven.CaptureError(err, nil)
	}
	log.Println(err)
}

View File

@@ -0,0 +1,82 @@
package dbmirror
import (
"database/sql"
"log"
"time"
"github.com/osuripple/cheesegull/models"
osuapi "github.com/thehowl/go-osuapi"
)
// Discover discovers new beatmaps in the osu! database and adds them.
// It starts from the highest set ID currently stored and probes increasing
// IDs until 4096 consecutive IDs return no beatmaps.
func Discover(c *osuapi.Client, db *sql.DB) error {
	id, err := models.BiggestSetID(db)
	if err != nil {
		return err
	}
	log.Println("[D] Starting discovery with ID", id)
	// failedAttempts is the number of consecutive failed attempts at fetching a
	// beatmap (by 'failed', in this case we mean exclusively when a request to
	// get_beatmaps returns no beatmaps)
	failedAttempts := 0
	for failedAttempts < 4096 {
		id++
		if id%64 == 0 {
			log.Println("[D]", id)
		}
		var (
			err error
			bms []osuapi.Beatmap
		)
		// Retry transient API errors up to 5 times. (The previous in-loop
		// `if i >= 5` bail-out was unreachable — i never exceeds 4 inside
		// the loop — so the post-loop error check below handles failure.)
		for i := 0; i < 5; i++ {
			bms, err = c.GetBeatmaps(osuapi.GetBeatmapsOpts{
				BeatmapSetID: id,
			})
			if err == nil {
				break
			}
		}
		if err != nil {
			return err
		}
		if len(bms) == 0 {
			failedAttempts++
			continue
		}
		failedAttempts = 0
		// Build the set from the first beatmap's shared metadata.
		set := setFromOsuAPIBeatmap(bms[0])
		set.ChildrenBeatmaps = createChildrenBeatmaps(bms)
		set.HasVideo, err = hasVideo(bms[0].BeatmapSetID)
		if err != nil {
			return err
		}
		err = models.CreateSet(db, set)
		if err != nil {
			return err
		}
	}
	return nil
}
// DiscoverEvery runs Discover in an endless loop: after a clean run it
// sleeps successWait, and after a failed run it logs the error and sleeps
// errorWait before trying again.
func DiscoverEvery(c *osuapi.Client, db *sql.DB, successWait, errorWait time.Duration) {
	for {
		if err := Discover(c, db); err != nil {
			logError(err)
			time.Sleep(errorWait)
			continue
		}
		time.Sleep(successWait)
	}
}

7
vendor/github.com/osuripple/cheesegull/doc.go generated vendored Normal file
View File

@@ -0,0 +1,7 @@
// CheeseGull is a webserver that functions as a cache middleman between the
// official osu! mirrors and requesters of beatmaps, as well as also a cache
// middleman for beatmaps metadata retrieved from the official osu! API.
package main
// Version is the version of cheesegull.
const Version = "v2.0.4"

View File

@@ -0,0 +1,91 @@
// Package downloader implements downloading from the osu! website, through,
// well, mostly scraping and dirty hacks.
package downloader
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"net/url"
"strconv"
)
// LogIn logs in into an osu! account and returns a Client.
// The session cookies are kept in the client's cookie jar; a login is
// considered successful when the forum login redirects back to the index.
func LogIn(username, password string) (*Client, error) {
	j, err := cookiejar.New(&cookiejar.Options{})
	if err != nil {
		return nil, err
	}
	c := &http.Client{
		Jar: j,
	}
	vals := url.Values{}
	vals.Add("redirect", "/")
	vals.Add("sid", "")
	vals.Add("username", username)
	vals.Add("password", password)
	vals.Add("autologin", "on")
	vals.Add("login", "login")
	loginResp, err := c.PostForm("https://osu.ppy.sh/forum/ucp.php?mode=login", vals)
	if err != nil {
		return nil, err
	}
	// The body content is not needed; close it so the underlying connection
	// can be reused (it was previously leaked).
	defer loginResp.Body.Close()
	if loginResp.Request.URL.Path != "/" {
		return nil, errors.New("downloader: Login: could not log in (was not redirected to index)")
	}
	return (*Client)(c), nil
}
// Client is a wrapper around an http.Client which can fetch beatmaps from the
// osu! website. Create it through LogIn, which sets up the session cookies.
type Client http.Client

// HasVideo checks whether a beatmap has a video.
// It scrapes the set's page on the osu! website, looking for the
// no-video download link ("/d/<id>n"), which is only present for sets
// with a video.
func (c *Client) HasVideo(setID int) (bool, error) {
	h := (*http.Client)(c)
	page, err := h.Get(fmt.Sprintf("https://osu.ppy.sh/s/%d", setID))
	if err != nil {
		return false, err
	}
	defer page.Body.Close()
	body, err := ioutil.ReadAll(page.Body)
	if err != nil {
		return false, err
	}
	return bytes.Contains(body, []byte(fmt.Sprintf(`href="/d/%dn"`, setID))), nil
}

// Download downloads a beatmap from the osu! website. noVideo specifies whether
// we should request the beatmap to not have the video.
func (c *Client) Download(setID int, noVideo bool) (io.ReadCloser, error) {
	suffix := ""
	if noVideo {
		// The website serves the videoless variant at /d/<id>n.
		suffix = "n"
	}
	return c.getReader(strconv.Itoa(setID) + suffix)
}

// ErrNoRedirect is returned from Download when we were not redirected, thus
// indicating that the beatmap is unavailable.
var ErrNoRedirect = errors.New("no redirect happened, beatmap could not be downloaded")

// getReader requests /d/<str>. Staying on osu.ppy.sh (no redirect to a
// mirror/CDN host) means the site refused to serve the file, reported as
// ErrNoRedirect. The caller must close the returned ReadCloser.
func (c *Client) getReader(str string) (io.ReadCloser, error) {
	h := (*http.Client)(c)
	resp, err := h.Get("https://osu.ppy.sh/d/" + str)
	if err != nil {
		return nil, err
	}
	if resp.Request.URL.Host == "osu.ppy.sh" {
		resp.Body.Close()
		return nil, ErrNoRedirect
	}
	return resp.Body, nil
}

View File

@@ -0,0 +1,76 @@
package downloader
import (
"crypto/md5"
"fmt"
"io"
"io/ioutil"
"os"
"testing"
)
var c *Client
var (
username = os.Getenv("OSU_USERNAME")
password = os.Getenv("OSU_PASSWORD")
)
// TestLogIn logs into the osu! website with credentials taken from the
// environment, storing the client in the package-level c for later tests.
func TestLogIn(t *testing.T) {
	var err error
	c, err = LogIn(username, password)
	if err != nil {
		t.Fatal(err)
	}
}

// TestLogInWrongDetails ensures that bogus credentials produce an error.
func TestLogInWrongDetails(t *testing.T) {
	_, err := LogIn("a", "i")
	if err == nil {
		t.Fatal("Unexpected non-error when trying to log in with user 'a' and password 'i'")
	}
}
// TestDownload downloads known beatmaps and verifies their MD5 sums.
// Fixed to match the current Client.Download API, which takes
// (setID, noVideo) and returns (io.ReadCloser, error) — the old test
// called c.Download(1) expecting three return values and did not compile.
func TestDownload(t *testing.T) {
	if c == nil {
		t.Skip("c is nil")
	}
	{
		// Set 1 has no video.
		r, err := c.Download(1, false)
		if err != nil {
			t.Fatal(err)
		}
		defer r.Close()
		md5Test(t, r, "f40fae62893087e72672b3e6d1468a70")
	}
	{
		// Set 100517 has a video: fetch both the with-video and the
		// no-video variants.
		vid, err := c.Download(100517, false)
		if err != nil {
			t.Fatal(err)
		}
		defer vid.Close()
		md5Test(t, vid, "500b361f47ff99551dbb9931cdf39ace")
		novid, err := c.Download(100517, true)
		if err != nil {
			t.Fatal(err)
		}
		defer novid.Close()
		md5Test(t, novid, "3de1e07850e2fe1f21333e4d5b01a350")
	}
}
// cleanUp removes the given files, ignoring errors (best-effort test
// cleanup).
func cleanUp(files ...string) {
	for _, name := range files {
		os.Remove(name)
	}
}

// md5Test reads all of f and fails the test when its hex MD5 digest does
// not equal expect.
func md5Test(t *testing.T, f io.Reader, expect string) {
	data, err := ioutil.ReadAll(f)
	if err != nil {
		t.Fatal(err)
	}
	if got := fmt.Sprintf("%x", md5.Sum(data)); got != expect {
		t.Fatal("expecting md5 sum to be", expect, "got", got)
	}
}

21
vendor/github.com/osuripple/cheesegull/goreleaser.yml generated vendored Normal file
View File

@@ -0,0 +1,21 @@
build:
goos:
- windows
- darwin
- linux
goarch:
- 386
- amd64
- arm
- arm64
archive:
replacements:
amd64: 64bit
386: 32bit
format_overrides:
- goos: windows
format: zip
snapshot:
name_template: devel
release:
draft: true

View File

@@ -0,0 +1,113 @@
package housekeeper
import (
"encoding"
"errors"
"io"
)
// cachedBeatmapBinSize is the fixed width of one serialized CachedBeatmap
// record: ID (8) + NoVideo (1) + LastUpdate (15) + lastRequested (15) +
// fileSize (8) bytes.
const cachedBeatmapBinSize = 8 + 1 + 15 + 15 + 8

// b2i converts a bool to its 0/1 byte encoding.
func b2i(b bool) byte {
	if !b {
		return 0
	}
	return 1
}

// marshalBinaryCopy marshals t and copies the result into dst.
func marshalBinaryCopy(dst []byte, t encoding.BinaryMarshaler) {
	b, _ := t.MarshalBinary() // error deliberately ignored; b is nil on failure
	copy(dst, b)
}

// putUint64 writes v into b[0:8] in big-endian byte order (copied from
// binary.BigEndian to avoid importing encoding/binary).
func putUint64(b []byte, v uint64) {
	_ = b[7] // boundary check
	for i := uint(0); i < 8; i++ {
		b[i] = byte(v >> (56 - 8*i))
	}
}
// writeBeatmaps serializes the downloaded beatmaps in c to w in the CGBIN001
// format: the 8-byte magic "CGBIN001", one byte holding the fixed record
// size, then one cachedBeatmapBinSize-byte record per beatmap.
// Beatmaps that are nil or not fully downloaded are skipped.
func writeBeatmaps(w io.Writer, c []*CachedBeatmap) error {
	_, err := w.Write(append([]byte("CGBIN001"), cachedBeatmapBinSize))
	if err != nil {
		return err
	}
	for _, b := range c {
		if b == nil || !b.isDownloaded {
			continue
		}
		// Record layout: ID (8) | NoVideo (1) | LastUpdate (15) |
		// lastRequested (15) | fileSize (8).
		enc := make([]byte, cachedBeatmapBinSize)
		putUint64(enc[:8], uint64(b.ID))
		enc[8] = b2i(b.NoVideo)
		marshalBinaryCopy(enc[9:24], b.LastUpdate)
		marshalBinaryCopy(enc[24:39], b.lastRequested)
		putUint64(enc[39:47], b.fileSize)
		_, err := w.Write(enc)
		if err != nil {
			return err
		}
	}
	return nil
}
// readUint64 decodes a big-endian uint64 from b[0:8] (copied from
// binary.BigEndian to avoid importing encoding/binary).
func readUint64(b []byte) uint64 {
	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
	var v uint64
	for _, x := range b[:8] {
		v = v<<8 | uint64(x)
	}
	return v
}
// readCachedBeatmap decodes one fixed-width record produced by
// writeBeatmaps back into a CachedBeatmap, marking it as downloaded
// (only downloaded beatmaps are ever written).
// NOTE(review): the UnmarshalBinary errors are silently ignored, so a
// corrupt record yields zero-value times instead of failing.
func readCachedBeatmap(b []byte) *CachedBeatmap {
	m := &CachedBeatmap{}
	m.ID = int(readUint64(b[:8]))
	m.NoVideo = b[8] == 1
	(&m.LastUpdate).UnmarshalBinary(b[9:24])
	(&m.lastRequested).UnmarshalBinary(b[24:39])
	m.fileSize = readUint64(b[39:47])
	m.isDownloaded = true
	return m
}
// readBeatmaps decodes the beatmaps serialised by writeBeatmaps from r.
//
// The stream must start with the magic "CGBIN001", followed by one byte
// holding the record size, then the fixed-size records themselves. A
// truncated trailing record is silently dropped, preserving the tolerant
// behaviour of the previous implementation.
//
// io.ReadFull is used throughout instead of a bare r.Read: Read is allowed
// to return fewer bytes than requested (e.g. on files or network readers),
// which would previously have mis-parsed the header or records.
func readBeatmaps(r io.Reader) ([]*CachedBeatmap, error) {
	header := make([]byte, 8)
	if _, err := io.ReadFull(r, header); err != nil {
		return nil, err
	}
	if string(header) != "CGBIN001" {
		return nil, errors.New("unknown cgbin version")
	}

	sizeByte := make([]byte, 1)
	if _, err := io.ReadFull(r, sizeByte); err != nil {
		return nil, err
	}
	bmLength := sizeByte[0]
	if bmLength == 0 {
		return nil, nil
	}

	rec := make([]byte, bmLength)
	beatmaps := make([]*CachedBeatmap, 0, 50)
	for {
		_, err := io.ReadFull(r, rec)
		switch err {
		case nil:
			beatmaps = append(beatmaps, readCachedBeatmap(rec))
		case io.EOF, io.ErrUnexpectedEOF:
			// clean end of stream, or a truncated final record:
			// return what we have collected so far.
			return beatmaps, nil
		default:
			return nil, err
		}
	}
}

View File

@@ -0,0 +1,79 @@
package housekeeper
import (
"bytes"
"reflect"
"testing"
"time"
)
// testBeatmaps is the fixture shared by the encode/decode test and the
// benchmarks below. Every element sets isDownloaded, because writeBeatmaps
// skips beatmaps that are not downloaded.
var testBeatmaps = []*CachedBeatmap{
	&CachedBeatmap{
		isDownloaded: true,
	},
	&CachedBeatmap{
		ID:           851,
		NoVideo:      true,
		isDownloaded: true,
	},
	&CachedBeatmap{
		ID:           1337777,
		fileSize:     58111,
		isDownloaded: true,
	},
	&CachedBeatmap{
		ID:            851,
		LastUpdate:    time.Date(2017, 9, 21, 11, 11, 50, 0, time.UTC),
		lastRequested: time.Date(2017, 9, 21, 22, 11, 50, 0, time.UTC),
		isDownloaded:  true,
	},
}
// TestEncodeDecode round-trips testBeatmaps through writeBeatmaps and
// readBeatmaps, requiring the decoded slice to be deeply equal to the
// original fixture.
func TestEncodeDecode(t *testing.T) {
	buf := &bytes.Buffer{}
	start := time.Now()
	err := writeBeatmaps(buf, testBeatmaps)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("Write took %v", time.Since(start))
	start = time.Now()
	readBMs, err := readBeatmaps(buf)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("Read took %v", time.Since(start))
	if !reflect.DeepEqual(readBMs, testBeatmaps) {
		t.Fatalf("original %v read %v", testBeatmaps, readBMs)
	}
}
// BenchmarkWriteBinaryState measures the cost of serialisation alone by
// writing to a discarding fakeWriter.
func BenchmarkWriteBinaryState(b *testing.B) {
	for i := 0; i < b.N; i++ {
		writeBeatmaps(fakeWriter{}, testBeatmaps)
	}
}
// BenchmarkReadBinaryState measures deserialisation by re-reading the same
// pre-encoded buffer on each iteration (encoding happens once, before the
// timer is reset).
func BenchmarkReadBinaryState(b *testing.B) {
	buf := &bytes.Buffer{}
	err := writeBeatmaps(buf, testBeatmaps)
	if err != nil {
		b.Fatal(err)
	}
	bufBytes := buf.Bytes()
	bReader := bytes.NewReader(bufBytes)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		readBeatmaps(bReader)
		bReader.Reset(bufBytes)
	}
}
// fakeWriter is an io.Writer that discards its input, always reporting a
// complete, successful write.
type fakeWriter struct{}

func (fakeWriter) Write(p []byte) (int, error) {
	return len(p), nil
}

View File

@@ -0,0 +1,188 @@
// Package housekeeper manages the local cache of CheeseGull, by always keeping
// track of the local state of the cache and keeping it to a low amount.
package housekeeper
import (
"log"
"os"
"sort"
"sync"
raven "github.com/getsentry/raven-go"
)
// House manages the state of the cached beatmaps in the local filesystem.
type House struct {
	// MaxSize is the maximum cumulative size, in bytes, of the downloaded
	// beatmaps kept on disk before cleanUp starts evicting them.
	MaxSize uint64
	// state holds every beatmap known to the cache; guarded by stateMutex.
	state      []*CachedBeatmap
	stateMutex sync.RWMutex
	// requestChan has capacity 1 so that at most one cleanup request is
	// ever pending (see scheduleCleanup).
	requestChan chan struct{}

	// set to non-nil to avoid calling os.Remove on the files to remove, and
	// place them here instead.
	dryRun []*CachedBeatmap
}
// New creates a new house, initialised with the default values.
func New() *House {
	h := &House{
		// capacity 1: at most one cleanup request pending at a time.
		requestChan: make(chan struct{}, 1),
	}
	h.MaxSize = 10 << 30 // 10 GiB
	return h
}
// scheduleCleanup schedules a housekeeping request if one isn't already
// pending.
func (h *House) scheduleCleanup() {
	// Non-blocking send: requestChan has capacity 1, so when a cleanup is
	// already queued the request is simply dropped.
	select {
	case h.requestChan <- struct{}{}:
		// carry on
	default:
		// carry on
	}
}
// StartCleaner starts the process that will do the necessary housekeeping
// every time a cleanup is scheduled with scheduleCleanup.
// The goroutine runs for the lifetime of the process (there is no stop
// mechanism) and cleanups are executed serially.
func (h *House) StartCleaner() {
	go func() {
		for {
			<-h.requestChan
			h.cleanUp()
		}
	}()
}
// cleanUp computes which beatmaps should be evicted, persists the reduced
// state to cgbin.db and then deletes the evicted .osz files. When dryRun is
// non-nil the would-be deletions are recorded there instead of touching the
// filesystem (used by tests).
func (h *House) cleanUp() {
	log.Println("[C] Running cleanup")

	toRemove := h.mapsToRemove()

	// NOTE(review): os.Create truncates any existing cgbin.db before we know
	// the new state can be written successfully — confirm this is acceptable.
	f, err := os.Create("cgbin.db")
	if err != nil {
		logError(err)
		return
	}

	// build new state by removing from it the beatmaps from toRemove
	h.stateMutex.Lock()
	newState := make([]*CachedBeatmap, 0, len(h.state))
StateLoop:
	for _, b := range h.state {
		for _, r := range toRemove {
			// a cached beatmap is identified by ID plus the NoVideo flag.
			if r.ID == b.ID && r.NoVideo == b.NoVideo {
				continue StateLoop
			}
		}
		newState = append(newState, b)
	}
	h.state = newState
	err = writeBeatmaps(f, h.state)
	h.stateMutex.Unlock()
	f.Close()
	if err != nil {
		logError(err)
		return
	}

	if h.dryRun != nil {
		h.dryRun = toRemove
		return
	}

	for _, b := range toRemove {
		err := os.Remove(b.fileName())
		switch {
		case err == nil, os.IsNotExist(err):
			// silently ignore: the file being already gone is fine.
		default:
			logError(err)
		}
	}
}
// mapsToRemove returns the beatmaps that should be evicted: when the total
// size of the downloaded maps exceeds h.MaxSize, the least recently
// requested removable maps are selected until at least the excess bytes
// would be freed. It returns nil when no cleanup is needed.
func (h *House) mapsToRemove() []*CachedBeatmap {
	totalSize, removable := h.stateSizeAndRemovableMaps()

	if totalSize <= h.MaxSize {
		// no clean up needed, our totalSize has still not gotten over the
		// threshold
		return nil
	}

	sortByLastRequested(removable)

	// keep the byte count unsigned: the previous conversion of the uint64
	// difference to int could overflow for pathologically large sizes.
	removeBytes := totalSize - h.MaxSize
	var toRemove []*CachedBeatmap
	for _, b := range removable {
		toRemove = append(toRemove, b)
		fSize := b.FileSize()
		if fSize >= removeBytes {
			break
		}
		removeBytes -= fSize
	}
	return toRemove
}
// i hate verbose names myself, but it was very hard to come up with something
// even as short as this.
// stateSizeAndRemovableMaps reports the cumulative on-disk size of all
// downloaded beatmaps, plus the subset of them that may be evicted
// (downloaded and with a known non-zero file size).
func (h *House) stateSizeAndRemovableMaps() (totalSize uint64, removable []*CachedBeatmap) {
	h.stateMutex.RLock()
	for _, b := range h.state {
		if !b.IsDownloaded() {
			continue
		}
		fSize := b.FileSize()
		totalSize += fSize
		// zero-sized maps are counted but never considered removable.
		if fSize == 0 {
			continue
		}
		removable = append(removable, b)
	}
	h.stateMutex.RUnlock()
	return
}
// sortByLastRequested sorts b in place so that the least recently requested
// beatmaps come first (the eviction order used by mapsToRemove).
func sortByLastRequested(b []*CachedBeatmap) {
	sort.Slice(b, func(i, j int) bool {
		// lastRequested is guarded by each beatmap's own RWMutex, so both
		// elements are read-locked for the duration of the comparison.
		b[i].mtx.RLock()
		b[j].mtx.RLock()
		r := b[i].lastRequested.Before(b[j].lastRequested)
		b[i].mtx.RUnlock()
		b[j].mtx.RUnlock()
		return r
	})
}
// LoadState attempts to load the state from cgbin.db
// A missing cgbin.db is not an error: the house simply starts empty.
func (h *House) LoadState() error {
	f, err := os.Open("cgbin.db")
	switch {
	case os.IsNotExist(err):
		return nil
	case err != nil:
		return err
	}
	defer f.Close()

	h.stateMutex.Lock()
	h.state, err = readBeatmaps(f)
	h.stateMutex.Unlock()

	return err
}
// envSentryDSN is read once at startup; when non-empty, errors passed to
// logError are also reported to Sentry.
var envSentryDSN = os.Getenv("SENTRY_DSN")

// logError attempts to log an error to Sentry, as well as stdout.
// A nil error is a no-op.
func logError(err error) {
	if err == nil {
		return
	}
	if envSentryDSN != "" {
		raven.CaptureError(err, nil)
	}
	log.Println(err)
}

View File

@@ -0,0 +1,129 @@
package housekeeper
import (
"reflect"
"testing"
"time"
)
// TestCleanupOneMap: four 15-byte maps against a 50-byte MaxSize, so exactly
// the least recently requested one (ID 1) must be evicted. dryRun captures
// the would-be deletions instead of touching the filesystem.
func TestCleanupOneMap(t *testing.T) {
	expectRemove := []*CachedBeatmap{
		&CachedBeatmap{
			ID:            1,
			lastRequested: time.Date(2017, 4, 5, 15, 5, 3, 0, time.UTC),
			fileSize:      15,
			isDownloaded:  true,
		},
	}
	expectRemain := []*CachedBeatmap{
		&CachedBeatmap{
			ID:            2,
			lastRequested: time.Date(2017, 4, 10, 15, 5, 3, 0, time.UTC),
			fileSize:      15,
			isDownloaded:  true,
		},
		&CachedBeatmap{
			ID:            3,
			lastRequested: time.Date(2017, 4, 15, 15, 5, 3, 0, time.UTC),
			fileSize:      15,
			isDownloaded:  true,
		},
		&CachedBeatmap{
			ID:            4,
			lastRequested: time.Date(2017, 4, 20, 15, 5, 3, 0, time.UTC),
			fileSize:      15,
			isDownloaded:  true,
		},
	}
	h := New()
	h.MaxSize = 50
	h.state = append(expectRemain, expectRemove...)
	h.dryRun = make([]*CachedBeatmap, 0)
	start := time.Now()
	h.cleanUp()
	t.Log("cleanup took", time.Since(start))
	if !reflect.DeepEqual(expectRemain, h.state) {
		t.Errorf("Want %v got %v", expectRemain, h.state)
	}
	if !reflect.DeepEqual(expectRemove, h.dryRun) {
		t.Errorf("Want %v got %v", expectRemove, h.dryRun)
	}
}
// TestCleanupNoMaps: the single map exactly fills MaxSize (10 bytes), so the
// cleanup must evict nothing.
func TestCleanupNoMaps(t *testing.T) {
	expectRemove := []*CachedBeatmap{}
	expectRemain := []*CachedBeatmap{
		&CachedBeatmap{
			ID:            1,
			lastRequested: time.Date(2017, 4, 10, 15, 5, 3, 0, time.UTC),
			fileSize:      10,
			isDownloaded:  true,
		},
	}
	h := New()
	h.MaxSize = 10
	h.state = append(expectRemain, expectRemove...)
	h.dryRun = make([]*CachedBeatmap, 0)
	start := time.Now()
	h.cleanUp()
	t.Log("cleanup took", time.Since(start))
	if !reflect.DeepEqual(expectRemain, h.state) {
		t.Errorf("Want %v got %v", expectRemain, h.state)
	}
	if !reflect.DeepEqual(expectRemove, h.dryRun) {
		t.Errorf("Want %v got %v", expectRemove, h.dryRun)
	}
}
// TestCleanupEmptyBeatmaps: beatmaps with a zero file size are never
// considered removable, so only the 10-byte map is evicted even though the
// cache exceeds its 5-byte MaxSize.
func TestCleanupEmptyBeatmaps(t *testing.T) {
	expectRemove := []*CachedBeatmap{
		&CachedBeatmap{
			ID:            1,
			lastRequested: time.Date(2017, 4, 10, 15, 5, 3, 0, time.UTC),
			fileSize:      10,
			isDownloaded:  true,
		},
	}
	expectRemain := []*CachedBeatmap{
		&CachedBeatmap{
			ID:            2,
			lastRequested: time.Date(2017, 4, 5, 15, 5, 3, 0, time.UTC),
			fileSize:      0,
			isDownloaded:  true,
		},
		&CachedBeatmap{
			ID:            3,
			lastRequested: time.Date(2017, 4, 4, 15, 5, 3, 0, time.UTC),
			fileSize:      0,
			isDownloaded:  true,
		},
		&CachedBeatmap{
			ID:            4,
			lastRequested: time.Date(2017, 4, 3, 15, 5, 3, 0, time.UTC),
			fileSize:      0,
			isDownloaded:  true,
		},
	}
	h := New()
	h.MaxSize = 5
	h.state = append(expectRemain, expectRemove...)
	h.dryRun = make([]*CachedBeatmap, 0)
	start := time.Now()
	h.cleanUp()
	t.Log("cleanup took", time.Since(start))
	if !reflect.DeepEqual(expectRemain, h.state) {
		t.Errorf("Want %v got %v", expectRemain, h.state)
	}
	if !reflect.DeepEqual(expectRemove, h.dryRun) {
		t.Errorf("Want %v got %v", expectRemove, h.dryRun)
	}
}

View File

@@ -0,0 +1,158 @@
package housekeeper
import (
"fmt"
"os"
"strconv"
"sync"
"time"
)
// CachedBeatmap represents a beatmap that is held in the cache of CheeseGull.
type CachedBeatmap struct {
	// ID and NoVideo identify the cached .osz archive (see fileName).
	ID      int
	NoVideo bool
	// LastUpdate is the last time the set was updated.
	// NOTE(review): it is written by AcquireBeatmap without holding mtx.
	LastUpdate time.Time

	// lastRequested, fileSize and isDownloaded are guarded by mtx.
	lastRequested time.Time

	fileSize uint64

	isDownloaded bool
	mtx          sync.RWMutex
	// waitGroup lets MustBeDownloaded block until the in-flight download
	// finishes; DownloadCompleted calls Done on it.
	waitGroup sync.WaitGroup
}
// File opens the File of the beatmap from the filesystem (read-only).
func (c *CachedBeatmap) File() (*os.File, error) {
	return os.Open(c.fileName())
}
// CreateFile creates the File of the beatmap in the filesystem, and returns it
// in write mode. An existing file at the same path is truncated.
func (c *CachedBeatmap) CreateFile() (*os.File, error) {
	return os.Create(c.fileName())
}
// fileName returns the path of the beatmap's archive inside the local data
// directory: "data/<ID>.osz", or "data/<ID>n.osz" for the no-video variant.
func (c *CachedBeatmap) fileName() string {
	suffix := ".osz"
	if c.NoVideo {
		suffix = "n.osz"
	}
	return "data/" + strconv.Itoa(c.ID) + suffix
}
// IsDownloaded checks whether the beatmap has been downloaded.
func (c *CachedBeatmap) IsDownloaded() bool {
	c.mtx.RLock()
	defer c.mtx.RUnlock()
	return c.isDownloaded
}
// FileSize returns the FileSize of c, in bytes.
func (c *CachedBeatmap) FileSize() uint64 {
	c.mtx.RLock()
	defer c.mtx.RUnlock()
	return c.fileSize
}
// MustBeDownloaded will check whether the beatmap is downloaded.
// If it is not, it will wait for it to become downloaded.
func (c *CachedBeatmap) MustBeDownloaded() {
	if c.IsDownloaded() {
		return
	}
	// Wait returns immediately if the counter is already zero, i.e. the
	// download finished between the check above and this call.
	c.waitGroup.Wait()
}
// DownloadCompleted must be called once the beatmap has finished downloading.
// It records the file size, marks the beatmap as downloaded, releases any
// MustBeDownloaded waiters and schedules a cache cleanup on parentHouse.
// NOTE(review): calling this twice for the same beatmap would panic
// (WaitGroup counter going negative) — confirm callers invoke it only once.
func (c *CachedBeatmap) DownloadCompleted(fileSize uint64, parentHouse *House) {
	c.mtx.Lock()
	c.fileSize = fileSize
	c.isDownloaded = true
	c.mtx.Unlock()
	c.waitGroup.Done()
	parentHouse.scheduleCleanup()
}
// SetLastRequested changes the last requested time, which determines the
// eviction order during cache cleanups.
func (c *CachedBeatmap) SetLastRequested(t time.Time) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.lastRequested = t
}
// String implements fmt.Stringer, giving a compact human-readable
// description of the beatmap for logs and test failures.
func (c *CachedBeatmap) String() string {
	return fmt.Sprintf("{ID: %d NoVideo: %t LastUpdate: %v}", c.ID, c.NoVideo, c.LastUpdate)
}
// AcquireBeatmap attempts to add a new CachedBeatmap to the state.
// In order to add a new CachedBeatmap to the state, one must not already exist
// in the state with the same ID, NoVideo and LastUpdate. In case one is already
// found, this is returned, alongside with false. If LastUpdate is newer than
// that of the beatmap stored in the state, then the beatmap in the state's
// downloaded status is switched back to false and the LastUpdate is changed.
// true is also returned, indicating that the caller now has the burden of
// downloading the beatmap.
//
// In the case the cachedbeatmap has not been stored in the state, then
// it is added to the state and, like the case where LastUpdated has been
// changed, true is returned, indicating that the caller must now download the
// beatmap.
//
// If you're confused attempting to read this, let me give you an example:
//
// A: Yo, is this beatmap cached?
// B: Yes, yes it is! Here you go with the information about it. No need to do
// anything else.
// ----
// A: Yo, got this beatmap updated 2 hours ago. Have you got it cached?
// B: Ah, I'm afraid that I only have the version updated 10 hours ago.
// Mind downloading the updated version for me?
// ----
// A: Yo, is this beatmap cached?
// B: Nope, I didn't know it existed before you told me. I've recorded its
// info now, but jokes on you, you now have to actually download it.
// Chop chop!
func (h *House) AcquireBeatmap(c *CachedBeatmap) (*CachedBeatmap, bool) {
	if c == nil {
		return nil, false
	}

	h.stateMutex.RLock()
	for _, b := range h.state {
		// if the id or novideo is different, then all is good and we
		// can proceed with the next element.
		if b.ID != c.ID || b.NoVideo != c.NoVideo {
			continue
		}
		// unlocking because in either branch, we will return.
		h.stateMutex.RUnlock()
		// if c is not newer than b, then just return.
		if !b.LastUpdate.Before(c.LastUpdate) {
			return b, false
		}
		// NOTE(review): contrary to the doc comment above, isDownloaded is
		// not switched back to false here, and LastUpdate is written without
		// holding b.mtx — confirm whether this is intended before changing,
		// as re-arming the WaitGroup safely is non-trivial.
		b.LastUpdate = c.LastUpdate
		return b, true
	}
	h.stateMutex.RUnlock()

	// c was not present in our state: we need to add it.
	// we need to recreate the CachedBeatmap: this way we can be sure the zero
	// is set for the unexported fields.
	n := &CachedBeatmap{
		ID:         c.ID,
		NoVideo:    c.NoVideo,
		LastUpdate: c.LastUpdate,
	}
	// the WaitGroup starts at 1 so MustBeDownloaded blocks until
	// DownloadCompleted is called.
	n.waitGroup.Add(1)
	h.stateMutex.Lock()
	h.state = append(h.state, n)
	h.stateMutex.Unlock()
	return n, true
}

View File

@@ -0,0 +1,116 @@
package models
import "database/sql"
// Beatmap represents a single beatmap (difficulty) on osu!.
type Beatmap struct {
	ID               int `json:"BeatmapID"`
	ParentSetID      int
	DiffName         string
	FileMD5          string
	Mode             int
	BPM              float64
	AR               float32
	OD               float32
	CS               float32
	HP               float32
	TotalLength      int
	HitLength        int
	Playcount        int
	Passcount        int
	MaxCombo         int
	DifficultyRating float64
}

// beatmapFields is the column list matching the Scan order used throughout
// this package; keep the two in sync with the Beatmap struct above.
const beatmapFields = `
id, parent_set_id, diff_name, file_md5, mode, bpm,
ar, od, cs, hp, total_length, hit_length,
playcount, passcount, max_combo, difficulty_rating`
// readBeatmapsFromRows scans all remaining rows into a []Beatmap with the
// given initial capacity hint, then reports any iteration error. rows is
// always closed before returning, which previously was not the case on an
// early Scan error (leaking the underlying connection).
func readBeatmapsFromRows(rows *sql.Rows, capacity int) ([]Beatmap, error) {
	defer rows.Close()
	bms := make([]Beatmap, 0, capacity)
	for rows.Next() {
		var b Beatmap
		err := rows.Scan(
			&b.ID, &b.ParentSetID, &b.DiffName, &b.FileMD5, &b.Mode, &b.BPM,
			&b.AR, &b.OD, &b.CS, &b.HP, &b.TotalLength, &b.HitLength,
			&b.Playcount, &b.Passcount, &b.MaxCombo, &b.DifficultyRating,
		)
		if err != nil {
			return nil, err
		}
		bms = append(bms, b)
	}
	return bms, rows.Err()
}
// inClause returns `length` comma-separated question-mark placeholders for
// use inside a SQL IN (...) clause, e.g. inClause(3) == "?, ?, ?".
// A non-positive length yields the empty string.
func inClause(length int) string {
	if length <= 0 {
		return ""
	}
	out := make([]byte, 0, length*3-2)
	for i := 0; i < length; i++ {
		if i > 0 {
			out = append(out, ',', ' ')
		}
		out = append(out, '?')
	}
	return string(out)
}
// sIntToSInterface converts a slice of ints into a slice of empty
// interfaces, as required by the variadic database/sql query functions.
func sIntToSInterface(i []int) []interface{} {
	out := make([]interface{}, 0, len(i))
	for _, v := range i {
		out = append(out, v)
	}
	return out
}
// FetchBeatmaps retrieves a list of beatmap knowing their IDs.
// IDs with no matching row are silently absent from the result; passing no
// IDs returns (nil, nil) without touching the database.
func FetchBeatmaps(db *sql.DB, ids ...int) ([]Beatmap, error) {
	if len(ids) == 0 {
		return nil, nil
	}
	q := `SELECT ` + beatmapFields + ` FROM beatmaps WHERE id IN (` + inClause(len(ids)) + `)`
	rows, err := db.Query(q, sIntToSInterface(ids)...)
	if err != nil {
		return nil, err
	}
	return readBeatmapsFromRows(rows, len(ids))
}
// CreateBeatmaps adds beatmaps in the database.
// All beatmaps are inserted with a single multi-row INSERT; passing no
// beatmaps is a no-op.
func CreateBeatmaps(db *sql.DB, bms ...Beatmap) error {
	if len(bms) == 0 {
		return nil
	}

	q := `INSERT INTO beatmaps(` + beatmapFields + `) VALUES `
	const valuePlaceholder = `(
	?, ?, ?, ?, ?, ?,
	?, ?, ?, ?, ?, ?,
	?, ?, ?, ?
)`

	// 16 bound parameters per beatmap; the previous fixed capacity of 15*4
	// under-allocated for batches of more than four beatmaps.
	args := make([]interface{}, 0, len(bms)*16)
	for idx, bm := range bms {
		if idx != 0 {
			q += ", "
		}
		q += valuePlaceholder
		args = append(args,
			bm.ID, bm.ParentSetID, bm.DiffName, bm.FileMD5, bm.Mode, bm.BPM,
			bm.AR, bm.OD, bm.CS, bm.HP, bm.TotalLength, bm.HitLength,
			bm.Playcount, bm.Passcount, bm.MaxCombo, bm.DifficultyRating,
		)
	}

	_, err := db.Exec(q, args...)
	return err
}

View File

@@ -0,0 +1,52 @@
// THIS FILE HAS BEEN AUTOMATICALLY GENERATED
// To re-generate it, run "go generate" in the models folder.
package models
// migrations are applied in order by RunMigrations, resuming after the
// version stored in db_version. Do not edit this slice by hand: change the
// files in the migrations/ directory and re-run "go generate".
var migrations = [...]string{
	`CREATE TABLE sets(
id INT NOT NULL,
ranked_status TINYINT NOT NULL,
approved_date DATETIME NOT NULL,
last_update DATETIME NOT NULL,
last_checked DATETIME NOT NULL,
artist VARCHAR(1000) NOT NULL,
title VARCHAR(1000) NOT NULL,
creator VARCHAR(1000) NOT NULL,
source VARCHAR(1000) NOT NULL,
tags VARCHAR(1000) NOT NULL,
has_video TINYINT NOT NULL,
genre TINYINT NOT NULL,
language TINYINT NOT NULL,
favourites INT NOT NULL,
set_modes TINYINT NOT NULL,
PRIMARY KEY(id)
);
`,
	`CREATE TABLE beatmaps(
id INT NOT NULL,
parent_set_id INT NOT NULL,
diff_name VARCHAR(1000) NOT NULL,
file_md5 CHAR(32) NOT NULL,
mode INT NOT NULL,
bpm DECIMAL(10, 4) NOT NULL,
ar DECIMAL(4, 2) NOT NULL,
od DECIMAL(4, 2) NOT NULL,
cs DECIMAL(4, 2) NOT NULL,
hp DECIMAL(4, 2) NOT NULL,
total_length INT NOT NULL,
hit_length INT NOT NULL,
playcount INT NOT NULL,
passcount INT NOT NULL,
max_combo INT NOT NULL,
difficulty_rating INT NOT NULL,
PRIMARY KEY(id),
FOREIGN KEY (parent_set_id) REFERENCES sets(id)
ON DELETE CASCADE
ON UPDATE CASCADE
);`,
	`ALTER TABLE sets ADD FULLTEXT(artist, title, creator, source, tags);`,
	`ALTER TABLE beatmaps MODIFY difficulty_rating DECIMAL(20, 15);
`,
	`ALTER TABLE sets DROP INDEX artist;`,
}

View File

@@ -0,0 +1,18 @@
-- Creates the sets table: one row per beatmap set, holding the ranking
-- state, update/check timestamps, searchable metadata and a bitmask of the
-- game modes present in the set.
CREATE TABLE sets(
id INT NOT NULL,
ranked_status TINYINT NOT NULL,
approved_date DATETIME NOT NULL,
last_update DATETIME NOT NULL,
last_checked DATETIME NOT NULL,
artist VARCHAR(1000) NOT NULL,
title VARCHAR(1000) NOT NULL,
creator VARCHAR(1000) NOT NULL,
source VARCHAR(1000) NOT NULL,
tags VARCHAR(1000) NOT NULL,
has_video TINYINT NOT NULL,
genre TINYINT NOT NULL,
language TINYINT NOT NULL,
favourites INT NOT NULL,
set_modes TINYINT NOT NULL,
PRIMARY KEY(id)
);

View File

@@ -0,0 +1,22 @@
-- Creates the beatmaps table: one row per difficulty, linked to its parent
-- set with cascading delete/update so removing a set removes its beatmaps.
CREATE TABLE beatmaps(
id INT NOT NULL,
parent_set_id INT NOT NULL,
diff_name VARCHAR(1000) NOT NULL,
file_md5 CHAR(32) NOT NULL,
mode INT NOT NULL,
bpm DECIMAL(10, 4) NOT NULL,
ar DECIMAL(4, 2) NOT NULL,
od DECIMAL(4, 2) NOT NULL,
cs DECIMAL(4, 2) NOT NULL,
hp DECIMAL(4, 2) NOT NULL,
total_length INT NOT NULL,
hit_length INT NOT NULL,
playcount INT NOT NULL,
passcount INT NOT NULL,
max_combo INT NOT NULL,
difficulty_rating INT NOT NULL,
PRIMARY KEY(id),
FOREIGN KEY (parent_set_id) REFERENCES sets(id)
ON DELETE CASCADE
ON UPDATE CASCADE
);

View File

@@ -0,0 +1 @@
-- Add a FULLTEXT index over the textual set metadata so sets can be
-- searched by artist/title/creator/source/tags.
ALTER TABLE sets ADD FULLTEXT(artist, title, creator, source, tags);

View File

@@ -0,0 +1 @@
-- Widen difficulty_rating (previously INT) so star ratings keep their
-- fractional precision.
ALTER TABLE beatmaps MODIFY difficulty_rating DECIMAL(20, 15);

View File

@@ -0,0 +1 @@
-- Drop the FULLTEXT index added by the earlier migration; full-text search
-- is handled externally (SearchSets queries a separate Sphinx searchDB).
ALTER TABLE sets DROP INDEX artist;

View File

@@ -0,0 +1,60 @@
// +build ignore
package main
import (
"fmt"
"io"
"io/ioutil"
"os"
"strings"
)
// fileHeader is everything written to migrations.go before the
// backtick-quoted migration bodies; the raw string deliberately ends with
// the opening line of the slice literal plus a newline.
const fileHeader = `// THIS FILE HAS BEEN AUTOMATICALLY GENERATED
// To re-generate it, run "go generate" in the models folder.
package models
var migrations = [...]string{
`
// main generates migrations.go, embedding every .sql file found in the
// migrations directory as a backtick-quoted string, in lexicographic order.
func main() {
	// ReadDir gets all the files in the directory and then sorts them
	// alphabetically - thus we can be sure 0000 will come first and 0001 will
	// come afterwards.
	files, err := ioutil.ReadDir("migrations")
	check(err)

	out, err := os.Create("migrations.go")
	check(err)
	_, err = out.WriteString(fileHeader)
	check(err)

	for _, file := range files {
		if !strings.HasSuffix(file.Name(), ".sql") || file.IsDir() {
			continue
		}
		f, err := os.Open("migrations/" + file.Name())
		check(err)
		// write errors were previously ignored here, which could silently
		// produce a truncated migrations.go.
		_, err = out.WriteString("\t`")
		check(err)
		_, err = io.Copy(out, f)
		check(err)
		_, err = out.WriteString("`,\n")
		check(err)
		check(f.Close())
	}

	_, err = out.WriteString("}\n")
	check(err)
	check(out.Close())
}
// check aborts the program with exit status 1 if err is non-nil; it is a
// no-op otherwise.
func check(err error) {
	if err != nil {
		// report on stderr, not stdout: this is diagnostic output.
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

View File

@@ -0,0 +1,51 @@
// Package models contains everything that is needed to interface to the
// database CheeseGull is using.
package models
import (
"database/sql"
)
//go:generate go run migrations_gen.go
// RunMigrations brings the database up to date following the migrations.
// The version of the last applied migration is kept in the single-row
// db_version table, which is created on first run.
func RunMigrations(db *sql.DB) error {
	var version int
	// _b is a throwaway destination: we only care whether the db_version
	// table exists at all.
	var _b []byte
	err := db.QueryRow("SHOW TABLES LIKE 'db_version'").Scan(&_b)
	switch err {
	case nil:
		// the table exists: fetch the version of the last applied migration.
		err = db.QueryRow("SELECT version FROM db_version").Scan(&version)
		if err != nil {
			return err
		}
	case sql.ErrNoRows:
		// first run: create the tracking table, starting from -1 so that
		// migration 0 is the first one to be applied.
		_, err = db.Exec("CREATE TABLE db_version(version INT NOT NULL)")
		if err != nil {
			return err
		}
		_, err = db.Exec("INSERT INTO db_version(version) VALUES ('-1')")
		if err != nil {
			return err
		}
		version = -1
	default:
		return err
	}

	// apply every migration after the current version in order, then
	// persist the reached version. The error from the final UPDATE was
	// previously discarded, silently losing the version bump.
	for version+1 < len(migrations) {
		version++
		if _, err := db.Exec(migrations[version]); err != nil {
			return err
		}
	}
	_, err = db.Exec("UPDATE db_version SET version = ?", version)
	return err
}

160
vendor/github.com/osuripple/cheesegull/models/set.go generated vendored Normal file
View File

@@ -0,0 +1,160 @@
package models
import (
"database/sql"
"time"
)
// Set represents a set of beatmaps usually sharing the same song.
type Set struct {
	ID int `json:"SetID"`
	// ChildrenBeatmaps is only populated when explicitly requested, e.g.
	// by FetchSet with withChildren, or by SearchSets.
	ChildrenBeatmaps []Beatmap
	RankedStatus     int
	ApprovedDate     time.Time
	LastUpdate       time.Time
	// LastChecked is used by FetchSetsForBatchUpdate to pick stale sets.
	LastChecked time.Time
	Artist      string
	Title       string
	Creator     string
	Source      string
	Tags        string
	HasVideo    bool
	Genre       int
	Language    int
	Favourites  int
}

// setFields is the column list matching the Scan order used throughout this
// package; keep it in sync with the Set struct. Note that set_modes exists
// in the table but is never read back into Set.
const setFields = `id, ranked_status, approved_date, last_update, last_checked,
artist, title, creator, source, tags, has_video, genre,
language, favourites`
// FetchSetsForBatchUpdate fetches limit sets from the database, sorted by
// LastChecked (asc, older first). Results are further filtered: if the set's
// RankedStatus is 3, 0 or -1 (qualified, pending or WIP), at least 30 minutes
// must have passed from LastChecked. For all other statuses, at least 4 days
// must have passed from LastChecked.
func FetchSetsForBatchUpdate(db *sql.DB, limit int) ([]Set, error) {
	n := time.Now()
	rows, err := db.Query(`
SELECT `+setFields+` FROM sets
WHERE (ranked_status IN (3, 0, -1) AND last_checked <= ?) OR last_checked <= ?
ORDER BY last_checked ASC
LIMIT ?`,
		n.Add(-time.Minute*30),
		n.Add(-time.Hour*24*4),
		limit,
	)
	if err != nil {
		return nil, err
	}
	// release the connection even when a Scan below fails early; the rows
	// were previously leaked on that path.
	defer rows.Close()

	sets := make([]Set, 0, limit)
	for rows.Next() {
		var s Set
		err = rows.Scan(
			&s.ID, &s.RankedStatus, &s.ApprovedDate, &s.LastUpdate, &s.LastChecked,
			&s.Artist, &s.Title, &s.Creator, &s.Source, &s.Tags, &s.HasVideo, &s.Genre,
			&s.Language, &s.Favourites,
		)
		if err != nil {
			return nil, err
		}
		sets = append(sets, s)
	}
	return sets, rows.Err()
}
// FetchSet retrieves a single set to show, alongside its children beatmaps.
// It returns (nil, nil) when no set with the given id exists; children are
// fetched only when withChildren is true.
func FetchSet(db *sql.DB, id int, withChildren bool) (*Set, error) {
	var s Set
	err := db.QueryRow(`SELECT `+setFields+` FROM sets WHERE id = ? LIMIT 1`, id).Scan(
		&s.ID, &s.RankedStatus, &s.ApprovedDate, &s.LastUpdate, &s.LastChecked,
		&s.Artist, &s.Title, &s.Creator, &s.Source, &s.Tags, &s.HasVideo, &s.Genre,
		&s.Language, &s.Favourites,
	)
	switch err {
	case nil:
		break // carry on
	case sql.ErrNoRows:
		// silently ignore no rows, and just don't return anything
		return nil, nil
	default:
		return nil, err
	}

	if !withChildren {
		return &s, nil
	}
	rows, err := db.Query(`SELECT `+beatmapFields+` FROM beatmaps WHERE parent_set_id = ?`, s.ID)
	if err != nil {
		return nil, err
	}
	// 8 is only a capacity hint for the children slice.
	s.ChildrenBeatmaps, err = readBeatmapsFromRows(rows, 8)
	return &s, err
}
// DeleteSet deletes a set from the database, removing also its children
// beatmaps. The beatmaps are removed first so no orphan rows remain if the
// second statement fails.
func DeleteSet(db *sql.DB, set int) error {
	if _, err := db.Exec("DELETE FROM beatmaps WHERE parent_set_id = ?", set); err != nil {
		return err
	}
	_, err := db.Exec("DELETE FROM sets WHERE id = ?", set)
	return err
}
// createSetModes will generate the correct value for setModes, which is
// basically a bitwise enum containing the modes that are on a certain set:
// bit m is set when at least one beatmap has game mode m (0-3).
func createSetModes(bms []Beatmap) (setModes uint8) {
	for _, bm := range bms {
		if mode := bm.Mode; mode >= 0 && mode < 4 {
			setModes |= 1 << uint(mode)
		}
	}
	return setModes
}
// CreateSet creates (and updates) a beatmap set in the database.
// NOTE(review): the delete + insert pair is not wrapped in a transaction, so
// a failure in between loses the set — confirm whether callers tolerate
// this before relying on it.
func CreateSet(db *sql.DB, s Set) error {
	// delete existing set, if any.
	// This is mostly a lazy way to make sure updates work as well.
	err := DeleteSet(db, s.ID)
	if err != nil {
		return err
	}

	_, err = db.Exec(`
INSERT INTO sets(
	id, ranked_status, approved_date, last_update, last_checked,
	artist, title, creator, source, tags, has_video, genre,
	language, favourites, set_modes
)
VALUES (
	?, ?, ?, ?, ?,
	?, ?, ?, ?, ?, ?, ?,
	?, ?, ?
)`, s.ID, s.RankedStatus, s.ApprovedDate, s.LastUpdate, s.LastChecked,
		s.Artist, s.Title, s.Creator, s.Source, s.Tags, s.HasVideo, s.Genre,
		s.Language, s.Favourites, createSetModes(s.ChildrenBeatmaps))
	if err != nil {
		return err
	}

	return CreateBeatmaps(db, s.ChildrenBeatmaps...)
}
// BiggestSetID retrieves the biggest set ID in the sets database. This is used
// by discovery to have a starting point from which to discover new beatmaps.
// An empty sets table reports 0 with no error.
func BiggestSetID(db *sql.DB) (int, error) {
	var id int
	switch err := db.QueryRow("SELECT id FROM sets ORDER BY id DESC LIMIT 1").Scan(&id); err {
	case sql.ErrNoRows:
		return 0, nil
	default:
		return id, err
	}
}

View File

@@ -0,0 +1,158 @@
package models
import (
"bytes"
"database/sql"
"fmt"
"strconv"
"strings"
)
// SearchOptions are options that can be passed to SearchSets for filtering
// sets.
type SearchOptions struct {
	// If len is 0, then it should be treated as if all statuses are good.
	Status []int
	Query  string
	// Gamemodes to which limit the results. If len is 0, it means all modes
	// are ok.
	Mode []int

	// Pagination options.
	Offset int
	Amount int
}

// setModes packs o.Mode into a bitmask with bit m set for each valid game
// mode m in the range [0, 4); out-of-range modes are ignored.
func (o SearchOptions) setModes() (total uint8) {
	for _, mode := range o.Mode {
		if mode >= 0 && mode < 4 {
			total |= 1 << uint8(mode)
		}
	}
	return total
}
// mysqlStringReplacer escapes the characters that are special inside a
// MySQL/SphinxQL single-quoted string literal; SearchSets uses it because
// the Sphinx query cannot go through a prepared statement.
var mysqlStringReplacer = strings.NewReplacer(
	`\`, `\\`,
	`"`, `\"`,
	`'`, `\'`,
	"\x00", `\0`,
	"\n", `\n`,
	"\r", `\r`,
	"\x1a", `\Z`,
)
// sIntCommaSeparated renders nums as a comma-and-space separated list,
// e.g. [1 2 3] -> "1, 2, 3". An empty slice yields the empty string.
func sIntCommaSeparated(nums []int) string {
	parts := make([]string, len(nums))
	for i, n := range nums {
		parts[i] = strconv.Itoa(n)
	}
	return strings.Join(parts, ", ")
}
// SearchSets retrieves sets, filtering them using SearchOptions.
// Matching set IDs come from the Sphinx full-text index (searchDB); the
// actual set and beatmap rows are then fetched from the MySQL db.
func SearchSets(db, searchDB *sql.DB, opts SearchOptions) ([]Set, error) {
	sm := strconv.Itoa(int(opts.setModes()))
	setIDsQuery := "SELECT id, set_modes & " + sm + " AS valid_set_modes FROM cg WHERE "

	// add filters to query
	// Yes. I know. Prepared statements. But Sphinx doesn't like them, so
	// bummer. The query text is escaped through mysqlStringReplacer instead.
	setIDsQuery += "MATCH('" + mysqlStringReplacer.Replace(opts.Query) + "') "
	if len(opts.Status) != 0 {
		setIDsQuery += "AND ranked_status IN (" + sIntCommaSeparated(opts.Status) + ") "
	}
	if len(opts.Mode) != 0 {
		// This is a hack. Apparently, Sphinx does not support AND bitwise
		// operations in the WHERE clause, so we're placing that in the SELECT
		// clause and only making sure it's correct in this place.
		setIDsQuery += "AND valid_set_modes = " + sm + " "
	}

	// set limit
	setIDsQuery += fmt.Sprintf("ORDER BY WEIGHT() DESC, id DESC LIMIT %d, %d OPTION ranker=sph04", opts.Offset, opts.Amount)

	// fetch rows
	rows, err := searchDB.Query(setIDsQuery)
	if err != nil {
		return nil, err
	}
	// NOTE(review): none of the three row sets in this function are
	// explicitly Closed on the early error-return paths — confirm and
	// consider defer rows.Close().

	// from the rows we will retrieve the IDs of all our sets.
	// we also pre-create the slices containing the sets we will fill later on
	// when we fetch the actual data.
	setIDs := make([]int, 0, opts.Amount)
	sets := make([]Set, 0, opts.Amount)
	// setMap, having an ID, points to a position of a set contained in sets.
	setMap := make(map[int]int, opts.Amount)
	for rows.Next() {
		var id int
		// the second column (valid_set_modes) is scanned into a throwaway.
		err = rows.Scan(&id, new(int))
		if err != nil {
			return nil, err
		}
		setIDs = append(setIDs, id)
		sets = append(sets, Set{})
		setMap[id] = len(sets) - 1
	}

	// short circuit: there are no sets
	if len(sets) == 0 {
		return []Set{}, nil
	}

	setsQuery := "SELECT " + setFields + " FROM sets WHERE id IN (" + inClause(len(setIDs)) + ")"
	args := sIntToSInterface(setIDs)
	rows, err = db.Query(setsQuery, args...)
	if err != nil {
		return nil, err
	}
	// find all beatmaps, but leave children aside for the moment.
	for rows.Next() {
		var s Set
		err = rows.Scan(
			&s.ID, &s.RankedStatus, &s.ApprovedDate, &s.LastUpdate, &s.LastChecked,
			&s.Artist, &s.Title, &s.Creator, &s.Source, &s.Tags, &s.HasVideo, &s.Genre,
			&s.Language, &s.Favourites,
		)
		if err != nil {
			return nil, err
		}
		sets[setMap[s.ID]] = s
	}

	rows, err = db.Query(
		"SELECT "+beatmapFields+" FROM beatmaps WHERE parent_set_id IN ("+
			inClause(len(setIDs))+")",
		sIntToSInterface(setIDs)...,
	)
	if err != nil {
		return nil, err
	}
	for rows.Next() {
		var b Beatmap
		err = rows.Scan(
			&b.ID, &b.ParentSetID, &b.DiffName, &b.FileMD5, &b.Mode, &b.BPM,
			&b.AR, &b.OD, &b.CS, &b.HP, &b.TotalLength, &b.HitLength,
			&b.Playcount, &b.Passcount, &b.MaxCombo, &b.DifficultyRating,
		)
		if err != nil {
			return nil, err
		}
		// beatmaps whose parent set was not part of the search hit list are
		// skipped defensively.
		parentSet, ok := setMap[b.ParentSetID]
		if !ok {
			continue
		}
		sets[parentSet].ChildrenBeatmaps = append(sets[parentSet].ChildrenBeatmaps, b)
	}

	return sets, nil
}