diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 4b30371..0000000 --- a/.dockerignore +++ /dev/null @@ -1,7 +0,0 @@ -.git -LICENSE -README.md -docs -pavod -pavosql -*_test.go diff --git a/.github/workflows/gobuild.yaml b/.github/workflows/build.yaml old mode 100644 new mode 100755 similarity index 69% rename from .github/workflows/gobuild.yaml rename to .github/workflows/build.yaml index 0618a9d..2ef136f --- a/.github/workflows/gobuild.yaml +++ b/.github/workflows/build.yaml @@ -1,4 +1,4 @@ -name: Go build +name: Build on: [push] jobs: @@ -10,8 +10,8 @@ jobs: - name: Setup Go uses: actions/setup-go@v4 with: - go-version: '1.21.x' - - name: Install dependencies - run: go get . + go-version-file: go.mod + cache-dependency-path: | + go.sum - name: Build run: go build -v ./cmd/pavosql diff --git a/.github/workflows/hugo.yaml b/.github/workflows/hugo.yaml deleted file mode 100644 index f37a325..0000000 --- a/.github/workflows/hugo.yaml +++ /dev/null @@ -1,72 +0,0 @@ -name: Build docs with hugo - -on: - push: - paths: - - docs/* - branches: - - main - workflow_dispatch: - - -permissions: - contents: read - pages: write - id-token: write - -concurrency: - group: "pages" - cancel-in-progress: false - -defaults: - run: - shell: bash - -jobs: - build: - runs-on: ubuntu-latest - env: - HUGO_VERSION: 0.115.4 - steps: - - name: Install Hugo CLI - run: | - wget -O ${{ runner.temp }}/hugo.deb https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_extended_${HUGO_VERSION}_linux-amd64.deb \ - && sudo dpkg -i ${{ runner.temp }}/hugo.deb - - name: Install Dart Sass - run: sudo snap install dart-sass - - name: Checkout - uses: actions/checkout@v3 - with: - submodules: recursive - fetch-depth: 0 - - name: Setup Pages - id: pages - uses: actions/configure-pages@v3 - - name: Install Node.js dependencies - run: "[[ -f package-lock.json || -f npm-shrinkwrap.json ]] && npm ci || true" - - name: Build with Hugo - env: - HUGO_ENVIRONMENT: production 
- HUGO_ENV: production - run: | - hugo \ - -s docs\ - -d ../public\ - --gc \ - --minify \ - --baseURL "${{ steps.pages.outputs.base_url }}/" - - name: Upload artifact - uses: actions/upload-pages-artifact@v1 - with: - path: ./public - - deploy: - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - runs-on: ubuntu-latest - needs: build - steps: - - name: Deploy to GitHub Pages - id: deployment - uses: actions/deploy-pages@v2 diff --git a/.github/workflows/gotest.yaml b/.github/workflows/test.yaml old mode 100644 new mode 100755 similarity index 69% rename from .github/workflows/gotest.yaml rename to .github/workflows/test.yaml index a87c526..0ad850c --- a/.github/workflows/gotest.yaml +++ b/.github/workflows/test.yaml @@ -1,4 +1,4 @@ -name: Go test +name: Test on: [push] jobs: @@ -10,8 +10,8 @@ jobs: - name: Setup Go uses: actions/setup-go@v4 with: - go-version: '1.21.x' - - name: Install dependencies - run: go get . + go-version-file: go.mod + cache-dependency-path: | + go.sum - name: Test with the Go CLI run: go test -v ./... 
diff --git a/.gitignore b/.gitignore index 39a63e9..3d4d92b 100644 --- a/.gitignore +++ b/.gitignore @@ -21,9 +21,4 @@ bin # Go workspace file go.work -# Hugo Docs -docs/.hugo_build.lock -public -docs/public - dist/ diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..e69de29 diff --git a/.golangci.toml b/.golangci.toml new file mode 100755 index 0000000..d2f36af --- /dev/null +++ b/.golangci.toml @@ -0,0 +1,40 @@ +version = '2' + +[linters] +default = 'none' +enable = [ + 'errcheck', + 'gosec', + 'govet', + 'ineffassign', + 'lll', + 'staticcheck', + 'unused' +] + +[linters.exclusions] +generated = 'lax' +presets = [ + 'comments', + 'common-false-positives', + 'legacy', + 'std-error-handling' +] +paths = [ + 'third_party$', + 'builtin$', + 'examples$' +] + +[formatters] +enable = [ + 'gofmt' +] + +[formatters.exclusions] +generated = 'lax' +paths = [ + 'third_party$', + 'builtin$', + 'examples$' +] diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 50cf86e..0000000 --- a/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM golang:1.21.0 - -WORKDIR /pavosql - -COPY ./ ./ -RUN go mod download - -RUN CGO_ENABLED=0 GOOS=linux go build ./cmd/pavosql - -EXPOSE 1758 - -CMD [ "./pavosql" ] diff --git a/LICENSE b/LICENSE old mode 100644 new mode 100755 diff --git a/Makefile b/Makefile deleted file mode 100644 index 5e33094..0000000 --- a/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -build: - @go build -o bin/pavosql ./cmd/pavosql - -run: build - @./bin/pavosql - -test: - @go test -v ./... diff --git a/README.md b/README.md old mode 100644 new mode 100755 index 9b057e1..2c1adfb --- a/README.md +++ b/README.md @@ -1,37 +1,38 @@ -# PavoSQL +
+ + pavosql gopher + +

PavoSQL

+

+

+
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) -[![Build](https://github.com/gKits/PavoSQL/actions/workflows/gobuild.yaml/badge.svg)](https://github.com/gKits/PavoSQL/actions/workflows/gobuild.yaml) -[![Test](https://github.com/gKits/PavoSQL/actions/workflows/gotest.yaml/badge.svg)](https://github.com/gKits/PavoSQL/actions/workflows/gotest.yaml) -[![Build Hugo docs and deploy to pages](https://github.com/gKits/PavoSQL/actions/workflows/hugo.yaml/badge.svg)](https://gkits.github.io/PavoSQL) +[![Build](https://github.com/gkits/pavosql/actions/workflows/build.yaml/badge.svg)](https://github.com/gkits/pavosql/actions/workflows/build.yaml) +[![Test](https://github.com/gkits/pavosql/actions/workflows/test.yaml/badge.svg)](https://github.com/gkits/pavosql/actions/workflows/test.yaml) -**This is a learning project and is not meant to be run in production environments.** - -**This project is stil w.i.p.** - -PavoSQL is a SQL relational Database written in pure Go, meaning only using Go's standard library. 
+**This project is still a work in progress and not supposed to be used in any production setting.** ## Roadmap -- [x] Atomic backend store on single file -- [ ] Relational model build on KV Store - - [ ] Point queries - - [ ] Range queries - - [ ] Insert - - [ ] Delete - - [ ] Sorting - - [ ] Group By - - [ ] Joins -- [ ] Lexer and Parser for SQL queries -- [ ] Database server and client to use PavoSQL over the network -- [ ] User and privilege system +- [ ] Database engine + - [ ] Single file backend + - [ ] B+tree structure + - [ ] Concurrent r/w + - [ ] Atomic i/o + - [ ] SQL + - [ ] Relational model + - [ ] Tables + - [ ] Indexes + - [ ] Metadata + - [ ] Lexer, parser and AST + - [ ] Query functionality +- [ ] Network + - [ ] Server + client + - [ ] Authentication + Authorization - [ ] Implement [database/sql](https://pkg.go.dev/database/sql) driver interface -- [ ] Database Management System in single directory -- [ ] Windows compatibilty of backend store (remain atomic) - [ ] Documentation -- [ ] Installable as service/daemon (e.g. 
systemd) -- [ ] Create and release Docker image -- [ ] 80% Test coverage (not needed but nice to have) +- [ ] Docker image ## Reference material diff --git a/assets/pavosql-gopher.png b/assets/pavosql-gopher.png new file mode 100644 index 0000000..2879d57 Binary files /dev/null and b/assets/pavosql-gopher.png differ diff --git a/assets/pavosql-gopher.svg b/assets/pavosql-gopher.svg new file mode 100644 index 0000000..7961487 --- /dev/null +++ b/assets/pavosql-gopher.svg @@ -0,0 +1,95 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/cmd/pavofmt/main.go b/cmd/pavofmt/main.go new file mode 100644 index 0000000..06ab7d0 --- /dev/null +++ b/cmd/pavofmt/main.go @@ -0,0 +1 @@ +package main diff --git a/cmd/pavosql/cmd/root.go b/cmd/pavosql/cmd/root.go new file mode 100644 index 0000000..8b45672 --- /dev/null +++ b/cmd/pavosql/cmd/root.go @@ -0,0 +1,37 @@ +package cmd + +import ( + "os" + + "github.com/gkits/pavosql/cmd/pavosql/cmd/serve" + "github.com/gkits/pavosql/cmd/pavosql/cmd/version" + "github.com/spf13/cobra" +) + +var rootCmd = &cobra.Command{ + Use: "pavosql", + Short: "A brief description of your application", + Long: `A longer description that spans multiple lines and likely contains +examples and usage of using your application. For example: + +Cobra is a CLI library for Go that empowers applications. 
+This application is a tool to generate the needed files +to quickly create a Cobra application.`, + // Uncomment the following line if your bare application + // has an action associated with it: + // Run: func(cmd *cobra.Command, args []string) { }, +} + +func Execute() { + err := rootCmd.Execute() + if err != nil { + os.Exit(1) + } +} + +func init() { + rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") + + rootCmd.AddCommand(version.Command()) + rootCmd.AddCommand(serve.Command()) +} diff --git a/cmd/pavosql/cmd/serve/serve.go b/cmd/pavosql/cmd/serve/serve.go new file mode 100644 index 0000000..9232af3 --- /dev/null +++ b/cmd/pavosql/cmd/serve/serve.go @@ -0,0 +1,24 @@ +package serve + +import ( + "github.com/spf13/cobra" +) + +var ( + port uint16 +) + +func Command() *cobra.Command { + var serveCmd = &cobra.Command{ + Use: "serve", + Short: "", + Long: "", + Run: func(cmd *cobra.Command, args []string) { + // TODO: start server + }, + } + + serveCmd.Flags().Uint16VarP(&port, "port", "p", 6677, "") + + return serveCmd +} diff --git a/cmd/pavosql/cmd/version/version.go b/cmd/pavosql/cmd/version/version.go new file mode 100644 index 0000000..bec728a --- /dev/null +++ b/cmd/pavosql/cmd/version/version.go @@ -0,0 +1,18 @@ +package version + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func Command() *cobra.Command { + return &cobra.Command{ + Use: "version", + Short: "", + Long: "", + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("v0.0.0") + }, + } +} diff --git a/cmd/pavosql/main.go b/cmd/pavosql/main.go old mode 100644 new mode 100755 index 38dd16d..204ca64 --- a/cmd/pavosql/main.go +++ b/cmd/pavosql/main.go @@ -1,3 +1,30 @@ +/* +MIT License + +# Copyright (c) 2023 Georgios Kitsikoudis + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights 
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ package main -func main() {} +import "github.com/gkits/pavosql/cmd/pavosql/cmd" + +func main() { + cmd.Execute() +} diff --git a/docs.go b/docs.go new file mode 100755 index 0000000..631787c --- /dev/null +++ b/docs.go @@ -0,0 +1,24 @@ +/* +MIT License + +# Copyright (c) 2023 Georgios Kitsikoudis + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ +package pavosql diff --git a/docs/archetypes/default.md b/docs/archetypes/default.md deleted file mode 100644 index 00e77bd..0000000 --- a/docs/archetypes/default.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "{{ replace .Name "-" " " | title }}" -date: {{ .Date }} -draft: true ---- - diff --git a/docs/assets/_custom.css b/docs/assets/_custom.css deleted file mode 100644 index 4f829af..0000000 --- a/docs/assets/_custom.css +++ /dev/null @@ -1,2 +0,0 @@ -@import "plugins/_numberd.sccs" -@import "plugins/_scrollbar.sccs" diff --git a/docs/content/_index.md b/docs/content/_index.md deleted file mode 100644 index a01f933..0000000 --- a/docs/content/_index.md +++ /dev/null @@ -1,3 +0,0 @@ -+++ -+++ -# PavoSQL diff --git a/docs/content/docs/docs/_index.md b/docs/content/docs/docs/_index.md deleted file mode 100644 index 4582178..0000000 --- a/docs/content/docs/docs/_index.md +++ /dev/null @@ -1,8 +0,0 @@ -+++ -Title = "Docs" -Weight = 2 -bookFlatSection = true -draft = true -+++ - -# Documentation diff --git a/docs/content/docs/quickstart/_index.md b/docs/content/docs/quickstart/_index.md deleted file mode 100644 index 728502b..0000000 --- a/docs/content/docs/quickstart/_index.md +++ /dev/null @@ -1,6 +0,0 @@ -+++ -Title = "Getting started" -Weight = 1 -bookCollapseSection = true -draft = true -+++ diff --git a/docs/content/docs/quickstart/installation.md b/docs/content/docs/quickstart/installation.md deleted file mode 100644 index 017260a..0000000 --- a/docs/content/docs/quickstart/installation.md +++ /dev/null @@ -1,14 +0,0 @@ -+++ -Title = "Installation" -+++ - -# Installation - -{{< tabs "installos" >}} -{{< tab "Linux" >}} -## Linux -{{< /tab >}} -{{< tab "Windows" >}} -## Windows -{{< /tab >}} -{{< 
/tabs >}} diff --git a/docs/content/docs/quickstart/whatis.md b/docs/content/docs/quickstart/whatis.md deleted file mode 100644 index 807e201..0000000 --- a/docs/content/docs/quickstart/whatis.md +++ /dev/null @@ -1,11 +0,0 @@ -+++ -Title = "What is PavoSQL?" -Weight = 1 -bookToc = false -+++ -# PavoSQL - -[PavoSQLLogo](/PavoSQL.png) - -PavoSQL (or Pavo for short) is a simple Database engine and management system -written completely in vanilla Go. diff --git a/docs/content/docs/syntax/_index.md b/docs/content/docs/syntax/_index.md deleted file mode 100644 index 690cd42..0000000 --- a/docs/content/docs/syntax/_index.md +++ /dev/null @@ -1,7 +0,0 @@ -+++ -Title = "SQL Syntax" -bookFlatSection = true -draft = true -+++ - -# Test diff --git a/docs/go.mod b/docs/go.mod deleted file mode 100644 index 985f690..0000000 --- a/docs/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/gKits/PavoSQL/docs - -go 1.21 - -require github.com/alex-shpak/hugo-book v0.0.0-20230808113920-3f1bcccbfb24 // indirect diff --git a/docs/go.sum b/docs/go.sum deleted file mode 100644 index 6801581..0000000 --- a/docs/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/alex-shpak/hugo-book v0.0.0-20230808113920-3f1bcccbfb24 h1:8NjMYBSFTtBLeT1VmpZAZznPOt1OH8aNCnE86sL4p4k= -github.com/alex-shpak/hugo-book v0.0.0-20230808113920-3f1bcccbfb24/go.mod h1:L4NMyzbn15fpLIpmmtDg9ZFFyTZzw87/lk7M2bMQ7ds= diff --git a/docs/hugo.toml b/docs/hugo.toml deleted file mode 100644 index 05778e0..0000000 --- a/docs/hugo.toml +++ /dev/null @@ -1,34 +0,0 @@ -baseURL = "https://gkits.github.io/PavoSQL" -languageCode = "en-us" -title = "PavoSQL" - -enableGitInfo = true -disablePathToLower = true - -[markup] -[markup.goldmark.renderer] -unsafe = true -[markup.tableOfContents] -startLevel = 1 - -[menu] -[[menu.after]] -name = "Github" -url = "https://github.com/gKits/PavoSQL" -weight = 10 - -[module] -[[module.imports]] -path = "github.com/alex-shpak/hugo-book" - - -[params] -favicon = "favicon.ico" -BookTheme = "dark" 
-BookToc = true -BookSection = "docs" -BookLogo = "PavoSQL.png" -BookRepo = "github.com/gKits/PavoSQL" -BookCommitPath = "commit" -[params.meta] -favicon = false diff --git a/docs/resources/_gen/assets/scss/book.scss_e129fe35b8d0a70789c8a08429469073.content b/docs/resources/_gen/assets/scss/book.scss_e129fe35b8d0a70789c8a08429469073.content deleted file mode 100644 index 33ac19a..0000000 --- a/docs/resources/_gen/assets/scss/book.scss_e129fe35b8d0a70789c8a08429469073.content +++ /dev/null @@ -1 +0,0 @@ -@charset "UTF-8";:root{--gray-100:rgba(255, 255, 255, 0.1);--gray-200:rgba(255, 255, 255, 0.2);--gray-500:rgba(255, 255, 255, 0.5);--color-link:#84b2ff;--color-visited-link:#b88dff;--body-background:#343a40;--body-font-color:#e9ecef;--icon-filter:brightness(0) invert(1);--hint-color-info:#6bf;--hint-color-warning:#fd6;--hint-color-danger:#f66}/*!normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css*/html{line-height:1.15;-webkit-text-size-adjust:100%}body{margin:0}main{display:block}h1{font-size:2em;margin:.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline 
dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button}button::-moz-focus-inner,[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner{border-style:none;padding:0}button:-moz-focusring,[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em .625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}template{display:none}[hidden]{display:none}.flex{display:flex}.flex-auto{flex:auto}.flex-even{flex:1 1}.flex-wrap{flex-wrap:wrap}.justify-start{justify-content:flex-start}.justify-end{justify-content:flex-end}.justify-center{justify-content:center}.justify-between{justify-content:space-between}.align-center{align-items:center}.mx-auto{margin:0 
auto}.text-center{text-align:center}.text-left{text-align:left}.text-right{text-align:right}.hidden{display:none}input.toggle{height:0;width:0;overflow:hidden;opacity:0;position:absolute}.clearfix::after{content:"";display:table;clear:both}html{font-size:16px;scroll-behavior:smooth;touch-action:manipulation}body{min-width:20rem;color:var(--body-font-color);background:var(--body-background);letter-spacing:.33px;font-weight:400;text-rendering:optimizeLegibility;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;box-sizing:border-box}body *{box-sizing:inherit}h1,h2,h3,h4,h5{font-weight:400}a{text-decoration:none;color:var(--color-link)}img{vertical-align:baseline}:focus{outline-style:auto;outline-color:currentColor;outline-color:-webkit-focus-ring-color}aside nav ul{padding:0;margin:0;list-style:none}aside nav ul li{margin:1em 0;position:relative}aside nav ul a{display:block}aside nav ul a:hover{opacity:.5}aside nav ul ul{padding-inline-start:1rem}ul.pagination{display:flex;justify-content:center;list-style-type:none;padding-inline-start:0}ul.pagination .page-item a{padding:1rem}.container{max-width:80rem;margin:0 auto}.book-icon{filter:var(--icon-filter)}.book-brand{margin-top:0;margin-bottom:1rem}.book-brand img{height:1.5em;width:1.5em;margin-inline-end:.5rem}.book-menu{flex:0 0 16rem;font-size:.875rem}.book-menu .book-menu-content{width:16rem;padding:1rem;background:var(--body-background);position:fixed;top:0;bottom:0;overflow-x:hidden;overflow-y:auto}.book-menu a,.book-menu label{color:inherit;cursor:pointer;word-wrap:break-word}.book-menu a.active{color:var(--color-link)}.book-menu input.toggle+label+ul{display:none}.book-menu input.toggle:checked+label+ul{display:block}.book-menu input.toggle+label::after{content:"â–¸"}.book-menu input.toggle:checked+label::after{content:"â–¾"}body[dir=rtl] .book-menu input.toggle+label::after{content:"â—‚"}body[dir=rtl] .book-menu input.toggle:checked+label::after{content:"â–¾"}.book-section-flat{margin:2rem 
0}.book-section-flat>a,.book-section-flat>span,.book-section-flat>label{font-weight:bolder}.book-section-flat>ul{padding-inline-start:0}.book-page{min-width:20rem;flex-grow:1;padding:1rem}.book-post{margin-bottom:3rem}.book-header{display:none;margin-bottom:1rem}.book-header label{line-height:0}.book-header img.book-icon{height:1.5em;width:1.5em}.book-search{position:relative;margin:1rem 0;border-bottom:1px solid transparent}.book-search input{width:100%;padding:.5rem;border:0;border-radius:.25rem;background:var(--gray-100);color:var(--body-font-color)}.book-search input:required+.book-search-spinner{display:block}.book-search .book-search-spinner{position:absolute;top:0;margin:.5rem;margin-inline-start:calc(100% - 1.5rem);width:1rem;height:1rem;border:1px solid transparent;border-top-color:var(--body-font-color);border-radius:50%;animation:spin 1s ease infinite}@keyframes spin{100%{transform:rotate(360deg)}}.book-search small{opacity:.5}.book-toc{flex:0 0 16rem;font-size:.75rem}.book-toc .book-toc-content{width:16rem;padding:1rem;position:fixed;top:0;bottom:0;overflow-x:hidden;overflow-y:auto}.book-toc img{height:1em;width:1em}.book-toc nav>ul>li:first-child{margin-top:0}.book-footer{padding-top:1rem;font-size:.875rem}.book-footer img{height:1em;width:1em;margin-inline-end:.5rem}.book-comments{margin-top:1rem}.book-languages{margin-block-end:2rem}.book-languages .book-icon{height:1em;width:1em;margin-inline-end:.5em}.book-languages ul{padding-inline-start:1.5em}.book-menu-content,.book-toc-content,.book-page,.book-header aside,.markdown{transition:.2s ease-in-out;transition-property:transform,margin,opacity,visibility;will-change:transform,margin,opacity}@media screen and (max-width:56rem){#menu-control,#toc-control{display:inline}.book-menu{visibility:hidden;margin-inline-start:-16rem;font-size:16px;z-index:1}.book-toc{display:none}.book-header{display:block}#menu-control:focus~main 
label[for=menu-control]{outline-style:auto;outline-color:currentColor;outline-color:-webkit-focus-ring-color}#menu-control:checked~main .book-menu{visibility:initial}#menu-control:checked~main .book-menu .book-menu-content{transform:translateX(16rem);box-shadow:0 0 .5rem rgba(0,0,0,.1)}#menu-control:checked~main .book-page{opacity:.25}#menu-control:checked~main .book-menu-overlay{display:block;position:absolute;top:0;bottom:0;left:0;right:0}#toc-control:focus~main label[for=toc-control]{outline-style:auto;outline-color:currentColor;outline-color:-webkit-focus-ring-color}#toc-control:checked~main .book-header aside{display:block}body[dir=rtl] #menu-control:checked~main .book-menu .book-menu-content{transform:translateX(-16rem)}}@media screen and (min-width:80rem){.book-page,.book-menu .book-menu-content,.book-toc .book-toc-content{padding:2rem 1rem}}@font-face{font-family:roboto;font-style:normal;font-weight:400;font-display:swap;src:local(""),url(fonts/roboto-v27-latin-regular.woff2)format("woff2"),url(fonts/roboto-v27-latin-regular.woff)format("woff")}@font-face{font-family:roboto;font-style:normal;font-weight:700;font-display:swap;src:local(""),url(fonts/roboto-v27-latin-700.woff2)format("woff2"),url(fonts/roboto-v27-latin-700.woff)format("woff")}@font-face{font-family:roboto mono;font-style:normal;font-weight:400;font-display:swap;src:local(""),url(fonts/roboto-mono-v13-latin-regular.woff2)format("woff2"),url(fonts/roboto-mono-v13-latin-regular.woff)format("woff")}body{font-family:roboto,sans-serif}code{font-family:roboto mono,monospace}@media print{.book-menu,.book-footer,.book-toc{display:none}.book-header,.book-header aside{display:block}main{display:block!important}}.markdown{line-height:1.6}.markdown>:first-child{margin-top:0}.markdown h1,.markdown h2,.markdown h3,.markdown h4,.markdown h5,.markdown h6{font-weight:400;line-height:1;margin-top:1.5em;margin-bottom:1rem}.markdown h1 a.anchor,.markdown h2 a.anchor,.markdown h3 a.anchor,.markdown h4 
a.anchor,.markdown h5 a.anchor,.markdown h6 a.anchor{opacity:0;font-size:.75em;vertical-align:middle;text-decoration:none}.markdown h1:hover a.anchor,.markdown h1 a.anchor:focus,.markdown h2:hover a.anchor,.markdown h2 a.anchor:focus,.markdown h3:hover a.anchor,.markdown h3 a.anchor:focus,.markdown h4:hover a.anchor,.markdown h4 a.anchor:focus,.markdown h5:hover a.anchor,.markdown h5 a.anchor:focus,.markdown h6:hover a.anchor,.markdown h6 a.anchor:focus{opacity:initial}.markdown h4,.markdown h5,.markdown h6{font-weight:bolder}.markdown h5{font-size:.875em}.markdown h6{font-size:.75em}.markdown b,.markdown optgroup,.markdown strong{font-weight:bolder}.markdown a{text-decoration:none}.markdown a:hover{text-decoration:underline}.markdown a:visited{color:var(--color-visited-link)}.markdown img{max-width:100%;height:auto}.markdown code{padding:0 .25rem;background:var(--gray-200);border-radius:.25rem;font-size:.875em}.markdown pre{padding:1rem;background:var(--gray-100);border-radius:.25rem;overflow-x:auto}.markdown pre code{padding:0;background:0 0}.markdown p{word-wrap:break-word}.markdown blockquote{margin:1rem 0;padding:.5rem 1rem .5rem .75rem;border-inline-start:.25rem solid var(--gray-200);border-radius:.25rem}.markdown blockquote :first-child{margin-top:0}.markdown blockquote :last-child{margin-bottom:0}.markdown table{overflow:auto;display:block;border-spacing:0;border-collapse:collapse;margin-top:1rem;margin-bottom:1rem}.markdown table tr th,.markdown table tr td{padding:.5rem 1rem;border:1px solid var(--gray-200)}.markdown table tr:nth-child(2n){background:var(--gray-100)}.markdown hr{height:1px;border:none;background:var(--gray-200)}.markdown ul,.markdown ol{padding-inline-start:2rem;word-wrap:break-word}.markdown dl dt{font-weight:bolder;margin-top:1rem}.markdown dl dd{margin-inline-start:0;margin-bottom:1rem}.markdown .highlight table tr td:nth-child(1) pre{margin:0;padding-inline-end:0}.markdown .highlight table tr td:nth-child(2) 
pre{margin:0;padding-inline-start:0}.markdown details{padding:1rem;border:1px solid var(--gray-200);border-radius:.25rem}.markdown details summary{line-height:1;padding:1rem;margin:-1rem;cursor:pointer}.markdown details[open] summary{margin-bottom:0}.markdown figure{margin:1rem 0}.markdown figure figcaption p{margin-top:0}.markdown-inner>:first-child{margin-top:0}.markdown-inner>:last-child{margin-bottom:0}.markdown .book-expand{margin-top:1rem;margin-bottom:1rem;border:1px solid var(--gray-200);border-radius:.25rem;overflow:hidden}.markdown .book-expand .book-expand-head{background:var(--gray-100);padding:.5rem 1rem;cursor:pointer}.markdown .book-expand .book-expand-content{display:none;padding:1rem}.markdown .book-expand input[type=checkbox]:checked+.book-expand-content{display:block}.markdown .book-tabs{margin-top:1rem;margin-bottom:1rem;border:1px solid var(--gray-200);border-radius:.25rem;overflow:hidden;display:flex;flex-wrap:wrap}.markdown .book-tabs label{display:inline-block;padding:.5rem 1rem;border-bottom:1px transparent;cursor:pointer}.markdown .book-tabs .book-tabs-content{order:999;width:100%;border-top:1px solid var(--gray-100);padding:1rem;display:none}.markdown .book-tabs input[type=radio]:checked+label{border-bottom:1px solid var(--color-link)}.markdown .book-tabs input[type=radio]:checked+label+.book-tabs-content{display:block}.markdown .book-tabs input[type=radio]:focus+label{outline-style:auto;outline-color:currentColor;outline-color:-webkit-focus-ring-color}.markdown .book-columns{margin-left:-1rem;margin-right:-1rem}.markdown .book-columns>div{margin:1rem 0;min-width:10rem;padding:0 1rem}.markdown a.book-btn{display:inline-block;font-size:.875rem;color:var(--color-link);line-height:2rem;padding:0 1rem;border:1px solid var(--color-link);border-radius:.25rem;cursor:pointer}.markdown a.book-btn:hover{text-decoration:none}.markdown .book-hint.info{border-color:#6bf;background-color:rgba(102,187,255,.1)}.markdown 
.book-hint.warning{border-color:#fd6;background-color:rgba(255,221,102,.1)}.markdown .book-hint.danger{border-color:#f66;background-color:rgba(255,102,102,.1)} \ No newline at end of file diff --git a/docs/resources/_gen/assets/scss/book.scss_e129fe35b8d0a70789c8a08429469073.json b/docs/resources/_gen/assets/scss/book.scss_e129fe35b8d0a70789c8a08429469073.json deleted file mode 100644 index 8b302b4..0000000 --- a/docs/resources/_gen/assets/scss/book.scss_e129fe35b8d0a70789c8a08429469073.json +++ /dev/null @@ -1 +0,0 @@ -{"Target":"book.min.4f0117e74e5337280f18eb9641eae520cb4b25adcf5dd64fafad4664145a5957.css","MediaType":"text/css","Data":{"Integrity":"sha256-TwEX505TNygPGOuWQerlIMtLJa3PXdZPr61GZBRaWVc="}} \ No newline at end of file diff --git a/docs/static/PavoSQL.png b/docs/static/PavoSQL.png deleted file mode 100644 index b1ad75a..0000000 Binary files a/docs/static/PavoSQL.png and /dev/null differ diff --git a/docs/static/android-chrome-192x192.png b/docs/static/android-chrome-192x192.png deleted file mode 100644 index fb9b2f5..0000000 Binary files a/docs/static/android-chrome-192x192.png and /dev/null differ diff --git a/docs/static/android-chrome-512x512.png b/docs/static/android-chrome-512x512.png deleted file mode 100644 index 5d8a368..0000000 Binary files a/docs/static/android-chrome-512x512.png and /dev/null differ diff --git a/docs/static/apple-touch-icon.png b/docs/static/apple-touch-icon.png deleted file mode 100644 index b263193..0000000 Binary files a/docs/static/apple-touch-icon.png and /dev/null differ diff --git a/docs/static/favicon-16x16.png b/docs/static/favicon-16x16.png deleted file mode 100644 index 4cb7c2a..0000000 Binary files a/docs/static/favicon-16x16.png and /dev/null differ diff --git a/docs/static/favicon-32x32.png b/docs/static/favicon-32x32.png deleted file mode 100644 index 62b6fb2..0000000 Binary files a/docs/static/favicon-32x32.png and /dev/null differ diff --git a/docs/static/favicon.ico b/docs/static/favicon.ico deleted file 
mode 100644 index 8c5582c..0000000 Binary files a/docs/static/favicon.ico and /dev/null differ diff --git a/docs/static/site.webmanifest b/docs/static/site.webmanifest deleted file mode 100644 index 45dc8a2..0000000 --- a/docs/static/site.webmanifest +++ /dev/null @@ -1 +0,0 @@ -{"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"} \ No newline at end of file diff --git a/go.mod b/go.mod old mode 100644 new mode 100755 index dd1a8b6..e765c3c --- a/go.mod +++ b/go.mod @@ -1,3 +1,10 @@ -module github.com/gKits/PavoSQL +module github.com/gkits/pavosql -go 1.21 +go 1.24 + +require github.com/spf13/cobra v1.9.1 + +require ( + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/spf13/pflag v1.0.6 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..ffae55e --- /dev/null +++ b/go.sum @@ -0,0 +1,10 @@ +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git 
a/internal/btree/btree.go b/internal/btree/btree.go deleted file mode 100644 index 3ce8c35..0000000 --- a/internal/btree/btree.go +++ /dev/null @@ -1,547 +0,0 @@ -package btree - -import ( - "errors" - "fmt" -) - -type getFunc func(uint64) ([]byte, error) -type pullFunc func(uint64) ([]byte, error) -type allocFunc func([]byte) (uint64, error) -type freeFunc func(uint64) error - -type BTree struct { - Root uint64 - pgSize int - get func(uint64) (node, error) - pull func(uint64) (node, error) - alloc func(node) (uint64, error) - free func(uint64) error - readOnly bool -} - -func NewReadOnly(root uint64, pgSize int, get getFunc) BTree { - return BTree{ - Root: root, - pgSize: pgSize, - get: func(ptr uint64) (node, error) { - d, err := get(ptr) - if err != nil { - return nil, err - } - return decodeNode(d) - }, - readOnly: true, - } - -} - -func New( - root uint64, pgSize int, - get getFunc, pull pullFunc, alloc allocFunc, free freeFunc, -) BTree { - return BTree{ - Root: root, - pgSize: pgSize, - get: func(ptr uint64) (node, error) { - d, err := get(ptr) - if err != nil { - return nil, err - } - return decodeNode(d) - }, - pull: func(ptr uint64) (node, error) { - d, err := pull(ptr) - if err != nil { - return nil, err - } - return decodeNode(d) - }, - alloc: func(n node) (uint64, error) { - return alloc(n.Encode()) - }, - free: free, - readOnly: false, - } -} - -func (bt *BTree) Get(k []byte) ([]byte, error) { - errMsg := "btree: cannot get key: %v" - - if bt.Root == 0 { - return nil, fmt.Errorf(errMsg, "tree is empty") - } - - root, err := bt.get(bt.Root) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - - _, v, err := bt.bTreeGet(root, k) - return v, err -} - -func (bt *BTree) Set(k, v []byte) (err error) { - if bt.readOnly { - return fmt.Errorf("btree: set operation not allow on read only tree") - } - errMsg := "btree: cannot set key: %v" - - if bt.Root == 0 { - root := leafNode{} - root, err := root.Insert(0, k, v) - if err != nil { - return 
fmt.Errorf(errMsg, err) - } - - bt.Root, err = bt.alloc(root) - if err != nil { - return fmt.Errorf(errMsg, err) - } - - return nil - } - - root, err := bt.pull(bt.Root) - if err != nil { - return fmt.Errorf(errMsg, err) - } - - inserted, err := bt.bTreeInsert(root, k, v) - if err != nil { - return fmt.Errorf(errMsg, err) - } - - if inserted.Size() > bt.pgSize { - insertedPtr, err := bt.alloc(inserted) - if err != nil { - return fmt.Errorf(errMsg, err) - } - - k, err := inserted.Key(0) - if err != nil { - return fmt.Errorf(errMsg, err) - } - - t := pointerNode{} - t.Insert(0, k, insertedPtr) - - inserted, err = bt.splitChildPtr(0, t, inserted) - if err != nil { - return fmt.Errorf(errMsg, err) - } - - } - - bt.Root, err = bt.alloc(inserted) - return fmt.Errorf(errMsg, err) -} - -func (bt *BTree) Delete(k []byte) (bool, error) { - if bt.readOnly { - return false, fmt.Errorf("btree: delete operation not allow on read only tree") - } - errMsg := "btree: cannot delete key: %v" - - if bt.Root == 0 { - return false, fmt.Errorf(errMsg, "tree is empty") - } - - root, err := bt.pull(bt.Root) - if err != nil { - return false, fmt.Errorf(errMsg, err) - } - - var deleted bool - root, deleted, err = bt.bTreeDelete(root, k) - if err != nil { - return false, fmt.Errorf(errMsg, err) - } - - if !deleted { - return deleted, nil - } - - if root.Type() == btreePointer && root.Total() == 1 { - pntrRoot := root.(pointerNode) - bt.Root, _ = pntrRoot.Ptr(0) - } else { - bt.Root, err = bt.alloc(root) - if err != nil { - return false, fmt.Errorf(errMsg, err) - } - } - - return true, nil -} - -func (bt *BTree) bTreeGet(n node, k []byte) (node, []byte, error) { - i, exists := n.Search(k) - - switch n.Type() { - case btreeLeaf: - leafN := n.(leafNode) - - if !exists { - return nil, nil, errors.New("key does not exist") - } - - v, err := leafN.Val(i) - if err != nil { - return nil, nil, err - } - return n, v, nil - - case btreePointer: - pntrN := n.(pointerNode) - - ptr, _ := pntrN.Ptr(i) - 
child, err := bt.get(ptr) - if err != nil { - return nil, nil, err - } - - return bt.bTreeGet(child, k) - default: - return nil, nil, errors.New("invalid node type") - } -} - -func (bt *BTree) bTreeInsert(n node, k, v []byte) (node, error) { - i, exists := n.Search(k) - - var err error - var inserted node - - switch n.Type() { - case btreeLeaf: - leafN := n.(leafNode) - - if !exists { - inserted, err = leafN.Insert(i, k, v) - if err != nil { - return nil, err - } - } else { - inserted, err = leafN.Update(i, k, v) - if err != nil { - return nil, err - } - } - - case btreePointer: - pntrN := n.(pointerNode) - - ptr, _ := pntrN.Ptr(i) - child, err := bt.get(ptr) - if err != nil { - return nil, err - } - - inserted, err = bt.bTreeInsert(child, k, v) - if err != nil { - return nil, err - } - - inserted, err = bt.splitChildPtr(i, pntrN, inserted) - if err != nil { - return nil, err - } - - inPtr, err := bt.alloc(inserted) - if err != nil { - return nil, err - } - - ptrKey, _ := pntrN.Key(i) - inserted, err = pntrN.Update(i, ptrKey, inPtr) - if err != nil { - return nil, err - } - - default: - return nil, errors.New("invalid node type") - } - - return inserted, nil -} - -func (bt *BTree) bTreeDelete(n node, k []byte) (node, bool, error) { - i, exists := n.Search(k) - - var err error - var deleted node - - switch n.Type() { - case btreeLeaf: - if !exists { - return nil, false, errors.New("key does not exist") - } - - leafN := n.(leafNode) - deleted, err = leafN.Delete(i) - if err != nil { - return nil, false, err - } - - case btreePointer: - n := n.(pointerNode) - - ptr, _ := n.Ptr(i) - child, err := bt.pull(ptr) - if err != nil { - return nil, false, err - } - - deleted, _, err = bt.bTreeDelete(child, k) - if err != nil { - return nil, false, err - } - - if deleted.Size() > bt.pgSize/4 { - deleted, err = bt.mergeChildPtr(i, n, deleted) - if err != nil { - return nil, false, err - } - } else { - deletedPtr, err := bt.alloc(deleted) - if err != nil { - return nil, false, 
err - } - - k, _ := deleted.Key(i) - deleted, err = n.Update(i, k, deletedPtr) - if err != nil { - return nil, false, err - } - } - - default: - return nil, false, errors.New("invalid node type") - } - - return deleted, true, nil -} - -func (bt *BTree) splitChildPtr(i int, par pointerNode, child node) (node, error) { - if !bt.shouldSplit(child) { - return par, nil - } - - var ( - lPtr uint64 - rPtr uint64 - lKey []byte - rKey []byte - err error - errMsg = "cannot split child ptr: %v" - ) - - switch child.Type() { - case btreePointer: - ptrChild := child.(pointerNode) - - l, r := ptrChild.Split() - - lKey = l.keys[0] - rKey = r.keys[0] - - lPtr, err = bt.alloc(l) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - - rPtr, err = bt.alloc(r) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - break - - case btreeLeaf: - leafChild := child.(pointerNode) - - l, r := leafChild.Split() - - lKey = l.keys[0] - rKey = r.keys[0] - - lPtr, err = bt.alloc(l) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - - rPtr, err = bt.alloc(r) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - break - - default: - return nil, fmt.Errorf(errMsg, "child has invalid type") - } - - par, err = par.Update(i, lKey, lPtr) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - - par, err = par.Insert(i+1, rKey, rPtr) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - - return par, nil -} - -func (bt *BTree) mergeChildPtr(i int, par pointerNode, child node) (node, error) { - var ( - leftSib node - rightSib node - merge uint8 = 0 - err error - errMsg = "cannot merge child pointer: %v" - ) - - if i > 0 { - leftSib, err = bt.get(par.ptrs[i-1]) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - if bt.canMerge2(child, leftSib) { - merge++ - } - } - - if i < par.Total()-1 { - rightSib, err = bt.get(par.ptrs[i+1]) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - if bt.canMerge2(child, rightSib) { - merge++ - } - } - - if 
merge == 2 { - if !bt.canMerge3(child, leftSib, rightSib) { - merge-- - } - } else if merge == 0 { - return par, nil - } - - switch child.Type() { - - case btreePointer: - pntrChild := child.(pointerNode) - - if merge >= 1 { - if rightSib != nil { - pntrRight := rightSib.(pointerNode) - pntrChild, err = pntrChild.Merge(pntrRight) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - } else { - pntrLeft := rightSib.(pointerNode) - pntrChild, err = pntrChild.Merge(pntrLeft) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - } - } - - if merge == 2 { - pntrLeft := rightSib.(pointerNode) - pntrChild, err = pntrChild.Merge(pntrLeft) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - } - - child = pntrChild - - case btreeLeaf: - leafChild := child.(leafNode) - - if merge >= 1 { - if rightSib != nil { - leafRight := rightSib.(leafNode) - leafChild, err = leafChild.Merge(leafRight) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - } else { - leafLeft := rightSib.(leafNode) - leafChild, err = leafChild.Merge(leafLeft) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - } - } - - if merge == 2 { - leafLeft := rightSib.(leafNode) - leafChild, err = leafChild.Merge(leafLeft) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - } - - child = leafChild - - default: - return nil, fmt.Errorf(errMsg, "child has invalid type") - } - - first, err := child.Key(0) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - - ptr, err := bt.alloc(child) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - - var j int - if merge > 1 { - j = i - 1 - par, err = par.Delete(i) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - - if merge == 3 { - par, err = par.Delete(i + 1) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - } - } else { - j = i - par, err = par.Delete(i + 1) - if err != nil { - return nil, fmt.Errorf(errMsg, err) - } - } - - par, err = par.Update(j, first, ptr) - if err != nil { - 
return nil, fmt.Errorf(errMsg, err) - } - - return par, nil -} - -func (bt *BTree) canMerge2(a, b node) bool { - return a.Type() == b.Type() && a.Size()+b.Size() <= bt.pgSize -} - -func (bt *BTree) canMerge3(a, b, c node) bool { - return a.Type() == b.Type() && a.Type() == c.Type() && a.Size()+b.Size()+c.Size() <= bt.pgSize -} - -func (bt *BTree) shouldSplit(n node) bool { - return n.Size() > bt.pgSize -} diff --git a/internal/btree/btree_test.go b/internal/btree/btree_test.go deleted file mode 100644 index 0dd6948..0000000 --- a/internal/btree/btree_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package btree - -import ( - "bytes" - "testing" -) - -var mockStore = map[uint64]node{ - 0: nil, -} -var lastPtr uint64 = 0 - -func mockGetPage(ptr uint64) (node, error) { - n, ok := mockStore[ptr] - if !ok { - return nil, errKVBadPtr - } - return n, nil -} - -func mockPullPage(ptr uint64) (node, error) { - n, err := mockGetPage(ptr) - if err != nil { - return nil, err - } - if err := mockFreePage(ptr); err != nil { - return nil, err - } - - return n, nil -} - -func mockAllocPage(n node) (uint64, error) { - lastPtr++ - mockStore[lastPtr] = n - - return lastPtr, nil -} - -func mockFreePage(ptr uint64) error { - if _, ok := mockStore[ptr]; !ok { - return errKVBadPtr - } - - delete(mockStore, ptr) - return nil -} - -func TestBTreeGet(t *testing.T) { - cases := []struct { - name string - bt bTree - input []byte - expected []byte - expectedErr error - }{ - { - name: "Get first key", - bt: bTree{ - root: 0, - get: mockGetPage, - pull: mockPullPage, - alloc: mockAllocPage, - free: mockFreePage, - }, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - res, err := c.bt.Get(c.input) - - if err != c.expectedErr { - t.Errorf("") - } - - if !bytes.Equal(c.expected, res) { - t.Errorf("") - } - }) - } -} diff --git a/internal/btree/iterator.go b/internal/btree/iterator.go deleted file mode 100644 index f53ec8f..0000000 --- a/internal/btree/iterator.go +++ /dev/null @@ 
-1,42 +0,0 @@ -package btree - -type Iterator struct { - bt *BTree - path []int - nodes []node -} - -func (iter *Iterator) Next() bool { - depth := len(iter.path) - 1 - - if iter.path[depth]+1 >= iter.nodes[depth].Total() { - iter.path = iter.path[:depth] - iter.nodes = iter.nodes[:depth] - return iter.Next() - } - - switch iter.nodes[depth].Type() { - case btreePointer: - ptr := iter.nodes[depth].(pointerNode) - - next, err := iter.bt.get(ptr.ptrs[iter.path[depth]+1]) - if err != nil { - return false - } - - iter.nodes = append(iter.nodes, next) - iter.path = append(iter.path, -1) - return iter.Next() - - case btreeLeaf: - iter.path[len(iter.path)-1]++ - return true - } - - return false -} - -func (iter *Iterator) Read() (k []byte, v []byte) { - leaf := iter.nodes[len(iter.nodes)-1].(leafNode) - return leaf.keys[iter.path[len(iter.path)-1]], leaf.vals[iter.path[len(iter.path)-1]] -} diff --git a/internal/btree/iterator_test.go b/internal/btree/iterator_test.go deleted file mode 100644 index 3c38a25..0000000 --- a/internal/btree/iterator_test.go +++ /dev/null @@ -1 +0,0 @@ -package btree diff --git a/internal/btree/leafNode.go b/internal/btree/leafNode.go deleted file mode 100644 index ff29c30..0000000 --- a/internal/btree/leafNode.go +++ /dev/null @@ -1,160 +0,0 @@ -package btree - -import ( - "bytes" - "encoding/binary" - "fmt" - "slices" -) - -type leafNode struct { - keys [][]byte - vals [][]byte -} - -func DecodeLeaf(d []byte) (leafNode, error) { - ln := leafNode{} - - if nodeType(binary.BigEndian.Uint16(d[0:2])) != btreeLeaf { - return leafNode{}, fmt.Errorf("leafNode: cannot decode to leaf, wrong type identifier") - } - - nKeys := binary.BigEndian.Uint16(d[2:4]) - ln.keys = make([][]byte, nKeys) - ln.vals = make([][]byte, nKeys) - - off := uint16(4) - for i := 0; uint16(i) < nKeys; i++ { - kSize := binary.BigEndian.Uint16(d[off : off+2]) - vSize := binary.BigEndian.Uint16(d[off+2 : off+4]) - - ln.keys[i] = d[off+4 : off+4+kSize] - ln.vals[i] = d[off+4+kSize 
: off+4+kSize+vSize] - - off += 4 + kSize + vSize - } - - return ln, nil - -} - -func (ln leafNode) Type() nodeType { - return btreeLeaf -} - -func (ln leafNode) Total() int { - return len(ln.keys) -} - -func (ln leafNode) Size() int { - size := 4 - for i, k := range ln.keys { - v := ln.vals[i] - size += 4 + len(k) + len(v) - } - return size -} - -func (ln leafNode) Key(i int) ([]byte, error) { - if i < 0 || i >= len(ln.keys) { - return nil, fmt.Errorf("leafNode: key at index '%d' does not exist", i) - } - return ln.keys[i], nil -} - -func (ln *leafNode) Val(i int) ([]byte, error) { - if i < 0 || i >= len(ln.vals) { - return nil, fmt.Errorf("leafNode: val at index '%d' does not exist", i) - } - return ln.vals[i], nil -} - -func (ln leafNode) Insert(i int, k, v []byte) (newLn leafNode, err error) { - if i < 0 || i > len(ln.keys) || i > len(ln.vals) { - return leafNode{}, fmt.Errorf("leafNode: cannot insert at non existing index '%d'", i) - } - - newLn.keys = slices.Insert(ln.keys, i, k) - newLn.vals = slices.Insert(ln.vals, i, k) - - return ln, nil -} - -func (ln leafNode) Update(i int, k, v []byte) (newLn leafNode, err error) { - if i < 0 || i > len(ln.keys) || i > len(ln.vals) { - return leafNode{}, fmt.Errorf("leafNode: cannot update at non existing index '%d'", i) - } - - ln.keys[i] = k - ln.vals[i] = v - - return ln, nil -} - -func (ln leafNode) Delete(i int) (leafNode, error) { - if i < 0 || i > len(ln.keys) || i > len(ln.vals) { - return leafNode{}, fmt.Errorf("leafNode: cannot delete at non existing index '%d'", i) - } - - ln.keys = slices.Delete(ln.keys, i, i) - ln.vals = slices.Delete(ln.vals, i, i) - - return ln, nil -} - -func (ln leafNode) Search(k []byte) (int, bool) { - return slices.BinarySearchFunc(ln.keys, k, bytes.Compare) -} - -func (ln leafNode) Merge(right leafNode) (leafNode, error) { - if bytes.Compare(ln.keys[len(ln.keys)-1], right.keys[0]) >= 0 { - return leafNode{}, fmt.Errorf("leafNode: cannot merge, last key of left is GE first key of 
right node") - } - - ln.keys = append(ln.keys, right.keys...) - ln.vals = append(ln.vals, right.vals...) - - return ln, nil -} - -func (ln leafNode) Split() (leafNode, leafNode) { - var half int - var size int = 0 - lnSize := ln.Size() - - for i, k := range ln.keys { - v := ln.vals[i] - size += 4 + len(k) + len(v) - if size > lnSize/2 { - half = i - size -= 4 - len(k) - len(v) - break - } - } - - split := leafNode{ - keys: ln.keys[half:], - vals: ln.vals[half:], - } - - ln.keys = ln.keys[:half] - ln.vals = ln.vals[:half] - - return ln, split -} - -func (ln leafNode) Encode() []byte { - var b []byte - - b = binary.BigEndian.AppendUint16(b, uint16(btreeLeaf)) - b = binary.BigEndian.AppendUint16(b, uint16(len(ln.keys))) - for i, k := range ln.keys { - v := ln.vals[i] - b = binary.BigEndian.AppendUint16(b, uint16(len(k))) - b = binary.BigEndian.AppendUint16(b, uint16(len(v))) - b = append(b, k...) - b = append(b, v...) - } - - return b -} diff --git a/internal/btree/leafNode_test.go b/internal/btree/leafNode_test.go deleted file mode 100644 index 5bb3f94..0000000 --- a/internal/btree/leafNode_test.go +++ /dev/null @@ -1,495 +0,0 @@ -package btree - -import ( - "bytes" - "testing" -) - -func TestLeafNodeDecode(t *testing.T) { - cases := []struct { - name string - input []byte - expected *leafNode - expectedErr error - }{ - { - name: "Successful decoding", - input: []byte{ - 0x00, 0x01, // 1 is representation of lfNode nodeType - 0x00, 0x03, // nKeys is equal to 3 - 0x00, 0x04, 0x00, 0x04, 'k', 'e', 'y', '1', 'v', 'a', 'l', '1', // first entry - 0x00, 0x04, 0x00, 0x04, 'k', 'e', 'y', '2', 'v', 'a', 'l', '2', // second entry - 0x00, 0x04, 0x00, 0x04, 'k', 'e', 'y', '3', 'v', 'a', 'l', '3', // third entry - }, - expected: &leafNode{ - keys: [][]byte{{'k', 'e', 'y', '1'}, {'k', 'e', 'y', '2'}, {'k', 'e', 'y', '3'}}, - vals: [][]byte{{'v', 'a', 'l', '1'}, {'v', 'a', 'l', '2'}, {'v', 'a', 'l', '3'}}, - }, - expectedErr: nil, - }, - { - name: "Failed decoding due to wrong 
nodeType bytes", - input: []byte{ - 0x00, 0x02, // 2 is not the representation of lfNode nodeType - 0x00, 0x03, - 0x00, 0x04, 0x00, 0x04, 'k', 'e', 'y', '1', 'v', 'a', 'l', '1', - 0x00, 0x04, 0x00, 0x04, 'k', 'e', 'y', '2', 'v', 'a', 'l', '2', - 0x00, 0x04, 0x00, 0x04, 'k', 'e', 'y', '3', 'v', 'a', 'l', '3', - }, - expected: &leafNode{ - keys: [][]byte{}, - vals: [][]byte{}, - }, - expectedErr: errNodeDecode, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - ln := &leafNode{} - - err := ln.Decode(c.input) - - if err != c.expectedErr { - t.Errorf("Expected error %v, but got %v", c.expectedErr, err) - } - - if len(ln.keys) != len(c.expected.keys) { - t.Errorf("Expected %v keys, but got %v", len(c.expected.keys), len(ln.keys)) - - for i, exp := range c.expected.keys { - if !bytes.Equal(ln.keys[i], exp) { - t.Errorf("Expected key %v at index %v, but got %v", exp, i, ln.keys[i]) - } - } - } - - if len(ln.vals) != len(c.expected.vals) { - t.Errorf("Expected %v vals, but got %v", len(c.expected.vals), len(ln.vals)) - - for i, exp := range c.expected.vals { - if !bytes.Equal(ln.vals[i], exp) { - t.Errorf("Expected val %v at index %v, but got %v", exp, i, ln.vals[i]) - } - } - } - }) - } -} - -func TestLeafNodeTyp(t *testing.T) { - ln := &leafNode{} - - typ := ln.Type() - if typ != lfNode { - t.Errorf("Expected type %v, but got %v", lfNode, typ) - } -} - -func TestLeafNodeEncode(t *testing.T) { - cases := []struct { - name string - input *leafNode - expected []byte - }{ - { - name: "Successful encoding", - input: &leafNode{ - keys: [][]byte{{'k', 'e', 'y', '1'}, {'k', 'e', 'y', '2'}, {'k', 'e', 'y', '3'}}, - vals: [][]byte{{'v', 'a', 'l', '1'}, {'v', 'a', 'l', '2'}, {'v', 'a', 'l', '3'}}, - }, - expected: []byte{ - 0x00, 0x01, // 1 is representation of lfNode nodeType - 0x00, 0x03, // nKeys is equal to 3 - 0x00, 0x04, 0x00, 0x04, 'k', 'e', 'y', '1', 'v', 'a', 'l', '1', // first entry - 0x00, 0x04, 0x00, 0x04, 'k', 'e', 'y', '2', 'v', 'a', 'l', 
'2', // second entry - 0x00, 0x04, 0x00, 0x04, 'k', 'e', 'y', '3', 'v', 'a', 'l', '3', // third entry - }, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - res := c.input.Encode() - - if !bytes.Equal(res, c.expected) { - t.Errorf("Expected %v, but got %v", c.expected, res) - } - }) - } -} - -func TestLeafNodeSize(t *testing.T) { - cases := []struct { - name string - input *leafNode - expected int - }{ - { - name: "Size calculation", - input: &leafNode{ - keys: [][]byte{{'k', 'e', 'y', '1'}, {'k', 'e', 'y', '2'}, {'k', 'e', 'y', '3'}}, - vals: [][]byte{{'v', 'a', 'l', '1'}, {'v', 'a', 'l', '2'}, {'v', 'a', 'l', '3'}}, - }, - expected: 40, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - res := c.input.Size() - - if res != c.expected { - t.Errorf("Expected node size %v, but got %v", c.expected, res) - } - }) - } -} - -func TestLeafNodeKey(t *testing.T) { - ln := &leafNode{ - keys: [][]byte{{'a'}, {'b'}, {'c'}, {'d'}, {'e'}, {'f'}}, - vals: [][]byte{{'1'}, {'2'}, {'3'}, {'4'}, {'5'}, {'6'}}, - } - - cases := []struct { - name string - input int - expected []byte - expectedErr error - }{ - { - name: "Key at index 0", - input: 0, - expected: []byte{'a'}, - expectedErr: nil, - }, - { - name: "Key at last index", - input: 5, - expected: []byte{'f'}, - expectedErr: nil, - }, - { - name: "Too large key", - input: 6, - expected: nil, - expectedErr: errNodeIdx, - }, - { - name: "Negative key", - input: -1, - expected: nil, - expectedErr: errNodeIdx, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - res, err := ln.Key(c.input) - - if err != c.expectedErr { - t.Errorf("Expected error %v, but got %v", c.expectedErr, err) - } - - if !bytes.Equal(res, c.expected) { - t.Errorf("Expected key %v, but got %v", c.expected, res) - } - }) - } -} - -func TestLeafNodeSearch(t *testing.T) { - cases := []struct { - name string - input []byte - ln *leafNode - expected int - expectedExists bool - }{ - { - 
name: "Search before first key in odd amount of keys", - input: []byte{'a'}, - ln: &leafNode{ - keys: [][]byte{{'b'}, {'d'}, {'f'}, {'h'}, {'j'}}, - vals: [][]byte{{}, {}, {}, {}, {}}, - }, - expected: 0, - expectedExists: false, - }, - { - name: "Search before first key in even amount of keys", - input: []byte{'a'}, - ln: &leafNode{ - keys: [][]byte{{'b'}, {'d'}, {'f'}, {'h'}, {'j'}, {'l'}}, - vals: [][]byte{{}, {}, {}, {}, {}, {}}, - }, - expected: 0, - expectedExists: false, - }, - { - name: "Search after last key in odd amount of keys", - input: []byte{'k'}, - ln: &leafNode{ - keys: [][]byte{{'b'}, {'d'}, {'f'}, {'h'}, {'j'}}, - vals: [][]byte{{}, {}, {}, {}, {}}, - }, - expected: 5, - expectedExists: false, - }, - { - name: "Search after last key in even amount of keys", - input: []byte{'m'}, - ln: &leafNode{ - keys: [][]byte{{'b'}, {'d'}, {'f'}, {'h'}, {'j'}, {'l'}}, - vals: [][]byte{{}, {}, {}, {}, {}, {}}, - }, - expected: 6, - expectedExists: false, - }, - { - name: "Search existing key in odd ammount of keys", - input: []byte{'h'}, - ln: &leafNode{ - keys: [][]byte{{'b'}, {'d'}, {'f'}, {'h'}, {'j'}}, - vals: [][]byte{{}, {}, {}, {}, {}}, - }, - expected: 3, - expectedExists: true, - }, - { - name: "Search existing key in even ammount of keys", - input: []byte{'j'}, - ln: &leafNode{ - keys: [][]byte{{'b'}, {'d'}, {'f'}, {'h'}, {'j'}, {'l'}}, - vals: [][]byte{{}, {}, {}, {}, {}, {}}, - }, - expected: 4, - expectedExists: true, - }, - { - name: "Search non-existing key in odd ammount of keys", - input: []byte{'c'}, - ln: &leafNode{ - keys: [][]byte{{'b'}, {'d'}, {'f'}, {'h'}, {'j'}}, - vals: [][]byte{{}, {}, {}, {}, {}}, - }, - expected: 1, - expectedExists: false, - }, - { - name: "Search non-existing key in even ammount of keys", - input: []byte{'c'}, - ln: &leafNode{ - keys: [][]byte{{'b'}, {'d'}, {'f'}, {'h'}, {'j'}, {'l'}}, - vals: [][]byte{{}, {}, {}, {}, {}, {}}, - }, - expected: 1, - expectedExists: false, - }, - { - name: "Search first key in odd 
ammount of keys", - input: []byte{'b'}, - ln: &leafNode{ - keys: [][]byte{{'b'}, {'d'}, {'f'}, {'h'}, {'j'}}, - vals: [][]byte{{}, {}, {}, {}, {}}, - }, - expected: 0, - expectedExists: true, - }, - { - name: "Search first key in even ammount of keys", - input: []byte{'b'}, - ln: &leafNode{ - keys: [][]byte{{'b'}, {'d'}, {'f'}, {'h'}, {'j'}, {'l'}}, - vals: [][]byte{{}, {}, {}, {}, {}, {}}, - }, - expected: 0, - expectedExists: true, - }, - { - name: "Search last key in odd ammount of keys", - input: []byte{'j'}, - ln: &leafNode{ - keys: [][]byte{{'b'}, {'d'}, {'f'}, {'h'}, {'j'}}, - vals: [][]byte{{}, {}, {}, {}, {}}, - }, - expected: 4, - expectedExists: true, - }, - { - name: "Search last key in even ammount of keys", - input: []byte{'l'}, - ln: &leafNode{ - keys: [][]byte{{'b'}, {'d'}, {'f'}, {'h'}, {'j'}, {'l'}}, - vals: [][]byte{{}, {}, {}, {}, {}, {}}, - }, - expected: 5, - expectedExists: true, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - res, exists := c.ln.Search(c.input) - - if exists != c.expectedExists { - t.Errorf("Expected the key existing %v, but got %v", c.expectedExists, exists) - } - - if res != c.expected { - t.Errorf("Expected index %v, but got %v", c.expected, res) - } - }) - } -} - -func TestLeafNodeMerge(t *testing.T) { - cases := []struct { - name string - left *leafNode - right *leafNode - expected *leafNode - expectedErr error - }{ - { - name: "Successful merge", - left: &leafNode{ - keys: [][]byte{{'a'}, {'b'}, {'c'}}, - vals: [][]byte{{}, {}, {}}, - }, - right: &leafNode{ - keys: [][]byte{{'d'}, {'e'}, {'f'}}, - vals: [][]byte{{}, {}, {}}, - }, - expected: &leafNode{ - keys: [][]byte{{'a'}, {'b'}, {'c'}, {'d'}, {'e'}, {'f'}}, - vals: [][]byte{{}, {}, {}, {}, {}, {}}, - }, - expectedErr: nil, - }, - { - name: "Failed merge due to non-ordered keys", - left: &leafNode{ - keys: [][]byte{{'a'}, {'b'}, {'d'}}, - vals: [][]byte{{}, {}, {}}, - }, - right: &leafNode{ - keys: [][]byte{{'c'}, {'e'}, {'f'}}, - vals: 
[][]byte{{}, {}, {}}, - }, - expected: nil, - expectedErr: errNodeMerge, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - res, err := c.left.Merge(c.right) - - if c.expectedErr != err { - t.Errorf("Expected error %v, but got %v", c.expectedErr, err) - } - - if res != nil && c.expected != nil { - ln := res.(*leafNode) - - if len(ln.keys) != len(c.expected.keys) { - t.Errorf("Expected %v keys, but got %v", len(c.expected.keys), len(ln.keys)) - - for i, exp := range c.expected.keys { - if !bytes.Equal(ln.keys[i], exp) { - t.Errorf("Expected key %v at index %v, but got %v", exp, i, ln.keys[i]) - } - } - } - - if len(ln.vals) != len(c.expected.vals) { - t.Errorf("Expected %v vals, but got %v", len(c.expected.vals), len(ln.vals)) - - for i, exp := range c.expected.vals { - if !bytes.Equal(ln.vals[i], exp) { - t.Errorf("Expected val %v at index %v, but got %v", exp, i, ln.vals[i]) - } - } - } - } else if res == nil && c.expected != nil || res != nil && c.expected == nil { - t.Errorf("Expected %v, but got %v", c.expected, res) - } - }) - } -} - -func TestLeafNodeSplit(t *testing.T) { - cases := []struct { - name string - ln *leafNode - left *leafNode - right *leafNode - }{ - { - name: "Split even number of same size", - ln: &leafNode{ - keys: [][]byte{{'a'}, {'b'}, {'c'}, {'d'}, {'e'}, {'f'}}, - vals: [][]byte{{'1'}, {'2'}, {'3'}, {'4'}, {'5'}, {'6'}}, - }, - left: &leafNode{ - keys: [][]byte{{'a'}, {'b'}, {'c'}}, - vals: [][]byte{{'1'}, {'2'}, {'3'}}, - }, - right: &leafNode{ - keys: [][]byte{{'d'}, {'e'}, {'f'}}, - vals: [][]byte{{'4'}, {'5'}, {'6'}}, - }, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - resL, resR := c.ln.Split() - - left := resL.(*leafNode) - right := resR.(*leafNode) - - if len(left.keys) != len(c.left.keys) { - t.Errorf("Expected %v keys, but got %v", len(c.left.keys), len(left.keys)) - - for i, exp := range c.left.keys { - if !bytes.Equal(left.keys[i], exp) { - t.Errorf("Expected key %v at 
index %v, but got %v", exp, i, left.keys[i]) - } - } - } - - if len(left.vals) != len(c.left.vals) { - t.Errorf("left %v vals, but got %v", len(c.left.vals), len(left.vals)) - - for i, exp := range c.left.vals { - if !bytes.Equal(left.vals[i], exp) { - t.Errorf("Expected val %v at index %v, but got %v", exp, i, left.vals[i]) - } - } - } - - if len(right.keys) != len(c.right.keys) { - t.Errorf("Expected %v keys, but got %v", len(c.right.keys), len(right.keys)) - - for i, exp := range c.right.keys { - if !bytes.Equal(right.keys[i], exp) { - t.Errorf("Expected key %v at index %v, but got %v", exp, i, right.keys[i]) - } - } - } - - if len(right.vals) != len(c.right.vals) { - t.Errorf("right %v vals, but got %v", len(c.right.vals), len(right.vals)) - - for i, exp := range c.right.vals { - if !bytes.Equal(right.vals[i], exp) { - t.Errorf("Expected val %v at index %v, but got %v", exp, i, right.vals[i]) - } - } - } - }) - } -} diff --git a/internal/btree/node.go b/internal/btree/node.go deleted file mode 100644 index effbe9c..0000000 --- a/internal/btree/node.go +++ /dev/null @@ -1,40 +0,0 @@ -package btree - -import ( - "fmt" -) - -type nodeType uint16 - -const ( - btreePointer nodeType = iota - btreeLeaf -) - -type node interface { - Type() nodeType - Total() int - Size() int - Key(int) ([]byte, error) - Search([]byte) (int, bool) - Encode() []byte -} - -func decodeNode(d []byte) (node, error) { - var n node - var err error - errMsg := "node: cannot decode node: %v" - - switch nodeType(d[0]) { - case btreePointer: - n, err = DecodePointer(d) - break - case btreeLeaf: - n, err = DecodeLeaf(d) - break - default: - return nil, fmt.Errorf(errMsg, "invalid node type") - } - - return n, err -} diff --git a/internal/btree/pointerNode.go b/internal/btree/pointerNode.go deleted file mode 100644 index fd0a888..0000000 --- a/internal/btree/pointerNode.go +++ /dev/null @@ -1,174 +0,0 @@ -package btree - -import ( - "bytes" - "encoding/binary" - "fmt" - "slices" -) - -type 
pointerNode struct { - keys [][]byte - ptrs []uint64 -} - -func DecodePointer(d []byte) (pointerNode, error) { - if nodeType(binary.BigEndian.Uint16(d[0:2])) != btreePointer { - return pointerNode{}, fmt.Errorf("ptrNode: cannot decode to pointer, wrong type identifier") - } - - pn := pointerNode{} - - nKeys := binary.BigEndian.Uint16(d[2:4]) - off := uint16(4) - for i := uint16(0); i < nKeys; i++ { - kSize := binary.BigEndian.Uint16(d[off : off+2]) - pn.keys = append(pn.keys, d[off+2:off+2+kSize]) - - ptr := binary.BigEndian.Uint64(d[off+2+kSize : off+2+kSize+8]) - pn.ptrs = append(pn.ptrs, ptr) - - off += 2 + kSize + 8 - } - - return pn, nil -} - -func (pn pointerNode) Type() nodeType { - return btreePointer -} - -func (pn pointerNode) Total() int { - return len(pn.keys) -} - -func (pn pointerNode) Size() int { - size := 4 - for _, k := range pn.keys { - size += 2 + len(k) + 8 - } - return size -} - -func (pn pointerNode) Key(i int) ([]byte, error) { - if i < 0 || i >= len(pn.keys) { - return nil, fmt.Errorf("ptrNode: key at index '%d' does not exist", i) - } - return pn.keys[i], nil -} - -func (pn *pointerNode) Ptr(i int) (uint64, error) { - if i < 0 || i >= len(pn.ptrs) { - return 0, fmt.Errorf("prtNode: ptr at index '%d' does not exist", i) - } - return pn.ptrs[i], nil -} - -func (pn pointerNode) Insert(i int, k []byte, ptr uint64) (newPn pointerNode, err error) { - if i < 0 || i > len(pn.keys) || i > len(pn.ptrs) { - return pointerNode{}, fmt.Errorf("ptrNode: cannot insert at non existing index '%d'", i) - } - - newPn.keys = slices.Insert(pn.keys, i, k) - newPn.ptrs = slices.Insert(pn.ptrs, i, ptr) - - return newPn, nil -} - -func (pn pointerNode) Update(i int, k []byte, ptr uint64) (newPn pointerNode, err error) { - if i < 0 || i > len(pn.keys) || i > len(pn.ptrs) { - return pointerNode{}, fmt.Errorf("ptrNode: cannot update at non existing index '%d'", i) - } - - newPn.keys = append(newPn.keys, pn.keys...) - newPn.ptrs = append(newPn.ptrs, pn.ptrs...) 
- newPn.keys[i] = k - newPn.ptrs[i] = ptr - - return pn, nil -} - -func (pn pointerNode) Delete(i int) (newPn pointerNode, err error) { - if i < 0 || i > len(pn.keys) || i > len(pn.ptrs) { - return pointerNode{}, fmt.Errorf("ptrNode: cannot delete at non existing index '%d'", i) - } - - newPn.keys = slices.Delete(pn.keys, i, i) - newPn.ptrs = slices.Delete(pn.ptrs, i, i) - - return newPn, nil -} - -func (pn pointerNode) Search(k []byte) (int, bool) { - return slices.BinarySearchFunc(pn.keys, k, bytes.Compare) -} - -func (pn pointerNode) Merge(right pointerNode) (newPn pointerNode, err error) { - if bytes.Compare(pn.keys[len(pn.keys)-1], right.keys[0]) >= 0 { - return pointerNode{}, fmt.Errorf("ptrNode: cannot merge, last key of left is GE first key of right node") - } - - newPn.keys = append(newPn.keys, pn.keys...) - newPn.ptrs = append(newPn.ptrs, pn.ptrs...) - newPn.keys = append(newPn.keys, right.keys...) - newPn.ptrs = append(newPn.ptrs, right.ptrs...) - - return newPn, nil -} - -func (pn pointerNode) Split() (l pointerNode, r pointerNode) { - var half int - var size int = 0 - var pnSize = pn.Size() - - for i, k := range pn.keys { - size += 2 + len(k) + 8 - if size > pnSize/2 { - half = i - size -= 2 - len(k) - 8 - break - } - } - - l = pointerNode{ - keys: pn.keys[:half], - ptrs: pn.ptrs[:half], - } - - r = pointerNode{ - keys: pn.keys[half:], - ptrs: pn.ptrs[half:], - } - - return l, r -} - -func (pn pointerNode) Encode() []byte { - var b []byte - - b = binary.BigEndian.AppendUint16(b, uint16(btreePointer)) - b = binary.BigEndian.AppendUint16(b, uint16(len(pn.keys))) - for i, k := range pn.keys { - b = binary.BigEndian.AppendUint16(b, uint16(len(k))) - b = append(b, k...) 
- b = binary.BigEndian.AppendUint64(b, pn.ptrs[i]) - } - - return b -} - -func (pn pointerNode) MergePtrs(from, to int, k []byte, ptr uint64) (newPn pointerNode, err error) { - newPn, err = pn.Update(from, k, ptr) - if err != nil { - return pointerNode{}, err - } - - for i := from + 1; i < to; i++ { - newPn, err = newPn.Delete(i) - if err != nil { - return pointerNode{}, err - } - } - - return newPn, nil -} diff --git a/internal/btree/pointerNode_test.go b/internal/btree/pointerNode_test.go deleted file mode 100644 index 3c38a25..0000000 --- a/internal/btree/pointerNode_test.go +++ /dev/null @@ -1 +0,0 @@ -package btree diff --git a/internal/cli/cli.go b/internal/cli/cli.go deleted file mode 100644 index 6a959e8..0000000 --- a/internal/cli/cli.go +++ /dev/null @@ -1,51 +0,0 @@ -package cli - -import ( - "bufio" - "fmt" - "net" - "os" -) - -type Client struct { - Addr string - Port uint16 - conn net.Conn -} - -func NewClient() Client { - return Client{} -} - -func (cli *Client) Run() error { - if err := cli.connect(); err != nil { - return err - } - - reader := bufio.NewScanner(os.Stdin) - for reader.Scan() { - text := reader.Text() - cli.conn.Write([]byte(text)) - } - - if err := cli.disconnect(); err != nil { - return err - } - - return nil -} - -func (cli *Client) connect() error { - var err error - - cli.conn, err = net.Dial("tcp", fmt.Sprintf("%s:%d", cli.Addr, cli.Port)) - if err != nil { - return err - } - - return nil -} - -func (cli *Client) disconnect() error { - return cli.conn.Close() -} diff --git a/internal/db/cell.go b/internal/db/cell.go deleted file mode 100644 index 55b94ad..0000000 --- a/internal/db/cell.go +++ /dev/null @@ -1,142 +0,0 @@ -package db - -import ( - "encoding/binary" - "fmt" -) - -const errCellMsg = "cell: cannot interpret '%x' as type '%s'" - -type Cell struct { - typ dbType - b []byte -} - -func (c *Cell) Type() dbType { - return c.typ -} - -func (c *Cell) Raw() []byte { - return c.b -} - -func (c *Cell) Bytes() ([]byte, error) { 
- if c.typ != dbBytes || int(c.b[0]) != len(c.b[2:]) { - return nil, fmt.Errorf(errCellMsg, c.b, c.typ) - } - - return c.b[2:], nil -} - -func (c *Cell) Bool() (bool, error) { - if c.typ != dbBool || len(c.b) != 1 { - return false, fmt.Errorf(errCellMsg, c.b, c.typ) - } - - n := uint8(c.b[0]) - return n == 1, nil -} - -func (c *Cell) String() (string, error) { - if c.typ != dbString || int(c.b[0]) != len(c.b[2:]) { - return "", fmt.Errorf(errCellMsg, c.b, c.typ) - } - - return string(c.b[2 : 2+int(c.b[0])]), nil -} - -func (c *Cell) RawString() (string, error) { - if c.typ != dbRawString || int(c.b[0]) != len(c.b[2:]) { - return "", fmt.Errorf(errCellMsg, c.b, c.typ) - } - - return string(c.b[2 : 2+int(c.b[0])]), nil -} - -func (c *Cell) Int8() (int8, error) { - if c.typ != dbInt8 { - return 0, fmt.Errorf(errCellMsg, c.b, c.typ) - } - - return int8(c.b[0]), nil -} - -func (c *Cell) Uint8() (uint8, error) { - if c.typ != dbUint8 { - return 0, fmt.Errorf(errCellMsg, c.b, c.typ) - } - - return c.b[0], nil -} - -func (c *Cell) Int16() (int16, error) { - if c.typ != dbInt16 { - return 0, fmt.Errorf(errCellMsg, c.b, c.typ) - } - - n := binary.BigEndian.Uint16(c.b) - return int16(n), nil -} - -func (c *Cell) Uint16() (uint16, error) { - if c.typ != dbUint16 { - return 0, fmt.Errorf(errCellMsg, c.b, c.typ) - } - - n := binary.BigEndian.Uint16(c.b) - return n, nil -} - -func (c *Cell) Int32() (int32, error) { - if c.typ != dbInt32 { - return 0, fmt.Errorf(errCellMsg, c.b, c.typ) - } - - n := binary.BigEndian.Uint32(c.b) - return int32(n), nil -} - -func (c *Cell) Uint32() (uint32, error) { - if c.typ != dbUint32 { - return 0, fmt.Errorf(errCellMsg, c.b, c.typ) - } - - n := binary.BigEndian.Uint32(c.b) - return n, nil -} - -func (c *Cell) Int64() (int64, error) { - if c.typ != dbInt64 { - return 0, fmt.Errorf(errCellMsg, c.b, c.typ) - } - - n := binary.BigEndian.Uint64(c.b) - return int64(n), nil -} - -func (c *Cell) Uint64() (uint64, error) { - if c.typ != dbUint64 { - 
return 0, fmt.Errorf(errCellMsg, c.b, c.typ) - } - - n := binary.BigEndian.Uint64(c.b) - return n, nil -} - -func (c *Cell) Float32() (float32, error) { - if c.typ != dbFloat32 { - return 0, fmt.Errorf(errCellMsg, c.b, c.typ) - } - - n := binary.BigEndian.Uint32(c.b) - return float32(n), nil -} - -func (c *Cell) Float64() (float64, error) { - if c.typ != dbFloat64 { - return 0, fmt.Errorf(errCellMsg, c.b, c.typ) - } - - n := binary.BigEndian.Uint64(c.b) - return float64(n), nil -} diff --git a/internal/db/condition.go b/internal/db/condition.go deleted file mode 100644 index 904e836..0000000 --- a/internal/db/condition.go +++ /dev/null @@ -1,23 +0,0 @@ -package db - -type cmp uint8 - -const ( - cmpEQ cmp = iota - cmpNEQ - cmpLT - cmpLTE - cmpTween - cmpLike - cmpIn -) - -type Condition struct { - col string - val []byte - cmp cmp -} - -func (cnd *Condition) Check(c Cell) bool { - return false -} diff --git a/internal/db/database.go b/internal/db/database.go deleted file mode 100644 index 9327583..0000000 --- a/internal/db/database.go +++ /dev/null @@ -1,145 +0,0 @@ -package db - -import ( - "encoding/binary" - "encoding/json" - "fmt" - - "github.com/gKits/PavoSQL/internal/kvstore" -) - -type DB struct { - Name string - kv kvstore.KVStore -} - -func (db *DB) Query(tbName string, pk []byte) { - tb, err := db.GetTable(tbName) - if err != nil { - - } - - r, err := db.kv.Read() - if err != nil { - } - - val, err := db.kv.Get(pk) - if err != nil { - - } -} - -func (db *DB) Get(tbName string, r *Row) error { - tb, err := db.GetTable(tbName) - if err != nil { - return fmt.Errorf("table '%s' not found: %v", tbName, err) - } - - tb.checkRow(r) - - return nil -} - -func (db *DB) Insert(tbName string, vals []Row) error { - w, err := db.kv.Write() - if err != nil { - w.Abort() - return err - } - - for _, v := range vals { - - } - - if err := w.Commit(); err != nil { - w.Abort() - return err - } - - return nil -} - -func (db *DB) Update() {} - -func (db *DB) Delete(tbName 
string) {} - -func (db *DB) GetTable(tbName string) (table, error) { - k := defaultTBDefTable.encodeKey([]byte(tbName)) - - v, err := db.kv.Get(k) - if err != nil { - return table{}, err - } - - tb := table{} - if err := json.Unmarshal(v, &tb); err != nil { - return table{}, err - } - - return tb, nil -} - -func (db *DB) CreateTable(tb table) error { - v, err := json.Marshal(tb) - if err != nil { - return err - } - - pref, err := db.nextPrefix() - if err != nil { - return err - } - - tb.Prefix = pref - - k := defaultTBDefTable.encodeKey([]byte(tb.Name)) - - if err := db.kv.Set(k, v); err != nil { - return err - } - - return nil -} - -func (db *DB) DeleteTable(tbName string) (bool, error) { - k := defaultTBDefTable.encodeKey([]byte(tbName)) - - del, err := db.kv.Del(k) - if err != nil { - return false, err - } - - return del, nil -} - -func (db *DB) prefix() (uint32, error) { - k := defaultMetaTable.encodeKey([]byte("prefix")) - - _, err := db.kv.Get(k) - if err != nil { - return 0, err - } - - var pref uint32 - - return pref, nil -} - -func (db *DB) nextPrefix() (uint32, error) { - k := defaultMetaTable.encodeKey([]byte("prefix")) - - v, err := db.kv.Get(k) - if err != nil { - return 0, err - } - - var pref uint32 - - v = binary.BigEndian.AppendUint32(v, pref+1) - - if err := db.kv.Set(k, v); err != nil { - return 0, err - } - - return pref + 1, nil -} diff --git a/internal/db/defaults.go b/internal/db/defaults.go deleted file mode 100644 index c448465..0000000 --- a/internal/db/defaults.go +++ /dev/null @@ -1,25 +0,0 @@ -package db - -var defaultMetaTable = table{ - Name: "@meta", - Prefix: 0, - Cols: []string{"name", "val"}, - Types: []dbType{dbString, dbBytes}, - PKeys: 1, -} - -var defaultTBDefTable = table{ - Name: "@tbdef", - Prefix: 1, - Cols: []string{"name", "def"}, - Types: []dbType{dbString, dbBytes}, - PKeys: 1, -} - -var defaultUsersTable = table{ - Name: "@users", - Prefix: 2, - Cols: []string{"name", "pass"}, - Types: []dbType{dbString, dbBytes}, - 
PKeys: 1, -} diff --git a/internal/db/row.go b/internal/db/row.go deleted file mode 100644 index e81ffbe..0000000 --- a/internal/db/row.go +++ /dev/null @@ -1,6 +0,0 @@ -package db - -type Row struct { - Cols []string - Vals []Cell -} diff --git a/internal/db/table.go b/internal/db/table.go deleted file mode 100644 index 5e7e22c..0000000 --- a/internal/db/table.go +++ /dev/null @@ -1,35 +0,0 @@ -package db - -import ( - "encoding/binary" - "fmt" -) - -type table struct { - Name string `json:"name"` - Cols []string `json:"columns"` - Types []dbType `json:"types"` - Null []bool `json:"nullable"` - Pref uint32 `json:"prefix"` -} - -func (tb *table) encodeKey(k []byte) []byte { - var buf [4]byte - binary.BigEndian.PutUint32(buf[:], tb.Pref) - return append(k, buf[:]...) -} - -func (tb *table) CheckRow(row *Row) error { - set := map[string]struct{}{} - for _, col := range tb.Cols { - set[col] = struct{}{} - } - - for _, col := range row.Cols { - if _, ok := set[col]; !ok { - return fmt.Errorf("unknow column '%s' in table '%s'", col, tb.Name) - } - } - - return nil -} diff --git a/internal/db/types.go b/internal/db/types.go deleted file mode 100644 index ea74f3c..0000000 --- a/internal/db/types.go +++ /dev/null @@ -1,57 +0,0 @@ -package db - -import "fmt" - -type dbType uint8 - -const ( - dbBytes dbType = iota - dbBool - dbString - dbRawString - dbInt8 - dbUint8 - dbInt16 - dbUint16 - dbInt32 - dbUint32 - dbInt64 - dbUint64 - dbFloat32 - dbFloat64 -) - -func (dT dbType) String() string { - switch dT { - case dbBytes: - return "Bytes" - case dbBool: - return "Bool" - case dbString: - return "String" - case dbRawString: - return "RawString" - case dbInt8: - return "Int8" - case dbUint8: - return "Uint8" - case dbInt16: - return "Int16" - case dbUint16: - return "Uint16" - case dbInt32: - return "Int32" - case dbUint32: - return "Uint32" - case dbInt64: - return "Int64" - case dbUint64: - return "Uint64" - case dbFloat32: - return "Float32" - case dbFloat64: - return 
"Float64" - default: - return fmt.Sprintf("%d", dT) - } -} diff --git a/internal/dbms/dbms.go b/internal/dbms/dbms.go deleted file mode 100644 index 4d76fbd..0000000 --- a/internal/dbms/dbms.go +++ /dev/null @@ -1,13 +0,0 @@ -package dbms - -type DBMS struct { - Dir string -} - -func (dbms *DBMS) CreateDatabase() error { - return nil -} - -func (dbms *DBMS) DeleteDatabase() error { - return nil -} diff --git a/internal/freelist/freelist.go b/internal/freelist/freelist.go deleted file mode 100644 index fbcdce2..0000000 --- a/internal/freelist/freelist.go +++ /dev/null @@ -1,109 +0,0 @@ -package freelist - -import () - -type getFunc func(uint64) ([]byte, error) -type pullFunc func(uint64) ([]byte, error) -type allocFunc func([]byte) (uint64, error) -type freeFunc func(uint64) error - -type FreelistData struct { - head uint64 - ptrs []uint64 - nHead int - nDiscard int -} - -type Freelist struct { - FreelistData - version uint64 - minRead uint64 - pgSize int - get func(uint64) (freelistNode, error) - pull func(uint64) (freelistNode, error) - alloc func(freelistNode) (uint64, error) - free func(uint64) error -} - -func New( - head, version uint64, pgSize int, - get getFunc, pull pullFunc, alloc allocFunc, free freeFunc, -) Freelist { - fl := Freelist{ - version: version, - pgSize: pgSize, - get: func(ptr uint64) (freelistNode, error) { - d, err := get(ptr) - if err != nil { - return freelistNode{}, err - } - return decodeFreelistNode(d), nil - }, - pull: func(ptr uint64) (freelistNode, error) { - d, err := pull(ptr) - if err != nil { - return freelistNode{}, err - } - return decodeFreelistNode(d), nil - }, - alloc: func(fn freelistNode) (uint64, error) { - return alloc(fn.Encode()) - }, - free: free, - } - - fl.head = head - - return fl -} - -func (fl *Freelist) Nq(ptr uint64) error { - var head freelistNode - var err error - - if fl.head == 0 { - head = freelistNode{next: fl.head} - head = head.Nq(ptr) - goto alloc - } - - head, err = fl.get(fl.head) - if err != nil { 
- return err - } - - if head.Size()+8 <= fl.pgSize { - head = head.Nq(ptr) - if err := fl.free(fl.head); err != nil { - return err - } - goto alloc - } - head = freelistNode{next: fl.head} - head = head.Nq(ptr) - -alloc: - fl.head, err = fl.alloc(head) - return err -} - -func (fl *Freelist) Dq() (uint64, error) { - if fl.head == 0 { - return 0, nil - } - - var node freelistNode - var err error - - for next := fl.head; next != 0; next = node.next { - node, err = fl.get(next) - if err != nil { - return 0, err - } - } - - var ptr uint64 - ptr, node = node.Dq() - - return ptr, nil -} diff --git a/internal/freelist/freelistNode.go b/internal/freelist/freelistNode.go deleted file mode 100644 index da11569..0000000 --- a/internal/freelist/freelistNode.go +++ /dev/null @@ -1,65 +0,0 @@ -package freelist - -import ( - "encoding/binary" - "slices" -) - -type freelistNode struct { - next uint64 - ptrs []uint64 -} - -func decodeFreelistNode(d []byte) freelistNode { - fn := freelistNode{} - - nPtrs := binary.BigEndian.Uint16(d[0:2]) - fn.next = binary.BigEndian.Uint64(d[2:10]) - for i := uint16(0); i < nPtrs; i++ { - fn.ptrs = append(fn.ptrs, binary.BigEndian.Uint64(d[10+i*8:])) - } - - return fn -} - -func (fn freelistNode) Encode() []byte { - var b []byte - - binary.BigEndian.AppendUint16(b, uint16(len(fn.ptrs))) - binary.BigEndian.AppendUint64(b, fn.next) - for _, ptr := range fn.ptrs { - binary.BigEndian.AppendUint64(b, ptr) - } - - return b -} - -func (fn freelistNode) Total() int { - return len(fn.ptrs) -} - -func (fn freelistNode) Size() int { - return 12 + len(fn.ptrs)*8 -} - -func (fn freelistNode) Pop() (uint64, freelistNode) { - last := fn.ptrs[fn.Total()-1] - fn.ptrs = fn.ptrs[:fn.Total()-1] - return last, fn -} - -func (fn freelistNode) Push(ptr uint64) freelistNode { - fn.ptrs = append(fn.ptrs, ptr) - return fn -} - -func (fn freelistNode) Nq(ptr uint64) freelistNode { - fn.ptrs = slices.Insert(fn.ptrs, 0, ptr) - return fn -} - -func (fn freelistNode) Dq() 
(uint64, freelistNode) { - last := fn.ptrs[fn.Total()-1] - fn.ptrs = fn.ptrs[:fn.Total()-1] - return last, fn -} diff --git a/internal/freelist/freelistNode_test.go b/internal/freelist/freelistNode_test.go deleted file mode 100644 index cf8aa0a..0000000 --- a/internal/freelist/freelistNode_test.go +++ /dev/null @@ -1 +0,0 @@ -package freelist diff --git a/internal/freelist/freelist_test.go b/internal/freelist/freelist_test.go deleted file mode 100644 index cf8aa0a..0000000 --- a/internal/freelist/freelist_test.go +++ /dev/null @@ -1 +0,0 @@ -package freelist diff --git a/internal/kvstore/kvreader.go b/internal/kvstore/kvreader.go deleted file mode 100644 index e6f31fc..0000000 --- a/internal/kvstore/kvreader.go +++ /dev/null @@ -1,45 +0,0 @@ -package kvstore - -import ( - "github.com/gKits/PavoSQL/internal/btree" -) - -type KVReader struct { - version uint64 - tree btree.BTree - chunks [][]byte - get func(uint64) ([]byte, error) - close func(*KVReader) - idx int -} - -func newReader(v, root uint64, chunks [][]byte, close func(*KVReader)) *KVReader { - r := &KVReader{version: v, chunks: chunks, close: close} - r.tree = btree.NewReadOnly(root, PAGE_SIZE, r.getPage) - return r -} - -func (r *KVReader) Get(k []byte) ([]byte, error) { - return r.tree.Get(k) -} - -func (r *KVReader) Seek(k []byte) (*btree.Iterator, error) { - return nil, nil -} - -func (r *KVReader) Close() { - r.close(r) -} - -func (r *KVReader) getPage(ptr uint64) ([]byte, error) { - start := uint64(0) - for _, chunk := range r.chunks { - end := start + uint64(len(chunk)/PAGE_SIZE) - if ptr < end { - offset := PAGE_SIZE * (ptr - start) - return chunk[offset : offset+PAGE_SIZE], nil - } - start = end - } - return r.get(ptr) -} diff --git a/internal/kvstore/kvstore.go b/internal/kvstore/kvstore.go deleted file mode 100644 index 93987aa..0000000 --- a/internal/kvstore/kvstore.go +++ /dev/null @@ -1,242 +0,0 @@ -package kvstore - -import ( - "bytes" - "encoding/binary" - "io" - "os" - "path/filepath" - 
"sync" - - "github.com/gKits/PavoSQL/internal/freelist" - "github.com/gKits/PavoSQL/pkg/vcache" -) - -const PAGE_SIZE = 4096 - -type KVStore struct { - kvOpts // options type embeded - f *os.File // the database file - root uint64 // pointer to the root of the btree - free freelist.Freelist // freelist managing free pages - cache vcache.VCache[uint64, []byte] // cache of freed pages still to be read - version uint64 // latest version of the kv store - wLock sync.Mutex // write lock allowing only a single concurrent writer a time - fLock sync.RWMutex // file lock making sure file is not read and written at the same time -} - -func New(opts ...kvOptFunc) *KVStore { - opt := defaultOpts() - for _, fn := range opts { - fn(&opt) - } - - return &KVStore{ - kvOpts: opt, - cache: vcache.New[uint64, []byte](0), - } -} - -func (kv *KVStore) Open() error { - f, err := os.OpenFile(kv.path, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { - kv.Close() - return err - } - kv.f = f - - if err := kv.loadMaster(); err != nil { - kv.Close() - return err - } - - return nil -} - -func (kv *KVStore) Close() { - kv.f.Close() -} - -func (kv *KVStore) Read() (*KVReader, error) { - r := newReader(kv.version, kv.root, kv.mmap.chunks, kv.endRead) - - return r, nil -} - -func (kv *KVStore) Write() (*KVWriter, error) { - kv.wLock.Lock() - - w := newWriter(kv.root, kv.commitWrite, kv.abortWrite) - - w.version = kv.version - - return w, nil -} - -func (kv *KVStore) endRead(r *KVReader) { -} - -func (kv *KVStore) commitWrite(w *KVWriter) error { - kv.root = w.tree.Root - return nil -} - -func (kv *KVStore) abortWrite(w *KVWriter) { - kv.wLock.Unlock() -} - -func (kv *KVStore) loadMaster() error { - data := kv.mmap.chunks[0] - root := binary.BigEndian.Uint64(data[16:24]) - free := binary.BigEndian.Uint64(data[24:32]) - npages := binary.BigEndian.Uint64(data[32:40]) - version := binary.BigEndian.Uint64(data[40:48]) - - if !bytes.Equal([]byte(kv.sig), data[:16]) { - - } - - if npages < 1 || npages > 
uint64(kv.mmap.fileSize/PAGE_SIZE) { - } - - kv.root = root - kv.version = version - - return nil -} - -func (kv *KVStore) flushMaster() error { - var data [40]byte - - copy(data[:16], []byte(kv.sig)) - binary.BigEndian.PutUint64(data[16:], kv.root) - binary.BigEndian.PutUint64(data[24:], kv.root) - binary.BigEndian.PutUint64(data[32:], kv.page.flushed) - - _, err := kv.f.WriteAt(data[:], 0) - return err -} - -func (kv *KVStore) writePages(changes map[uint64][]byte) (err error) { - path, name := filepath.Split(kv.path) - - tmp, err := os.CreateTemp(path, name) - if err != nil { - return err - } - defer func() { - tmp.Close() - if err != nil { - os.Remove(tmp.Name()) - } - }() - - tmpName := tmp.Name() - - if _, err := io.Copy(tmp, kv.f); err != nil { - return err - } - - for ptr, page := range changes { - if _, err := tmp.WriteAt(page, int64(ptr)); err != nil { - return err - } - } - - if err := tmp.Sync(); err != nil { - return err - } - - destInfo, err := kv.f.Stat() - if err != nil { - return err - } - - if err := tmp.Chmod(destInfo.Mode()); err != nil { - return err - } - - if err := tmp.Close(); err != nil { - return err - } - - kv.fLock.Lock() - defer kv.fLock.Unlock() - - if err := kv.f.Close(); err != nil { - return err - } - - if err := os.Rename(tmpName, kv.path); err != nil { - return err - } - - kv.f, err = os.Open(kv.path) - if err != nil { - return err - } - - return nil -} - -func (kv *KVStore) getFilePage(ptr uint64) ([]byte, error) { - kv.fLock.RLock() - defer kv.fLock.RUnlock() - - page := make([]byte, PAGE_SIZE) - if _, err := kv.f.ReadAt(page, int64(ptr)); err != nil { - return nil, err - } - return page, nil -} - -func (kv *KVStore) pullPage(ptr uint64) ([]byte, error) { - return nil, nil -} - -func (kv *KVStore) allocPage(page []byte) (uint64, error) { - return 0, nil -} - -func (kv *KVStore) freeFilePage(ptr uint64) error { - page, err := kv.getFilePage(ptr) - if err != nil { - return err - } - - kv.cache.Cache(ptr, page) // store freed page 
in versioned cache - return nil -} - -/* -kvOpts struct is embeded into KVStore to implement the functional options -pattern for KVStore -*/ -type kvOptFunc func(*kvOpts) - -type kvOpts struct { - path string // path to the database file - sig string // the file signature -} - -func defaultOpts() kvOpts { - return kvOpts{ - path: "/var/lib/pavosql", - sig: "PavoSQL_DB_File:", - } -} - -func WithPath(path string) kvOptFunc { - return func(opts *kvOpts) { - opts.path = path - } -} - -func WithSignature(sig string) kvOptFunc { - s := make([]byte, 16) - copy(s, sig) - - return func(opts *kvOpts) { - opts.sig = string(s) - } -} diff --git a/internal/kvstore/kvwriter.go b/internal/kvstore/kvwriter.go deleted file mode 100644 index c026caa..0000000 --- a/internal/kvstore/kvwriter.go +++ /dev/null @@ -1,68 +0,0 @@ -package kvstore - -import ( - "github.com/gKits/PavoSQL/internal/btree" - "github.com/gKits/PavoSQL/internal/freelist" -) - -type KVWriter struct { - KVReader - kv *KVStore - free freelist.Freelist - nappend int - changes map[uint64][]byte - commit func(*KVWriter) error - abort func(*KVWriter) -} - -func newWriter(root uint64, commit func(*KVWriter) error, abort func(*KVWriter)) *KVWriter { - w := &KVWriter{} - w.tree = btree.New(root, PAGE_SIZE, w.getWriterPage, w.pullPage, w.allocPage, w.freePage) - return w -} - -func (w *KVWriter) Set(k, v []byte) error { - return w.tree.Set(k, v) -} - -func (w *KVWriter) Del(k []byte) (bool, error) { - return w.tree.Delete(k) -} - -func (w *KVWriter) Abort() { - w.abort(w) -} - -func (w *KVWriter) Commit() error { - return w.commit(w) -} - -func (w *KVWriter) getWriterPage(ptr uint64) ([]byte, error) { - page, ok := w.changes[ptr] - if !ok { - return w.getPage(ptr) - } - return page, nil -} - -func (w *KVWriter) pullPage(ptr uint64) ([]byte, error) { - page, err := w.getWriterPage(ptr) - if err != nil { - return nil, err - } - - if err := w.freePage(ptr); err != nil { - return nil, err - } - - return page, nil -} - -func (w 
*KVWriter) allocPage(d []byte) (uint64, error) { - return 0, nil -} - -func (w *KVWriter) freePage(ptr uint64) error { - w.changes[ptr] = nil - return nil -} diff --git a/internal/kvstore/mmap.go b/internal/kvstore/mmap.go deleted file mode 100644 index 750afe1..0000000 --- a/internal/kvstore/mmap.go +++ /dev/null @@ -1,100 +0,0 @@ -//go:build !windows - -package kvstore - -import ( - "errors" - "os" - "syscall" -) - -var ( - errMmapFileSize = errors.New("mmap: cannot init mmap, file size needs to be multiple of page size") -) - -type mmap struct { - fileSize int - mmapSize int - chunks [][]byte -} - -func (mm *mmap) Init(f *os.File) error { - fStats, err := f.Stat() - if err != nil { - return err - } - - if fStats.Size()%PAGE_SIZE != 0 { - return errMmapFileSize - } - - mmapSize := 64 << 20 - for mmapSize < int(fStats.Size()) { - mmapSize *= 2 - } - - chunk, err := syscall.Mmap( - int(f.Fd()), 0, mmapSize, - syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED, - ) - if err != nil { - return err - } - - mm.mmapSize = mmapSize - mm.chunks = [][]byte{chunk} - mm.fileSize = int(fStats.Size()) - - return nil -} - -func (mm *mmap) Extend(f *os.File, n int) error { - if mm.mmapSize >= n*PAGE_SIZE { - return nil - } - - chunk, err := syscall.Mmap( - int(f.Fd()), int64(mm.mmapSize), mm.mmapSize, - syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED, - ) - if err != nil { - return err - } - - mm.mmapSize *= 2 - mm.chunks = append(mm.chunks, chunk) - - return nil -} - -func (mm *mmap) Close() error { - for _, chunk := range mm.chunks { - if err := syscall.Munmap(chunk); err != nil { - return err - } - } - return nil -} - -func (mm *mmap) ExtendFile(f *os.File, n int) error { - filePages := mm.fileSize / PAGE_SIZE - if filePages >= n { - return nil - } - - for filePages < n { - inc := filePages / 8 - if inc < 1 { - inc = 1 - } - filePages += inc - } - - fileSize := filePages * PAGE_SIZE - if err := syscall.Fallocate(int(f.Fd()), 0, 0, int64(fileSize)); err != nil { 
- return err - } - - mm.fileSize = fileSize - return nil -} diff --git a/internal/kvstore/mmap_windows.go b/internal/kvstore/mmap_windows.go deleted file mode 100644 index 109fbf3..0000000 --- a/internal/kvstore/mmap_windows.go +++ /dev/null @@ -1,107 +0,0 @@ -//go:build windows - -package kvstore - -import ( - "errors" - "os" - "syscall" -) - -var ( - errMmapFileSize = errors.New("mmap: cannot init mmap, file size needs to be multiple of page size") -) - -type mmap struct { - fileSize int - mmapSize int - chunks [][]byte -} - -func (mm *mmap) Init(f *os.File) error { - fStats, err := f.Stat() - if err != nil { - return err - } - - if fStats.Size()%PageSize != 0 { - return errMmapFileSize - } - - mmapSize := 64 << 20 - for mmapSize < int(fStats.Size()) { - mmapSize *= 2 - } - - fileMap, err := syscall.CreateFileMapping( - syscall.Handle(f.Fd()), - nil, - syscall.PAGE_READWRITE, - 0, uint32(mmapSize), - nil, - ) - defer syscall.CloseHandle(fileMap) - - addr, err := syscall.MapViewOfFile(fileMap, syscall.FILE_MAP_WRITE, 0, 0, uintptr(mmapSize)) - if err != nil { - return err - } - defer syscall.UnmapViewOfFile(addr) - - // data := (([]*byte)(unsafe.Pointer(addr))) - mm.mmapSize = mmapSize - mm.chunks = [][]byte{} - mm.fileSize = int(fStats.Size()) - - return nil -} - -func (mm *mmap) Extend(f *os.File, n int) error { - if mm.mmapSize >= n*PageSize { - return nil - } - - // chunk, err := syscall.Mmap( - // int(f.Fd()), int64(mm.mmapSize), mm.mmapSize, - // syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED, - // ) - // if err != nil { - // return err - // } - - // mm.mmapSize *= 2 - // mm.chunks = append(mm.chunks, chunk) - - return nil -} - -func (mm *mmap) Close() error { - return nil -} - -func (mm *mmap) ExtendFile(f *os.File, n int) error { - filePages := mm.fileSize / PageSize - if filePages >= n { - return nil - } - - for filePages < n { - inc := filePages / 8 - if inc < 1 { - inc = 1 - } - filePages += inc - } - - // if err := syscall.LockFile(); err != 
nil { - - // } - - // fileSize := filePages * pageSize - // if err := syscall.Fallocate(int(f.Fd()), 0, 0, int64(fileSize)); err != nil { - // return err - // } - - // mm.fileSize = fileSize - return nil -} diff --git a/internal/parser/lexer.go b/internal/parser/lexer.go deleted file mode 100644 index de69c8a..0000000 --- a/internal/parser/lexer.go +++ /dev/null @@ -1,238 +0,0 @@ -package parser - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -type stateFn func(*lexer) stateFn - -type lexer struct { - name string // name of lexer for debugging - input string // input string beeing tokenized - pos int // current position in input string - start int // starting position of current token beeing scanned - width int // width of last rune read - tok token // last emitted token - line int // current line in multt-line statement - startLn int // starting line of current token - opt lexOptions -} - -type lexOptions struct { - emitComments bool - allowComments bool - leftComment string - rightComment string - singleComment string -} - -var defaultLexOptions lexOptions = lexOptions{ - emitComments: false, - allowComments: true, - leftComment: "/*", - rightComment: "*/", - singleComment: "//", -} - -const eof = -1 - -func (l *lexer) next() (r rune) { - if l.pos >= len(l.input) { - l.width = 0 - return eof - } - - r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) - l.pos += l.width - if r == '\n' { - l.line++ - } - - return r -} - -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -func (l *lexer) backup() { - if l.pos > 0 { - r, w := utf8.DecodeLastRuneInString(l.input[:l.pos]) - l.pos -= w - if r == '\n' { - l.line-- - } - } -} - -func (l *lexer) ignore() { - l.line += strings.Count(l.input[l.start:l.pos], "\n") - l.start = l.pos - l.startLn = l.line -} - -func (l *lexer) accept(valid string) bool { - if strings.ContainsRune(valid, l.next()) { - return true - } - l.backup() - return false -} - -func (l *lexer) acceptRun(valid 
string) { - for strings.ContainsRune(valid, l.next()) { - } - l.backup() -} - -func (l *lexer) curToken(typ tokenType) token { - tok := token{typ, l.input[l.start:l.pos], l.startLn} - l.start = l.pos - l.startLn = l.line - return tok -} - -func (l *lexer) emit(typ tokenType) stateFn { - return l.emitToken(l.curToken(typ)) - -} - -func (l *lexer) emitToken(t token) stateFn { - l.tok = t - return nil -} - -func (l *lexer) run() { - for state := lexDefault; state != nil; { - state = nil - } -} - -func lexDefault(l *lexer) stateFn { - switch l.next() { - case eof: - case '"': - return lexString - case '`': - return lexRawString - case '+', '-': - return lexNumber - default: - return lexIdent - } - return nil -} - -func lexIdent(l *lexer) stateFn { - for { - r := l.next() - - if unicode.IsSpace(r) { - l.backup() - word := l.input[l.start:l.pos] - - typ, ok := key[word] - if !ok { - typ = tokIdent - } - - return l.emit(typ) - } - } -} - -func lexString(l *lexer) stateFn { -loop: - for { - switch l.next() { - case '\\': - if r := l.next(); r != eof && r != '\n' { - break - } - fallthrough - - case eof, '\n': - return l.errorf("unterminated string") - - case '"': - break loop - } - } - - return l.emit(tokString) -} - -func lexRawString(l *lexer) stateFn { -loop: - for { - switch l.next() { - case '`': - break loop - } - } - - return l.emit(tokString) -} - -func lexNumber(l *lexer) stateFn { - l.accept("+-") - digits := "0123456789" - - l.acceptRun(digits) - if l.accept(".") { - l.acceptRun(digits) - } - - if l.accept("eE") { - l.accept("+-") - l.acceptRun(digits) - } - - if unicode.IsLetter(l.peek()) { - l.next() - return l.errorf("invalid number syntax: %q", l.input[l.start:l.pos]) - } - return l.emit(tokNumber) -} - -func lexComment(l *lexer) stateFn { - l.pos += len(l.opt.leftComment) - closeIdx := strings.Index(l.input[l.pos:], l.opt.rightComment) - if closeIdx < 0 { - return l.errorf("unterminated comment") - } - l.pos += closeIdx + len(l.opt.rightComment) - - 
l.curToken(tokComment) - - return nil -} - -func lexSingleLineComment(l *lexer) stateFn { - l.pos += len(l.opt.singleComment) - newlineIdx := strings.Index(l.input[l.pos:], "\n") - if newlineIdx < 0 { - } - l.pos += newlineIdx - return nil -} - -func lexSpace(l *lexer) stateFn { - for r := l.peek(); unicode.IsSpace(r); r = l.peek() { - l.next() - } - return nil -} - -func (l *lexer) errorf(format string, args ...any) stateFn { - l.tok = token{tokErr, fmt.Sprintf(format, args...), l.line} - l.start = 0 - l.pos = 0 - l.input = l.input[:0] - return nil -} diff --git a/internal/parser/parse.go b/internal/parser/parse.go deleted file mode 100644 index 725d28c..0000000 --- a/internal/parser/parse.go +++ /dev/null @@ -1,8 +0,0 @@ -package parser - -type Parser struct { - name string - lex *lexer -} - -func (p *Parser) Parse(input string) {} diff --git a/internal/parser/token.go b/internal/parser/token.go deleted file mode 100644 index 57b1c80..0000000 --- a/internal/parser/token.go +++ /dev/null @@ -1,99 +0,0 @@ -package parser - -import "fmt" - -type tokenType uint - -const ( - tokErr tokenType = iota - tokEOF - tokComment - tokBool - tokNumber - tokString - tokIdent - - tokKey // Keyword delim - tokAlter - tokCreate - tokDelete - tokDrop - tokInsert - tokSelect - tokTruncate - tokUpdate - tokColumn - tokDatabase - tokIndex - tokTable - tokDistinct - tokTop - tokAs - tokFrom - tokSet - tokLeft - tokRight - tokFull - tokJoin - tokWhere - tokHaving - tokInto - tokValues - tokAnd - tokIs - tokNot - tokNull - tokOr - tokLike - tokIn - tokBetween - tokCount - tokSum - tokAvg - tokMin - tokMax - tokGroup - tokOrder - tokBy - tokDesc -) - -var key = map[string]tokenType{ - "alter": tokAlter, - "create": tokCreate, - "delete": tokDelete, - "drop": tokDrop, - "insert": tokInsert, - "select": tokSelect, - "truncate": tokTruncate, - "update": tokUpdate, - "column": tokColumn, - "database": tokDatabase, - "index": tokIndex, - "table": tokTable, - "distinct": tokDistinct, - "top": 
tokTop, - "as": tokAs, - "from": tokFrom, - "set": tokSet, -} - -type token struct { - typ tokenType - val string - line int -} - -func (t token) String() string { - switch { - case t.typ == tokEOF: - return "EOF" - case t.typ == tokErr: - return t.val - case t.typ > tokKey: - return fmt.Sprintf("", t.val) - case len(t.val) > 13: - return fmt.Sprintf("%.10q...", t.val) - } - return t.val -} diff --git a/internal/server/server.go b/internal/server/server.go deleted file mode 100644 index 2e682f1..0000000 --- a/internal/server/server.go +++ /dev/null @@ -1,71 +0,0 @@ -package server - -import ( - "bufio" - "fmt" - "net" - _ "net/http" - - "github.com/gKits/PavoSQL/dbms" - "github.com/gKits/PavoSQL/parser" -) - -type Server struct { - Addr string - Port uint16 - listener net.Listener - dbms dbms.DBMS - parser parser.Parser - count int -} - -func NewServer(addr string, port uint16) (Server, error) { - listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", addr, port)) - if err != nil { - return Server{}, err - } - - // TODO: implement sane and correct defaults - return Server{ - Addr: addr, - Port: port, - listener: listener, - dbms: dbms.DBMS{}, - parser: parser.Parser{}, - count: 0, - }, nil -} - -func (s *Server) Run() { - for { - conn, err := s.listener.Accept() - if err != nil { - continue - } - go s.handleConnection(conn) - s.count++ - } -} - -func (s *Server) Stop() { - s.listener.Close() - // TODO: Implement graceful shutdown -} - -func (s *Server) handleConnection(conn net.Conn) { - defer conn.Close() - - scanner := bufio.NewScanner(conn) - for scanner.Scan() { - msg := scanner.Text() - - switch msg { - case "exit": - - case "h", "help": - default: - // TODO: Implement parser, operate, respond loop - } - } - s.count-- -} diff --git a/internal/tree/common.go b/internal/tree/common.go new file mode 100755 index 0000000..c49aba6 --- /dev/null +++ b/internal/tree/common.go @@ -0,0 +1,10 @@ +package tree + +const PageSize = 8192 + +type PageType uint8 + +const ( + 
PointerPage PageType = iota + 1 + LeafPage +) diff --git a/internal/tree/node.go b/internal/tree/node.go new file mode 100755 index 0000000..52b83f4 --- /dev/null +++ b/internal/tree/node.go @@ -0,0 +1,311 @@ +package tree + +import ( + "bytes" + "encoding/binary" + "errors" + "iter" +) + +const ( + nOff = 1 + wCurOff = nOff + 2 + dataOff = wCurOff + 2 +) + +var ( + ErrIndexOutOfBounds = errors.New("index is out of bounds") +) + +/* +A node is an array of bytes representing the data stored in a single node of a B+Tree. +Nodes are stored in a custom byte format that is structured as follows: + + Description | Header | Data area + ------------+--------+---------- + Size in B | 5 | ? + +The Header contains specific meta data about the current state of the node and is structured as +follows: + + Description | Type | N | W-Cursor + ------------+------+---+------------- + Size in B | 1 | 2 | 2 + +N is the number of cells currently stored in the node and W-Cursor the write cursor represented +by an uint16 offset to the next position data can be written to. + +The Data area is dynamically sized an contains the actual data in form of cells as well as the +offset references to those cells. Those two parts are separated by an empty space called the +void. The Data area is structured as follows: + + Description | Offsets | Void | Cells + ------------+---------+---------------------------+------ + Size in B | N * 2 | W-Cursor - (N*2 + Header) | ? + +The offsets are just an ordered list of uint16 pointing to the position of cell they are +referencing inside this node. The void is the empty space between the end of the offset list and +the W-Cursor which always points to the beginning of the cells. 
+ +The data stored in the cells is formatted as follows: + + Description | KeyLen | ValLen | Key | Val + ------------+--------+--------+--------+------- + Size in B | 2 | 2 | KeyLen | ValLen +*/ +type node [PageSize]byte + +func newNode(typ PageType) node { + var n node + n[0] = byte(typ) + n.setWCursor(PageSize) + return n +} + +// Returns the type of n. +func (n *node) Type() PageType { + return PageType(n[0]) +} + +// Returns the number of cells currently stored on n. +func (n *node) N() uint16 { + return binary.LittleEndian.Uint16(n[nOff:]) +} + +// Returns the key of the i'th cell stored in n. +// +// Panics if i is greater or equal than the length of n. +func (n *node) Key(i uint16) []byte { + if !n.indexInBounds(i) { + panic(ErrIndexOutOfBounds) + } + off := n.offset(i) + kLen := binary.LittleEndian.Uint16(n[off:]) + return n[off+4 : off+4+kLen] +} + +// Returns the value of the i'th cell stored in n. +// +// Panics if i is greater or equal than the length of n. +func (n *node) Val(i uint16) []byte { + if !n.indexInBounds(i) { + panic(ErrIndexOutOfBounds) + } + off := n.offset(i) + kLen := binary.LittleEndian.Uint16(n[off:]) + vLen := binary.LittleEndian.Uint16(n[off+2:]) + return n[off+4+kLen : off+4+kLen+vLen] +} + +func (n *node) Pointer(i uint16) uint64 { + if !n.indexInBounds(i) { + panic(ErrIndexOutOfBounds) + } + off := n.offset(i) + kLen := binary.LittleEndian.Uint16(n[off:]) + return binary.LittleEndian.Uint64(n[off+2+kLen : off+2+kLen+8]) +} + +// Binary searches the target key inside n and returns its position and weither it exists. 
+func (n *node) Search(target []byte) (uint16, bool) { + l := n.N() + left, right := uint16(0), l + + for left < right { + cur := uint16(uint(left+right) >> 1) // #nosec G115 // right shift stops overflow + if cmp := bytes.Compare(n.Key(cur), target); cmp == -1 { + left = cur + 1 + } else if cmp == 1 { + right = cur + } else { + return cur, true + } + } + return left, left < l && bytes.Equal(n.Key(left), target) +} + +// Returns a copy of n with k-v set at position i. If the key at i is equal to k it will be +// overwritten an n.N will stay the same otherwise k-v will be inserted as a new cell and n.N will +// be increased by 1. +// +// Overwriting a k-v pair does not overwrite the data stored in the original cell, it mereley +// overwrites the reference to it. To free up the space taken up by unreferenced cells use Vacuum. +// +// WARNING: No additional check is performed weither i is the correct position for k. Meaning it is +// the callers responsibility to ensure that k belongs at position i to ensure the order of the keys +// will not break. Always use Search and CanSet before using Set to ensure that n has enough space +// for the k-v pair and that the value of i is correct. +func (n *node) Set(i uint16, k, v []byte) node { + l := n.N() + cell := makeCell(k, v) + + wCur := n.wCursor() + off := wCur - uint16(len(cell)) + + var res node + copy(res[:], n[:]) + copy(res[off:wCur], cell) + + res.setWCursor(off) + + if bytes.Equal(k, n.Key(i)) { + res.setOffset(i, off) + return res + } + + trailingOffs := n[offPos(i) : offPos(l)+2] + copy(res[offPos(i+1):], trailingOffs) + res.setOffset(i, off) + res.setN(l + 1) + + return res +} + +// Returns true if n has enough space left in its void to add the given k-v pair. CanSet always +// assumes that k does not exist. +func (n *node) CanSet(k, v []byte) bool { + return n.voidSize() >= 6+len(k)+len(v) +} + +// Returns a copy of n with the k-v pair at index i deleted. 
+// +// Delete mereley deletes the reference to the cell and does not free up the cells space. To free up +// the space taken up by unreferenced cells use Vacuum. +func (n *node) Delete(i uint16) node { + if !n.indexInBounds(i) { + panic(ErrIndexOutOfBounds) + } + + l := n.N() + + var res node + copy(res[:], n[:]) + + trailingOffs := n[offPos(i) : offPos(l)+2] + copy(res[offPos(i):], trailingOffs[2:]) + res.setOffset(l, 0) + res.setN(l - 1) + + return res +} + +// Splits n into two separate nodes. +func (n *node) Split() (left node, right node) { + var addToRight bool + var i uint16 + var wc uint16 = PageSize + + left, right = newNode(n.Type()), newNode(n.Type()) + + thresh := (PageSize - wc) / 2 + + addToNode := func(addTo *node, i uint16, cell []byte, wCursor *uint16) { + *wCursor -= uint16(len(cell)) + addTo.setOffset(i, *wCursor) + copy(addTo[*wCursor:], cell) + addTo.setWCursor(*wCursor) + } + + for k, v := range n.All() { + cell := makeCell(k, v) + + if addToRight { + addToNode(&right, i, cell, &wc) + i++ + continue + } + + addToNode(&left, i, cell, &wc) + i++ + + if wc < PageSize-thresh { + addToRight = true + } + } + return left, right +} + +// Returns a resorted and reduced copy of n by freeing up space used by unreferenced cells. +func (n *node) Vacuum() node { + var vacuumed node + vacuumed[0] = byte(n.Type()) + vacuumed.setN(n.N()) + + var wc uint16 = PageSize + var i uint16 + for k, v := range n.All() { + cell := makeCell(k, v) + wc -= uint16(len(cell)) + vacuumed.setOffset(i, wc) + copy(vacuumed[wc:], cell) + + i++ + } + vacuumed.setWCursor(wc) + + return vacuumed +} + +// An iterator over all key-value pairs of n. +func (n *node) All() iter.Seq2[[]byte, []byte] { + return n.AllFrom(0) +} + +// An iterator over all key-value pairs of n starting from position i. 
+func (n *node) AllFrom(i uint16) iter.Seq2[[]byte, []byte] { + return func(yield func([]byte, []byte) bool) { + for ; i < n.N(); i++ { + k, v := n.Key(i), n.Val(i) + if !yield(k, v) { + return + } + } + } +} + +func (n *node) setN(nc uint16) { + binary.LittleEndian.PutUint16(n[nOff:], nc) +} + +func (n *node) offset(i uint16) uint16 { + if n.indexInBounds(i) { + panic(ErrIndexOutOfBounds) + } + return binary.LittleEndian.Uint16(n[offPos(i):]) +} + +func (n *node) setOffset(i, off uint16) { + if n.indexInBounds(i) { + panic(ErrIndexOutOfBounds) + } + binary.LittleEndian.PutUint16(n[offPos(i):], off) +} + +func (n *node) indexInBounds(i uint16) bool { + return i >= n.N() +} + +func (n *node) wCursor() uint16 { + return binary.LittleEndian.Uint16(n[wCurOff:]) +} + +func (n *node) setWCursor(wc uint16) { + binary.LittleEndian.PutUint16(n[wCurOff:], wc) +} + +func (n *node) voidSize() int { + return int(n.wCursor()) - int(offPos(n.N())+2) +} + +// Returns the calculated position to the offset inside the offset list. This does NOT return the +// offset itself only the reference to the offset. 
+func offPos(i uint16) uint16 { return dataOff + 2*i } + +func makeCell(k, v []byte) []byte { + cell := make([]byte, 4+len(k)+len(v)) + binary.LittleEndian.PutUint16(cell[0:], uint16(len(k))) + binary.LittleEndian.PutUint16(cell[2:], uint16(len(v))) + copy(cell[4:], k) + copy(cell[4+len(k):], v) + return cell +} diff --git a/internal/tree/node_test.go b/internal/tree/node_test.go new file mode 100644 index 0000000..a3c64f3 --- /dev/null +++ b/internal/tree/node_test.go @@ -0,0 +1,37 @@ +package tree + +import ( + "testing" +) + +func TestKey(t *testing.T) { + t.Error("not tested") +} + +func TestVal(t *testing.T) { + t.Error("not tested") +} + +func TestSearch(t *testing.T) { + t.Error("not tested") +} + +func TestSet(t *testing.T) { + t.Error("not tested") +} + +func TestCanSet(t *testing.T) { + t.Error("not tested") +} + +func TestDelete(t *testing.T) { + t.Error("not tested") +} + +func TestSplit(t *testing.T) { + t.Error("not tested") +} + +func TestVacuum(t *testing.T) { + t.Error("not tested") +} diff --git a/internal/tree/tree.go b/internal/tree/tree.go new file mode 100755 index 0000000..6adc9fd --- /dev/null +++ b/internal/tree/tree.go @@ -0,0 +1,58 @@ +package tree + +import ( + "errors" + "fmt" +) + +type pager interface { + ReadPage(off uint64) ([PageSize]byte, error) + Commit() error + Rollback() error +} + +type Tree struct { + root uint64 + pager pager +} + +func New() *Tree { + return &Tree{} +} + +func (t *Tree) Get(k []byte) ([]byte, error) { + page, err := t.pager.ReadPage(t.root) + if err != nil { + return nil, err + } + cur := node(page) + + for { + i, exists := cur.Search(k) + + switch cur.Type() { + case PointerPage: + ptr := cur.Pointer(i) + page, err = t.pager.ReadPage(ptr) + if err != nil { + return nil, fmt.Errorf("tree: failed to read page: %w", err) + } + cur = node(page) + case LeafPage: + if !exists { + return nil, errors.New("key does not exists on leaf node") + } + return cur.Val(i), nil + default: + return nil, errors.New("invalid 
page type") + } + } +} + +func (t *Tree) Set(k []byte, v []byte) error { + return nil +} + +func (t *Tree) Delete(k []byte) error { + return nil +} diff --git a/pkg/ast/ast.go b/pkg/ast/ast.go new file mode 100644 index 0000000..4ef2180 --- /dev/null +++ b/pkg/ast/ast.go @@ -0,0 +1,15 @@ +package ast + +type Stmnt interface{} + +// TODO: Define AST types. + +type SelectStmt struct{} + +type DeleteStmt struct{} + +type CreateStmt struct{} + +type UpdateStmt struct{} + +type InsertStmt struct{} diff --git a/pkg/fmt/fmt.go b/pkg/fmt/fmt.go new file mode 100644 index 0000000..a95fed3 --- /dev/null +++ b/pkg/fmt/fmt.go @@ -0,0 +1,7 @@ +package fmt + +import "io" + +func Format(r io.Reader) ([]byte, error) { + return nil, nil +} diff --git a/pkg/fmt/fmt_test.go b/pkg/fmt/fmt_test.go new file mode 100644 index 0000000..832e9ce --- /dev/null +++ b/pkg/fmt/fmt_test.go @@ -0,0 +1,38 @@ +package fmt_test + +import ( + "io" + "testing" + + "github.com/gkits/pavosql/pkg/fmt" +) + +func TestFormat(t *testing.T) { + tests := []struct { + name string // description of this test case + // Named input parameters for target function. + r io.Reader + want []byte + wantErr bool + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, gotErr := fmt.Format(tt.r) + if gotErr != nil { + if !tt.wantErr { + t.Errorf("Format() failed: %v", gotErr) + } + return + } + if tt.wantErr { + t.Fatal("Format() succeeded unexpectedly") + } + // TODO: update the condition below to compare got with tt.want. 
+ if true { + t.Errorf("Format() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/parse/parse.go b/pkg/parse/parse.go new file mode 100644 index 0000000..fcbe0af --- /dev/null +++ b/pkg/parse/parse.go @@ -0,0 +1,68 @@ +package parse + +import ( + "io" + + "github.com/gkits/pavosql/pkg/ast" +) + +func Parse(r io.Reader) ([]ast.Stmnt, error) { + var ( + stmt ast.Stmnt + err error + ) + + stmts := []ast.Stmnt{} + toks := readTokens(r) + for tok := range toks { + switch tok.Type { + case Select: + stmt, err = parseSelectStmt(toks) + if err != nil { + return nil, err + } + case Delete: + case Create: + case Update: + case Insert: + default: + } + stmts = append(stmts, stmt) + } + + return stmts, nil +} + +func parseSelectStmt(toks <-chan Token) (ast.SelectStmt, error) { + tok := <-toks + + switch tok.Type { + case Asterisk: + case Ident: + } + return ast.SelectStmt{}, nil +} + +func parseDeleteStmt() {} + +func parseCreateStmt() {} + +func parseUpdateStmt() {} + +func parseInsertStmt() {} + +func parseFieldSelectList(toks <-chan Token) { + for tok := range toks { + _ = tok + } +} + +func readTokens(r io.Reader) <-chan Token { + toks := make(chan Token) + go func() { + for _, tok := range tokenize(r) { + toks <- tok + } + }() + return toks +} diff --git a/pkg/parse/parse_test.go b/pkg/parse/parse_test.go new file mode 100644 index 0000000..7c6b574 --- /dev/null +++ b/pkg/parse/parse_test.go @@ -0,0 +1,39 @@ +package parse_test + +import ( + "io" + "testing" + + "github.com/gkits/pavosql/pkg/ast" + "github.com/gkits/pavosql/pkg/parse" +) + +func TestParse(t *testing.T) { + tests := []struct { + name string // description of this test case + // Named input parameters for target function. + r io.Reader + want []ast.Stmnt + wantErr bool + }{ + // TODO: Add test cases. 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, gotErr := parse.Parse(tt.r) + if gotErr != nil { + if !tt.wantErr { + t.Errorf("Parse() failed: %v", gotErr) + } + return + } + if tt.wantErr { + t.Fatal("Parse() succeeded unexpectedly") + } + // TODO: update the condition below to compare got with tt.want. + if true { + t.Errorf("Parse() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/parse/tokenize.go b/pkg/parse/tokenize.go new file mode 100644 index 0000000..3d136df --- /dev/null +++ b/pkg/parse/tokenize.go @@ -0,0 +1,134 @@ +package parse + +import ( + "io" + "iter" + "strings" + "text/scanner" +) + +type TokenType int + +const ( + LexError TokenType = iota - 1 + String + RawString + Int + Float + Ident + + SpecialChar // This is a separator => if Token.Type > SpecialChar && Token.Type < Keyword {...} + Semicolon + Period + Equal + LParen + RParen + LBracket + RBracket + LBrace + RBrace + Plus + Hyphen + Asterisk + Greater + Less + + Keyword // This is a separator => if Token.Type > Keyword {...} + Select + Delete + Create + Update + Insert + From + Into + Table + Set + Values + Where + If + Exists + Not + And + Or +) + +var keywords map[string]TokenType = map[string]TokenType{ + "select": Select, + "delete": Delete, + "create": Create, + "update": Update, + "insert": Insert, + "from": From, + "into": Into, + "table": Table, + "set": Set, + "values": Values, + "where": Where, + "if": If, + "exists": Exists, + "not": Not, + "and": And, + "or": Or, +} + +var specialChars map[string]TokenType = map[string]TokenType{ + ";": Semicolon, + ".": Period, + "=": Equal, + "(": LParen, + ")": RParen, + "[": LBracket, + "]": RBracket, + "{": LBrace, + "}": RBrace, +} + +type Token struct { + Val string + Type TokenType + Line, Column int +} + +func tokenize(r io.Reader) iter.Seq2[int, Token] { + scan := new(scanner.Scanner) + scan.Init(r) + + return func(yield func(int, Token) bool) { + var i int + for r := scan.Scan(); r != 
scanner.EOF; r = scan.Scan() { + tok := Token{ + Val: scan.TokenText(), + Line: scan.Pos().Line, + Column: scan.Pos().Column - len(scan.TokenText()), + } + + switch r { + case scanner.Int: + tok.Type = Int + case scanner.Float: + tok.Type = Float + case scanner.String, scanner.Char: + tok.Type = String + case scanner.RawString: + tok.Type = RawString + case scanner.Ident: + if keyword, ok := keywords[strings.ToLower(tok.Val)]; ok { + tok.Type = keyword + break + } + tok.Type = Ident + default: + if specCh, ok := specialChars[tok.Val]; ok { + tok.Type = specCh + break + } + tok.Type = LexError + } + + if !yield(i, tok) { + return + } + i++ + } + } +} diff --git a/pkg/parse/tokenize_test.go b/pkg/parse/tokenize_test.go new file mode 100644 index 0000000..803f901 --- /dev/null +++ b/pkg/parse/tokenize_test.go @@ -0,0 +1,76 @@ +package parse + +import ( + "strings" + "testing" +) + +func Test_tokenize(t *testing.T) { + cases := []struct { + name string + in string + want []Token + }{ + { + name: "tokenize with keywords and special chars #1", + in: `SeLECt name frOM users wHERE name == "john doe"`, + want: []Token{ + {"SeLECt", Select, 1, 1}, + {"name", Ident, 1, 8}, + {"frOM", From, 1, 13}, + {"users", Ident, 1, 18}, + {"wHERE", Where, 1, 24}, + {"name", Ident, 1, 30}, + {"=", Equal, 1, 35}, + {"=", Equal, 1, 36}, + {"\"john doe\"", String, 1, 38}, + }, + }, + { + name: "tokenize with keywords and special chars #2", + in: ` creaTe TABlE iF exists users ( )`, + want: []Token{ + {"creaTe", Create, 1, 3}, + {"TABlE", Table, 1, 10}, + {"iF", If, 1, 16}, + {"exists", Exists, 1, 19}, + {"users", Ident, 1, 26}, + {"(", LParen, 1, 32}, + {")", RParen, 1, 34}, + }, + }, + { + name: "tokenize with comments #1", + in: `"hello" // this is an inline comment + /* This + is a multiline comment + */ + . 
= [] {} 'xxxx' + `, + want: []Token{ + {"\"hello\"", String, 1, 1}, + {".", Period, 5, 17}, + {"=", Equal, 5, 19}, + {"[", LBracket, 5, 21}, + {"]", RBracket, 5, 22}, + {"{", LBrace, 5, 24}, + {"}", RBrace, 5, 25}, + {"'xxxx'", String, 5, 27}, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + for i, got := range tokenize(strings.NewReader(c.in)) { + if i >= len(c.want) { + t.Fatalf("want %d tokens, got at least %d", len(c.want), i+1) + } + if got != c.want[i] { + t.Fatalf("want token %v, got %v", c.want[i], got) + } + i++ + } + }) + } +} diff --git a/pkg/vcache/vcache.go b/pkg/vcache/vcache.go deleted file mode 100644 index f86ea5a..0000000 --- a/pkg/vcache/vcache.go +++ /dev/null @@ -1,36 +0,0 @@ -package vcache - -type VCache[I comparable, T any] struct { - cache map[uint64]map[I]T - ver uint64 -} - -func New[I comparable, T any](ver uint64) VCache[I, T] { - return VCache[I, T]{ - cache: map[uint64]map[I]T{}, - ver: ver, - } -} - -func (vc *VCache[I, T]) Get(id I, ver uint64) (T, bool) { - for ; ver <= vc.ver; ver++ { - d, ok := vc.cache[ver][id] - if ok { - return d, true - } - } - return *new(T), false -} - -func (vc *VCache[I, T]) Cache(i I, d T) { - vc.cache[vc.ver][i] = d -} - -func (vc *VCache[I, T]) NewVersion() uint64 { - vc.ver++ - return vc.ver -} - -func (vc *VCache[I, T]) DeleteVersion(ver uint64) { - delete(vc.cache, ver) -} diff --git a/scripts/pavod.service b/scripts/pavod.service deleted file mode 100644 index 9a455fe..0000000 --- a/scripts/pavod.service +++ /dev/null @@ -1,14 +0,0 @@ -[Unit] -Description=PavoSQL daemon -After=network-online.target - -[Service] -Type=simple -Restart=always -RestartSec=5 -Environment=PAVO_PORT=1758 -Environment=PAVO_DIR= -ExecStart=pavosql server run - -[Install] -WantedBy=multi-user.target