diff options
137 files changed, 11758 insertions, 698 deletions
diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..5e44971 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: AGPL-3.0-only +# SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +root = true + +[*] +end_of_line = lf +insert_final_newline = true +indent_style = tab +indent_size = 8 +tab_size = 8 + +[*.py] +indent_style = space +indent_size = 4 + +[*.yaml] +indent_style = space +indent_size = 2 diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..5418c8b --- /dev/null +++ b/.gitattributes @@ -0,0 +1,6 @@ +*.tmpl linguist-language=HTML +* linguist-detectable +go.mod -linguist-detectable +go.sum -linguist-detectable +.golangci.yaml -linguist-detectable +.build.yml -linguist-detectable @@ -1,3 +1 @@ -/forge -/.templates.ha -/.version.ha +/dist @@ -1,10 +1,31 @@ -forge: .version.ha .templates.ha *.ha - hare build $(HAREFLAGS) -o $@ . +# SPDX-License-Identifier: AGPL-3.0-only +# SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +# +# TODO: This Makefile utilizes a lot of GNU extensions. Some of them are +# unfortunately difficult to avoid as POSIX Make's pattern rules are not +# sufficiently expressive. This needs to be fixed sometime (or we might move to +# some other build system). 
+# -.templates.ha: templates/*.htmpl - htmplgen -o $@ $^ +.PHONY: clean all -.version.ha: - printf 'def VERSION="%s";\n' $(shell git describe --tags --always --dirty) > $@ +CFLAGS = -Wall -Wextra -pedantic -std=c99 -D_GNU_SOURCE + +all: dist/forged dist/git2d dist/hookc + +dist/forged: $(shell git ls-files forged) + mkdir -p dist + sqlc -f forged/sqlc.yaml generate + CGO_ENABLED=0 go build -o dist/forged -ldflags '-extldflags "-f no-PIC -static"' -tags 'osusergo netgo static_build' ./forged + +dist/git2d: $(wildcard git2d/*.c) + mkdir -p dist + $(CC) $(CFLAGS) -o dist/git2d $^ $(shell pkg-config --cflags --libs libgit2) -lpthread + +dist/hookc: $(wildcard hookc/*.c) + mkdir -p dist + $(CC) $(CFLAGS) -o dist/hookc $^ + +clean: + rm -rf dist -.PHONY: version.ha diff --git a/NOTES.md b/NOTES.md deleted file mode 100644 index 98536f6..0000000 --- a/NOTES.md +++ /dev/null @@ -1,315 +0,0 @@ -# Lindenii Forge Development Notes - -You will need the following dependencies: - -- [hare](https://git.sr.ht/~sircmpwn/hare) -- [hare-http](https://git.sr.ht/~sircmpwn/hare-http) with - [various patches](https://lists.sr.ht/~sircmpwn/hare-dev/patches?search=from%3Arunxiyu+prefix%3Ahare-http) -- [hare-htmpl](https://forge.runxiyu.org/hare/:/repos/hare-htmpl/) - ([backup](https://git.sr.ht/~runxiyu/hare-htmpl)) - - -Also, you'll need various horrible patches for `net::uri` before that gets fixed: - -``` -diff --git a/net/uri/+test.ha b/net/uri/+test.ha -index 345f41ee..63272d52 100644 ---- a/net/uri/+test.ha -+++ b/net/uri/+test.ha -@@ -10,7 +10,7 @@ use net::ip; - uri { - scheme = "file", - host = "", -- path = "/my/path/to/file", -+ raw_path = "/my/path/to/file", - ... - }, - )!; -@@ -19,7 +19,7 @@ use net::ip; - uri { - scheme = "http", - host = "harelang.org", -- path = "/", -+ raw_path = "/", - ... - }, - )!; -@@ -38,7 +38,7 @@ use net::ip; - scheme = "ldap", - host = [13, 37, 73, 31]: ip::addr4, - port = 1234, -- path = "/", -+ raw_path = "/", - ... 
- }, - )!; -@@ -47,7 +47,7 @@ use net::ip; - uri { - scheme = "http", - host = ip::parse("::1")!, -- path = "/test", -+ raw_path = "/test", - ... - }, - )!; -@@ -58,7 +58,7 @@ use net::ip; - uri { - scheme = "urn", - host = "", -- path = "example:animal:ferret:nose", -+ raw_path = "example:animal:ferret:nose", - ... - }, - )!; -@@ -67,7 +67,7 @@ use net::ip; - uri { - scheme = "mailto", - host = "", -- path = "~sircmpwn/hare-dev@lists.sr.ht", -+ raw_path = "~sircmpwn/hare-dev@lists.sr.ht", - ... - }, - )!; -@@ -76,7 +76,7 @@ use net::ip; - uri { - scheme = "http", - host = "", -- path = "/foo/bar", -+ raw_path = "/foo/bar", - ... - }, - )!; -@@ -85,7 +85,7 @@ use net::ip; - uri { - scheme = "http", - host = "", -- path = "/", -+ raw_path = "/", - ... - }, - )!; -@@ -94,7 +94,7 @@ use net::ip; - uri { - scheme = "https", - host = "sr.ht", -- path = "/projects", -+ raw_path = "/projects", - query = "search=%23risc-v&sort=longest-active", - fragment = "foo", - ... -@@ -105,7 +105,7 @@ use net::ip; - uri { - scheme = "https", - host = "en.wiktionary.org", -- path = "/wiki/おはよう", -+ raw_path = "/wiki/%E3%81%8A%E3%81%AF%E3%82%88%E3%81%86", - fragment = "Japanese", - ... - } -@@ -135,11 +135,11 @@ use net::ip; - - @test fn percent_encoding() void = { - test_uri( -- "https://git%2esr.ht/~sircmpw%6e/hare#Build%20status", -+ "https://git.sr.ht/~sircmpwn/hare#Build%20status", - uri { - scheme = "https", - host = "git.sr.ht", -- path = "/~sircmpwn/hare", -+ raw_path = "/~sircmpwn/hare", - fragment = "Build status", - ... - }, -@@ -152,7 +152,7 @@ use net::ip; - uri { - scheme = "ldap", - host = ip::parse("2001:db8::7")!, -- path = "/c=GB", -+ raw_path = "/c=GB", - query = "objectClass?one", - ... 
- }, -@@ -161,11 +161,11 @@ use net::ip; - - // https://bugs.chromium.org/p/chromium/issues/detail?id=841105 - test_uri( -- "https://web-safety.net/..;@www.google.com:%3443", -+ "https://web-safety.net/..;@www.google.com:443", - uri { - scheme = "https", - host = "web-safety.net", -- path = "/..;@www.google.com:443", -+ raw_path = "/..;@www.google.com:443", - ... - }, - "https://web-safety.net/..;@www.google.com:443", -@@ -180,6 +180,7 @@ fn test_uri(in: str, expected_uri: uri, expected_str: str) (void | invalid) = { - const u = parse(in)?; - defer finish(&u); - -+ - assert_str(u.scheme, expected_uri.scheme); - match (u.host) { - case let s: str => -@@ -189,7 +190,7 @@ fn test_uri(in: str, expected_uri: uri, expected_str: str) (void | invalid) = { - }; - assert(u.port == expected_uri.port); - assert_str(u.userinfo, expected_uri.userinfo); -- assert_str(u.path, expected_uri.path); -+ assert_str(u.raw_path, expected_uri.raw_path); - assert_str(u.query, expected_uri.query); - assert_str(u.fragment, expected_uri.fragment); - -diff --git a/net/uri/fmt.ha b/net/uri/fmt.ha -index 48a43f24..07cb3f7b 100644 ---- a/net/uri/fmt.ha -+++ b/net/uri/fmt.ha -@@ -20,9 +20,9 @@ use strings; - // query = *( pchar / "/" / "?" ) - // fragment = *( pchar / "/" / "?" ) - --def unres_host: str = "-._~!$&'()*+,;="; --def unres_query_frag: str = "-._~!$&'()*+,;=:@/?"; --def unres_path: str = "-._~!$&'()*+,;=:@/"; -+export def unres_host: str = "-._~!$&'()*+,;="; -+export def unres_query_frag: str = "-._~!$&'()*+,;=:@/?"; -+export def unres_path: str = "-._~!$&'()*+,;=:@/"; - - // Writes a formatted [[uri]] to an [[io::handle]]. Returns the number of bytes - // written. 
-@@ -63,10 +63,10 @@ export fn fmt(out: io::handle, u: *const uri) (size | io::error) = { - if (u.port != 0) { - n += fmt::fprintf(out, ":{}", u.port)?; - }; -- if (has_host && len(u.path) > 0 && !strings::hasprefix(u.path, '/')) { -+ if (has_host && len(u.raw_path) > 0 && !strings::hasprefix(u.raw_path, '/')) { - n += fmt::fprint(out, "/")?; - }; -- n += percent_encode(out, u.path, unres_path)?; -+ n += memio::concat(out, u.raw_path)?; - if (len(u.query) > 0) { - // Always percent-encoded, see parse and encodequery/decodequery - n += fmt::fprintf(out, "?{}", u.query)?; -@@ -92,7 +92,7 @@ fn fmtaddr(out: io::handle, addr: ip::addr) (size | io::error) = { - return n; - }; - --fn percent_encode(out: io::handle, src: str, allowed: str) (size | io::error) = { -+export fn percent_encode(out: io::handle, src: str, allowed: str) (size | io::error) = { - let iter = strings::iter(src); - let n = 0z; - for (let r => strings::next(&iter)) { -diff --git a/net/uri/parse.ha b/net/uri/parse.ha -index f2522c01..e108bd75 100644 ---- a/net/uri/parse.ha -+++ b/net/uri/parse.ha -@@ -22,10 +22,10 @@ export fn parse(in: str) (uri | invalid) = { - defer if (!success) free(scheme); - - // Determine hier-part variant -- let path = ""; -+ let raw_path = ""; - let authority: ((str | ip::addr6), u16, str) = ("", 0u16, ""); - defer if (!success) { -- free(path); -+ free(raw_path); - free_host(authority.0); - free(authority.2); - }; -@@ -50,7 +50,7 @@ export fn parse(in: str) (uri | invalid) = { - case '/' => - // path-absolute - strings::prev(&in); -- path = parse_path(&in, -+ raw_path = parse_path(&in, - path_mode::ABSOLUTE)?; - case => - return invalid; -@@ -61,17 +61,17 @@ export fn parse(in: str) (uri | invalid) = { - // path-absolute - strings::prev(&in); // return current token - strings::prev(&in); // return leading slash -- path = parse_path(&in, path_mode::ABSOLUTE)?; -+ raw_path = parse_path(&in, path_mode::ABSOLUTE)?; - }; - case => - // path-absolute (just '/') - 
strings::prev(&in); // return leading slash -- path = parse_path(&in, path_mode::ABSOLUTE)?; -+ raw_path = parse_path(&in, path_mode::ABSOLUTE)?; - }; - case => - // path-rootless - strings::prev(&in); -- path = parse_path(&in, path_mode::ROOTLESS)?; -+ raw_path = parse_path(&in, path_mode::ROOTLESS)?; - }; - case => void; // path-empty - }; -@@ -118,7 +118,7 @@ export fn parse(in: str) (uri | invalid) = { - port = authority.1, - userinfo = authority.2, - -- path = path, -+ raw_path = raw_path, - query = query, - fragment = fragment, - }; -@@ -274,7 +274,7 @@ fn parse_path(in: *strings::iterator, mode: path_mode) (str | invalid) = { - }; - }; - -- return percent_decode(strings::slice(©, in)); -+ return strings::dup(strings::slice(©, in))!; - }; - - fn parse_query(in: *strings::iterator) (str | invalid) = { -@@ -323,13 +323,14 @@ fn parse_port(in: *strings::iterator) (u16 | invalid) = { - }; - }; - --fn percent_decode(s: str) (str | invalid) = { -+// must be freed by caller -+export fn percent_decode(s: str) (str | invalid) = { - let buf = memio::dynamic(); - percent_decode_static(&buf, s)?; - return memio::string(&buf)!; - }; - --fn percent_decode_static(out: io::handle, s: str) (void | invalid) = { -+export fn percent_decode_static(out: io::handle, s: str) (void | invalid) = { - let iter = strings::iter(s); - let tmp = memio::dynamic(); - defer io::close(&tmp)!; -diff --git a/net/uri/uri.ha b/net/uri/uri.ha -index 623ffafb..3b7b7c4c 100644 ---- a/net/uri/uri.ha -+++ b/net/uri/uri.ha -@@ -12,7 +12,7 @@ export type uri = struct { - port: u16, - userinfo: str, - -- path: str, -+ raw_path: str, - query: str, - fragment: str, - }; -@@ -31,7 +31,7 @@ export fn dup(u: *uri) uri = { - port = u.port, - userinfo = strings::dup(u.userinfo)!, - -- path = strings::dup(u.path)!, -+ raw_path = strings::dup(u.raw_path)!, - query = strings::dup(u.query)!, - fragment = strings::dup(u.fragment)!, - }; -@@ -46,7 +46,7 @@ export fn finish(u: *uri) void = { - case => void; - }; - 
free(u.userinfo); -- free(u.path); -+ free(u.raw_path); - free(u.query); - free(u.fragment); - }; -``` @@ -2,13 +2,98 @@ **Work in progress.** -This is the new implementation in the [Hare](https://harelang.org) programming -language. +Lindenii Forge aims to be an uncomplicated yet featured software forge, +primarily designed for self-hosting by small organizations and individuals. + +* [Upstream source repository](https://forge.lindenii.runxiyu.org/forge/-/repos/server/) + ([backup](https://git.lindenii.runxiyu.org/forge.git/)) +* [Website and documentation](https://lindenii.runxiyu.org/forge/) +* [Temporary issue tracker](https://todo.sr.ht/~runxiyu/forge) +* IRC [`#lindenii`](https://webirc.runxiyu.org/kiwiirc/#lindenii) + on [irc.runxiyu.org](https://irc.runxiyu.org)\ + and [`#lindenii`](https://web.libera.chat/#lindenii) + on [Libera.Chat](https://libera.chat) + + +## Implemented features + +* Umambiguously parsable URL +* Groups and subgroups +* Repo hosting +* Push to `contrib/` branches to automatically create merge requests +* Basic federated authentication +* Converting mailed patches to branches + +## Planned features + +* Further integration with mailing list workflows +* Further federated authentication +* Ticket trackers, discussions, RFCs + * Web interface + * Email integration with IMAP archives +* SSH API +* Email access +* CI system similar to builds.sr.ht + +## License + +We are currently using the +[GNU Affero General Public License version 3](https://www.gnu.org/licenses/agpl-3.0.html). + +The forge software serves its own source at `/-/source/`. + +## Contribute + +Please submit patches by pushing to `contrib/...` in the official repo. + +Alternatively, send email to +[`forge/-/repos/server@forge.lindenii.runxiyu.org`](mailto:forge%2F-%2Frepos%2Fserver@forge.lindenii.runxiyu.org). +Note that emailing patches is still experimental. 
+ +## Mirrors + +We have several repo mirrors: + +* [Official repo on our own instance of Lindenii Forge](https://forge.lindenii.org/forge/-/repos/server/) +* [The Lindenii Project's backup cgit](https://git.lindenii.org/forge.git/) +* [SourceHut](https://git.sr.ht/~runxiyu/forge/) +* [GitHub](https://github.com/runxiyu/forge/) ## Architecture -* Most components are one single daemon written in Hare. -* Because libssh is difficult to use and there aren't many other SSH server - libraries for C or Hare, we will temporarily use - [the gliberlabs SSH library for Go](https://github.com/gliderlabs/ssh) - in a separate process, and communicate via UNIX domain sockets. +We have a mostly monolithic server `forged` written in Go. PostgreSQL is used +to store everything other than Git repositories. + +Git repositories currently must be accessible via the local filesystem from +the machine running `forged`, since `forged` currently uses `go-git`, `git2d` +via UNIX domain sockets, and `git-upload-pack`/`git-receive-pack` subprocesses. +In the future, `git2d` will be expanded to support all operations, removing +our dependence on `git-upload-pack`/`git-receive-pack` and `go-git`; `git2d` +will also be extended to support remote IPC via a custom RPC protocol, +likely based on SCTP (with TLS via RFC 3436). + +## `git2d` + +`git2d` is a Git server daemon written in C, which uses `libgit2` to handle Git +operations. 
+ +```c +int cmd_index(git_repository * repo, struct bare_writer *writer); +int cmd_treeraw(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_resolve_ref(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_list_branches(git_repository * repo, struct bare_writer *writer); +int cmd_format_patch(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_merge_base(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_log(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_tree_list_by_oid(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_write_tree(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_blob_write(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_commit_tree_oid(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_commit_create(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_update_ref(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_commit_info(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_init_repo(const char *path, struct bare_reader *reader, struct bare_writer *writer); +``` + +We are planning to rewrite `git2d` in Hare, using +[`hare-git`](https://forge.lindenii.org/hare/-/repos/hare-git/) when it's ready. diff --git a/forge.example.scfg b/forge.example.scfg new file mode 100644 index 0000000..9ef39a6 --- /dev/null +++ b/forge.example.scfg @@ -0,0 +1,110 @@ +web { + # What network transport should we listen on? + # Examples: tcp tcp4 tcp6 unix + net tcp + + # What address to listen on? 
+ # Examples for net tcp*: 127.0.0.1:8080 :80 + # Example for unix: /var/run/lindenii/forge/http.sock + addr :8080 + + # How many seconds should cookies be remembered before they are purged? + cookie_expiry 604800 + + # What is the canonical URL of the web root? + root https://forge.example.org + + # General HTTP server context timeout settings. It's recommended to + # set them slightly higher than usual as Git operations over large + # repos may take a long time. + read_timeout 120 + write_timeout 1800 + idle_timeout 120 + max_header_bytes 20000 + + # Are we running behind a reverse proxy? If so, we will trust + # X-Forwarded-For headers. + reverse_proxy true + + templates_path /usr/share/lindenii/forge/templates + static_path /usr/share/lindenii/forge/static + + shutdown_timeout 10 +} + +irc { + tls true + net tcp + addr irc.runxiyu.org:6697 + sendq 6000 + nick forge-test + user forge + gecos "Lindenii Forge Test" +} + +git { + # Where should newly-created Git repositories be stored? + repo_dir /var/lib/lindenii/forge/repos + + # Where is git2d listening on? + socket /var/run/lindenii/forge/git2d.sock +} + +ssh { + # What network transport should we listen on? + # This should be "tcp" in almost all cases. + net tcp + + # What address to listen on? + addr :22 + + # What is the path to the SSH host key? Generate it with ssh-keygen. + # The key must have an empty password. + key /etc/lindenii/ssh_host_ed25519_key + + # What is the canonical SSH URL? + root ssh://forge.example.org + + shutdown_timeout 10 +} + +general { + title "Test Forge" +} + +db { + # What is the connection string? + conn postgresql:///lindenii-forge?host=/var/run/postgresql +} + +hooks { + # On which UNIX domain socket should we listen for hook callbacks on? + socket /var/run/lindenii/forge/hooks.sock + + # Where should hook executables be put? + execs /usr/libexec/lindenii/forge/hooks +} + +lmtp { + # On which UNIX domain socket should we listen for LMTP on? 
+ socket /var/run/lindenii/forge/lmtp.sock + + # What's the maximum acceptable message size? + max_size 1000000 + + # What is our domainpart? + domain forge.example.org + + # General timeouts + read_timeout 300 + write_timeout 300 + shutdown_timeout 10 +} + +pprof { + # What network to listen on for pprof? + net tcp + + # What address to listen on? + addr localhost:28471 +} diff --git a/forged/.golangci.yaml b/forged/.golangci.yaml new file mode 100644 index 0000000..499136b --- /dev/null +++ b/forged/.golangci.yaml @@ -0,0 +1,32 @@ +version: "2" + +linters: + default: all + disable: + - depguard + - wsl_v5 # tmp + - wsl # tmp + - unused # tmp + - nonamedreturns + - err113 # tmp + - gochecknoinits # tmp + - nlreturn # tmp + - cyclop # tmp + - gocognit # tmp + - varnamelen # tmp + - funlen # tmp + - lll + - mnd # tmp + - revive # tmp + - godox # tmp + - nestif # tmp + +linters-settings: + revive: + rules: + - name: error-strings + disabled: true + +issues: + max-issues-per-linter: 0 + max-same-issues: 0 diff --git a/forged/internal/common/ansiec/colors.go b/forged/internal/common/ansiec/colors.go new file mode 100644 index 0000000..8be2a0c --- /dev/null +++ b/forged/internal/common/ansiec/colors.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package ansiec + +// ANSI color codes +const ( + Black = "\x1b[30m" + Red = "\x1b[31m" + Green = "\x1b[32m" + Yellow = "\x1b[33m" + Blue = "\x1b[34m" + Magenta = "\x1b[35m" + Cyan = "\x1b[36m" + White = "\x1b[37m" + BrightBlack = "\x1b[30;1m" + BrightRed = "\x1b[31;1m" + BrightGreen = "\x1b[32;1m" + BrightYellow = "\x1b[33;1m" + BrightBlue = "\x1b[34;1m" + BrightMagenta = "\x1b[35;1m" + BrightCyan = "\x1b[36;1m" + BrightWhite = "\x1b[37;1m" +) diff --git a/forged/internal/common/ansiec/doc.go b/forged/internal/common/ansiec/doc.go new file mode 100644 index 0000000..542c564 --- /dev/null +++ b/forged/internal/common/ansiec/doc.go @@ 
-0,0 +1,5 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +// Package ansiec provides definitions for ANSI escape sequences. +package ansiec diff --git a/forged/internal/common/ansiec/reset.go b/forged/internal/common/ansiec/reset.go new file mode 100644 index 0000000..51bb312 --- /dev/null +++ b/forged/internal/common/ansiec/reset.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package ansiec + +// Reset the colors and styles +const Reset = "\x1b[0m" diff --git a/forged/internal/common/ansiec/style.go b/forged/internal/common/ansiec/style.go new file mode 100644 index 0000000..95edbbe --- /dev/null +++ b/forged/internal/common/ansiec/style.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package ansiec + +// ANSI text styles +const ( + Bold = "\x1b[1m" + Underline = "\x1b[4m" + Reversed = "\x1b[7m" + Italic = "\x1b[3m" +) diff --git a/forged/internal/common/argon2id/LICENSE b/forged/internal/common/argon2id/LICENSE new file mode 100644 index 0000000..3649823 --- /dev/null +++ b/forged/internal/common/argon2id/LICENSE @@ -0,0 +1,18 @@ +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/forged/internal/common/argon2id/argon2id.go b/forged/internal/common/argon2id/argon2id.go new file mode 100644 index 0000000..88df8f6 --- /dev/null +++ b/forged/internal/common/argon2id/argon2id.go @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: Copyright (c) 2018 Alex Edwards + +// Package argon2id provides a wrapper around Go's golang.org/x/crypto/argon2. +package argon2id + +import ( + "crypto/rand" + "crypto/subtle" + "encoding/base64" + "errors" + "fmt" + "runtime" + "strings" + + "golang.org/x/crypto/argon2" +) + +var ( + // ErrInvalidHash in returned by ComparePasswordAndHash if the provided + // hash isn't in the expected format. + ErrInvalidHash = errors.New("argon2id: hash is not in the correct format") + + // ErrIncompatibleVariant is returned by ComparePasswordAndHash if the + // provided hash was created using a unsupported variant of Argon2. + // Currently only argon2id is supported by this package. + ErrIncompatibleVariant = errors.New("argon2id: incompatible variant of argon2") + + // ErrIncompatibleVersion is returned by ComparePasswordAndHash if the + // provided hash was created using a different version of Argon2. + ErrIncompatibleVersion = errors.New("argon2id: incompatible version of argon2") +) + +// DefaultParams provides some sane default parameters for hashing passwords. 
+// +// Follows recommendations given by the Argon2 RFC: +// "The Argon2id variant with t=1 and maximum available memory is RECOMMENDED as a +// default setting for all environments. This setting is secure against side-channel +// attacks and maximizes adversarial costs on dedicated bruteforce hardware."" +// +// The default parameters should generally be used for development/testing purposes +// only. Custom parameters should be set for production applications depending on +// available memory/CPU resources and business requirements. +var DefaultParams = &Params{ + Memory: 64 * 1024, + Iterations: 1, + Parallelism: uint8(runtime.NumCPU()), + SaltLength: 16, + KeyLength: 32, +} + +// Params describes the input parameters used by the Argon2id algorithm. The +// Memory and Iterations parameters control the computational cost of hashing +// the password. The higher these figures are, the greater the cost of generating +// the hash and the longer the runtime. It also follows that the greater the cost +// will be for any attacker trying to guess the password. If the code is running +// on a machine with multiple cores, then you can decrease the runtime without +// reducing the cost by increasing the Parallelism parameter. This controls the +// number of threads that the work is spread across. Important note: Changing the +// value of the Parallelism parameter changes the hash output. +// +// For guidance and an outline process for choosing appropriate parameters see +// https://tools.ietf.org/html/draft-irtf-cfrg-argon2-04#section-4 +type Params struct { + // The amount of memory used by the algorithm (in kibibytes). + Memory uint32 + + // The number of iterations over the memory. + Iterations uint32 + + // The number of threads (or lanes) used by the algorithm. + // Recommended value is between 1 and runtime.NumCPU(). + Parallelism uint8 + + // Length of the random salt. 16 bytes is recommended for password hashing. 
+ SaltLength uint32 + + // Length of the generated key. 16 bytes or more is recommended. + KeyLength uint32 +} + +// CreateHash returns an Argon2id hash of a plain-text password using the +// provided algorithm parameters. The returned hash follows the format used by +// the Argon2 reference C implementation and contains the base64-encoded Argon2id d +// derived key prefixed by the salt and parameters. It looks like this: +// +// $argon2id$v=19$m=65536,t=3,p=2$c29tZXNhbHQ$RdescudvJCsgt3ub+b+dWRWJTmaaJObG +func CreateHash(password string, params *Params) (hash string, err error) { + salt, err := generateRandomBytes(params.SaltLength) + if err != nil { + return "", err + } + + key := argon2.IDKey([]byte(password), salt, params.Iterations, params.Memory, params.Parallelism, params.KeyLength) + + b64Salt := base64.RawStdEncoding.EncodeToString(salt) + b64Key := base64.RawStdEncoding.EncodeToString(key) + + hash = fmt.Sprintf("$argon2id$v=%d$m=%d,t=%d,p=%d$%s$%s", argon2.Version, params.Memory, params.Iterations, params.Parallelism, b64Salt, b64Key) + return hash, nil +} + +// ComparePasswordAndHash performs a constant-time comparison between a +// plain-text password and Argon2id hash, using the parameters and salt +// contained in the hash. It returns true if they match, otherwise it returns +// false. +func ComparePasswordAndHash(password, hash string) (match bool, err error) { + match, _, err = CheckHash(password, hash) + return match, err +} + +// CheckHash is like ComparePasswordAndHash, except it also returns the params that the hash was +// created with. This can be useful if you want to update your hash params over time (which you +// should). 
+func CheckHash(password, hash string) (match bool, params *Params, err error) { + params, salt, key, err := DecodeHash(hash) + if err != nil { + return false, nil, err + } + + otherKey := argon2.IDKey([]byte(password), salt, params.Iterations, params.Memory, params.Parallelism, params.KeyLength) + + keyLen := int32(len(key)) + otherKeyLen := int32(len(otherKey)) + + if subtle.ConstantTimeEq(keyLen, otherKeyLen) == 0 { + return false, params, nil + } + if subtle.ConstantTimeCompare(key, otherKey) == 1 { + return true, params, nil + } + return false, params, nil +} + +func generateRandomBytes(n uint32) ([]byte, error) { + b := make([]byte, n) + _, err := rand.Read(b) + if err != nil { + return nil, err + } + + return b, nil +} + +// DecodeHash expects a hash created from this package, and parses it to return the params used to +// create it, as well as the salt and key (password hash). +func DecodeHash(hash string) (params *Params, salt, key []byte, err error) { + vals := strings.Split(hash, "$") + if len(vals) != 6 { + return nil, nil, nil, ErrInvalidHash + } + + if vals[1] != "argon2id" { + return nil, nil, nil, ErrIncompatibleVariant + } + + var version int + _, err = fmt.Sscanf(vals[2], "v=%d", &version) + if err != nil { + return nil, nil, nil, err + } + if version != argon2.Version { + return nil, nil, nil, ErrIncompatibleVersion + } + + params = &Params{} + _, err = fmt.Sscanf(vals[3], "m=%d,t=%d,p=%d", ¶ms.Memory, ¶ms.Iterations, ¶ms.Parallelism) + if err != nil { + return nil, nil, nil, err + } + + salt, err = base64.RawStdEncoding.Strict().DecodeString(vals[4]) + if err != nil { + return nil, nil, nil, err + } + params.SaltLength = uint32(len(salt)) + + key, err = base64.RawStdEncoding.Strict().DecodeString(vals[5]) + if err != nil { + return nil, nil, nil, err + } + params.KeyLength = uint32(len(key)) + + return params, salt, key, nil +} diff --git a/forged/internal/common/bare/LICENSE b/forged/internal/common/bare/LICENSE new file mode 100644 index 
0000000..6b0b127 --- /dev/null +++ b/forged/internal/common/bare/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/forged/internal/common/bare/doc.go b/forged/internal/common/bare/doc.go new file mode 100644 index 0000000..2f12f55 --- /dev/null +++ b/forged/internal/common/bare/doc.go @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +// Package bare provides primitives to encode and decode BARE messages. +// +// There is no guarantee that this is compatible with the upstream +// implementation at https://git.sr.ht/~sircmpwn/go-bare. 
+package bare diff --git a/forged/internal/common/bare/errors.go b/forged/internal/common/bare/errors.go new file mode 100644 index 0000000..4634f0c --- /dev/null +++ b/forged/internal/common/bare/errors.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com> + +package bare + +import ( + "errors" + "fmt" + "reflect" +) + +var ErrInvalidStr = errors.New("string contains invalid UTF-8 sequences") + +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return fmt.Sprintf("unsupported type for marshaling: %s\n", e.Type.String()) +} diff --git a/forged/internal/common/bare/limit.go b/forged/internal/common/bare/limit.go new file mode 100644 index 0000000..7eece8c --- /dev/null +++ b/forged/internal/common/bare/limit.go @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com> + +package bare + +import ( + "errors" + "io" +) + +var ( + maxUnmarshalBytes uint64 = 1024 * 1024 * 32 /* 32 MiB */ + maxArrayLength uint64 = 1024 * 4 /* 4096 elements */ + maxMapSize uint64 = 1024 +) + +// MaxUnmarshalBytes sets the maximum size of a message decoded by unmarshal. +// By default, this is set to 32 MiB. +func MaxUnmarshalBytes(bytes uint64) { + maxUnmarshalBytes = bytes +} + +// MaxArrayLength sets maximum number of elements in array. Defaults to 4096 elements +func MaxArrayLength(length uint64) { + maxArrayLength = length +} + +// MaxMapSize sets maximum size of map. Defaults to 1024 key/value pairs +func MaxMapSize(size uint64) { + maxMapSize = size +} + +// Use MaxUnmarshalBytes to prevent this error from occuring on messages which +// are large by design. +var ErrLimitExceeded = errors.New("maximum message size exceeded") + +// Identical to io.LimitedReader, except it returns our custom error instead of +// EOF if the limit is reached. 
+type limitedReader struct { + R io.Reader + N uint64 +} + +func (l *limitedReader) Read(p []byte) (n int, err error) { + if l.N <= 0 { + return 0, ErrLimitExceeded + } + if uint64(len(p)) > l.N { + p = p[0:l.N] + } + n, err = l.R.Read(p) + l.N -= uint64(n) + return +} + +func newLimitedReader(r io.Reader) *limitedReader { + return &limitedReader{r, maxUnmarshalBytes} +} diff --git a/forged/internal/common/bare/marshal.go b/forged/internal/common/bare/marshal.go new file mode 100644 index 0000000..d4c338e --- /dev/null +++ b/forged/internal/common/bare/marshal.go @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com> + +package bare + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sync" +) + +// A type which implements this interface will be responsible for marshaling +// itself when encountered. +type Marshalable interface { + Marshal(w *Writer) error +} + +var encoderBufferPool = sync.Pool{ + New: func() interface{} { + buf := &bytes.Buffer{} + buf.Grow(32) + return buf + }, +} + +// Marshals a value (val, which must be a pointer) into a BARE message. +// +// The encoding of each struct field can be customized by the format string +// stored under the "bare" key in the struct field's tag. +// +// As a special case, if the field tag is "-", the field is always omitted. +func Marshal(val interface{}) ([]byte, error) { + // reuse buffers from previous serializations + b := encoderBufferPool.Get().(*bytes.Buffer) + defer func() { + b.Reset() + encoderBufferPool.Put(b) + }() + + w := NewWriter(b) + err := MarshalWriter(w, val) + + msg := make([]byte, b.Len()) + copy(msg, b.Bytes()) + + return msg, err +} + +// Marshals a value (val, which must be a pointer) into a BARE message and +// writes it to a Writer. See Marshal for details. 
+func MarshalWriter(w *Writer, val interface{}) error { + t := reflect.TypeOf(val) + v := reflect.ValueOf(val) + if t.Kind() != reflect.Ptr { + return errors.New("expected val to be pointer type") + } + + return getEncoder(t.Elem())(w, v.Elem()) +} + +type encodeFunc func(w *Writer, v reflect.Value) error + +var encodeFuncCache sync.Map // map[reflect.Type]encodeFunc + +// get decoder from cache +func getEncoder(t reflect.Type) encodeFunc { + if f, ok := encodeFuncCache.Load(t); ok { + return f.(encodeFunc) + } + + f := encoderFunc(t) + encodeFuncCache.Store(t, f) + return f +} + +var marshalableInterface = reflect.TypeOf((*Unmarshalable)(nil)).Elem() + +func encoderFunc(t reflect.Type) encodeFunc { + if reflect.PointerTo(t).Implements(marshalableInterface) { + return func(w *Writer, v reflect.Value) error { + uv := v.Addr().Interface().(Marshalable) + return uv.Marshal(w) + } + } + + if t.Kind() == reflect.Interface && t.Implements(unionInterface) { + return encodeUnion(t) + } + + switch t.Kind() { + case reflect.Ptr: + return encodeOptional(t.Elem()) + case reflect.Struct: + return encodeStruct(t) + case reflect.Array: + return encodeArray(t) + case reflect.Slice: + return encodeSlice(t) + case reflect.Map: + return encodeMap(t) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return encodeUint + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return encodeInt + case reflect.Float32, reflect.Float64: + return encodeFloat + case reflect.Bool: + return encodeBool + case reflect.String: + return encodeString + } + + return func(w *Writer, v reflect.Value) error { + return &UnsupportedTypeError{v.Type()} + } +} + +func encodeOptional(t reflect.Type) encodeFunc { + return func(w *Writer, v reflect.Value) error { + if v.IsNil() { + return w.WriteBool(false) + } + + if err := w.WriteBool(true); err != nil { + return err + } + + return getEncoder(t)(w, v.Elem()) + } +} + +func encodeStruct(t 
reflect.Type) encodeFunc { + n := t.NumField() + encoders := make([]encodeFunc, n) + for i := 0; i < n; i++ { + field := t.Field(i) + if field.Tag.Get("bare") == "-" { + continue + } + encoders[i] = getEncoder(field.Type) + } + + return func(w *Writer, v reflect.Value) error { + for i := 0; i < n; i++ { + if encoders[i] == nil { + continue + } + err := encoders[i](w, v.Field(i)) + if err != nil { + return err + } + } + return nil + } +} + +func encodeArray(t reflect.Type) encodeFunc { + f := getEncoder(t.Elem()) + len := t.Len() + + return func(w *Writer, v reflect.Value) error { + for i := 0; i < len; i++ { + if err := f(w, v.Index(i)); err != nil { + return err + } + } + return nil + } +} + +func encodeSlice(t reflect.Type) encodeFunc { + elem := t.Elem() + f := getEncoder(elem) + + return func(w *Writer, v reflect.Value) error { + if err := w.WriteUint(uint64(v.Len())); err != nil { + return err + } + + for i := 0; i < v.Len(); i++ { + if err := f(w, v.Index(i)); err != nil { + return err + } + } + return nil + } +} + +func encodeMap(t reflect.Type) encodeFunc { + keyType := t.Key() + keyf := getEncoder(keyType) + + valueType := t.Elem() + valf := getEncoder(valueType) + + return func(w *Writer, v reflect.Value) error { + if err := w.WriteUint(uint64(v.Len())); err != nil { + return err + } + + iter := v.MapRange() + for iter.Next() { + if err := keyf(w, iter.Key()); err != nil { + return err + } + if err := valf(w, iter.Value()); err != nil { + return err + } + } + return nil + } +} + +func encodeUnion(t reflect.Type) encodeFunc { + ut, ok := unionRegistry[t] + if !ok { + return func(w *Writer, v reflect.Value) error { + return fmt.Errorf("Union type %s is not registered", t.Name()) + } + } + + encoders := make(map[uint64]encodeFunc) + for tag, t := range ut.types { + encoders[tag] = getEncoder(t) + } + + return func(w *Writer, v reflect.Value) error { + t := v.Elem().Type() + if t.Kind() == reflect.Ptr { + // If T is a valid union value type, *T is valid too. 
+ t = t.Elem() + v = v.Elem() + } + tag, ok := ut.tags[t] + if !ok { + return fmt.Errorf("Invalid union value: %s", v.Elem().String()) + } + + if err := w.WriteUint(tag); err != nil { + return err + } + + return encoders[tag](w, v.Elem()) + } +} + +func encodeUint(w *Writer, v reflect.Value) error { + switch getIntKind(v.Type()) { + case reflect.Uint: + return w.WriteUint(v.Uint()) + + case reflect.Uint8: + return w.WriteU8(uint8(v.Uint())) + + case reflect.Uint16: + return w.WriteU16(uint16(v.Uint())) + + case reflect.Uint32: + return w.WriteU32(uint32(v.Uint())) + + case reflect.Uint64: + return w.WriteU64(uint64(v.Uint())) + } + + panic("not uint") +} + +func encodeInt(w *Writer, v reflect.Value) error { + switch getIntKind(v.Type()) { + case reflect.Int: + return w.WriteInt(v.Int()) + + case reflect.Int8: + return w.WriteI8(int8(v.Int())) + + case reflect.Int16: + return w.WriteI16(int16(v.Int())) + + case reflect.Int32: + return w.WriteI32(int32(v.Int())) + + case reflect.Int64: + return w.WriteI64(int64(v.Int())) + } + + panic("not int") +} + +func encodeFloat(w *Writer, v reflect.Value) error { + switch v.Type().Kind() { + case reflect.Float32: + return w.WriteF32(float32(v.Float())) + case reflect.Float64: + return w.WriteF64(v.Float()) + } + + panic("not float") +} + +func encodeBool(w *Writer, v reflect.Value) error { + return w.WriteBool(v.Bool()) +} + +func encodeString(w *Writer, v reflect.Value) error { + if v.Kind() != reflect.String { + panic("not string") + } + return w.WriteString(v.String()) +} diff --git a/forged/internal/common/bare/reader.go b/forged/internal/common/bare/reader.go new file mode 100644 index 0000000..7e872f4 --- /dev/null +++ b/forged/internal/common/bare/reader.go @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com> + +package bare + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "unicode/utf8" + + 
"go.lindenii.runxiyu.org/forge/forged/internal/common/misc" +) + +type byteReader interface { + io.Reader + io.ByteReader +} + +// A Reader for BARE primitive types. +type Reader struct { + base byteReader + scratch [8]byte +} + +type simpleByteReader struct { + io.Reader + scratch [1]byte +} + +func (r simpleByteReader) ReadByte() (byte, error) { + // using reference type here saves us allocations + _, err := r.Read(r.scratch[:]) + return r.scratch[0], err +} + +// Returns a new BARE primitive reader wrapping the given io.Reader. +func NewReader(base io.Reader) *Reader { + br, ok := base.(byteReader) + if !ok { + br = simpleByteReader{Reader: base} + } + return &Reader{base: br} +} + +func (r *Reader) ReadUint() (uint64, error) { + x, err := binary.ReadUvarint(r.base) + if err != nil { + return x, err + } + return x, nil +} + +func (r *Reader) ReadU8() (uint8, error) { + return r.base.ReadByte() +} + +func (r *Reader) ReadU16() (uint16, error) { + var i uint16 + if _, err := io.ReadAtLeast(r.base, r.scratch[:2], 2); err != nil { + return i, err + } + return binary.LittleEndian.Uint16(r.scratch[:]), nil +} + +func (r *Reader) ReadU32() (uint32, error) { + var i uint32 + if _, err := io.ReadAtLeast(r.base, r.scratch[:4], 4); err != nil { + return i, err + } + return binary.LittleEndian.Uint32(r.scratch[:]), nil +} + +func (r *Reader) ReadU64() (uint64, error) { + var i uint64 + if _, err := io.ReadAtLeast(r.base, r.scratch[:8], 8); err != nil { + return i, err + } + return binary.LittleEndian.Uint64(r.scratch[:]), nil +} + +func (r *Reader) ReadInt() (int64, error) { + return binary.ReadVarint(r.base) +} + +func (r *Reader) ReadI8() (int8, error) { + b, err := r.base.ReadByte() + return int8(b), err +} + +func (r *Reader) ReadI16() (int16, error) { + var i int16 + if _, err := io.ReadAtLeast(r.base, r.scratch[:2], 2); err != nil { + return i, err + } + return int16(binary.LittleEndian.Uint16(r.scratch[:])), nil +} + +func (r *Reader) ReadI32() (int32, error) { + var 
i int32 + if _, err := io.ReadAtLeast(r.base, r.scratch[:4], 4); err != nil { + return i, err + } + return int32(binary.LittleEndian.Uint32(r.scratch[:])), nil +} + +func (r *Reader) ReadI64() (int64, error) { + var i int64 + if _, err := io.ReadAtLeast(r.base, r.scratch[:], 8); err != nil { + return i, err + } + return int64(binary.LittleEndian.Uint64(r.scratch[:])), nil +} + +func (r *Reader) ReadF32() (float32, error) { + u, err := r.ReadU32() + f := math.Float32frombits(u) + if math.IsNaN(float64(f)) { + return 0.0, fmt.Errorf("NaN is not permitted in BARE floats") + } + return f, err +} + +func (r *Reader) ReadF64() (float64, error) { + u, err := r.ReadU64() + f := math.Float64frombits(u) + if math.IsNaN(f) { + return 0.0, fmt.Errorf("NaN is not permitted in BARE floats") + } + return f, err +} + +func (r *Reader) ReadBool() (bool, error) { + b, err := r.ReadU8() + if err != nil { + return false, err + } + + if b > 1 { + return false, fmt.Errorf("Invalid bool value: %#x", b) + } + + return b == 1, nil +} + +func (r *Reader) ReadString() (string, error) { + buf, err := r.ReadData() + if err != nil { + return "", err + } + if !utf8.Valid(buf) { + return "", ErrInvalidStr + } + return misc.BytesToString(buf), nil +} + +// Reads a fixed amount of arbitrary data, defined by the length of the slice. +func (r *Reader) ReadDataFixed(dest []byte) error { + var amt int + for amt < len(dest) { + n, err := r.base.Read(dest[amt:]) + if err != nil { + return err + } + amt += n + } + return nil +} + +// Reads arbitrary data whose length is read from the message. 
+func (r *Reader) ReadData() ([]byte, error) { + l, err := r.ReadUint() + if err != nil { + return nil, err + } + if l >= maxUnmarshalBytes { + return nil, ErrLimitExceeded + } + buf := make([]byte, l) + var amt uint64 = 0 + for amt < l { + n, err := r.base.Read(buf[amt:]) + if err != nil { + return nil, err + } + amt += uint64(n) + } + return buf, nil +} diff --git a/forged/internal/common/bare/unions.go b/forged/internal/common/bare/unions.go new file mode 100644 index 0000000..1020fa0 --- /dev/null +++ b/forged/internal/common/bare/unions.go @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com> + +package bare + +import ( + "fmt" + "reflect" +) + +// Any type which is a union member must implement this interface. You must +// also call RegisterUnion for go-bare to marshal or unmarshal messages which +// utilize your union type. +type Union interface { + IsUnion() +} + +type UnionTags struct { + iface reflect.Type + tags map[reflect.Type]uint64 + types map[uint64]reflect.Type +} + +var ( + unionInterface = reflect.TypeOf((*Union)(nil)).Elem() + unionRegistry map[reflect.Type]*UnionTags +) + +func init() { + unionRegistry = make(map[reflect.Type]*UnionTags) +} + +// Registers a union type in this context. Pass the union interface and the +// list of types associated with it, sorted ascending by their union tag. 
+func RegisterUnion(iface interface{}) *UnionTags { + ity := reflect.TypeOf(iface).Elem() + if _, ok := unionRegistry[ity]; ok { + panic(fmt.Errorf("Type %s has already been registered", ity.Name())) + } + + if !ity.Implements(reflect.TypeOf((*Union)(nil)).Elem()) { + panic(fmt.Errorf("Type %s does not implement bare.Union", ity.Name())) + } + + utypes := &UnionTags{ + iface: ity, + tags: make(map[reflect.Type]uint64), + types: make(map[uint64]reflect.Type), + } + unionRegistry[ity] = utypes + return utypes +} + +func (ut *UnionTags) Member(t interface{}, tag uint64) *UnionTags { + ty := reflect.TypeOf(t) + if !ty.AssignableTo(ut.iface) { + panic(fmt.Errorf("Type %s does not implement interface %s", + ty.Name(), ut.iface.Name())) + } + if _, ok := ut.tags[ty]; ok { + panic(fmt.Errorf("Type %s is already registered for union %s", + ty.Name(), ut.iface.Name())) + } + if _, ok := ut.types[tag]; ok { + panic(fmt.Errorf("Tag %d is already registered for union %s", + tag, ut.iface.Name())) + } + ut.tags[ty] = tag + ut.types[tag] = ty + return ut +} + +func (ut *UnionTags) TagFor(v interface{}) (uint64, bool) { + tag, ok := ut.tags[reflect.TypeOf(v)] + return tag, ok +} + +func (ut *UnionTags) TypeFor(tag uint64) (reflect.Type, bool) { + t, ok := ut.types[tag] + return t, ok +} diff --git a/forged/internal/common/bare/unmarshal.go b/forged/internal/common/bare/unmarshal.go new file mode 100644 index 0000000..d55f32c --- /dev/null +++ b/forged/internal/common/bare/unmarshal.go @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com> + +package bare + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sync" +) + +// A type which implements this interface will be responsible for unmarshaling +// itself when encountered. 
+type Unmarshalable interface { + Unmarshal(r *Reader) error +} + +// Unmarshals a BARE message into val, which must be a pointer to a value of +// the message type. +func Unmarshal(data []byte, val interface{}) error { + b := bytes.NewReader(data) + r := NewReader(b) + return UnmarshalBareReader(r, val) +} + +// Unmarshals a BARE message into value (val, which must be a pointer), from a +// reader. See Unmarshal for details. +func UnmarshalReader(r io.Reader, val interface{}) error { + r = newLimitedReader(r) + return UnmarshalBareReader(NewReader(r), val) +} + +type decodeFunc func(r *Reader, v reflect.Value) error + +var decodeFuncCache sync.Map // map[reflect.Type]decodeFunc + +func UnmarshalBareReader(r *Reader, val interface{}) error { + t := reflect.TypeOf(val) + v := reflect.ValueOf(val) + if t.Kind() != reflect.Ptr { + return errors.New("Expected val to be pointer type") + } + + return getDecoder(t.Elem())(r, v.Elem()) +} + +// get decoder from cache +func getDecoder(t reflect.Type) decodeFunc { + if f, ok := decodeFuncCache.Load(t); ok { + return f.(decodeFunc) + } + + f := decoderFunc(t) + decodeFuncCache.Store(t, f) + return f +} + +var unmarshalableInterface = reflect.TypeOf((*Unmarshalable)(nil)).Elem() + +func decoderFunc(t reflect.Type) decodeFunc { + if reflect.PointerTo(t).Implements(unmarshalableInterface) { + return func(r *Reader, v reflect.Value) error { + uv := v.Addr().Interface().(Unmarshalable) + return uv.Unmarshal(r) + } + } + + if t.Kind() == reflect.Interface && t.Implements(unionInterface) { + return decodeUnion(t) + } + + switch t.Kind() { + case reflect.Ptr: + return decodeOptional(t.Elem()) + case reflect.Struct: + return decodeStruct(t) + case reflect.Array: + return decodeArray(t) + case reflect.Slice: + return decodeSlice(t) + case reflect.Map: + return decodeMap(t) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return decodeUint + case reflect.Int, reflect.Int8, reflect.Int16, 
reflect.Int32, reflect.Int64: + return decodeInt + case reflect.Float32, reflect.Float64: + return decodeFloat + case reflect.Bool: + return decodeBool + case reflect.String: + return decodeString + } + + return func(r *Reader, v reflect.Value) error { + return &UnsupportedTypeError{v.Type()} + } +} + +func decodeOptional(t reflect.Type) decodeFunc { + return func(r *Reader, v reflect.Value) error { + s, err := r.ReadU8() + if err != nil { + return err + } + + if s > 1 { + return fmt.Errorf("Invalid optional value: %#x", s) + } + + if s == 0 { + return nil + } + + v.Set(reflect.New(t)) + return getDecoder(t)(r, v.Elem()) + } +} + +func decodeStruct(t reflect.Type) decodeFunc { + n := t.NumField() + decoders := make([]decodeFunc, n) + for i := 0; i < n; i++ { + field := t.Field(i) + if field.Tag.Get("bare") == "-" { + continue + } + decoders[i] = getDecoder(field.Type) + } + + return func(r *Reader, v reflect.Value) error { + for i := 0; i < n; i++ { + if decoders[i] == nil { + continue + } + err := decoders[i](r, v.Field(i)) + if err != nil { + return err + } + } + return nil + } +} + +func decodeArray(t reflect.Type) decodeFunc { + f := getDecoder(t.Elem()) + len := t.Len() + + return func(r *Reader, v reflect.Value) error { + for i := 0; i < len; i++ { + err := f(r, v.Index(i)) + if err != nil { + return err + } + } + return nil + } +} + +func decodeSlice(t reflect.Type) decodeFunc { + elem := t.Elem() + f := getDecoder(elem) + + return func(r *Reader, v reflect.Value) error { + len, err := r.ReadUint() + if err != nil { + return err + } + + if len > maxArrayLength { + return fmt.Errorf("Array length %d exceeds configured limit of %d", len, maxArrayLength) + } + + v.Set(reflect.MakeSlice(t, int(len), int(len))) + + for i := 0; i < int(len); i++ { + if err := f(r, v.Index(i)); err != nil { + return err + } + } + return nil + } +} + +func decodeMap(t reflect.Type) decodeFunc { + keyType := t.Key() + keyf := getDecoder(keyType) + + valueType := t.Elem() + valf := 
// decodeMap decodes a uvarint pair count followed by key/value pairs,
// enforcing the configured map-size limit before allocating and
// rejecting duplicate keys.
func decodeMap(t reflect.Type) decodeFunc {
	keyType := t.Key()
	keyf := getDecoder(keyType)

	valueType := t.Elem()
	valf := getDecoder(valueType)

	return func(r *Reader, v reflect.Value) error {
		size, err := r.ReadUint()
		if err != nil {
			return err
		}

		if size > maxMapSize {
			return fmt.Errorf("Map size %d exceeds configured limit of %d", size, maxMapSize)
		}

		v.Set(reflect.MakeMapWithSize(t, int(size)))

		// Scratch key/value cells, reused across iterations;
		// SetMapIndex copies them into the map, so reuse is safe.
		key := reflect.New(keyType).Elem()
		value := reflect.New(valueType).Elem()

		for i := uint64(0); i < size; i++ {
			if err := keyf(r, key); err != nil {
				return err
			}

			// MapIndex returns the zero Value (Kind == Invalid) when
			// the key is absent; any other kind means a duplicate.
			if v.MapIndex(key).Kind() > reflect.Invalid {
				return fmt.Errorf("Encountered duplicate map key: %v", key.Interface())
			}

			if err := valf(r, value); err != nil {
				return err
			}

			v.SetMapIndex(key, value)
		}
		return nil
	}
}

// decodeUnion decodes a registered union: a uvarint tag selecting the
// member type, followed by that member's encoding. The decoded member is
// stored (as a pointer) into the interface-typed destination.
func decodeUnion(t reflect.Type) decodeFunc {
	ut, ok := unionRegistry[t]
	if !ok {
		return func(r *Reader, v reflect.Value) error {
			return fmt.Errorf("Union type %s is not registered", t.Name())
		}
	}

	// Pre-build one decoder closure per registered tag.
	decoders := make(map[uint64]decodeFunc)
	for tag, t := range ut.types {
		t := t // capture a per-iteration copy (pre-Go 1.22 loop semantics)
		f := getDecoder(t)

		decoders[tag] = func(r *Reader, v reflect.Value) error {
			nv := reflect.New(t)
			if err := f(r, nv.Elem()); err != nil {
				return err
			}

			v.Set(nv)
			return nil
		}
	}

	return func(r *Reader, v reflect.Value) error {
		tag, err := r.ReadUint()
		if err != nil {
			return err
		}

		if f, ok := decoders[tag]; ok {
			return f(r, v)
		}

		return fmt.Errorf("Invalid union tag %d for type %s", tag, t.Name())
	}
}
panic("not an uint") + } + + return err +} + +func decodeInt(r *Reader, v reflect.Value) error { + var err error + switch getIntKind(v.Type()) { + case reflect.Int: + var i int64 + i, err = r.ReadInt() + v.SetInt(i) + + case reflect.Int8: + var i int8 + i, err = r.ReadI8() + v.SetInt(int64(i)) + + case reflect.Int16: + var i int16 + i, err = r.ReadI16() + v.SetInt(int64(i)) + case reflect.Int32: + var i int32 + i, err = r.ReadI32() + v.SetInt(int64(i)) + + case reflect.Int64: + var i int64 + i, err = r.ReadI64() + v.SetInt(int64(i)) + + default: + panic("not an int") + } + + return err +} + +func decodeFloat(r *Reader, v reflect.Value) error { + var err error + switch v.Type().Kind() { + case reflect.Float32: + var f float32 + f, err = r.ReadF32() + v.SetFloat(float64(f)) + case reflect.Float64: + var f float64 + f, err = r.ReadF64() + v.SetFloat(f) + default: + panic("not a float") + } + return err +} + +func decodeBool(r *Reader, v reflect.Value) error { + b, err := r.ReadBool() + v.SetBool(b) + return err +} + +func decodeString(r *Reader, v reflect.Value) error { + s, err := r.ReadString() + v.SetString(s) + return err +} diff --git a/forged/internal/common/bare/varint.go b/forged/internal/common/bare/varint.go new file mode 100644 index 0000000..a185ac8 --- /dev/null +++ b/forged/internal/common/bare/varint.go @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com> + +package bare + +import ( + "reflect" +) + +// Int is a variable-length encoded signed integer. +type Int int64 + +// Uint is a variable-length encoded unsigned integer. 
+type Uint uint64 + +var ( + intType = reflect.TypeOf(Int(0)) + uintType = reflect.TypeOf(Uint(0)) +) + +func getIntKind(t reflect.Type) reflect.Kind { + switch t { + case intType: + return reflect.Int + case uintType: + return reflect.Uint + default: + return t.Kind() + } +} diff --git a/forged/internal/common/bare/writer.go b/forged/internal/common/bare/writer.go new file mode 100644 index 0000000..1b23c9f --- /dev/null +++ b/forged/internal/common/bare/writer.go @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com> + +package bare + +import ( + "encoding/binary" + "fmt" + "io" + "math" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" +) + +// A Writer for BARE primitive types. +type Writer struct { + base io.Writer + scratch [binary.MaxVarintLen64]byte +} + +// Returns a new BARE primitive writer wrapping the given io.Writer. +func NewWriter(base io.Writer) *Writer { + return &Writer{base: base} +} + +func (w *Writer) WriteUint(i uint64) error { + n := binary.PutUvarint(w.scratch[:], i) + _, err := w.base.Write(w.scratch[:n]) + return err +} + +func (w *Writer) WriteU8(i uint8) error { + return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteU16(i uint16) error { + return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteU32(i uint32) error { + return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteU64(i uint64) error { + return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteInt(i int64) error { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], i) + _, err := w.base.Write(buf[:n]) + return err +} + +func (w *Writer) WriteI8(i int8) error { + return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteI16(i int16) error { + return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteI32(i int32) error { 
+ return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteI64(i int64) error { + return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteF32(f float32) error { + if math.IsNaN(float64(f)) { + return fmt.Errorf("NaN is not permitted in BARE floats") + } + return binary.Write(w.base, binary.LittleEndian, f) +} + +func (w *Writer) WriteF64(f float64) error { + if math.IsNaN(f) { + return fmt.Errorf("NaN is not permitted in BARE floats") + } + return binary.Write(w.base, binary.LittleEndian, f) +} + +func (w *Writer) WriteBool(b bool) error { + return binary.Write(w.base, binary.LittleEndian, b) +} + +func (w *Writer) WriteString(str string) error { + return w.WriteData(misc.StringToBytes(str)) +} + +// Writes a fixed amount of arbitrary data, defined by the length of the slice. +func (w *Writer) WriteDataFixed(data []byte) error { + var amt int + for amt < len(data) { + n, err := w.base.Write(data[amt:]) + if err != nil { + return err + } + amt += n + } + return nil +} + +// Writes arbitrary data whose length is encoded into the message. +func (w *Writer) WriteData(data []byte) error { + err := w.WriteUint(uint64(len(data))) + if err != nil { + return err + } + var amt int + for amt < len(data) { + n, err := w.base.Write(data[amt:]) + if err != nil { + return err + } + amt += n + } + return nil +} diff --git a/forged/internal/common/cmap/LICENSE b/forged/internal/common/cmap/LICENSE new file mode 100644 index 0000000..d5dfee8 --- /dev/null +++ b/forged/internal/common/cmap/LICENSE @@ -0,0 +1,22 @@ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/forged/internal/common/cmap/comparable_map.go b/forged/internal/common/cmap/comparable_map.go new file mode 100644 index 0000000..e89175c --- /dev/null +++ b/forged/internal/common/cmap/comparable_map.go @@ -0,0 +1,539 @@ +// Inspired by github.com/SaveTheRbtz/generic-sync-map-go but technically +// written from scratch with Go 1.23's sync.Map. +// Copyright 2024 Runxi Yu (porting it to generics) +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmap + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// ComparableMap[K comparable, V comparable] is like a Go map[K]V but is safe for concurrent use +// by multiple goroutines without additional locking or coordination. Loads, +// stores, and deletes run in amortized constant time. 
+// +// The ComparableMap type is optimized for two common use cases: (1) when the comparableEntry for a given +// key is only ever written once but read many times, as in caches that only grow, +// or (2) when multiple goroutines read, write, and overwrite entries for disjoint +// sets of keys. In these two cases, use of a ComparableMap may significantly reduce lock +// contention compared to a Go map paired with a separate [Mutex] or [RWMutex]. +// +// The zero ComparableMap is empty and ready for use. A ComparableMap must not be copied after first use. +// +// In the terminology of [the Go memory model], ComparableMap arranges that a write operation +// “synchronizes before” any read operation that observes the effect of the write, where +// read and write operations are defined as follows. +// [ComparableMap.Load], [ComparableMap.LoadAndDelete], [ComparableMap.LoadOrStore], [ComparableMap.Swap], [ComparableMap.CompareAndSwap], +// and [ComparableMap.CompareAndDelete] are read operations; +// [ComparableMap.Delete], [ComparableMap.LoadAndDelete], [ComparableMap.Store], and [ComparableMap.Swap] are write operations; +// [ComparableMap.LoadOrStore] is a write operation when it returns loaded set to false; +// [ComparableMap.CompareAndSwap] is a write operation when it returns swapped set to true; +// and [ComparableMap.CompareAndDelete] is a write operation when it returns deleted set to true. +// +// [the Go memory model]: https://go.dev/ref/mem +type ComparableMap[K comparable, V comparable] struct { + mu sync.Mutex + + // read contains the portion of the map's contents that are safe for + // concurrent access (with or without mu held). + // + // The read field itself is always safe to load, but must only be stored with + // mu held. 
+ // + // Entries stored in read may be updated concurrently without mu, but updating + // a previously-comparableExpunged comparableEntry requires that the comparableEntry be copied to the dirty + // map and uncomparableExpunged with mu held. + read atomic.Pointer[comparableReadOnly[K, V]] + + // dirty contains the portion of the map's contents that require mu to be + // held. To ensure that the dirty map can be promoted to the read map quickly, + // it also includes all of the non-comparableExpunged entries in the read map. + // + // Expunged entries are not stored in the dirty map. An comparableExpunged comparableEntry in the + // clean map must be uncomparableExpunged and added to the dirty map before a new value + // can be stored to it. + // + // If the dirty map is nil, the next write to the map will initialize it by + // making a shallow copy of the clean map, omitting stale entries. + dirty map[K]*comparableEntry[V] + + // misses counts the number of loads since the read map was last updated that + // needed to lock mu to determine whether the key was present. + // + // Once enough misses have occurred to cover the cost of copying the dirty + // map, the dirty map will be promoted to the read map (in the unamended + // state) and the next store to the map will make a new dirty copy. + misses int +} + +// comparableReadOnly is an immutable struct stored atomically in the ComparableMap.read field. +type comparableReadOnly[K comparable, V comparable] struct { + m map[K]*comparableEntry[V] + amended bool // true if the dirty map contains some key not in m. +} + +// comparableExpunged is an arbitrary pointer that marks entries which have been deleted +// from the dirty map. +var comparableExpunged = unsafe.Pointer(new(any)) + +// An comparableEntry is a slot in the map corresponding to a particular key. +type comparableEntry[V comparable] struct { + // p points to the value stored for the comparableEntry. 
+ // + // If p == nil, the comparableEntry has been deleted, and either m.dirty == nil or + // m.dirty[key] is e. + // + // If p == comparableExpunged, the comparableEntry has been deleted, m.dirty != nil, and the comparableEntry + // is missing from m.dirty. + // + // Otherwise, the comparableEntry is valid and recorded in m.read.m[key] and, if m.dirty + // != nil, in m.dirty[key]. + // + // An comparableEntry can be deleted by atomic replacement with nil: when m.dirty is + // next created, it will atomically replace nil with comparableExpunged and leave + // m.dirty[key] unset. + // + // An comparableEntry's associated value can be updated by atomic replacement, provided + // p != comparableExpunged. If p == comparableExpunged, an comparableEntry's associated value can be updated + // only after first setting m.dirty[key] = e so that lookups using the dirty + // map find the comparableEntry. + p unsafe.Pointer +} + +func newComparableEntry[V comparable](i V) *comparableEntry[V] { + return &comparableEntry[V]{p: unsafe.Pointer(&i)} +} + +func (m *ComparableMap[K, V]) loadReadOnly() comparableReadOnly[K, V] { + if p := m.read.Load(); p != nil { + return *p + } + return comparableReadOnly[K, V]{} +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (m *ComparableMap[K, V]) Load(key K) (value V, ok bool) { + read := m.loadReadOnly() + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + // Avoid reporting a spurious miss if m.dirty got promoted while we were + // blocked on m.mu. (If further loads of the same key will not miss, it's + // not worth copying the dirty map for this key.) + read = m.loadReadOnly() + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Regardless of whether the comparableEntry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. 
+ m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return *new(V), false + } + return e.load() +} + +func (e *comparableEntry[V]) load() (value V, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil || p == comparableExpunged { + return value, false + } + return *(*V)(p), true +} + +// Store sets the value for a key. +func (m *ComparableMap[K, V]) Store(key K, value V) { + _, _ = m.Swap(key, value) +} + +// Clear deletes all the entries, resulting in an empty ComparableMap. +func (m *ComparableMap[K, V]) Clear() { + read := m.loadReadOnly() + if len(read.m) == 0 && !read.amended { + // Avoid allocating a new comparableReadOnly when the map is already clear. + return + } + + m.mu.Lock() + defer m.mu.Unlock() + + read = m.loadReadOnly() + if len(read.m) > 0 || read.amended { + m.read.Store(&comparableReadOnly[K, V]{}) + } + + clear(m.dirty) + // Don't immediately promote the newly-cleared dirty map on the next operation. + m.misses = 0 +} + +// tryCompareAndSwap compare the comparableEntry with the given old value and swaps +// it with a new value if the comparableEntry is equal to the old value, and the comparableEntry +// has not been comparableExpunged. +// +// If the comparableEntry is comparableExpunged, tryCompareAndSwap returns false and leaves +// the comparableEntry unchanged. +func (e *comparableEntry[V]) tryCompareAndSwap(old V, new V) bool { + p := atomic.LoadPointer(&e.p) + if p == nil || p == comparableExpunged || *(*V)(p) != old { // XXX + return false + } + + // Copy the pointer after the first load to make this method more amenable + // to escape analysis: if the comparison fails from the start, we shouldn't + // bother heap-allocating a pointer to store. 
+ nc := new + for { + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(&nc)) { + return true + } + p = atomic.LoadPointer(&e.p) + if p == nil || p == comparableExpunged || *(*V)(p) != old { + return false + } + } +} + +// unexpungeLocked ensures that the comparableEntry is not marked as comparableExpunged. +// +// If the comparableEntry was previously comparableExpunged, it must be added to the dirty map +// before m.mu is unlocked. +func (e *comparableEntry[V]) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, comparableExpunged, nil) +} + +// swapLocked unconditionally swaps a value into the comparableEntry. +// +// The comparableEntry must be known not to be comparableExpunged. +func (e *comparableEntry[V]) swapLocked(i *V) *V { + return (*V)(atomic.SwapPointer(&e.p, unsafe.Pointer(i))) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *ComparableMap[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { + // Avoid locking if it's a clean hit. + read := m.loadReadOnly() + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read = m.loadReadOnly() + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. 
+ m.dirtyLocked() + m.read.Store(&comparableReadOnly[K, V]{m: read.m, amended: true}) + } + m.dirty[key] = newComparableEntry(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +// tryLoadOrStore atomically loads or stores a value if the comparableEntry is not +// comparableExpunged. +// +// If the comparableEntry is comparableExpunged, tryLoadOrStore leaves the comparableEntry unchanged and +// returns with ok==false. +func (e *comparableEntry[V]) tryLoadOrStore(i V) (actual V, loaded, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == comparableExpunged { + return actual, false, false + } + if p != nil { + return *(*V)(p), true, true + } + + // Copy the pointer after the first load to make this method more amenable + // to escape analysis: if we hit the "load" path or the comparableEntry is comparableExpunged, we + // shouldn't bother heap-allocating. + ic := i + for { + if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { + return i, false, true + } + p = atomic.LoadPointer(&e.p) + if p == comparableExpunged { + return actual, false, false + } + if p != nil { + return *(*V)(p), true, true + } + } +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. +func (m *ComparableMap[K, V]) LoadAndDelete(key K) (value V, loaded bool) { + read := m.loadReadOnly() + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read = m.loadReadOnly() + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + delete(m.dirty, key) + // Regardless of whether the comparableEntry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if ok { + return e.delete() + } + return value, false +} + +// Delete deletes the value for a key. 
+func (m *ComparableMap[K, V]) Delete(key K) { + m.LoadAndDelete(key) +} + +func (e *comparableEntry[V]) delete() (value V, ok bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == comparableExpunged { + return value, false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return *(*V)(p), true + } + } +} + +// trySwap swaps a value if the comparableEntry has not been comparableExpunged. +// +// If the comparableEntry is comparableExpunged, trySwap returns false and leaves the comparableEntry +// unchanged. +func (e *comparableEntry[V]) trySwap(i *V) (*V, bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == comparableExpunged { + return nil, false + } + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return (*V)(p), true + } + } +} + +// Swap swaps the value for a key and returns the previous value if any. +// The loaded result reports whether the key was present. +func (m *ComparableMap[K, V]) Swap(key K, value V) (previous V, loaded bool) { + read := m.loadReadOnly() + if e, ok := read.m[key]; ok { + if v, ok := e.trySwap(&value); ok { + if v == nil { + return previous, false + } + return *v, true + } + } + + m.mu.Lock() + read = m.loadReadOnly() + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + // The comparableEntry was previously comparableExpunged, which implies that there is a + // non-nil dirty map and this comparableEntry is not in it. + m.dirty[key] = e + } + if v := e.swapLocked(&value); v != nil { + loaded = true + previous = *v + } + } else if e, ok := m.dirty[key]; ok { + if v := e.swapLocked(&value); v != nil { + loaded = true + previous = *v + } + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. 
+ m.dirtyLocked() + m.read.Store(&comparableReadOnly[K, V]{m: read.m, amended: true}) + } + m.dirty[key] = newComparableEntry(value) + } + m.mu.Unlock() + return previous, loaded +} + +// CompareAndSwap swaps the old and new values for key +// if the value stored in the map is equal to old. +// The old value must be of a comparable type. +func (m *ComparableMap[K, V]) CompareAndSwap(key K, old, new V) (swapped bool) { + read := m.loadReadOnly() + if e, ok := read.m[key]; ok { + return e.tryCompareAndSwap(old, new) + } else if !read.amended { + return false // No existing value for key. + } + + m.mu.Lock() + defer m.mu.Unlock() + read = m.loadReadOnly() + swapped = false + if e, ok := read.m[key]; ok { + swapped = e.tryCompareAndSwap(old, new) + } else if e, ok := m.dirty[key]; ok { + swapped = e.tryCompareAndSwap(old, new) + // We needed to lock mu in order to load the comparableEntry for key, + // and the operation didn't change the set of keys in the map + // (so it would be made more efficient by promoting the dirty + // map to read-only). + // Count it as a miss so that we will eventually switch to the + // more efficient steady state. + m.missLocked() + } + return swapped +} + +// CompareAndDelete deletes the comparableEntry for key if its value is equal to old. +// The old value must be of a comparable type. +// +// If there is no current value for key in the map, CompareAndDelete +// returns false (even if the old value is a nil pointer). +func (m *ComparableMap[K, V]) CompareAndDelete(key K, old V) (deleted bool) { + read := m.loadReadOnly() + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read = m.loadReadOnly() + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Don't delete key from m.dirty: we still need to do the “compare” part + // of the operation. The comparableEntry will eventually be comparableExpunged when the + // dirty map is promoted to the read map. 
+ // + // Regardless of whether the comparableEntry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + for ok { + p := atomic.LoadPointer(&e.p) + if p == nil || p == comparableExpunged || *(*V)(p) != old { + return false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return true + } + } + return false +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the ComparableMap's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently (including by f), Range may reflect any +// mapping for that key from any point during the Range call. Range does not +// block other methods on the receiver; even f itself may call any method on m. +// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *ComparableMap[K, V]) Range(f func(key K, value V) bool) { + // We need to be able to iterate over all of the keys that were already + // present at the start of the call to Range. + // If read.amended is false, then read.m satisfies that property without + // requiring us to hold m.mu for a long time. + read := m.loadReadOnly() + if read.amended { + // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) + // (assuming the caller does not break out early), so a call to Range + // amortizes an entire copy of the map: we can promote the dirty copy + // immediately! 
+ m.mu.Lock() + read = m.loadReadOnly() + if read.amended { + read = comparableReadOnly[K, V]{m: m.dirty} + copyRead := read + m.read.Store(&copyRead) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *ComparableMap[K, V]) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(&comparableReadOnly[K, V]{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *ComparableMap[K, V]) dirtyLocked() { + if m.dirty != nil { + return + } + + read := m.loadReadOnly() + m.dirty = make(map[K]*comparableEntry[V], len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } + } +} + +func (e *comparableEntry[V]) tryExpungeLocked() (isExpunged bool) { + p := atomic.LoadPointer(&e.p) + for p == nil { + if atomic.CompareAndSwapPointer(&e.p, nil, comparableExpunged) { + return true + } + p = atomic.LoadPointer(&e.p) + } + return p == comparableExpunged +} diff --git a/forged/internal/common/cmap/map.go b/forged/internal/common/cmap/map.go new file mode 100644 index 0000000..7a1fe5b --- /dev/null +++ b/forged/internal/common/cmap/map.go @@ -0,0 +1,446 @@ +// Inspired by github.com/SaveTheRbtz/generic-sync-map-go but technically +// written from scratch with Go 1.23's sync.Map. +// Copyright 2024 Runxi Yu (porting it to generics) +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cmap provides a generic Map safe for concurrent use. +package cmap + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// Map[K comparable, V any] is like a Go map[K]V but is safe for concurrent use +// by multiple goroutines without additional locking or coordination. Loads, +// stores, and deletes run in amortized constant time.
+// +// The Map type is optimized for two common use cases: (1) when the entry for a given +// key is only ever written once but read many times, as in caches that only grow, +// or (2) when multiple goroutines read, write, and overwrite entries for disjoint +// sets of keys. In these two cases, use of a Map may significantly reduce lock +// contention compared to a Go map paired with a separate [Mutex] or [RWMutex]. +// +// The zero Map is empty and ready for use. A Map must not be copied after first use. +// +// In the terminology of [the Go memory model], Map arranges that a write operation +// “synchronizes before” any read operation that observes the effect of the write, where +// read and write operations are defined as follows. +// [Map.Load], [Map.LoadAndDelete], [Map.LoadOrStore], [Map.Swap], [Map.CompareAndSwap], +// and [Map.CompareAndDelete] are read operations; +// [Map.Delete], [Map.LoadAndDelete], [Map.Store], and [Map.Swap] are write operations; +// [Map.LoadOrStore] is a write operation when it returns loaded set to false; +// [Map.CompareAndSwap] is a write operation when it returns swapped set to true; +// and [Map.CompareAndDelete] is a write operation when it returns deleted set to true. +// +// [the Go memory model]: https://go.dev/ref/mem +type Map[K comparable, V any] struct { + mu sync.Mutex + + // read contains the portion of the map's contents that are safe for + // concurrent access (with or without mu held). + // + // The read field itself is always safe to load, but must only be stored with + // mu held. + // + // Entries stored in read may be updated concurrently without mu, but updating + // a previously-expunged entry requires that the entry be copied to the dirty + // map and unexpunged with mu held. + read atomic.Pointer[readOnly[K, V]] + + // dirty contains the portion of the map's contents that require mu to be + // held. 
To ensure that the dirty map can be promoted to the read map quickly, + // it also includes all of the non-expunged entries in the read map. + // + // Expunged entries are not stored in the dirty map. An expunged entry in the + // clean map must be unexpunged and added to the dirty map before a new value + // can be stored to it. + // + // If the dirty map is nil, the next write to the map will initialize it by + // making a shallow copy of the clean map, omitting stale entries. + dirty map[K]*entry[V] + + // misses counts the number of loads since the read map was last updated that + // needed to lock mu to determine whether the key was present. + // + // Once enough misses have occurred to cover the cost of copying the dirty + // map, the dirty map will be promoted to the read map (in the unamended + // state) and the next store to the map will make a new dirty copy. + misses int +} + +// readOnly is an immutable struct stored atomically in the Map.read field. +type readOnly[K comparable, V any] struct { + m map[K]*entry[V] + amended bool // true if the dirty map contains some key not in m. +} + +// expunged is an arbitrary pointer that marks entries which have been deleted +// from the dirty map. +var expunged = unsafe.Pointer(new(any)) + +// An entry is a slot in the map corresponding to a particular key. +type entry[V any] struct { + // p points to the value stored for the entry. + // + // If p == nil, the entry has been deleted, and either m.dirty == nil or + // m.dirty[key] is e. + // + // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry + // is missing from m.dirty. + // + // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty + // != nil, in m.dirty[key]. + // + // An entry can be deleted by atomic replacement with nil: when m.dirty is + // next created, it will atomically replace nil with expunged and leave + // m.dirty[key] unset. 
+ // + // An entry's associated value can be updated by atomic replacement, provided + // p != expunged. If p == expunged, an entry's associated value can be updated + // only after first setting m.dirty[key] = e so that lookups using the dirty + // map find the entry. + p unsafe.Pointer +} + +func newEntry[V any](i V) *entry[V] { + return &entry[V]{p: unsafe.Pointer(&i)} +} + +func (m *Map[K, V]) loadReadOnly() readOnly[K, V] { + if p := m.read.Load(); p != nil { + return *p + } + return readOnly[K, V]{} +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (m *Map[K, V]) Load(key K) (value V, ok bool) { + read := m.loadReadOnly() + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + // Avoid reporting a spurious miss if m.dirty got promoted while we were + // blocked on m.mu. (If further loads of the same key will not miss, it's + // not worth copying the dirty map for this key.) + read = m.loadReadOnly() + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return *new(V), false + } + return e.load() +} + +func (e *entry[V]) load() (value V, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expunged { + return value, false + } + return *(*V)(p), true +} + +// Store sets the value for a key. +func (m *Map[K, V]) Store(key K, value V) { + _, _ = m.Swap(key, value) +} + +// Clear deletes all the entries, resulting in an empty Map. +func (m *Map[K, V]) Clear() { + read := m.loadReadOnly() + if len(read.m) == 0 && !read.amended { + // Avoid allocating a new readOnly when the map is already clear. 
+ return + } + + m.mu.Lock() + defer m.mu.Unlock() + + read = m.loadReadOnly() + if len(read.m) > 0 || read.amended { + m.read.Store(&readOnly[K, V]{}) + } + + clear(m.dirty) + // Don't immediately promote the newly-cleared dirty map on the next operation. + m.misses = 0 +} + +// unexpungeLocked ensures that the entry is not marked as expunged. +// +// If the entry was previously expunged, it must be added to the dirty map +// before m.mu is unlocked. +func (e *entry[V]) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, expunged, nil) +} + +// swapLocked unconditionally swaps a value into the entry. +// +// The entry must be known not to be expunged. +func (e *entry[V]) swapLocked(i *V) *V { + return (*V)(atomic.SwapPointer(&e.p, unsafe.Pointer(i))) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { + // Avoid locking if it's a clean hit. + read := m.loadReadOnly() + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read = m.loadReadOnly() + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(&readOnly[K, V]{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +// tryLoadOrStore atomically loads or stores a value if the entry is not +// expunged. 
+// +// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and +// returns with ok==false. +func (e *entry[V]) tryLoadOrStore(i V) (actual V, loaded, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == expunged { + return actual, false, false + } + if p != nil { + return *(*V)(p), true, true + } + + // Copy the pointer after the first load to make this method more amenable + // to escape analysis: if we hit the "load" path or the entry is expunged, we + // shouldn't bother heap-allocating. + ic := i + for { + if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { + return i, false, true + } + p = atomic.LoadPointer(&e.p) + if p == expunged { + return actual, false, false + } + if p != nil { + return *(*V)(p), true, true + } + } +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. +func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) { + read := m.loadReadOnly() + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read = m.loadReadOnly() + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + delete(m.dirty, key) + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if ok { + return e.delete() + } + return value, false +} + +// Delete deletes the value for a key. +func (m *Map[K, V]) Delete(key K) { + m.LoadAndDelete(key) +} + +func (e *entry[V]) delete() (value V, ok bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expunged { + return value, false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return *(*V)(p), true + } + } +} + +// trySwap swaps a value if the entry has not been expunged. +// +// If the entry is expunged, trySwap returns false and leaves the entry +// unchanged. 
+func (e *entry[V]) trySwap(i *V) (*V, bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == expunged { + return nil, false + } + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return (*V)(p), true + } + } +} + +// Swap swaps the value for a key and returns the previous value if any. +// The loaded result reports whether the key was present. +func (m *Map[K, V]) Swap(key K, value V) (previous V, loaded bool) { + read := m.loadReadOnly() + if e, ok := read.m[key]; ok { + if v, ok := e.trySwap(&value); ok { + if v == nil { + return previous, false + } + return *v, true + } + } + + m.mu.Lock() + read = m.loadReadOnly() + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + // The entry was previously expunged, which implies that there is a + // non-nil dirty map and this entry is not in it. + m.dirty[key] = e + } + if v := e.swapLocked(&value); v != nil { + loaded = true + previous = *v + } + } else if e, ok := m.dirty[key]; ok { + if v := e.swapLocked(&value); v != nil { + loaded = true + previous = *v + } + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(&readOnly[K, V]{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + } + m.mu.Unlock() + return previous, loaded +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently (including by f), Range may reflect any +// mapping for that key from any point during the Range call. Range does not +// block other methods on the receiver; even f itself may call any method on m. 
+// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *Map[K, V]) Range(f func(key K, value V) bool) { + // We need to be able to iterate over all of the keys that were already + // present at the start of the call to Range. + // If read.amended is false, then read.m satisfies that property without + // requiring us to hold m.mu for a long time. + read := m.loadReadOnly() + if read.amended { + // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) + // (assuming the caller does not break out early), so a call to Range + // amortizes an entire copy of the map: we can promote the dirty copy + // immediately! + m.mu.Lock() + read = m.loadReadOnly() + if read.amended { + read = readOnly[K, V]{m: m.dirty} + copyRead := read + m.read.Store(©Read) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *Map[K, V]) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(&readOnly[K, V]{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *Map[K, V]) dirtyLocked() { + if m.dirty != nil { + return + } + + read := m.loadReadOnly() + m.dirty = make(map[K]*entry[V], len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } + } +} + +func (e *entry[V]) tryExpungeLocked() (isExpunged bool) { + p := atomic.LoadPointer(&e.p) + for p == nil { + if atomic.CompareAndSwapPointer(&e.p, nil, expunged) { + return true + } + p = atomic.LoadPointer(&e.p) + } + return p == expunged +} diff --git a/forged/internal/common/humanize/bytes.go b/forged/internal/common/humanize/bytes.go new file mode 100644 index 0000000..bea504c --- /dev/null +++ b/forged/internal/common/humanize/bytes.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net> +// 
SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +// Package humanize provides functions to convert numbers into human-readable formats. +package humanize + +import ( + "fmt" + "math" +) + +// IBytes produces a human readable representation of an IEC size. +func IBytes(s uint64) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} + return humanateBytes(s, 1024, sizes) +} + +func humanateBytes(s uint64, base float64, sizes []string) string { + if s < 10 { + return fmt.Sprintf("%d B", s) + } + e := math.Floor(logn(float64(s), base)) + suffix := sizes[int(e)] + val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) +} + +func logn(n, b float64) float64 { + return math.Log(n) / math.Log(b) +} diff --git a/forged/internal/common/misc/back.go b/forged/internal/common/misc/back.go new file mode 100644 index 0000000..5351359 --- /dev/null +++ b/forged/internal/common/misc/back.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package misc + +// ErrorBack wraps a value and a channel for communicating an associated error. +// Typically used to get an error response after sending data across a channel. +type ErrorBack[T any] struct { + Content T + ErrorChan chan error +} diff --git a/forged/internal/common/misc/iter.go b/forged/internal/common/misc/iter.go new file mode 100644 index 0000000..61a96f4 --- /dev/null +++ b/forged/internal/common/misc/iter.go @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package misc + +import "iter" + +// iterSeqLimit returns an iterator equivalent to the supplied one, but stops +// after n iterations. 
+func IterSeqLimit[T any](s iter.Seq[T], n uint) iter.Seq[T] {
+	return func(yield func(T) bool) {
+		var iterations uint
+		for v := range s {
+			if iterations >= n {
+				return
+			}
+			if !yield(v) {
+				return
+			}
+			iterations++
+		}
+	}
+}
diff --git a/forged/internal/common/misc/misc.go b/forged/internal/common/misc/misc.go
new file mode 100644
index 0000000..e9e10ab
--- /dev/null
+++ b/forged/internal/common/misc/misc.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// Package misc provides miscellaneous functions and other definitions.
+package misc
diff --git a/forged/internal/common/misc/net.go b/forged/internal/common/misc/net.go
new file mode 100644
index 0000000..967ea77
--- /dev/null
+++ b/forged/internal/common/misc/net.go
@@ -0,0 +1,42 @@
+package misc
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"syscall"
+)
+
+func ListenUnixSocket(ctx context.Context, path string) (listener net.Listener, replaced bool, err error) {
+	listenConfig := net.ListenConfig{} //exhaustruct:ignore
+	listener, err = listenConfig.Listen(ctx, "unix", path)
+	if errors.Is(err, syscall.EADDRINUSE) {
+		replaced = true
+		unlinkErr := syscall.Unlink(path)
+		if unlinkErr != nil {
+			return listener, false, fmt.Errorf("remove existing socket %q: %w", path, unlinkErr)
+		}
+		listener, err = listenConfig.Listen(ctx, "unix", path)
+	}
+	if err != nil {
+		return listener, replaced, fmt.Errorf("listen on unix socket %q: %w", path, err)
+	}
+	return listener, replaced, nil
+}
+
+func Listen(ctx context.Context, net_, addr string) (listener net.Listener, err error) {
+	if net_ == "unix" {
+		listener, _, err = ListenUnixSocket(ctx, addr)
+		if err != nil {
+			return listener, fmt.Errorf("listen unix socket for web: %w", err)
+		}
+	} else {
+		listenConfig := net.ListenConfig{} //exhaustruct:ignore
+		listener, err = listenConfig.Listen(ctx, net_, addr)
+		if err != nil {
+			return listener, 
fmt.Errorf("listen %s for web: %w", net_, err) + } + } + return listener, nil +} diff --git a/forged/internal/common/misc/slices.go b/forged/internal/common/misc/slices.go new file mode 100644 index 0000000..3ad0211 --- /dev/null +++ b/forged/internal/common/misc/slices.go @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package misc + +import "strings" + +// sliceContainsNewlines returns true if and only if the given slice contains +// one or more strings that contains newlines. +func SliceContainsNewlines(s []string) bool { + for _, v := range s { + if strings.Contains(v, "\n") { + return true + } + } + return false +} diff --git a/forged/internal/common/misc/trivial.go b/forged/internal/common/misc/trivial.go new file mode 100644 index 0000000..83901e0 --- /dev/null +++ b/forged/internal/common/misc/trivial.go @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package misc + +import ( + "net/url" + "strings" +) + +// These are all trivial functions that are intended to be used in HTML +// templates. + +// FirstLine returns the first line of a string. +func FirstLine(s string) string { + before, _, _ := strings.Cut(s, "\n") + return before +} + +// PathEscape escapes the input as an URL path segment. +func PathEscape(s string) string { + return url.PathEscape(s) +} + +// QueryEscape escapes the input as an URL query segment. +func QueryEscape(s string) string { + return url.QueryEscape(s) +} + +// Dereference dereferences a pointer. +func Dereference[T any](p *T) T { //nolint:ireturn + return *p +} + +// DereferenceOrZero dereferences a pointer. If the pointer is nil, the zero +// value of its associated type is returned instead. +func DereferenceOrZero[T any](p *T) T { //nolint:ireturn + if p != nil { + return *p + } + var z T + return z +} + +// Minus subtracts two numbers. 
+func Minus(a, b int) int { + return a - b +} diff --git a/forged/internal/common/misc/unsafe.go b/forged/internal/common/misc/unsafe.go new file mode 100644 index 0000000..d827e7f --- /dev/null +++ b/forged/internal/common/misc/unsafe.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package misc + +import "unsafe" + +// StringToBytes converts a string to a byte slice without copying the string. +// Memory is borrowed from the string. +// The resulting byte slice must not be modified in any form. +func StringToBytes(s string) (bytes []byte) { + return unsafe.Slice(unsafe.StringData(s), len(s)) //#nosec G103 +} + +// BytesToString converts a byte slice to a string without copying the bytes. +// Memory is borrowed from the byte slice. +// The source byte slice must not be modified. +func BytesToString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) //#nosec G103 +} diff --git a/forged/internal/common/misc/url.go b/forged/internal/common/misc/url.go new file mode 100644 index 0000000..346ff76 --- /dev/null +++ b/forged/internal/common/misc/url.go @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package misc + +import ( + "net/http" + "net/url" + "strings" +) + +// ParseReqURI parses an HTTP request URL, and returns a slice of path segments +// and the query parameters. It handles %2F correctly. +func ParseReqURI(requestURI string) (segments []string, params url.Values, err error) { + path, paramsStr, _ := strings.Cut(requestURI, "?") + + segments, err = PathToSegments(path) + if err != nil { + return + } + + params, err = url.ParseQuery(paramsStr) + return +} + +// PathToSegments splits a path into unescaped segments. It handles %2F correctly. 
+func PathToSegments(path string) (segments []string, err error) { + segments = strings.Split(strings.TrimPrefix(path, "/"), "/") + + for i, segment := range segments { + segments[i], err = url.PathUnescape(segment) + if err != nil { + return + } + } + + return +} + +// RedirectDir returns true and redirects the user to a version of the URL with +// a trailing slash, if and only if the request URL does not already have a +// trailing slash. +func RedirectDir(writer http.ResponseWriter, request *http.Request) bool { + requestURI := request.RequestURI + + pathEnd := strings.IndexAny(requestURI, "?#") + var path, rest string + if pathEnd == -1 { + path = requestURI + } else { + path = requestURI[:pathEnd] + rest = requestURI[pathEnd:] + } + + if !strings.HasSuffix(path, "/") { + http.Redirect(writer, request, path+"/"+rest, http.StatusSeeOther) + return true + } + return false +} + +// RedirectNoDir returns true and redirects the user to a version of the URL +// without a trailing slash, if and only if the request URL has a trailing +// slash. +func RedirectNoDir(writer http.ResponseWriter, request *http.Request) bool { + requestURI := request.RequestURI + + pathEnd := strings.IndexAny(requestURI, "?#") + var path, rest string + if pathEnd == -1 { + path = requestURI + } else { + path = requestURI[:pathEnd] + rest = requestURI[pathEnd:] + } + + if strings.HasSuffix(path, "/") { + http.Redirect(writer, request, strings.TrimSuffix(path, "/")+rest, http.StatusSeeOther) + return true + } + return false +} + +// RedirectUnconditionally unconditionally redirects the user back to the +// current page while preserving query parameters. 
+func RedirectUnconditionally(writer http.ResponseWriter, request *http.Request) { + requestURI := request.RequestURI + + pathEnd := strings.IndexAny(requestURI, "?#") + var path, rest string + if pathEnd == -1 { + path = requestURI + } else { + path = requestURI[:pathEnd] + rest = requestURI[pathEnd:] + } + + http.Redirect(writer, request, path+rest, http.StatusSeeOther) +} + +// SegmentsToURL joins URL segments to the path component of a URL. +// Each segment is escaped properly first. +func SegmentsToURL(segments []string) string { + for i, segment := range segments { + segments[i] = url.PathEscape(segment) + } + return strings.Join(segments, "/") +} + +// AnyContain returns true if and only if ss contains a string that contains c. +func AnyContain(ss []string, c string) bool { + for _, s := range ss { + if strings.Contains(s, c) { + return true + } + } + return false +} diff --git a/forged/internal/common/scfg/.golangci.yaml b/forged/internal/common/scfg/.golangci.yaml new file mode 100644 index 0000000..59f1970 --- /dev/null +++ b/forged/internal/common/scfg/.golangci.yaml @@ -0,0 +1,26 @@ +linters: + enable-all: true + disable: + - perfsprint + - wsl + - varnamelen + - nlreturn + - exhaustruct + - wrapcheck + - lll + - exhaustive + - intrange + - godox + - nestif + - err113 + - staticcheck + - errorlint + - cyclop + - nonamedreturns + - funlen + - gochecknoglobals + - tenv + +issues: + max-issues-per-linter: 0 + max-same-issues: 0 diff --git a/forged/internal/common/scfg/LICENSE b/forged/internal/common/scfg/LICENSE new file mode 100644 index 0000000..3649823 --- /dev/null +++ b/forged/internal/common/scfg/LICENSE @@ -0,0 +1,18 @@ +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, 
and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/forged/internal/common/scfg/reader.go b/forged/internal/common/scfg/reader.go new file mode 100644 index 0000000..b0e2cc0 --- /dev/null +++ b/forged/internal/common/scfg/reader.go @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr> + +package scfg + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +// This limits the max block nesting depth to prevent stack overflows. +const maxNestingDepth = 1000 + +// Load loads a configuration file. +func Load(path string) (block Block, err error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer func() { + if cerr := f.Close(); err == nil && cerr != nil { + err = cerr + } + }() + + return Read(f) +} + +// Read parses a configuration file from an io.Reader. +func Read(r io.Reader) (Block, error) { + scanner := bufio.NewScanner(r) + + dec := decoder{scanner: scanner} + block, closingBrace, err := dec.readBlock() + if err != nil { + return nil, err + } else if closingBrace { + return nil, fmt.Errorf("line %v: unexpected '}'", dec.lineno) + } + + return block, scanner.Err() +} + +type decoder struct { + scanner *bufio.Scanner + lineno int + blockDepth int +} + +// readBlock reads a block. 
closingBrace is true if parsing stopped on '}' +// (otherwise, it stopped on Scanner.Scan). +func (dec *decoder) readBlock() (block Block, closingBrace bool, err error) { + dec.blockDepth++ + defer func() { + dec.blockDepth-- + }() + + if dec.blockDepth >= maxNestingDepth { + return nil, false, fmt.Errorf("exceeded max block depth") + } + + for dec.scanner.Scan() { + dec.lineno++ + + l := dec.scanner.Text() + words, err := splitWords(l) + if err != nil { + return nil, false, fmt.Errorf("line %v: %v", dec.lineno, err) + } else if len(words) == 0 { + continue + } + + if len(words) == 1 && l[len(l)-1] == '}' { + closingBrace = true + break + } + + var d *Directive + if words[len(words)-1] == "{" && l[len(l)-1] == '{' { + words = words[:len(words)-1] + + var name string + params := words + if len(words) > 0 { + name, params = words[0], words[1:] + } + + startLineno := dec.lineno + childBlock, childClosingBrace, err := dec.readBlock() + if err != nil { + return nil, false, err + } else if !childClosingBrace { + return nil, false, fmt.Errorf("line %v: unterminated block", startLineno) + } + + // Allows callers to tell apart "no block" and "empty block" + if childBlock == nil { + childBlock = Block{} + } + + d = &Directive{Name: name, Params: params, Children: childBlock, lineno: dec.lineno} + } else { + d = &Directive{Name: words[0], Params: words[1:], lineno: dec.lineno} + } + block = append(block, d) + } + + return block, closingBrace, nil +} + +func splitWords(l string) ([]string, error) { + var ( + words []string + sb strings.Builder + escape bool + quote rune + wantWSP bool + ) + for _, ch := range l { + switch { + case escape: + sb.WriteRune(ch) + escape = false + case wantWSP && (ch != ' ' && ch != '\t'): + return words, fmt.Errorf("atom not allowed after quoted string") + case ch == '\\': + escape = true + case quote != 0 && ch == quote: + quote = 0 + wantWSP = true + if sb.Len() == 0 { + words = append(words, "") + } + case quote == 0 && len(words) == 0 && 
sb.Len() == 0 && ch == '#': + return nil, nil + case quote == 0 && (ch == '\'' || ch == '"'): + if sb.Len() > 0 { + return words, fmt.Errorf("quoted string not allowed after atom") + } + quote = ch + case quote == 0 && (ch == ' ' || ch == '\t'): + if sb.Len() > 0 { + words = append(words, sb.String()) + } + sb.Reset() + wantWSP = false + default: + sb.WriteRune(ch) + } + } + if quote != 0 { + return words, fmt.Errorf("unterminated quoted string") + } + if sb.Len() > 0 { + words = append(words, sb.String()) + } + return words, nil +} diff --git a/forged/internal/common/scfg/scfg.go b/forged/internal/common/scfg/scfg.go new file mode 100644 index 0000000..4533e63 --- /dev/null +++ b/forged/internal/common/scfg/scfg.go @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr> + +// Package scfg parses and formats configuration files. +// Note that this fork of scfg behaves differently from upstream scfg. +package scfg + +import ( + "fmt" +) + +// Block is a list of directives. +type Block []*Directive + +// GetAll returns a list of directives with the provided name. +func (blk Block) GetAll(name string) []*Directive { + l := make([]*Directive, 0, len(blk)) + for _, child := range blk { + if child.Name == name { + l = append(l, child) + } + } + return l +} + +// Get returns the first directive with the provided name. +func (blk Block) Get(name string) *Directive { + for _, child := range blk { + if child.Name == name { + return child + } + } + return nil +} + +// Directive is a configuration directive. +type Directive struct { + Name string + Params []string + + Children Block + + lineno int +} + +// ParseParams extracts parameters from the directive. It errors out if the +// user hasn't provided enough parameters. 
+func (d *Directive) ParseParams(params ...*string) error { + if len(d.Params) < len(params) { + return fmt.Errorf("directive %q: want %v params, got %v", d.Name, len(params), len(d.Params)) + } + for i, ptr := range params { + if ptr == nil { + continue + } + *ptr = d.Params[i] + } + return nil +} diff --git a/forged/internal/common/scfg/struct.go b/forged/internal/common/scfg/struct.go new file mode 100644 index 0000000..98ec943 --- /dev/null +++ b/forged/internal/common/scfg/struct.go @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr> + +package scfg + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// structInfo contains scfg metadata for structs. +type structInfo struct { + param int // index of field storing parameters + children map[string]int // indices of fields storing child directives +} + +var ( + structCacheMutex sync.Mutex + structCache = make(map[reflect.Type]*structInfo) +) + +func getStructInfo(t reflect.Type) (*structInfo, error) { + structCacheMutex.Lock() + defer structCacheMutex.Unlock() + + if info := structCache[t]; info != nil { + return info, nil + } + + info := &structInfo{ + param: -1, + children: make(map[string]int), + } + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Anonymous { + return nil, fmt.Errorf("scfg: anonymous struct fields are not supported") + } else if !f.IsExported() { + continue + } + + tag := f.Tag.Get("scfg") + parts := strings.Split(tag, ",") + k, options := parts[0], parts[1:] + if k == "-" { + continue + } else if k == "" { + k = f.Name + } + + isParam := false + for _, opt := range options { + switch opt { + case "param": + isParam = true + default: + return nil, fmt.Errorf("scfg: invalid option %q in struct tag", opt) + } + } + + if isParam { + if info.param >= 0 { + return nil, fmt.Errorf("scfg: param option specified multiple times in struct tag in %v", t) + } + if parts[0] != "" { + return nil, 
fmt.Errorf("scfg: name must be empty when param option is specified in struct tag in %v", t) + } + info.param = i + } else { + if _, ok := info.children[k]; ok { + return nil, fmt.Errorf("scfg: key %q specified multiple times in struct tag in %v", k, t) + } + info.children[k] = i + } + } + + structCache[t] = info + return info, nil +} diff --git a/forged/internal/common/scfg/unmarshal.go b/forged/internal/common/scfg/unmarshal.go new file mode 100644 index 0000000..8befc10 --- /dev/null +++ b/forged/internal/common/scfg/unmarshal.go @@ -0,0 +1,375 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr> +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package scfg + +import ( + "encoding" + "fmt" + "io" + "reflect" + "strconv" +) + +// Decoder reads and decodes an scfg document from an input stream. +type Decoder struct { + r io.Reader + unknownDirectives []*Directive +} + +// NewDecoder returns a new decoder which reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +// UnknownDirectives returns a slice of all unknown directives encountered +// during Decode. +func (dec *Decoder) UnknownDirectives() []*Directive { + return dec.unknownDirectives +} + +// Decode reads scfg document from the input and stores it in the value pointed +// to by v. +// +// If v is nil or not a pointer, Decode returns an error. +// +// Blocks can be unmarshaled to: +// +// - Maps. Each directive is unmarshaled into a map entry. The map key must +// be a string. +// - Structs. Each directive is unmarshaled into a struct field. +// +// Duplicate directives are not allowed, unless the struct field or map value +// is a slice of values representing a directive: structs or maps. +// +// Directives can be unmarshaled to: +// +// - Maps. The children block is unmarshaled into the map. Parameters are not +// allowed. +// - Structs. 
The children block is unmarshaled into the struct. Parameters +// are allowed if one of the struct fields contains the "param" option in +// its tag. +// - Slices. Parameters are unmarshaled into the slice. Children blocks are +// not allowed. +// - Arrays. Parameters are unmarshaled into the array. The number of +// parameters must match exactly the length of the array. Children blocks +// are not allowed. +// - Strings, booleans, integers, floating-point values, values implementing +// encoding.TextUnmarshaler. Only a single parameter is allowed and is +// unmarshaled into the value. Children blocks are not allowed. +// +// The decoding of each struct field can be customized by the format string +// stored under the "scfg" key in the struct field's tag. The tag contains the +// name of the field possibly followed by a comma-separated list of options. +// The name may be empty in order to specify options without overriding the +// default field name. As a special case, if the field name is "-", the field +// is ignored. The "param" option specifies that directive parameters are +// stored in this field (the name must be empty). 
+func (dec *Decoder) Decode(v interface{}) error { + block, err := Read(dec.r) + if err != nil { + return err + } + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return fmt.Errorf("scfg: invalid value for unmarshaling") + } + + return dec.unmarshalBlock(block, rv) +} + +func (dec *Decoder) unmarshalBlock(block Block, v reflect.Value) error { + v = unwrapPointers(v) + t := v.Type() + + dirsByName := make(map[string][]*Directive, len(block)) + for _, dir := range block { + dirsByName[dir.Name] = append(dirsByName[dir.Name], dir) + } + + switch v.Kind() { + case reflect.Map: + if t.Key().Kind() != reflect.String { + return fmt.Errorf("scfg: map key type must be string") + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } else if v.Len() > 0 { + clearMap(v) + } + + for name, dirs := range dirsByName { + mv := reflect.New(t.Elem()).Elem() + if err := dec.unmarshalDirectiveList(dirs, mv); err != nil { + return err + } + v.SetMapIndex(reflect.ValueOf(name), mv) + } + + case reflect.Struct: + si, err := getStructInfo(t) + if err != nil { + return err + } + + seen := make(map[int]bool) + + for name, dirs := range dirsByName { + fieldIndex, ok := si.children[name] + if !ok { + dec.unknownDirectives = append(dec.unknownDirectives, dirs...) 
+ continue + } + fv := v.Field(fieldIndex) + if err := dec.unmarshalDirectiveList(dirs, fv); err != nil { + return err + } + seen[fieldIndex] = true + } + + for name, fieldIndex := range si.children { + if fieldIndex == si.param { + continue + } + if _, ok := seen[fieldIndex]; !ok { + return fmt.Errorf("scfg: missing required directive %q", name) + } + } + + default: + return fmt.Errorf("scfg: unsupported type for unmarshaling blocks: %v", t) + } + + return nil +} + +func (dec *Decoder) unmarshalDirectiveList(dirs []*Directive, v reflect.Value) error { + v = unwrapPointers(v) + t := v.Type() + + if v.Kind() != reflect.Slice || !isDirectiveType(t.Elem()) { + if len(dirs) > 1 { + return newUnmarshalDirectiveError(dirs[1], "directive must not be specified more than once") + } + return dec.unmarshalDirective(dirs[0], v) + } + + sv := reflect.MakeSlice(t, len(dirs), len(dirs)) + for i, dir := range dirs { + if err := dec.unmarshalDirective(dir, sv.Index(i)); err != nil { + return err + } + } + v.Set(sv) + return nil +} + +// isDirectiveType checks whether a type can only be unmarshaled as a +// directive, not as a parameter. 
Accepting too many types here would result in +// ambiguities, see: +// https://lists.sr.ht/~emersion/public-inbox/%3C20230629132458.152205-1-contact%40emersion.fr%3E#%3Ch4Y2peS_YBqY3ar4XlmPDPiNBFpYGns3EBYUx3_6zWEhV2o8_-fBQveRujGADWYhVVCucHBEryFGoPtpC3d3mQ-x10pWnFogfprbQTSvtxc=@emersion.fr%3E +func isDirectiveType(t reflect.Type) bool { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + textUnmarshalerType := reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + if reflect.PointerTo(t).Implements(textUnmarshalerType) { + return false + } + + switch t.Kind() { + case reflect.Struct, reflect.Map: + return true + default: + return false + } +} + +func (dec *Decoder) unmarshalDirective(dir *Directive, v reflect.Value) error { + v = unwrapPointers(v) + t := v.Type() + + if v.CanAddr() { + if _, ok := v.Addr().Interface().(encoding.TextUnmarshaler); ok { + if len(dir.Children) != 0 { + return newUnmarshalDirectiveError(dir, "directive requires zero children") + } + return unmarshalParamList(dir, v) + } + } + + switch v.Kind() { + case reflect.Map: + if len(dir.Params) > 0 { + return newUnmarshalDirectiveError(dir, "directive requires zero parameters") + } + if err := dec.unmarshalBlock(dir.Children, v); err != nil { + return err + } + case reflect.Struct: + si, err := getStructInfo(t) + if err != nil { + return err + } + + if si.param >= 0 { + if err := unmarshalParamList(dir, v.Field(si.param)); err != nil { + return err + } + } else { + if len(dir.Params) > 0 { + return newUnmarshalDirectiveError(dir, "directive requires zero parameters") + } + } + + if err := dec.unmarshalBlock(dir.Children, v); err != nil { + return err + } + default: + if len(dir.Children) != 0 { + return newUnmarshalDirectiveError(dir, "directive requires zero children") + } + if err := unmarshalParamList(dir, v); err != nil { + return err + } + } + return nil +} + +func unmarshalParamList(dir *Directive, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice: + t := v.Type() + 
sv := reflect.MakeSlice(t, len(dir.Params), len(dir.Params)) + for i, param := range dir.Params { + if err := unmarshalParam(param, sv.Index(i)); err != nil { + return newUnmarshalParamError(dir, i, err) + } + } + v.Set(sv) + case reflect.Array: + if len(dir.Params) != v.Len() { + return newUnmarshalDirectiveError(dir, fmt.Sprintf("directive requires exactly %v parameters", v.Len())) + } + for i, param := range dir.Params { + if err := unmarshalParam(param, v.Index(i)); err != nil { + return newUnmarshalParamError(dir, i, err) + } + } + default: + if len(dir.Params) != 1 { + return newUnmarshalDirectiveError(dir, "directive requires exactly one parameter") + } + if err := unmarshalParam(dir.Params[0], v); err != nil { + return newUnmarshalParamError(dir, 0, err) + } + } + + return nil +} + +func unmarshalParam(param string, v reflect.Value) error { + v = unwrapPointers(v) + t := v.Type() + + // TODO: improve our logic following: + // https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/encoding/json/decode.go;drc=b9b8cecbfc72168ca03ad586cc2ed52b0e8db409;l=421 + if v.CanAddr() { + if v, ok := v.Addr().Interface().(encoding.TextUnmarshaler); ok { + return v.UnmarshalText([]byte(param)) + } + } + + switch v.Kind() { + case reflect.String: + v.Set(reflect.ValueOf(param)) + case reflect.Bool: + switch param { + case "true": + v.Set(reflect.ValueOf(true)) + case "false": + v.Set(reflect.ValueOf(false)) + default: + return fmt.Errorf("invalid bool parameter %q", param) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i, err := strconv.ParseInt(param, 10, t.Bits()) + if err != nil { + return fmt.Errorf("invalid %v parameter: %v", t, err) + } + v.Set(reflect.ValueOf(i).Convert(t)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + u, err := strconv.ParseUint(param, 10, t.Bits()) + if err != nil { + return fmt.Errorf("invalid %v parameter: %v", t, err) + } + v.Set(reflect.ValueOf(u).Convert(t)) + 
case reflect.Float32, reflect.Float64: + f, err := strconv.ParseFloat(param, t.Bits()) + if err != nil { + return fmt.Errorf("invalid %v parameter: %v", t, err) + } + v.Set(reflect.ValueOf(f).Convert(t)) + default: + return fmt.Errorf("unsupported type for unmarshaling parameter: %v", t) + } + + return nil +} + +func unwrapPointers(v reflect.Value) reflect.Value { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v +} + +func clearMap(v reflect.Value) { + for _, k := range v.MapKeys() { + v.SetMapIndex(k, reflect.Value{}) + } +} + +type unmarshalDirectiveError struct { + lineno int + name string + msg string +} + +func newUnmarshalDirectiveError(dir *Directive, msg string) *unmarshalDirectiveError { + return &unmarshalDirectiveError{ + name: dir.Name, + lineno: dir.lineno, + msg: msg, + } +} + +func (err *unmarshalDirectiveError) Error() string { + return fmt.Sprintf("line %v, directive %q: %v", err.lineno, err.name, err.msg) +} + +type unmarshalParamError struct { + lineno int + directive string + paramIndex int + err error +} + +func newUnmarshalParamError(dir *Directive, paramIndex int, err error) *unmarshalParamError { + return &unmarshalParamError{ + directive: dir.Name, + lineno: dir.lineno, + paramIndex: paramIndex, + err: err, + } +} + +func (err *unmarshalParamError) Error() string { + return fmt.Sprintf("line %v, directive %q, parameter %v: %v", err.lineno, err.directive, err.paramIndex+1, err.err) +} diff --git a/forged/internal/common/scfg/writer.go b/forged/internal/common/scfg/writer.go new file mode 100644 index 0000000..02a07fe --- /dev/null +++ b/forged/internal/common/scfg/writer.go @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr> + +package scfg + +import ( + "errors" + "io" + "strings" +) + +var errDirEmptyName = errors.New("scfg: directive with empty name") + +// Write writes a parsed 
configuration to the provided io.Writer. +func Write(w io.Writer, blk Block) error { + enc := newEncoder(w) + err := enc.encodeBlock(blk) + return err +} + +// encoder write SCFG directives to an output stream. +type encoder struct { + w io.Writer + lvl int + err error +} + +// newEncoder returns a new encoder that writes to w. +func newEncoder(w io.Writer) *encoder { + return &encoder{w: w} +} + +func (enc *encoder) push() { + enc.lvl++ +} + +func (enc *encoder) pop() { + enc.lvl-- +} + +func (enc *encoder) writeIndent() { + for i := 0; i < enc.lvl; i++ { + enc.write([]byte("\t")) + } +} + +func (enc *encoder) write(p []byte) { + if enc.err != nil { + return + } + _, enc.err = enc.w.Write(p) +} + +func (enc *encoder) encodeBlock(blk Block) error { + for _, dir := range blk { + if err := enc.encodeDir(*dir); err != nil { + return err + } + } + return enc.err +} + +func (enc *encoder) encodeDir(dir Directive) error { + if enc.err != nil { + return enc.err + } + + if dir.Name == "" { + enc.err = errDirEmptyName + return enc.err + } + + enc.writeIndent() + enc.write([]byte(maybeQuote(dir.Name))) + for _, p := range dir.Params { + enc.write([]byte(" ")) + enc.write([]byte(maybeQuote(p))) + } + + if len(dir.Children) > 0 { + enc.write([]byte(" {\n")) + enc.push() + if err := enc.encodeBlock(dir.Children); err != nil { + return err + } + enc.pop() + + enc.writeIndent() + enc.write([]byte("}")) + } + enc.write([]byte("\n")) + + return enc.err +} + +const specialChars = "\"\\\r\n'{} \t" + +func maybeQuote(s string) string { + if s == "" || strings.ContainsAny(s, specialChars) { + var sb strings.Builder + sb.WriteByte('"') + for _, ch := range s { + if strings.ContainsRune(`"\`, ch) { + sb.WriteByte('\\') + } + sb.WriteRune(ch) + } + sb.WriteByte('"') + return sb.String() + } + return s +} diff --git a/forged/internal/config/config.go b/forged/internal/config/config.go new file mode 100644 index 0000000..1825882 --- /dev/null +++ b/forged/internal/config/config.go @@ -0,0 
+1,111 @@ +package config + +import ( + "bufio" + "fmt" + "log/slog" + "os" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/scfg" +) + +type Config struct { + DB DB `scfg:"db"` + Web Web `scfg:"web"` + Hooks Hooks `scfg:"hooks"` + LMTP LMTP `scfg:"lmtp"` + SSH SSH `scfg:"ssh"` + IRC IRC `scfg:"irc"` + Git Git `scfg:"git"` + General General `scfg:"general"` + Pprof Pprof `scfg:"pprof"` +} + +type DB struct { + Conn string `scfg:"conn"` +} + +type Web struct { + Net string `scfg:"net"` + Addr string `scfg:"addr"` + Root string `scfg:"root"` + CookieExpiry int `scfg:"cookie_expiry"` + ReadTimeout uint32 `scfg:"read_timeout"` + WriteTimeout uint32 `scfg:"write_timeout"` + IdleTimeout uint32 `scfg:"idle_timeout"` + MaxHeaderBytes int `scfg:"max_header_bytes"` + ReverseProxy bool `scfg:"reverse_proxy"` + ShutdownTimeout uint32 `scfg:"shutdown_timeout"` + TemplatesPath string `scfg:"templates_path"` + StaticPath string `scfg:"static_path"` +} + +type Hooks struct { + Socket string `scfg:"socket"` + Execs string `scfg:"execs"` +} + +type LMTP struct { + Socket string `scfg:"socket"` + Domain string `scfg:"domain"` + MaxSize int64 `scfg:"max_size"` + WriteTimeout uint32 `scfg:"write_timeout"` + ReadTimeout uint32 `scfg:"read_timeout"` +} + +type SSH struct { + Net string `scfg:"net"` + Addr string `scfg:"addr"` + Key string `scfg:"key"` + Root string `scfg:"root"` + ShutdownTimeout uint32 `scfg:"shutdown_timeout"` +} + +type IRC struct { + Net string `scfg:"net"` + Addr string `scfg:"addr"` + TLS bool `scfg:"tls"` + SendQ uint `scfg:"sendq"` + Nick string `scfg:"nick"` + User string `scfg:"user"` + Gecos string `scfg:"gecos"` +} + +type Git struct { + RepoDir string `scfg:"repo_dir"` + Socket string `scfg:"socket"` +} + +type General struct { + Title string `scfg:"title"` +} + +type Pprof struct { + Net string `scfg:"net"` + Addr string `scfg:"addr"` +} + +func Open(path string) (config Config, err error) { + var configFile *os.File + + configFile, err = 
os.Open(path) //#nosec G304 + if err != nil { + err = fmt.Errorf("open config file: %w", err) + return config, err + } + defer func() { + _ = configFile.Close() + }() + + decoder := scfg.NewDecoder(bufio.NewReader(configFile)) + err = decoder.Decode(&config) + if err != nil { + err = fmt.Errorf("decode config file: %w", err) + return config, err + } + for _, u := range decoder.UnknownDirectives() { + slog.Warn("unknown configuration directive", "directive", u) + } + + return config, err +} diff --git a/forged/internal/database/database.go b/forged/internal/database/database.go new file mode 100644 index 0000000..d96af6b --- /dev/null +++ b/forged/internal/database/database.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +// Package database provides stubs and wrappers for databases. +package database + +import ( + "context" + "fmt" + + "github.com/jackc/pgx/v5/pgxpool" +) + +type Database struct { + *pgxpool.Pool +} + +func Open(ctx context.Context, conn string) (Database, error) { + db, err := pgxpool.New(ctx, conn) + if err != nil { + err = fmt.Errorf("create pgxpool: %w", err) + } + return Database{db}, err +} diff --git a/forged/internal/database/queries/.gitignore b/forged/internal/database/queries/.gitignore new file mode 100644 index 0000000..1307f6d --- /dev/null +++ b/forged/internal/database/queries/.gitignore @@ -0,0 +1 @@ +/*.go diff --git a/forged/internal/global/global.go b/forged/internal/global/global.go new file mode 100644 index 0000000..99f85e7 --- /dev/null +++ b/forged/internal/global/global.go @@ -0,0 +1,18 @@ +package global + +import ( + "go.lindenii.runxiyu.org/forge/forged/internal/config" + "go.lindenii.runxiyu.org/forge/forged/internal/database" + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" +) + +type Global struct { + ForgeTitle string // should be removed since it's in Config + ForgeVersion string + SSHPubkey string + 
SSHFingerprint string + + Config *config.Config + Queries *queries.Queries + DB *database.Database +} diff --git a/forged/internal/incoming/hooks/hooks.go b/forged/internal/incoming/hooks/hooks.go new file mode 100644 index 0000000..effd104 --- /dev/null +++ b/forged/internal/incoming/hooks/hooks.go @@ -0,0 +1,81 @@ +package hooks + +import ( + "context" + "errors" + "fmt" + "net" + "time" + + "github.com/gliderlabs/ssh" + "go.lindenii.runxiyu.org/forge/forged/internal/common/cmap" + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/global" +) + +type Server struct { + hookMap cmap.Map[string, hookInfo] + socketPath string + executablesPath string + global *global.Global +} +type hookInfo struct { + session ssh.Session + pubkey string + directAccess bool + repoPath string + userID int + userType string + repoID int + groupPath []string + repoName string + contribReq string +} + +func New(global *global.Global) (server *Server) { + cfg := global.Config.Hooks + return &Server{ + socketPath: cfg.Socket, + executablesPath: cfg.Execs, + hookMap: cmap.Map[string, hookInfo]{}, + global: global, + } +} + +func (server *Server) Run(ctx context.Context) error { + listener, _, err := misc.ListenUnixSocket(ctx, server.socketPath) + if err != nil { + return fmt.Errorf("listen unix socket for hooks: %w", err) + } + defer func() { + _ = listener.Close() + }() + + stop := context.AfterFunc(ctx, func() { + _ = listener.Close() + }) + defer stop() + + for { + conn, err := listener.Accept() + if err != nil { + if errors.Is(err, net.ErrClosed) || ctx.Err() != nil { + return nil + } + return fmt.Errorf("accept conn: %w", err) + } + + go server.handleConn(ctx, conn) + } +} + +func (server *Server) handleConn(ctx context.Context, conn net.Conn) { + defer func() { + _ = conn.Close() + }() + unblock := context.AfterFunc(ctx, func() { + _ = conn.SetDeadline(time.Now()) + _ = conn.Close() + }) + defer unblock() +} diff --git 
a/forged/internal/incoming/lmtp/lmtp.go b/forged/internal/incoming/lmtp/lmtp.go new file mode 100644 index 0000000..c8918f8 --- /dev/null +++ b/forged/internal/incoming/lmtp/lmtp.go @@ -0,0 +1,71 @@ +package lmtp + +import ( + "context" + "errors" + "fmt" + "net" + "time" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/global" +) + +type Server struct { + socket string + domain string + maxSize int64 + writeTimeout uint32 + readTimeout uint32 + global *global.Global +} + +func New(global *global.Global) (server *Server) { + cfg := global.Config.LMTP + return &Server{ + socket: cfg.Socket, + domain: cfg.Domain, + maxSize: cfg.MaxSize, + writeTimeout: cfg.WriteTimeout, + readTimeout: cfg.ReadTimeout, + global: global, + } +} + +func (server *Server) Run(ctx context.Context) error { + listener, _, err := misc.ListenUnixSocket(ctx, server.socket) + if err != nil { + return fmt.Errorf("listen unix socket for LMTP: %w", err) + } + defer func() { + _ = listener.Close() + }() + + stop := context.AfterFunc(ctx, func() { + _ = listener.Close() + }) + defer stop() + + for { + conn, err := listener.Accept() + if err != nil { + if errors.Is(err, net.ErrClosed) || ctx.Err() != nil { + return nil + } + return fmt.Errorf("accept conn: %w", err) + } + + go server.handleConn(ctx, conn) + } +} + +func (server *Server) handleConn(ctx context.Context, conn net.Conn) { + defer func() { + _ = conn.Close() + }() + unblock := context.AfterFunc(ctx, func() { + _ = conn.SetDeadline(time.Now()) + _ = conn.Close() + }) + defer unblock() +} diff --git a/forged/internal/incoming/ssh/ssh.go b/forged/internal/incoming/ssh/ssh.go new file mode 100644 index 0000000..1f27be2 --- /dev/null +++ b/forged/internal/incoming/ssh/ssh.go @@ -0,0 +1,90 @@ +package ssh + +import ( + "context" + "errors" + "fmt" + "os" + "time" + + gliderssh "github.com/gliderlabs/ssh" + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + 
"go.lindenii.runxiyu.org/forge/forged/internal/global" + gossh "golang.org/x/crypto/ssh" +) + +type Server struct { + gliderServer *gliderssh.Server + privkey gossh.Signer + net string + addr string + root string + shutdownTimeout uint32 + global *global.Global +} + +func New(global *global.Global) (server *Server, err error) { + cfg := global.Config.SSH + server = &Server{ + net: cfg.Net, + addr: cfg.Addr, + root: cfg.Root, + shutdownTimeout: cfg.ShutdownTimeout, + global: global, + } //exhaustruct:ignore + + var privkeyBytes []byte + + privkeyBytes, err = os.ReadFile(cfg.Key) + if err != nil { + return server, fmt.Errorf("read SSH private key: %w", err) + } + + server.privkey, err = gossh.ParsePrivateKey(privkeyBytes) + if err != nil { + return server, fmt.Errorf("parse SSH private key: %w", err) + } + + server.global.SSHPubkey = misc.BytesToString(gossh.MarshalAuthorizedKey(server.privkey.PublicKey())) + server.global.SSHFingerprint = gossh.FingerprintSHA256(server.privkey.PublicKey()) + + server.gliderServer = &gliderssh.Server{ + Handler: handle, + PublicKeyHandler: func(ctx gliderssh.Context, key gliderssh.PublicKey) bool { return true }, + KeyboardInteractiveHandler: func(ctx gliderssh.Context, challenge gossh.KeyboardInteractiveChallenge) bool { return true }, + } //exhaustruct:ignore + server.gliderServer.AddHostKey(server.privkey) + + return server, nil +} + +func (server *Server) Run(ctx context.Context) (err error) { + listener, err := misc.Listen(ctx, server.net, server.addr) + if err != nil { + return fmt.Errorf("listen for SSH: %w", err) + } + defer func() { + _ = listener.Close() + }() + + stop := context.AfterFunc(ctx, func() { + shCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), time.Duration(server.shutdownTimeout)*time.Second) + defer cancel() + _ = server.gliderServer.Shutdown(shCtx) + _ = listener.Close() + }) + defer stop() + + err = server.gliderServer.Serve(listener) + if err != nil { + if errors.Is(err, 
gliderssh.ErrServerClosed) || ctx.Err() != nil { + return nil + } + return fmt.Errorf("serve SSH: %w", err) + } + panic("unreachable") +} + +func handle(session gliderssh.Session) { + panic("SSH server handler not implemented yet") +} diff --git a/forged/internal/incoming/web/authn.go b/forged/internal/incoming/web/authn.go new file mode 100644 index 0000000..9754eb1 --- /dev/null +++ b/forged/internal/incoming/web/authn.go @@ -0,0 +1,33 @@ +package web + +import ( + "crypto/sha256" + "errors" + "fmt" + "net/http" + + "github.com/jackc/pgx/v5" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" +) + +func userResolver(r *http.Request) (string, string, error) { + cookie, err := r.Cookie("session") + if err != nil { + if errors.Is(err, http.ErrNoCookie) { + return "", "", nil + } + return "", "", err + } + + tokenHash := sha256.Sum256([]byte(cookie.Value)) + + session, err := types.Base(r).Global.Queries.GetUserFromSession(r.Context(), tokenHash[:]) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return "", "", nil + } + return "", "", err + } + + return fmt.Sprint(session.UserID), session.Username, nil +} diff --git a/forged/internal/incoming/web/handler.go b/forged/internal/incoming/web/handler.go new file mode 100644 index 0000000..394469a --- /dev/null +++ b/forged/internal/incoming/web/handler.go @@ -0,0 +1,69 @@ +package web + +import ( + "html/template" + "net/http" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/global" + handlers "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/handlers" + repoHandlers "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/handlers/repo" + specialHandlers "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/handlers/special" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/templates" +) + +type handler struct { + r *Router +} + +func NewHandler(global *global.Global) *handler { + cfg := 
global.Config.Web + h := &handler{r: NewRouter().ReverseProxy(cfg.ReverseProxy).Global(global).UserResolver(userResolver)} + + staticFS := http.FileServer(http.Dir(cfg.StaticPath)) + h.r.ANYHTTP("-/static/*rest", + http.StripPrefix("/-/static/", staticFS), + WithDirIfEmpty("rest"), + ) + + funcs := template.FuncMap{ + "path_escape": misc.PathEscape, + "query_escape": misc.QueryEscape, + "minus": misc.Minus, + "first_line": misc.FirstLine, + "dereference_error": misc.DereferenceOrZero[error], + } + t := templates.MustParseDir(cfg.TemplatesPath, funcs) + renderer := templates.New(t) + + indexHTTP := handlers.NewIndexHTTP(renderer) + loginHTTP := specialHandlers.NewLoginHTTP(renderer, cfg.CookieExpiry) + groupHTTP := handlers.NewGroupHTTP(renderer) + repoHTTP := repoHandlers.NewHTTP(renderer) + notImpl := handlers.NewNotImplementedHTTP(renderer) + + h.r.GET("/", indexHTTP.Index) + + h.r.ANY("-/login", loginHTTP.Login) + h.r.ANY("-/users", notImpl.Handle) + + h.r.GET("@group/", groupHTTP.Index) + h.r.POST("@group/", groupHTTP.Post) + + h.r.GET("@group/-/repos/:repo/", repoHTTP.Index) + h.r.ANY("@group/-/repos/:repo/info", notImpl.Handle) + h.r.ANY("@group/-/repos/:repo/git-upload-pack", notImpl.Handle) + h.r.GET("@group/-/repos/:repo/branches/", repoHTTP.Branches) + h.r.GET("@group/-/repos/:repo/log/", repoHTTP.Log) + h.r.GET("@group/-/repos/:repo/commit/:commit", repoHTTP.Commit) + h.r.GET("@group/-/repos/:repo/tree/*rest", repoHTTP.Tree, WithDirIfEmpty("rest")) + h.r.GET("@group/-/repos/:repo/raw/*rest", repoHTTP.Raw, WithDirIfEmpty("rest")) + h.r.GET("@group/-/repos/:repo/contrib/", notImpl.Handle) + h.r.GET("@group/-/repos/:repo/contrib/:mr", notImpl.Handle) + + return h +} + +func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.r.ServeHTTP(w, r) +} diff --git a/forged/internal/incoming/web/handlers/group.go b/forged/internal/incoming/web/handlers/group.go new file mode 100644 index 0000000..4823cb7 --- /dev/null +++ 
b/forged/internal/incoming/web/handlers/group.go @@ -0,0 +1,156 @@ +package handlers + +import ( + "fmt" + "log/slog" + "net/http" + "path/filepath" + "strconv" + + "github.com/jackc/pgx/v5" + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/templates" + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" + "go.lindenii.runxiyu.org/forge/forged/internal/ipc/git2c" +) + +type GroupHTTP struct { + r templates.Renderer +} + +func NewGroupHTTP(r templates.Renderer) *GroupHTTP { + return &GroupHTTP{ + r: r, + } +} + +func (h *GroupHTTP) Index(w http.ResponseWriter, r *http.Request, _ wtypes.Vars) { + base := wtypes.Base(r) + userID, err := strconv.ParseInt(base.UserID, 10, 64) + if err != nil { + userID = 0 + } + + queryParams := queries.GetGroupByPathParams{ + Column1: base.URLSegments, + UserID: userID, + } + p, err := base.Global.Queries.GetGroupByPath(r.Context(), queryParams) + if err != nil { + slog.Error("failed to get group ID by path", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + return + } + subgroups, err := base.Global.Queries.GetSubgroups(r.Context(), &p.ID) + if err != nil { + slog.Error("failed to get subgroups", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + // TODO: gracefully fail this part of the page + } + repos, err := base.Global.Queries.GetReposInGroup(r.Context(), p.ID) + if err != nil { + slog.Error("failed to get repos in group", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + // TODO: gracefully fail this part of the page + } + err = h.r.Render(w, "group", struct { + BaseData *wtypes.BaseData + Subgroups []queries.GetSubgroupsRow + Repos []queries.GetReposInGroupRow + Description string + DirectAccess bool + }{ + BaseData: base, + Subgroups: subgroups, + Repos: repos, + Description: p.Description, + DirectAccess: 
p.HasRole, + }) + if err != nil { + slog.Error("failed to render index page", "error", err) + } +} + +func (h *GroupHTTP) Post(w http.ResponseWriter, r *http.Request, _ wtypes.Vars) { + base := wtypes.Base(r) + userID, err := strconv.ParseInt(base.UserID, 10, 64) + if err != nil { + userID = 0 + } + + queryParams := queries.GetGroupByPathParams{ + Column1: base.URLSegments, + UserID: userID, + } + p, err := base.Global.Queries.GetGroupByPath(r.Context(), queryParams) + if err != nil { + slog.Error("failed to get group ID by path", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + return + } + + if !p.HasRole { + http.Error(w, "You do not have the necessary permissions to create repositories in this group.", http.StatusForbidden) + return + } + + name := r.PostFormValue("repo_name") + desc := r.PostFormValue("repo_desc") + contrib := r.PostFormValue("repo_contrib") + if name == "" { + http.Error(w, "Repo name is required", http.StatusBadRequest) + return + } + + if contrib == "" || contrib == "public" { + contrib = "open" + } + + tx, err := base.Global.DB.BeginTx(r.Context(), pgx.TxOptions{}) + if err != nil { + slog.Error("begin tx failed", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + return + } + defer func() { _ = tx.Rollback(r.Context()) }() + + txq := base.Global.Queries.WithTx(tx) + var descPtr *string + if desc != "" { + descPtr = &desc + } + repoID, err := txq.InsertRepo(r.Context(), queries.InsertRepoParams{ + GroupID: p.ID, + Name: name, + Description: descPtr, + ContribRequirements: contrib, + }) + if err != nil { + slog.Error("insert repo failed", "error", err) + http.Error(w, "Failed to create repository", http.StatusInternalServerError) + return + } + + repoPath := filepath.Join(base.Global.Config.Git.RepoDir, fmt.Sprintf("%d.git", repoID)) + + gitc, err := git2c.NewClient(r.Context(), base.Global.Config.Git.Socket) + if err != nil { + slog.Error("git2d connect failed", 
"error", err) + http.Error(w, "Failed to initialize repository (backend)", http.StatusInternalServerError) + return + } + defer func() { _ = gitc.Close() }() + if err = gitc.InitRepo(repoPath, base.Global.Config.Hooks.Execs); err != nil { + slog.Error("git2d init failed", "error", err) + http.Error(w, "Failed to initialize repository", http.StatusInternalServerError) + return + } + + if err = tx.Commit(r.Context()); err != nil { + slog.Error("commit tx failed", "error", err) + http.Error(w, "Failed to finalize repository creation", http.StatusInternalServerError) + return + } + + http.Redirect(w, r, r.URL.Path, http.StatusSeeOther) +} diff --git a/forged/internal/incoming/web/handlers/index.go b/forged/internal/incoming/web/handlers/index.go new file mode 100644 index 0000000..a758b07 --- /dev/null +++ b/forged/internal/incoming/web/handlers/index.go @@ -0,0 +1,39 @@ +package handlers + +import ( + "log" + "net/http" + + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/templates" + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" +) + +type IndexHTTP struct { + r templates.Renderer +} + +func NewIndexHTTP(r templates.Renderer) *IndexHTTP { + return &IndexHTTP{ + r: r, + } +} + +func (h *IndexHTTP) Index(w http.ResponseWriter, r *http.Request, _ wtypes.Vars) { + groups, err := wtypes.Base(r).Global.Queries.GetRootGroups(r.Context()) + if err != nil { + http.Error(w, "failed to get root groups", http.StatusInternalServerError) + log.Println("failed to get root groups", "error", err) + return + } + err = h.r.Render(w, "index", struct { + BaseData *wtypes.BaseData + Groups []queries.GetRootGroupsRow + }{ + BaseData: wtypes.Base(r), + Groups: groups, + }) + if err != nil { + log.Println("failed to render index page", "error", err) + } +} diff --git a/forged/internal/incoming/web/handlers/not_implemented.go b/forged/internal/incoming/web/handlers/not_implemented.go new 
file mode 100644 index 0000000..6813c88 --- /dev/null +++ b/forged/internal/incoming/web/handlers/not_implemented.go @@ -0,0 +1,22 @@ +package handlers + +import ( + "net/http" + + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/templates" + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" +) + +type NotImplementedHTTP struct { + r templates.Renderer +} + +func NewNotImplementedHTTP(r templates.Renderer) *NotImplementedHTTP { + return &NotImplementedHTTP{ + r: r, + } +} + +func (h *NotImplementedHTTP) Handle(w http.ResponseWriter, _ *http.Request, _ wtypes.Vars) { + http.Error(w, "not implemented", http.StatusNotImplemented) +} diff --git a/forged/internal/incoming/web/handlers/repo/branches.go b/forged/internal/incoming/web/handlers/repo/branches.go new file mode 100644 index 0000000..26f3b04 --- /dev/null +++ b/forged/internal/incoming/web/handlers/repo/branches.go @@ -0,0 +1,68 @@ +package repo + +import ( + "fmt" + "log/slog" + "net/http" + "net/url" + "path/filepath" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" + "go.lindenii.runxiyu.org/forge/forged/internal/ipc/git2c" +) + +func (h *HTTP) Branches(w http.ResponseWriter, r *http.Request, v wtypes.Vars) { + base := wtypes.Base(r) + repoName := v["repo"] + + var userID int64 + if base.UserID != "" { + _, _ = fmt.Sscan(base.UserID, &userID) + } + grp, err := base.Global.Queries.GetGroupByPath(r.Context(), queries.GetGroupByPathParams{Column1: base.GroupPath, UserID: userID}) + if err != nil { + slog.Error("get group by path", "error", err) + http.Error(w, "Group not found", http.StatusNotFound) + return + } + repoRow, err := base.Global.Queries.GetRepoByGroupAndName(r.Context(), queries.GetRepoByGroupAndNameParams{GroupID: grp.ID, Name: repoName}) + if err != nil { + slog.Error("get repo by name", "error", err) + 
http.Error(w, "Repository not found", http.StatusNotFound) + return + } + + repoPath := filepath.Join(base.Global.Config.Git.RepoDir, fmt.Sprintf("%d.git", repoRow.ID)) + client, err := git2c.NewClient(r.Context(), base.Global.Config.Git.Socket) + if err != nil { + slog.Error("git2d connect failed", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + return + } + defer func() { _ = client.Close() }() + + branches, err := client.ListBranches(repoPath) + if err != nil { + slog.Error("list branches failed", "error", err) + branches = nil + } + + repoURLRoot := "/" + misc.SegmentsToURL(base.GroupPath) + "/-/repos/" + url.PathEscape(repoRow.Name) + "/" + data := map[string]any{ + "BaseData": base, + "group_path": base.GroupPath, + "repo_name": repoRow.Name, + "repo_description": repoRow.Description, + "repo_url_root": repoURLRoot, + "branches": branches, + "global": map[string]any{ + "forge_title": base.Global.ForgeTitle, + }, + } + if err := h.r.Render(w, "repo_branches", data); err != nil { + slog.Error("render repo branches", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + } +} diff --git a/forged/internal/incoming/web/handlers/repo/commit.go b/forged/internal/incoming/web/handlers/repo/commit.go new file mode 100644 index 0000000..0a27f3b --- /dev/null +++ b/forged/internal/incoming/web/handlers/repo/commit.go @@ -0,0 +1,239 @@ +package repo + +import ( + "crypto/sha1" + "encoding/hex" + "fmt" + "log/slog" + "net/http" + "net/url" + "path/filepath" + "strings" + "time" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" + "go.lindenii.runxiyu.org/forge/forged/internal/ipc/git2c" +) + +type commitPerson struct { + Name string + Email string + When time.Time +} + +type commitObject struct { + Hash string + Message string + Author 
// usableChunk is one run of diff lines sharing an operation:
// 0 = context, 1 = added, 2 = removed (values consumed by the template).
type usableChunk struct {
	Operation int
	Content   string
}

// diffFileMeta identifies one side of a file diff for display.
type diffFileMeta struct {
	Hash string
	Mode string
	Path string
}

// usableFilePatch is the per-file unit the commit template iterates over.
type usableFilePatch struct {
	From   diffFileMeta
	To     diffFileMeta
	Chunks []usableChunk
}

// shortHash returns a stable 16-hex-char identifier derived from s
// (SHA-1 of the path, truncated; used as an anchor, not for security),
// or "" for the empty string.
func shortHash(s string) string {
	if s == "" {
		return ""
	}
	b := sha1.Sum([]byte(s))
	return hex.EncodeToString(b[:8])
}

// parseUnifiedPatch splits a unified diff into per-file patches, grouping
// consecutive context/added/removed lines into chunks.
//
// Fix over the previous version: "--- ", "+++ " and "@@" metadata lines
// are now skipped. They begin with '-'/'+', so the old code misclassified
// the file headers as removed/added content (and hunk headers as
// context), polluting every rendered diff.
func parseUnifiedPatch(p string) []usableFilePatch {
	lines := strings.Split(p, "\n")
	patches := []usableFilePatch{}
	var cur *usableFilePatch
	flush := func() {
		if cur != nil {
			patches = append(patches, *cur)
			cur = nil
		}
	}
	// appendChunk drains buf into cur as one chunk of the given operation.
	appendChunk := func(op int, buf *[]string) {
		if len(*buf) == 0 || cur == nil {
			return
		}
		content := strings.Join(*buf, "\n")
		*buf = (*buf)[:0]
		cur.Chunks = append(cur.Chunks, usableChunk{Operation: op, Content: content})
	}
	var bufSame, bufAdd, bufDel []string

	for _, ln := range lines {
		if strings.HasPrefix(ln, "diff --git ") {
			// New file section: flush pending chunks and the current patch.
			appendChunk(0, &bufSame)
			appendChunk(1, &bufAdd)
			appendChunk(2, &bufDel)
			flush()
			parts := strings.SplitN(strings.TrimPrefix(ln, "diff --git "), " ", 2)
			from := strings.TrimPrefix(strings.TrimSpace(parts[0]), "a/")
			to := from
			if len(parts) > 1 {
				to = strings.TrimPrefix(strings.TrimSpace(strings.TrimPrefix(parts[1], "b/")), "b/")
			}
			cur = &usableFilePatch{
				From: diffFileMeta{Path: from, Hash: shortHash(from)},
				To:   diffFileMeta{Path: to, Hash: shortHash(to)},
			}
			continue
		}
		// Skip file-header and hunk-header metadata; not content lines.
		if strings.HasPrefix(ln, "--- ") || strings.HasPrefix(ln, "+++ ") || strings.HasPrefix(ln, "@@") {
			continue
		}
		if cur == nil {
			continue
		}
		switch {
		case strings.HasPrefix(ln, "+"):
			appendChunk(0, &bufSame)
			appendChunk(2, &bufDel)
			bufAdd = append(bufAdd, ln)
		case strings.HasPrefix(ln, "-"):
			appendChunk(0, &bufSame)
			appendChunk(1, &bufAdd)
			bufDel = append(bufDel, ln)
		default:
			appendChunk(1, &bufAdd)
			appendChunk(2, &bufDel)
			bufSame = append(bufSame, ln)
		}
	}
	if cur != nil {
		appendChunk(0, &bufSame)
		appendChunk(1, &bufAdd)
		appendChunk(2, &bufDel)
		flush()
	}
	return patches
}
charset=utf-8") + _, _ = w.Write([]byte(patchStr)) + return + } + + info, derr := client.CommitInfo(repoPath, resolved) + if derr != nil { + slog.Error("commit info failed", "error", derr) + http.Error(w, "Failed to get commit info", http.StatusInternalServerError) + return + } + + toTime := func(sec, minoff int64) time.Time { + loc := time.FixedZone("", int(minoff*60)) + return time.Unix(sec, 0).In(loc) + } + co := commitObject{ + Hash: info.Hash, + Message: info.Message, + Author: commitPerson{Name: info.AuthorName, Email: info.AuthorEmail, When: toTime(info.AuthorWhen, info.AuthorTZMin)}, + Committer: commitPerson{Name: info.CommitterName, Email: info.CommitterEmail, When: toTime(info.CommitterWhen, info.CommitterTZMin)}, + } + + toUsable := func(files []git2c.FileDiff) []usableFilePatch { + out := make([]usableFilePatch, 0, len(files)) + for _, f := range files { + u := usableFilePatch{ + From: diffFileMeta{Path: f.FromPath, Mode: fmt.Sprintf("%06o", f.FromMode), Hash: shortHash(f.FromPath)}, + To: diffFileMeta{Path: f.ToPath, Mode: fmt.Sprintf("%06o", f.ToMode), Hash: shortHash(f.ToPath)}, + } + for _, ch := range f.Chunks { + u.Chunks = append(u.Chunks, usableChunk{Operation: int(ch.Op), Content: ch.Content}) + } + out = append(out, u) + } + return out + } + filePatches := toUsable(info.Files) + parentHex := "" + if len(info.Parents) > 0 { + parentHex = info.Parents[0] + } + + repoURLRoot := "/" + misc.SegmentsToURL(base.GroupPath) + "/-/repos/" + url.PathEscape(repoRow.Name) + "/" + data := map[string]any{ + "BaseData": base, + "group_path": base.GroupPath, + "repo_name": repoRow.Name, + "repo_description": repoRow.Description, + "repo_url_root": repoURLRoot, + "commit_object": co, + "commit_id": co.Hash, + "parent_commit_hash": parentHex, + "file_patches": filePatches, + "global": map[string]any{ + "forge_title": base.Global.ForgeTitle, + }, + } + if err := h.r.Render(w, "repo_commit", data); err != nil { + slog.Error("render repo commit", "error", err) + 
// Index renders a repository's front page: recent commits, a rendered
// README, clone URLs, and advisory notes. Backend (git2d) failures are
// non-fatal — the page renders with the affected pieces empty and the
// error surfaced via "commits_err".
func (h *HTTP) Index(w http.ResponseWriter, r *http.Request, v wtypes.Vars) {
	base := wtypes.Base(r)
	repoName := v["repo"]
	slog.Info("repo index", "group_path", base.GroupPath, "repo", repoName)

	// Anonymous viewers keep userID 0; Sscan failure is deliberately ignored.
	var userID int64
	if base.UserID != "" {
		_, _ = fmt.Sscan(base.UserID, &userID)
	}
	grp, err := base.Global.Queries.GetGroupByPath(r.Context(), queries.GetGroupByPathParams{
		Column1: base.GroupPath,
		UserID:  userID,
	})
	if err != nil {
		slog.Error("get group by path", "error", err)
		http.Error(w, "Group not found", http.StatusNotFound)
		return
	}

	repoRow, err := base.Global.Queries.GetRepoByGroupAndName(r.Context(), queries.GetRepoByGroupAndNameParams{
		GroupID: grp.ID,
		Name:    repoName,
	})
	if err != nil {
		slog.Error("get repo by name", "error", err)
		http.Error(w, "Repository not found", http.StatusNotFound)
		return
	}

	// Repositories are stored on disk as "<numeric id>.git".
	repoPath := filepath.Join(base.Global.Config.Git.RepoDir, fmt.Sprintf("%d.git", repoRow.ID))

	var commits []git2c.Commit
	var readme template.HTML
	var commitsErr error
	var readmeFile *git2c.FilenameContents
	var cerr error
	client, err := git2c.NewClient(r.Context(), base.Global.Config.Git.Socket)
	if err == nil {
		// defer is function-scoped, so the client stays open until the
		// handler returns even though it is registered inside this branch.
		defer func() { _ = client.Close() }()
		commits, readmeFile, cerr = client.CmdIndex(repoPath)
		if cerr != nil {
			commitsErr = cerr
			slog.Error("git2d CmdIndex failed", "error", cerr, "path", repoPath)
		} else if readmeFile != nil {
			// Render Markdown READMEs (by extension, or a bare "readme")
			// through goldmark+GFM; fall back to escaped plain text both
			// for non-Markdown files and on conversion failure.
			nameLower := strings.ToLower(readmeFile.Filename)
			if strings.HasSuffix(nameLower, ".md") || strings.HasSuffix(nameLower, ".markdown") || nameLower == "readme" {
				md := goldmark.New(
					goldmark.WithExtensions(extension.GFM),
				)
				var buf bytes.Buffer
				if err := md.Convert(readmeFile.Content, &buf); err == nil {
					readme = template.HTML(buf.String())
				} else {
					readme = template.HTML(template.HTMLEscapeString(string(readmeFile.Content)))
				}
			} else {
				readme = template.HTML(template.HTMLEscapeString(string(readmeFile.Content)))
			}
		}
	} else {
		commitsErr = err
		slog.Error("git2d connect failed", "error", err)
	}

	// Build clone URLs from the configured SSH/HTTP roots; either may be
	// unset, in which case that URL is simply omitted.
	sshRoot := strings.TrimSuffix(base.Global.Config.SSH.Root, "/")
	httpRoot := strings.TrimSuffix(base.Global.Config.Web.Root, "/")
	pathPart := misc.SegmentsToURL(base.GroupPath) + "/-/repos/" + url.PathEscape(repoRow.Name)
	sshURL := ""
	httpURL := ""
	if sshRoot != "" {
		sshURL = sshRoot + "/" + pathPart
	}
	if httpRoot != "" {
		httpURL = httpRoot + "/" + pathPart
	}

	// Advisory notes shown on the page for empty/unconfigured states.
	var notes []string
	if len(commits) == 0 && commitsErr == nil {
		notes = append(notes, "This repository has no commits yet.")
	}
	if readme == template.HTML("") {
		notes = append(notes, "No README found in the default branch.")
	}
	if sshURL == "" && httpURL == "" {
		notes = append(notes, "Clone URLs not configured (missing SSH root and HTTP root).")
	}

	// Prefer the SSH clone URL; fall back to HTTP.
	cloneURL := sshURL
	if cloneURL == "" {
		cloneURL = httpURL
	}

	data := map[string]any{
		"BaseData":         base,
		"group_path":       base.GroupPath,
		"repo_name":        repoRow.Name,
		"repo_description": repoRow.Description,
		"ssh_clone_url":    cloneURL,
		"ref_name":         base.RefName,
		"commits":          commits,
		// Pointer because the template unwraps it via dereference_error.
		"commits_err": &commitsErr,
		"readme":      readme,
		"notes":       notes,
		"global": map[string]any{
			"forge_title": base.Global.ForgeTitle,
		},
	}
	if err := h.r.Render(w, "repo_index", data); err != nil {
		slog.Error("render repo index", "error", err)
		http.Error(w, "Internal Server Error", http.StatusInternalServerError)
	}
}
return + } + repoRow, err := base.Global.Queries.GetRepoByGroupAndName(r.Context(), queries.GetRepoByGroupAndNameParams{GroupID: grp.ID, Name: repoName}) + if err != nil { + slog.Error("get repo by name", "error", err) + http.Error(w, "Repository not found", http.StatusNotFound) + return + } + + repoPath := filepath.Join(base.Global.Config.Git.RepoDir, fmt.Sprintf("%d.git", repoRow.ID)) + client, err := git2c.NewClient(r.Context(), base.Global.Config.Git.Socket) + if err != nil { + slog.Error("git2d connect failed", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + return + } + defer func() { _ = client.Close() }() + + var refspec string + if base.RefType == "" { + refspec = "" + } else { + hex, rerr := client.ResolveRef(repoPath, base.RefType, base.RefName) + if rerr != nil { + slog.Error("resolve ref failed", "error", rerr) + refspec = "" + } else { + refspec = hex + } + } + + var rawCommits []git2c.Commit + rawCommits, err = client.Log(repoPath, refspec, 0) + var commitsErr error + if err != nil { + commitsErr = err + slog.Error("git2d log failed", "error", err) + } + commits := make([]logCommit, 0, len(rawCommits)) + for _, c := range rawCommits { + when, _ := time.Parse("2006-01-02 15:04:05", c.Date) + commits = append(commits, logCommit{ + Hash: c.Hash, + Message: c.Message, + Author: logAuthor{Name: c.Author, Email: c.Email, When: when}, + }) + } + + repoURLRoot := "/" + misc.SegmentsToURL(base.GroupPath) + "/-/repos/" + url.PathEscape(repoRow.Name) + "/" + data := map[string]any{ + "BaseData": base, + "group_path": base.GroupPath, + "repo_name": repoRow.Name, + "repo_description": repoRow.Description, + "repo_url_root": repoURLRoot, + "ref_name": base.RefName, + "commits": commits, + "commits_err": &commitsErr, + "global": map[string]any{ + "forge_title": base.Global.ForgeTitle, + }, + } + if err := h.r.Render(w, "repo_log", data); err != nil { + slog.Error("render repo log", "error", err) + http.Error(w, "Internal 
Server Error", http.StatusInternalServerError) + } +} diff --git a/forged/internal/incoming/web/handlers/repo/raw.go b/forged/internal/incoming/web/handlers/repo/raw.go new file mode 100644 index 0000000..6d5db1e --- /dev/null +++ b/forged/internal/incoming/web/handlers/repo/raw.go @@ -0,0 +1,90 @@ +package repo + +import ( + "fmt" + "log/slog" + "net/http" + "net/url" + "path/filepath" + "strings" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" + "go.lindenii.runxiyu.org/forge/forged/internal/ipc/git2c" +) + +func (h *HTTP) Raw(w http.ResponseWriter, r *http.Request, v wtypes.Vars) { + base := wtypes.Base(r) + repoName := v["repo"] + rawPathSpec := v["rest"] + pathSpec := strings.TrimSuffix(rawPathSpec, "/") + + var userID int64 + if base.UserID != "" { + _, _ = fmt.Sscan(base.UserID, &userID) + } + grp, err := base.Global.Queries.GetGroupByPath(r.Context(), queries.GetGroupByPathParams{Column1: base.GroupPath, UserID: userID}) + if err != nil { + slog.Error("get group by path", "error", err) + http.Error(w, "Group not found", http.StatusNotFound) + return + } + repoRow, err := base.Global.Queries.GetRepoByGroupAndName(r.Context(), queries.GetRepoByGroupAndNameParams{GroupID: grp.ID, Name: repoName}) + if err != nil { + slog.Error("get repo by name", "error", err) + http.Error(w, "Repository not found", http.StatusNotFound) + return + } + + repoPath := filepath.Join(base.Global.Config.Git.RepoDir, fmt.Sprintf("%d.git", repoRow.ID)) + + client, err := git2c.NewClient(r.Context(), base.Global.Config.Git.Socket) + if err != nil { + slog.Error("git2d connect failed", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + return + } + defer func() { _ = client.Close() }() + + files, content, err := client.CmdTreeRaw(repoPath, pathSpec) + if err != nil { + slog.Error("git2d 
CmdTreeRaw failed", "error", err, "path", repoPath, "spec", pathSpec) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + return + } + + repoURLRoot := "/" + misc.SegmentsToURL(base.GroupPath) + "/-/repos/" + url.PathEscape(repoRow.Name) + "/" + + switch { + case files != nil: + if !base.DirMode && misc.RedirectDir(w, r) { + return + } + data := map[string]any{ + "BaseData": base, + "group_path": base.GroupPath, + "repo_name": repoRow.Name, + "repo_description": repoRow.Description, + "repo_url_root": repoURLRoot, + "ref_name": base.RefName, + "path_spec": pathSpec, + "files": files, + "global": map[string]any{ + "forge_title": base.Global.ForgeTitle, + }, + } + if err := h.r.Render(w, "repo_raw_dir", data); err != nil { + slog.Error("render repo raw dir", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + } + case content != "": + if base.DirMode && misc.RedirectNoDir(w, r) { + return + } + w.Header().Set("Content-Type", "application/octet-stream") + _, _ = w.Write([]byte(content)) + default: + http.Error(w, "Unknown object type", http.StatusInternalServerError) + } +} diff --git a/forged/internal/incoming/web/handlers/repo/tree.go b/forged/internal/incoming/web/handlers/repo/tree.go new file mode 100644 index 0000000..627c998 --- /dev/null +++ b/forged/internal/incoming/web/handlers/repo/tree.go @@ -0,0 +1,110 @@ +package repo + +import ( + "fmt" + "html/template" + "log/slog" + "net/http" + "net/url" + "path/filepath" + "strings" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" + "go.lindenii.runxiyu.org/forge/forged/internal/ipc/git2c" +) + +func (h *HTTP) Tree(w http.ResponseWriter, r *http.Request, v wtypes.Vars) { + base := wtypes.Base(r) + repoName := v["repo"] + rawPathSpec := v["rest"] + pathSpec := strings.TrimSuffix(rawPathSpec, "/") + + 
var userID int64 + if base.UserID != "" { + _, _ = fmt.Sscan(base.UserID, &userID) + } + grp, err := base.Global.Queries.GetGroupByPath(r.Context(), queries.GetGroupByPathParams{Column1: base.GroupPath, UserID: userID}) + if err != nil { + slog.Error("get group by path", "error", err) + http.Error(w, "Group not found", http.StatusNotFound) + return + } + repoRow, err := base.Global.Queries.GetRepoByGroupAndName(r.Context(), queries.GetRepoByGroupAndNameParams{GroupID: grp.ID, Name: repoName}) + if err != nil { + slog.Error("get repo by name", "error", err) + http.Error(w, "Repository not found", http.StatusNotFound) + return + } + + repoPath := filepath.Join(base.Global.Config.Git.RepoDir, fmt.Sprintf("%d.git", repoRow.ID)) + + client, err := git2c.NewClient(r.Context(), base.Global.Config.Git.Socket) + if err != nil { + slog.Error("git2d connect failed", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + return + } + defer func() { _ = client.Close() }() + + files, content, err := client.CmdTreeRaw(repoPath, pathSpec) + if err != nil { + slog.Error("git2d CmdTreeRaw failed", "error", err, "path", repoPath, "spec", pathSpec) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + return + } + + repoURLRoot := "/" + misc.SegmentsToURL(base.GroupPath) + "/-/repos/" + url.PathEscape(repoRow.Name) + "/" + + switch { + case files != nil: + if !base.DirMode && misc.RedirectDir(w, r) { + return + } + data := map[string]any{ + "BaseData": base, + "group_path": base.GroupPath, + "repo_name": repoRow.Name, + "repo_description": repoRow.Description, + "repo_url_root": repoURLRoot, + "ref_name": base.RefName, + "path_spec": pathSpec, + "files": files, + "readme_filename": "README.md", + "readme": template.HTML("<p>README rendering here is WIP.</p>"), + "global": map[string]any{ + "forge_title": base.Global.ForgeTitle, + }, + } + if err := h.r.Render(w, "repo_tree_dir", data); err != nil { + slog.Error("render repo tree 
dir", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + } + case content != "": + if base.DirMode && misc.RedirectNoDir(w, r) { + return + } + escaped := template.HTMLEscapeString(content) + rendered := template.HTML("<pre class=\"chroma\"><code>" + escaped + "</code></pre>") + data := map[string]any{ + "BaseData": base, + "group_path": base.GroupPath, + "repo_name": repoRow.Name, + "repo_description": repoRow.Description, + "repo_url_root": repoURLRoot, + "ref_name": base.RefName, + "path_spec": pathSpec, + "file_contents": rendered, + "global": map[string]any{ + "forge_title": base.Global.ForgeTitle, + }, + } + if err := h.r.Render(w, "repo_tree_file", data); err != nil { + slog.Error("render repo tree file", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + } + default: + http.Error(w, "Unknown object type", http.StatusInternalServerError) + } +} diff --git a/forged/internal/incoming/web/handlers/special/login.go b/forged/internal/incoming/web/handlers/special/login.go new file mode 100644 index 0000000..5672f1f --- /dev/null +++ b/forged/internal/incoming/web/handlers/special/login.go @@ -0,0 +1,119 @@ +package handlers + +import ( + "crypto/rand" + "crypto/sha256" + "errors" + "log" + "net/http" + "time" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" + "go.lindenii.runxiyu.org/forge/forged/internal/common/argon2id" + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/templates" + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" +) + +type LoginHTTP struct { + r templates.Renderer + cookieExpiry int +} + +func NewLoginHTTP(r templates.Renderer, cookieExpiry int) *LoginHTTP { + return &LoginHTTP{ + r: r, + cookieExpiry: cookieExpiry, + } +} + +func (h *LoginHTTP) Login(w http.ResponseWriter, r *http.Request, 
_ wtypes.Vars) { + renderLoginPage := func(loginError string) bool { + err := h.r.Render(w, "login", struct { + BaseData *wtypes.BaseData + LoginError string + }{ + BaseData: wtypes.Base(r), + LoginError: loginError, + }) + if err != nil { + log.Println("failed to render login page", "error", err) + http.Error(w, "Failed to render login page", http.StatusInternalServerError) + return true + } + return false + } + + if r.Method == http.MethodGet { + renderLoginPage("") + return + } + + username := r.PostFormValue("username") + password := r.PostFormValue("password") + + userCreds, err := wtypes.Base(r).Global.Queries.GetUserCreds(r.Context(), &username) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + renderLoginPage("User not found") + return + } + log.Println("failed to get user credentials", "error", err) + http.Error(w, "Failed to get user credentials", http.StatusInternalServerError) + return + } + + if userCreds.PasswordHash == "" { + renderLoginPage("No password set for this user") + return + } + + passwordMatches, err := argon2id.ComparePasswordAndHash(password, userCreds.PasswordHash) + if err != nil { + log.Println("failed to compare password and hash", "error", err) + http.Error(w, "Failed to verify password", http.StatusInternalServerError) + return + } + + if !passwordMatches { + renderLoginPage("Invalid password") + return + } + + cookieValue := rand.Text() + + now := time.Now() + expiry := now.Add(time.Duration(h.cookieExpiry) * time.Second) + + cookie := &http.Cookie{ + Name: "session", + Value: cookieValue, + SameSite: http.SameSiteLaxMode, + HttpOnly: true, + Secure: false, // TODO + Expires: expiry, + Path: "/", + } //exhaustruct:ignore + + http.SetCookie(w, cookie) + + tokenHash := sha256.Sum256(misc.StringToBytes(cookieValue)) + + err = wtypes.Base(r).Global.Queries.InsertSession(r.Context(), queries.InsertSessionParams{ + UserID: userCreds.ID, + TokenHash: tokenHash[:], + ExpiresAt: pgtype.Timestamptz{ + Time: expiry, + Valid: true, + 
}, + }) + if err != nil { + log.Println("failed to insert session", "error", err) + http.Error(w, "Failed to create session", http.StatusInternalServerError) + return + } + + http.Redirect(w, r, "/", http.StatusSeeOther) +} diff --git a/forged/internal/incoming/web/router.go b/forged/internal/incoming/web/router.go new file mode 100644 index 0000000..3809afb --- /dev/null +++ b/forged/internal/incoming/web/router.go @@ -0,0 +1,419 @@ +package web + +import ( + "fmt" + "net/http" + "net/url" + "sort" + "strings" + + "go.lindenii.runxiyu.org/forge/forged/internal/global" + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" +) + +type UserResolver func(*http.Request) (id string, username string, err error) + +type ErrorRenderers struct { + BadRequest func(http.ResponseWriter, *wtypes.BaseData, string) + BadRequestColon func(http.ResponseWriter, *wtypes.BaseData) + NotFound func(http.ResponseWriter, *wtypes.BaseData) + ServerError func(http.ResponseWriter, *wtypes.BaseData, string) +} + +type dirPolicy int + +const ( + dirIgnore dirPolicy = iota + dirRequire + dirForbid + dirRequireIfEmpty +) + +type patKind uint8 + +const ( + lit patKind = iota + param + splat + group // @group, must be first token +) + +type patSeg struct { + kind patKind + lit string + key string +} + +type route struct { + method string + rawPattern string + wantDir dirPolicy + ifEmptyKey string + segs []patSeg + h wtypes.HandlerFunc + hh http.Handler + priority int +} + +type Router struct { + routes []route + errors ErrorRenderers + user UserResolver + global *global.Global + reverseProxy bool +} + +func NewRouter() *Router { return &Router{} } + +func (r *Router) Global(g *global.Global) *Router { + r.global = g + return r +} +func (r *Router) ReverseProxy(enabled bool) *Router { r.reverseProxy = enabled; return r } +func (r *Router) Errors(e ErrorRenderers) *Router { r.errors = e; return r } +func (r *Router) UserResolver(u UserResolver) *Router { r.user = u; return r } + 
+type RouteOption func(*route) + +func WithDir() RouteOption { return func(rt *route) { rt.wantDir = dirRequire } } +func WithoutDir() RouteOption { return func(rt *route) { rt.wantDir = dirForbid } } +func WithDirIfEmpty(param string) RouteOption { + return func(rt *route) { rt.wantDir = dirRequireIfEmpty; rt.ifEmptyKey = param } +} + +func (r *Router) GET(pattern string, f wtypes.HandlerFunc, opts ...RouteOption) { + r.handle("GET", pattern, f, nil, opts...) +} + +func (r *Router) POST(pattern string, f wtypes.HandlerFunc, opts ...RouteOption) { + r.handle("POST", pattern, f, nil, opts...) +} + +func (r *Router) ANY(pattern string, f wtypes.HandlerFunc, opts ...RouteOption) { + r.handle("", pattern, f, nil, opts...) +} + +func (r *Router) ANYHTTP(pattern string, hh http.Handler, opts ...RouteOption) { + r.handle("", pattern, nil, hh, opts...) +} + +func (r *Router) handle(method, pattern string, f wtypes.HandlerFunc, hh http.Handler, opts ...RouteOption) { + want := dirIgnore + if strings.HasSuffix(pattern, "/") { + want = dirRequire + pattern = strings.TrimSuffix(pattern, "/") + } else if pattern != "" { + want = dirForbid + } + segs, prio := compilePattern(pattern) + rt := route{ + method: method, + rawPattern: pattern, + wantDir: want, + segs: segs, + h: f, + hh: hh, + priority: prio, + } + for _, o := range opts { + o(&rt) + } + r.routes = append(r.routes, rt) + + sort.SliceStable(r.routes, func(i, j int) bool { + return r.routes[i].priority > r.routes[j].priority + }) +} + +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + segments, dirMode, err := splitAndUnescapePath(req.URL.EscapedPath()) + if err != nil { + r.err400(w, &wtypes.BaseData{Global: r.global}, "Error parsing request URI: "+err.Error()) + return + } + for _, s := range segments { + if strings.Contains(s, ":") { + r.err400Colon(w, &wtypes.BaseData{Global: r.global}) + return + } + } + + bd := &wtypes.BaseData{ + Global: r.global, + URLSegments: segments, + DirMode: 
dirMode, + } + req = req.WithContext(wtypes.WithBaseData(req.Context(), bd)) + + bd.RefType, bd.RefName, err = GetParamRefTypeName(req) + if err != nil { + r.err400(w, bd, "Error parsing ref query parameters: "+err.Error()) + return + } + + if r.user != nil { + uid, uname, uerr := r.user(req) + if uerr != nil { + r.err500(w, bd, "Error getting user info from request: "+uerr.Error()) + return + } + bd.UserID = uid + bd.Username = uname + } + + method := req.Method + var pathMatched bool + var matchedRaw string + + for _, rt := range r.routes { + ok, vars, sepIdx := match(rt.segs, segments) + if !ok { + continue + } + pathMatched = true + matchedRaw = rt.rawPattern + + switch rt.wantDir { + case dirRequire: + if !dirMode && redirectAddSlash(w, req) { + return + } + case dirForbid: + if dirMode && redirectDropSlash(w, req) { + return + } + case dirRequireIfEmpty: + if v := vars[rt.ifEmptyKey]; v == "" && !dirMode && redirectAddSlash(w, req) { + return + } + } + + bd.SeparatorIndex = sepIdx + if g := vars["group"]; g == "" { + bd.GroupPath = []string{} + } else { + bd.GroupPath = strings.Split(g, "/") + } + + if rt.method != "" && rt.method != method && (method != http.MethodHead || rt.method != http.MethodGet) { + continue + } + + if rt.h != nil { + rt.h(w, req, wtypes.Vars(vars)) + } else if rt.hh != nil { + rt.hh.ServeHTTP(w, req) + } else { + r.err500(w, bd, "route has no handler") + } + return + } + + if pathMatched { + w.Header().Set("Allow", allowForPattern(r.routes, matchedRaw)) + http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + return + } + r.err404(w, bd) +} + +func compilePattern(pat string) ([]patSeg, int) { + if pat == "" || pat == "/" { + return nil, 1000 + } + pat = strings.Trim(pat, "/") + raw := strings.Split(pat, "/") + + segs := make([]patSeg, 0, len(raw)) + prio := 0 + for i, t := range raw { + switch { + case t == "@group": + if i != 0 { + segs = append(segs, patSeg{kind: lit, lit: t}) + prio += 10 + 
continue + } + segs = append(segs, patSeg{kind: group}) + prio += 1 + case strings.HasPrefix(t, ":"): + segs = append(segs, patSeg{kind: param, key: t[1:]}) + prio += 5 + case strings.HasPrefix(t, "*"): + segs = append(segs, patSeg{kind: splat, key: t[1:]}) + default: + segs = append(segs, patSeg{kind: lit, lit: t}) + prio += 10 + } + } + return segs, prio +} + +func match(pat []patSeg, segs []string) (bool, map[string]string, int) { + vars := make(map[string]string) + i := 0 + sepIdx := -1 + for pi := 0; pi < len(pat); pi++ { + ps := pat[pi] + switch ps.kind { + case group: + start := i + for i < len(segs) && segs[i] != "-" { + i++ + } + if start < i { + vars["group"] = strings.Join(segs[start:i], "/") + } else { + vars["group"] = "" + } + if i < len(segs) && segs[i] == "-" { + sepIdx = i + } + case lit: + if i >= len(segs) || segs[i] != ps.lit { + return false, nil, -1 + } + i++ + case param: + if i >= len(segs) { + return false, nil, -1 + } + vars[ps.key] = segs[i] + i++ + case splat: + if i < len(segs) { + vars[ps.key] = strings.Join(segs[i:], "/") + i = len(segs) + } else { + vars[ps.key] = "" + } + pi = len(pat) + } + } + if i != len(segs) { + return false, nil, -1 + } + return true, vars, sepIdx +} + +func splitAndUnescapePath(escaped string) ([]string, bool, error) { + if escaped == "" { + return nil, false, nil + } + dir := strings.HasSuffix(escaped, "/") + path := strings.Trim(escaped, "/") + if path == "" { + return []string{}, dir, nil + } + raw := strings.Split(path, "/") + out := make([]string, 0, len(raw)) + for _, seg := range raw { + u, err := url.PathUnescape(seg) + if err != nil { + return nil, dir, err + } + if u != "" { + out = append(out, u) + } + } + return out, dir, nil +} + +func redirectAddSlash(w http.ResponseWriter, r *http.Request) bool { + u := *r.URL + u.Path = u.EscapedPath() + "/" + http.Redirect(w, r, u.String(), http.StatusTemporaryRedirect) + return true +} + +func redirectDropSlash(w http.ResponseWriter, r *http.Request) bool { 
+ u := *r.URL + u.Path = strings.TrimRight(u.EscapedPath(), "/") + if u.Path == "" { + u.Path = "/" + } + http.Redirect(w, r, u.String(), http.StatusTemporaryRedirect) + return true +} + +func allowForPattern(routes []route, raw string) string { + seen := map[string]struct{}{} + out := make([]string, 0, 4) + for _, rt := range routes { + if rt.rawPattern != raw || rt.method == "" { + continue + } + if _, ok := seen[rt.method]; ok { + continue + } + seen[rt.method] = struct{}{} + out = append(out, rt.method) + } + sort.Strings(out) + return strings.Join(out, ", ") +} + +func (r *Router) err400(w http.ResponseWriter, b *wtypes.BaseData, msg string) { + if r.errors.BadRequest != nil { + r.errors.BadRequest(w, b, msg) + return + } + http.Error(w, msg, http.StatusBadRequest) +} + +func (r *Router) err400Colon(w http.ResponseWriter, b *wtypes.BaseData) { + if r.errors.BadRequestColon != nil { + r.errors.BadRequestColon(w, b) + return + } + http.Error(w, "bad request", http.StatusBadRequest) +} + +func (r *Router) err404(w http.ResponseWriter, b *wtypes.BaseData) { + if r.errors.NotFound != nil { + r.errors.NotFound(w, b) + return + } + http.NotFound(w, nil) +} + +func (r *Router) err500(w http.ResponseWriter, b *wtypes.BaseData, msg string) { + if r.errors.ServerError != nil { + r.errors.ServerError(w, b, msg) + return + } + http.Error(w, msg, http.StatusInternalServerError) +} + +func GetParamRefTypeName(request *http.Request) (retRefType, retRefName string, err error) { + rawQuery := request.URL.RawQuery + queryValues, err := url.ParseQuery(rawQuery) + if err != nil { + return + } + done := false + for _, refType := range []string{"commit", "branch", "tag"} { + refName, ok := queryValues[refType] + if ok { + if done { + err = errDupRefSpec + return + } + done = true + if len(refName) != 1 { + err = errDupRefSpec + return + } + retRefName = refName[0] + retRefType = refType + } + } + if !done { + retRefType = "" + retRefName = "" + err = nil + } + return +} + +var 
errDupRefSpec = fmt.Errorf("duplicate ref specifications") diff --git a/forged/internal/incoming/web/server.go b/forged/internal/incoming/web/server.go new file mode 100644 index 0000000..ab70aec --- /dev/null +++ b/forged/internal/incoming/web/server.go @@ -0,0 +1,70 @@ +package web + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "time" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/global" +) + +type Server struct { + net string + addr string + root string + httpServer *http.Server + shutdownTimeout uint32 + global *global.Global +} + +func New(global *global.Global) *Server { + cfg := global.Config.Web + httpServer := &http.Server{ + Handler: NewHandler(global), + ReadTimeout: time.Duration(cfg.ReadTimeout) * time.Second, + WriteTimeout: time.Duration(cfg.WriteTimeout) * time.Second, + IdleTimeout: time.Duration(cfg.IdleTimeout) * time.Second, + MaxHeaderBytes: cfg.MaxHeaderBytes, + } //exhaustruct:ignore + return &Server{ + net: cfg.Net, + addr: cfg.Addr, + root: cfg.Root, + shutdownTimeout: cfg.ShutdownTimeout, + httpServer: httpServer, + global: global, + } +} + +func (server *Server) Run(ctx context.Context) (err error) { + server.httpServer.BaseContext = func(_ net.Listener) context.Context { return ctx } + + listener, err := misc.Listen(ctx, server.net, server.addr) + if err != nil { + return fmt.Errorf("listen for web: %w", err) + } + defer func() { + _ = listener.Close() + }() + + stop := context.AfterFunc(ctx, func() { + shCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), time.Duration(server.shutdownTimeout)*time.Second) + defer cancel() + _ = server.httpServer.Shutdown(shCtx) + _ = listener.Close() + }) + defer stop() + + err = server.httpServer.Serve(listener) + if err != nil { + if errors.Is(err, http.ErrServerClosed) || ctx.Err() != nil { + return nil + } + return fmt.Errorf("serve web: %w", err) + } + panic("unreachable") +} diff --git 
a/forged/internal/incoming/web/templates/load.go b/forged/internal/incoming/web/templates/load.go new file mode 100644 index 0000000..4a6fc49 --- /dev/null +++ b/forged/internal/incoming/web/templates/load.go @@ -0,0 +1,31 @@ +package templates + +import ( + "html/template" + "io/fs" + "os" + "path/filepath" +) + +func MustParseDir(dir string, funcs template.FuncMap) *template.Template { + base := template.New("").Funcs(funcs) + + err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return nil + } + b, err := os.ReadFile(path) + if err != nil { + return err + } + _, err = base.Parse(string(b)) + return err + }) + if err != nil { + panic(err) + } + return base +} diff --git a/forged/internal/incoming/web/templates/renderer.go b/forged/internal/incoming/web/templates/renderer.go new file mode 100644 index 0000000..350e9ec --- /dev/null +++ b/forged/internal/incoming/web/templates/renderer.go @@ -0,0 +1,35 @@ +package templates + +import ( + "bytes" + "html/template" + "log/slog" + "net/http" +) + +type Renderer interface { + Render(w http.ResponseWriter, name string, data any) error +} + +type tmplRenderer struct { + t *template.Template +} + +func New(t *template.Template) Renderer { + return &tmplRenderer{t: t} +} + +func (r *tmplRenderer) Render(w http.ResponseWriter, name string, data any) error { + var buf bytes.Buffer + if err := r.t.ExecuteTemplate(&buf, name, data); err != nil { + slog.Error("template render failed", "name", name, "error", err) + return err + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + n, err := w.Write(buf.Bytes()) + if err != nil { + return err + } + slog.Info("template rendered", "name", name, "bytes", n) + return nil +} diff --git a/forged/internal/incoming/web/types/types.go b/forged/internal/incoming/web/types/types.go new file mode 100644 index 0000000..4b9a65a --- /dev/null +++ b/forged/internal/incoming/web/types/types.go @@ -0,0 
+1,37 @@ +package types + +import ( + "context" + "net/http" + + "go.lindenii.runxiyu.org/forge/forged/internal/global" +) + +type BaseData struct { + UserID string + Username string + URLSegments []string + DirMode bool + GroupPath []string + SeparatorIndex int + RefType string + RefName string + Global *global.Global +} + +type ctxKey struct{} + +func WithBaseData(ctx context.Context, b *BaseData) context.Context { + return context.WithValue(ctx, ctxKey{}, b) +} + +func Base(r *http.Request) *BaseData { + if v, ok := r.Context().Value(ctxKey{}).(*BaseData); ok && v != nil { + return v + } + return &BaseData{} +} + +type Vars map[string]string + +type HandlerFunc func(http.ResponseWriter, *http.Request, Vars) diff --git a/forged/internal/ipc/git2c/build.go b/forged/internal/ipc/git2c/build.go new file mode 100644 index 0000000..3d1b7a0 --- /dev/null +++ b/forged/internal/ipc/git2c/build.go @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package git2c + +import ( + "encoding/hex" + "fmt" + "path" + "sort" + "strings" +) + +func (c *Client) BuildTreeRecursive(repoPath, baseTreeHex string, updates map[string]string) (string, error) { + treeCache := make(map[string][]TreeEntryRaw) + var walk func(prefix, hexid string) error + walk = func(prefix, hexid string) error { + ents, err := c.TreeListByOID(repoPath, hexid) + if err != nil { + return err + } + treeCache[prefix] = ents + for _, e := range ents { + if e.Mode == 40000 { + sub := path.Join(prefix, e.Name) + if err := walk(sub, e.OID); err != nil { + return err + } + } + } + return nil + } + if err := walk("", baseTreeHex); err != nil { + return "", err + } + + for p, blob := range updates { + parts := strings.Split(p, "/") + dir := strings.Join(parts[:len(parts)-1], "/") + name := parts[len(parts)-1] + entries := treeCache[dir] + found := false + for i := range entries { + if entries[i].Name == name { + if blob == "" { + 
entries = append(entries[:i], entries[i+1:]...) + } else { + entries[i].Mode = 0o100644 + entries[i].OID = blob + } + found = true + break + } + } + if !found && blob != "" { + entries = append(entries, TreeEntryRaw{Mode: 0o100644, Name: name, OID: blob}) + } + treeCache[dir] = entries + } + + built := make(map[string]string) + var build func(prefix string) (string, error) + build = func(prefix string) (string, error) { + entries := treeCache[prefix] + for i := range entries { + if entries[i].Mode == 0o40000 || entries[i].Mode == 40000 { + sub := path.Join(prefix, entries[i].Name) + var ok bool + var oid string + if oid, ok = built[sub]; !ok { + var err error + oid, err = build(sub) + if err != nil { + return "", err + } + } + entries[i].Mode = 0o40000 + entries[i].OID = oid + } + } + sort.Slice(entries, func(i, j int) bool { + ni, nj := entries[i].Name, entries[j].Name + if ni == nj { + return entries[i].Mode != 0o40000 && entries[j].Mode == 0o40000 + } + if strings.HasPrefix(nj, ni) && len(ni) < len(nj) { + return entries[i].Mode != 0o40000 + } + if strings.HasPrefix(ni, nj) && len(nj) < len(ni) { + return entries[j].Mode == 0o40000 + } + return ni < nj + }) + wr := make([]TreeEntryRaw, 0, len(entries)) + for _, e := range entries { + if e.OID == "" { + continue + } + if e.Mode == 40000 { + e.Mode = 0o40000 + } + if _, err := hex.DecodeString(e.OID); err != nil { + return "", fmt.Errorf("invalid OID hex for %s/%s: %w", prefix, e.Name, err) + } + wr = append(wr, TreeEntryRaw{Mode: e.Mode, Name: e.Name, OID: e.OID}) + } + id, err := c.WriteTree(repoPath, wr) + if err != nil { + return "", err + } + built[prefix] = id + return id, nil + } + root, err := build("") + if err != nil { + return "", err + } + return root, nil +} diff --git a/forged/internal/ipc/git2c/client.go b/forged/internal/ipc/git2c/client.go new file mode 100644 index 0000000..79c2024 --- /dev/null +++ b/forged/internal/ipc/git2c/client.go @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: AGPL-3.0-only 
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package git2c + +import ( + "context" + "fmt" + "net" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/bare" +) + +type Client struct { + socketPath string + conn net.Conn + writer *bare.Writer + reader *bare.Reader +} + +func NewClient(ctx context.Context, socketPath string) (*Client, error) { + dialer := &net.Dialer{} //exhaustruct:ignore + conn, err := dialer.DialContext(ctx, "unix", socketPath) + if err != nil { + return nil, fmt.Errorf("git2d connection failed: %w", err) + } + + writer := bare.NewWriter(conn) + reader := bare.NewReader(conn) + + return &Client{ + socketPath: socketPath, + conn: conn, + writer: writer, + reader: reader, + }, nil +} + +func (c *Client) Close() (err error) { + if c.conn != nil { + err = c.conn.Close() + if err != nil { + return fmt.Errorf("close underlying socket: %w", err) + } + } + return nil +} diff --git a/forged/internal/ipc/git2c/cmd_index.go b/forged/internal/ipc/git2c/cmd_index.go new file mode 100644 index 0000000..44a0845 --- /dev/null +++ b/forged/internal/ipc/git2c/cmd_index.go @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package git2c + +import ( + "encoding/hex" + "errors" + "fmt" + "io" +) + +func (c *Client) CmdIndex(repoPath string) ([]Commit, *FilenameContents, error) { + err := c.writer.WriteData([]byte(repoPath)) + if err != nil { + return nil, nil, fmt.Errorf("sending repo path failed: %w", err) + } + err = c.writer.WriteUint(1) + if err != nil { + return nil, nil, fmt.Errorf("sending command failed: %w", err) + } + + status, err := c.reader.ReadUint() + if err != nil { + return nil, nil, fmt.Errorf("reading status failed: %w", err) + } + if status != 0 { + return nil, nil, fmt.Errorf("git2d error: %d", status) + } + + // README + readmeRaw, err := c.reader.ReadData() + if err != nil { + readmeRaw = nil + } + + readmeFilename 
:= "README.md" // TODO + readme := &FilenameContents{Filename: readmeFilename, Content: readmeRaw} + + // Commits + var commits []Commit + for { + id, err := c.reader.ReadData() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return nil, nil, fmt.Errorf("reading commit ID failed: %w", err) + } + title, _ := c.reader.ReadData() + authorName, _ := c.reader.ReadData() + authorEmail, _ := c.reader.ReadData() + authorDate, _ := c.reader.ReadData() + + commits = append(commits, Commit{ + Hash: hex.EncodeToString(id), + Author: string(authorName), + Email: string(authorEmail), + Date: string(authorDate), + Message: string(title), + }) + } + + return commits, readme, nil +} diff --git a/forged/internal/ipc/git2c/cmd_init_repo.go b/forged/internal/ipc/git2c/cmd_init_repo.go new file mode 100644 index 0000000..ae1e92a --- /dev/null +++ b/forged/internal/ipc/git2c/cmd_init_repo.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package git2c + +import "fmt" + +func (c *Client) InitRepo(repoPath, hooksPath string) error { + if err := c.writer.WriteData([]byte(repoPath)); err != nil { + return fmt.Errorf("sending repo path failed: %w", err) + } + if err := c.writer.WriteUint(15); err != nil { + return fmt.Errorf("sending command failed: %w", err) + } + if err := c.writer.WriteData([]byte(hooksPath)); err != nil { + return fmt.Errorf("sending hooks path failed: %w", err) + } + status, err := c.reader.ReadUint() + if err != nil { + return fmt.Errorf("reading status failed: %w", err) + } + if status != 0 { + return Perror(status) + } + return nil +} diff --git a/forged/internal/ipc/git2c/cmd_treeraw.go b/forged/internal/ipc/git2c/cmd_treeraw.go new file mode 100644 index 0000000..d2d5ac2 --- /dev/null +++ b/forged/internal/ipc/git2c/cmd_treeraw.go @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu 
<https://runxiyu.org> + +package git2c + +import ( + "errors" + "fmt" + "io" +) + +func (c *Client) CmdTreeRaw(repoPath, pathSpec string) ([]TreeEntry, string, error) { + err := c.writer.WriteData([]byte(repoPath)) + if err != nil { + return nil, "", fmt.Errorf("sending repo path failed: %w", err) + } + err = c.writer.WriteUint(2) + if err != nil { + return nil, "", fmt.Errorf("sending command failed: %w", err) + } + err = c.writer.WriteData([]byte(pathSpec)) + if err != nil { + return nil, "", fmt.Errorf("sending path failed: %w", err) + } + + status, err := c.reader.ReadUint() + if err != nil { + return nil, "", fmt.Errorf("reading status failed: %w", err) + } + + switch status { + case 0: + kind, err := c.reader.ReadUint() + if err != nil { + return nil, "", fmt.Errorf("reading object kind failed: %w", err) + } + + switch kind { + case 1: + // Tree + count, err := c.reader.ReadUint() + if err != nil { + return nil, "", fmt.Errorf("reading entry count failed: %w", err) + } + + var files []TreeEntry + for range count { + typeCode, err := c.reader.ReadUint() + if err != nil { + return nil, "", fmt.Errorf("error reading entry type: %w", err) + } + mode, err := c.reader.ReadUint() + if err != nil { + return nil, "", fmt.Errorf("error reading entry mode: %w", err) + } + size, err := c.reader.ReadUint() + if err != nil { + return nil, "", fmt.Errorf("error reading entry size: %w", err) + } + name, err := c.reader.ReadData() + if err != nil { + return nil, "", fmt.Errorf("error reading entry name: %w", err) + } + + files = append(files, TreeEntry{ + Name: string(name), + Mode: fmt.Sprintf("%06o", mode), + Size: size, + IsFile: typeCode == 2, + IsSubtree: typeCode == 1, + }) + } + + return files, "", nil + + case 2: + // Blob + content, err := c.reader.ReadData() + if err != nil && !errors.Is(err, io.EOF) { + return nil, "", fmt.Errorf("error reading file content: %w", err) + } + + return nil, string(content), nil + + default: + return nil, "", fmt.Errorf("unknown kind: 
%d", kind) + } + + case 3: + return nil, "", fmt.Errorf("path not found: %s", pathSpec) + + default: + return nil, "", fmt.Errorf("unknown status code: %d", status) + } +} diff --git a/forged/internal/ipc/git2c/doc.go b/forged/internal/ipc/git2c/doc.go new file mode 100644 index 0000000..e14dae0 --- /dev/null +++ b/forged/internal/ipc/git2c/doc.go @@ -0,0 +1,2 @@ +// Package git2c provides routines to interact with the git2d backend daemon. +package git2c diff --git a/forged/internal/ipc/git2c/extra.go b/forged/internal/ipc/git2c/extra.go new file mode 100644 index 0000000..1a3e3a6 --- /dev/null +++ b/forged/internal/ipc/git2c/extra.go @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package git2c + +import ( + "encoding/hex" + "fmt" + "time" +) + +type DiffChunk struct { + Op uint64 + Content string +} + +type FileDiff struct { + FromMode uint64 + ToMode uint64 + FromPath string + ToPath string + Chunks []DiffChunk +} + +type CommitInfo struct { + Hash string + AuthorName string + AuthorEmail string + AuthorWhen int64 // unix secs + AuthorTZMin int64 // minutes ofs + CommitterName string + CommitterEmail string + CommitterWhen int64 + CommitterTZMin int64 + Message string + Parents []string // hex + Files []FileDiff +} + +func (c *Client) ResolveRef(repoPath, refType, refName string) (string, error) { + if err := c.writer.WriteData([]byte(repoPath)); err != nil { + return "", fmt.Errorf("sending repo path failed: %w", err) + } + if err := c.writer.WriteUint(3); err != nil { + return "", fmt.Errorf("sending command failed: %w", err) + } + if err := c.writer.WriteData([]byte(refType)); err != nil { + return "", fmt.Errorf("sending ref type failed: %w", err) + } + if err := c.writer.WriteData([]byte(refName)); err != nil { + return "", fmt.Errorf("sending ref name failed: %w", err) + } + + status, err := c.reader.ReadUint() + if err != nil { + return "", fmt.Errorf("reading 
status failed: %w", err) + } + if status != 0 { + return "", Perror(status) + } + id, err := c.reader.ReadData() + if err != nil { + return "", fmt.Errorf("reading oid failed: %w", err) + } + return hex.EncodeToString(id), nil +} + +func (c *Client) ListBranches(repoPath string) ([]string, error) { + if err := c.writer.WriteData([]byte(repoPath)); err != nil { + return nil, fmt.Errorf("sending repo path failed: %w", err) + } + if err := c.writer.WriteUint(4); err != nil { + return nil, fmt.Errorf("sending command failed: %w", err) + } + status, err := c.reader.ReadUint() + if err != nil { + return nil, fmt.Errorf("reading status failed: %w", err) + } + if status != 0 { + return nil, Perror(status) + } + count, err := c.reader.ReadUint() + if err != nil { + return nil, fmt.Errorf("reading count failed: %w", err) + } + branches := make([]string, 0, count) + for range count { + name, err := c.reader.ReadData() + if err != nil { + return nil, fmt.Errorf("reading branch name failed: %w", err) + } + branches = append(branches, string(name)) + } + return branches, nil +} + +func (c *Client) FormatPatch(repoPath, commitHex string) (string, error) { + if err := c.writer.WriteData([]byte(repoPath)); err != nil { + return "", fmt.Errorf("sending repo path failed: %w", err) + } + if err := c.writer.WriteUint(5); err != nil { + return "", fmt.Errorf("sending command failed: %w", err) + } + if err := c.writer.WriteData([]byte(commitHex)); err != nil { + return "", fmt.Errorf("sending commit failed: %w", err) + } + status, err := c.reader.ReadUint() + if err != nil { + return "", fmt.Errorf("reading status failed: %w", err) + } + if status != 0 { + return "", Perror(status) + } + buf, err := c.reader.ReadData() + if err != nil { + return "", fmt.Errorf("reading patch failed: %w", err) + } + return string(buf), nil +} + +func (c *Client) MergeBase(repoPath, hexA, hexB string) (string, error) { + if err := c.writer.WriteData([]byte(repoPath)); err != nil { + return "", 
fmt.Errorf("sending repo path failed: %w", err) + } + if err := c.writer.WriteUint(7); err != nil { + return "", fmt.Errorf("sending command failed: %w", err) + } + if err := c.writer.WriteData([]byte(hexA)); err != nil { + return "", fmt.Errorf("sending oid A failed: %w", err) + } + if err := c.writer.WriteData([]byte(hexB)); err != nil { + return "", fmt.Errorf("sending oid B failed: %w", err) + } + status, err := c.reader.ReadUint() + if err != nil { + return "", fmt.Errorf("reading status failed: %w", err) + } + if status != 0 { + return "", Perror(status) + } + base, err := c.reader.ReadData() + if err != nil { + return "", fmt.Errorf("reading base oid failed: %w", err) + } + return hex.EncodeToString(base), nil +} + +func (c *Client) Log(repoPath, refSpec string, n uint) ([]Commit, error) { + if err := c.writer.WriteData([]byte(repoPath)); err != nil { + return nil, fmt.Errorf("sending repo path failed: %w", err) + } + if err := c.writer.WriteUint(8); err != nil { + return nil, fmt.Errorf("sending command failed: %w", err) + } + if err := c.writer.WriteData([]byte(refSpec)); err != nil { + return nil, fmt.Errorf("sending refspec failed: %w", err) + } + if err := c.writer.WriteUint(uint64(n)); err != nil { + return nil, fmt.Errorf("sending limit failed: %w", err) + } + status, err := c.reader.ReadUint() + if err != nil { + return nil, fmt.Errorf("reading status failed: %w", err) + } + if status != 0 { + return nil, Perror(status) + } + var out []Commit + for { + id, err := c.reader.ReadData() + if err != nil { + break + } + title, _ := c.reader.ReadData() + authorName, _ := c.reader.ReadData() + authorEmail, _ := c.reader.ReadData() + date, _ := c.reader.ReadData() + out = append(out, Commit{ + Hash: hex.EncodeToString(id), + Author: string(authorName), + Email: string(authorEmail), + Date: string(date), + Message: string(title), + }) + } + return out, nil +} + +func (c *Client) CommitTreeOID(repoPath, commitHex string) (string, error) { + if err := 
c.writer.WriteData([]byte(repoPath)); err != nil { + return "", fmt.Errorf("sending repo path failed: %w", err) + } + if err := c.writer.WriteUint(12); err != nil { + return "", fmt.Errorf("sending command failed: %w", err) + } + if err := c.writer.WriteData([]byte(commitHex)); err != nil { + return "", fmt.Errorf("sending oid failed: %w", err) + } + status, err := c.reader.ReadUint() + if err != nil { + return "", fmt.Errorf("reading status failed: %w", err) + } + if status != 0 { + return "", Perror(status) + } + id, err := c.reader.ReadData() + if err != nil { + return "", fmt.Errorf("reading tree oid failed: %w", err) + } + return hex.EncodeToString(id), nil +} + +func (c *Client) CommitCreate(repoPath, treeHex string, parents []string, authorName, authorEmail string, when time.Time, message string) (string, error) { + if err := c.writer.WriteData([]byte(repoPath)); err != nil { + return "", fmt.Errorf("sending repo path failed: %w", err) + } + if err := c.writer.WriteUint(13); err != nil { + return "", fmt.Errorf("sending command failed: %w", err) + } + if err := c.writer.WriteData([]byte(treeHex)); err != nil { + return "", fmt.Errorf("sending tree oid failed: %w", err) + } + if err := c.writer.WriteUint(uint64(len(parents))); err != nil { + return "", fmt.Errorf("sending parents count failed: %w", err) + } + for _, p := range parents { + if err := c.writer.WriteData([]byte(p)); err != nil { + return "", fmt.Errorf("sending parent oid failed: %w", err) + } + } + if err := c.writer.WriteData([]byte(authorName)); err != nil { + return "", fmt.Errorf("sending author name failed: %w", err) + } + if err := c.writer.WriteData([]byte(authorEmail)); err != nil { + return "", fmt.Errorf("sending author email failed: %w", err) + } + if err := c.writer.WriteInt(when.Unix()); err != nil { + return "", fmt.Errorf("sending when failed: %w", err) + } + _, offset := when.Zone() + if err := c.writer.WriteInt(int64(offset / 60)); err != nil { + return "", fmt.Errorf("sending 
tz offset failed: %w", err) + } + if err := c.writer.WriteData([]byte(message)); err != nil { + return "", fmt.Errorf("sending message failed: %w", err) + } + status, err := c.reader.ReadUint() + if err != nil { + return "", fmt.Errorf("reading status failed: %w", err) + } + if status != 0 { + return "", Perror(status) + } + id, err := c.reader.ReadData() + if err != nil { + return "", fmt.Errorf("reading commit oid failed: %w", err) + } + return hex.EncodeToString(id), nil +} + +func (c *Client) UpdateRef(repoPath, refName, commitHex string) error { + if err := c.writer.WriteData([]byte(repoPath)); err != nil { + return fmt.Errorf("sending repo path failed: %w", err) + } + if err := c.writer.WriteUint(14); err != nil { + return fmt.Errorf("sending command failed: %w", err) + } + if err := c.writer.WriteData([]byte(refName)); err != nil { + return fmt.Errorf("sending ref name failed: %w", err) + } + if err := c.writer.WriteData([]byte(commitHex)); err != nil { + return fmt.Errorf("sending commit oid failed: %w", err) + } + status, err := c.reader.ReadUint() + if err != nil { + return fmt.Errorf("reading status failed: %w", err) + } + if status != 0 { + return Perror(status) + } + return nil +} + +func (c *Client) CommitInfo(repoPath, commitHex string) (*CommitInfo, error) { + if err := c.writer.WriteData([]byte(repoPath)); err != nil { + return nil, fmt.Errorf("sending repo path failed: %w", err) + } + if err := c.writer.WriteUint(6); err != nil { + return nil, fmt.Errorf("sending command failed: %w", err) + } + if err := c.writer.WriteData([]byte(commitHex)); err != nil { + return nil, fmt.Errorf("sending commit failed: %w", err) + } + status, err := c.reader.ReadUint() + if err != nil { + return nil, fmt.Errorf("reading status failed: %w", err) + } + if status != 0 { + return nil, Perror(status) + } + id, err := c.reader.ReadData() + if err != nil { + return nil, fmt.Errorf("reading id failed: %w", err) + } + aname, err := c.reader.ReadData() + if err != nil { + 
return nil, fmt.Errorf("reading author name failed: %w", err) + } + aemail, err := c.reader.ReadData() + if err != nil { + return nil, fmt.Errorf("reading author email failed: %w", err) + } + awhen, err := c.reader.ReadI64() + if err != nil { + return nil, fmt.Errorf("reading author time failed: %w", err) + } + aoff, err := c.reader.ReadI64() + if err != nil { + return nil, fmt.Errorf("reading author tz failed: %w", err) + } + cname, err := c.reader.ReadData() + if err != nil { + return nil, fmt.Errorf("reading committer name failed: %w", err) + } + cemail, err := c.reader.ReadData() + if err != nil { + return nil, fmt.Errorf("reading committer email failed: %w", err) + } + cwhen, err := c.reader.ReadI64() + if err != nil { + return nil, fmt.Errorf("reading committer time failed: %w", err) + } + coff, err := c.reader.ReadI64() + if err != nil { + return nil, fmt.Errorf("reading committer tz failed: %w", err) + } + msg, err := c.reader.ReadData() + if err != nil { + return nil, fmt.Errorf("reading message failed: %w", err) + } + pcnt, err := c.reader.ReadUint() + if err != nil { + return nil, fmt.Errorf("reading parents count failed: %w", err) + } + parents := make([]string, 0, pcnt) + for i := uint64(0); i < pcnt; i++ { + praw, perr := c.reader.ReadData() + if perr != nil { + return nil, fmt.Errorf("reading parent failed: %w", perr) + } + parents = append(parents, hex.EncodeToString(praw)) + } + fcnt, err := c.reader.ReadUint() + if err != nil { + return nil, fmt.Errorf("reading file count failed: %w", err) + } + files := make([]FileDiff, 0, fcnt) + for i := uint64(0); i < fcnt; i++ { + fromMode, err := c.reader.ReadUint() + if err != nil { + return nil, fmt.Errorf("reading from mode failed: %w", err) + } + toMode, err := c.reader.ReadUint() + if err != nil { + return nil, fmt.Errorf("reading to mode failed: %w", err) + } + fromPath, err := c.reader.ReadData() + if err != nil { + return nil, fmt.Errorf("reading from path failed: %w", err) + } + toPath, err := 
c.reader.ReadData() + if err != nil { + return nil, fmt.Errorf("reading to path failed: %w", err) + } + ccnt, err := c.reader.ReadUint() + if err != nil { + return nil, fmt.Errorf("reading chunk count failed: %w", err) + } + chunks := make([]DiffChunk, 0, ccnt) + for j := uint64(0); j < ccnt; j++ { + op, err := c.reader.ReadUint() + if err != nil { + return nil, fmt.Errorf("reading chunk op failed: %w", err) + } + content, err := c.reader.ReadData() + if err != nil { + return nil, fmt.Errorf("reading chunk content failed: %w", err) + } + chunks = append(chunks, DiffChunk{Op: op, Content: string(content)}) + } + files = append(files, FileDiff{ + FromMode: fromMode, + ToMode: toMode, + FromPath: string(fromPath), + ToPath: string(toPath), + Chunks: chunks, + }) + } + return &CommitInfo{ + Hash: hex.EncodeToString(id), + AuthorName: string(aname), + AuthorEmail: string(aemail), + AuthorWhen: awhen, + AuthorTZMin: aoff, + CommitterName: string(cname), + CommitterEmail: string(cemail), + CommitterWhen: cwhen, + CommitterTZMin: coff, + Message: string(msg), + Parents: parents, + Files: files, + }, nil +} diff --git a/forged/internal/ipc/git2c/git_types.go b/forged/internal/ipc/git2c/git_types.go new file mode 100644 index 0000000..da685bf --- /dev/null +++ b/forged/internal/ipc/git2c/git_types.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package git2c + +type Commit struct { + Hash string + Author string + Email string + Date string + Message string +} + +type FilenameContents struct { + Filename string + Content []byte +} + +type TreeEntry struct { + Name string + Mode string + Size uint64 + IsFile bool + IsSubtree bool +} diff --git a/forged/internal/ipc/git2c/perror.go b/forged/internal/ipc/git2c/perror.go new file mode 100644 index 0000000..4be2a07 --- /dev/null +++ b/forged/internal/ipc/git2c/perror.go @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// 
SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +// TODO: Make the C part report detailed error messages too + +package git2c + +import "errors" + +var ( + ErrUnknown = errors.New("git2c: unknown error") + ErrPath = errors.New("git2c: get tree entry by path failed") + ErrRevparse = errors.New("git2c: revparse failed") + ErrReadme = errors.New("git2c: no readme") + ErrBlobExpected = errors.New("git2c: blob expected") + ErrEntryToObject = errors.New("git2c: tree entry to object conversion failed") + ErrBlobRawContent = errors.New("git2c: get blob raw content failed") + ErrRevwalk = errors.New("git2c: revwalk failed") + ErrRevwalkPushHead = errors.New("git2c: revwalk push head failed") + ErrBareProto = errors.New("git2c: bare protocol error") + ErrRefResolve = errors.New("git2c: ref resolve failed") + ErrBranches = errors.New("git2c: list branches failed") + ErrCommitLookup = errors.New("git2c: commit lookup failed") + ErrDiff = errors.New("git2c: diff failed") + ErrMergeBaseNone = errors.New("git2c: no merge base found") + ErrMergeBase = errors.New("git2c: merge base failed") + ErrCommitCreate = errors.New("git2c: commit create failed") + ErrUpdateRef = errors.New("git2c: update ref failed") + ErrCommitTree = errors.New("git2c: commit tree lookup failed") + ErrInitRepoCreate = errors.New("git2c: init repo: create failed") + ErrInitRepoConfig = errors.New("git2c: init repo: open config failed") + ErrInitRepoSetHooksPath = errors.New("git2c: init repo: set core.hooksPath failed") + ErrInitRepoSetAdvertisePushOptions = errors.New("git2c: init repo: set receive.advertisePushOptions failed") + ErrInitRepoMkdir = errors.New("git2c: init repo: create directory failed") +) + +func Perror(errno uint64) error { + switch errno { + case 0: + return nil + case 3: + return ErrPath + case 4: + return ErrRevparse + case 5: + return ErrReadme + case 6: + return ErrBlobExpected + case 7: + return ErrEntryToObject + case 8: + return ErrBlobRawContent + case 
9: + return ErrRevwalk + case 10: + return ErrRevwalkPushHead + case 11: + return ErrBareProto + case 12: + return ErrRefResolve + case 13: + return ErrBranches + case 14: + return ErrCommitLookup + case 15: + return ErrDiff + case 16: + return ErrMergeBaseNone + case 17: + return ErrMergeBase + case 18: + return ErrUpdateRef + case 19: + return ErrCommitCreate + case 20: + return ErrInitRepoCreate + case 21: + return ErrInitRepoConfig + case 22: + return ErrInitRepoSetHooksPath + case 23: + return ErrInitRepoSetAdvertisePushOptions + case 24: + return ErrInitRepoMkdir + } + return ErrUnknown +} diff --git a/forged/internal/ipc/git2c/tree.go b/forged/internal/ipc/git2c/tree.go new file mode 100644 index 0000000..f598e14 --- /dev/null +++ b/forged/internal/ipc/git2c/tree.go @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +package git2c + +import ( + "encoding/hex" + "fmt" +) + +type TreeEntryRaw struct { + Mode uint64 + Name string + OID string // hex +} + +func (c *Client) TreeListByOID(repoPath, treeHex string) ([]TreeEntryRaw, error) { + if err := c.writer.WriteData([]byte(repoPath)); err != nil { + return nil, fmt.Errorf("sending repo path failed: %w", err) + } + if err := c.writer.WriteUint(9); err != nil { + return nil, fmt.Errorf("sending command failed: %w", err) + } + if err := c.writer.WriteData([]byte(treeHex)); err != nil { + return nil, fmt.Errorf("sending tree oid failed: %w", err) + } + status, err := c.reader.ReadUint() + if err != nil { + return nil, fmt.Errorf("reading status failed: %w", err) + } + if status != 0 { + return nil, Perror(status) + } + count, err := c.reader.ReadUint() + if err != nil { + return nil, fmt.Errorf("reading count failed: %w", err) + } + entries := make([]TreeEntryRaw, 0, count) + for range count { + mode, err := c.reader.ReadUint() + if err != nil { + return nil, fmt.Errorf("reading mode failed: %w", err) + } + name, err := 
c.reader.ReadData() + if err != nil { + return nil, fmt.Errorf("reading name failed: %w", err) + } + id, err := c.reader.ReadData() + if err != nil { + return nil, fmt.Errorf("reading oid failed: %w", err) + } + entries = append(entries, TreeEntryRaw{Mode: mode, Name: string(name), OID: hex.EncodeToString(id)}) + } + return entries, nil +} + +func (c *Client) WriteTree(repoPath string, entries []TreeEntryRaw) (string, error) { + if err := c.writer.WriteData([]byte(repoPath)); err != nil { + return "", fmt.Errorf("sending repo path failed: %w", err) + } + if err := c.writer.WriteUint(10); err != nil { + return "", fmt.Errorf("sending command failed: %w", err) + } + if err := c.writer.WriteUint(uint64(len(entries))); err != nil { + return "", fmt.Errorf("sending count failed: %w", err) + } + for _, e := range entries { + if err := c.writer.WriteUint(e.Mode); err != nil { + return "", fmt.Errorf("sending mode failed: %w", err) + } + if err := c.writer.WriteData([]byte(e.Name)); err != nil { + return "", fmt.Errorf("sending name failed: %w", err) + } + raw, err := hex.DecodeString(e.OID) + if err != nil { + return "", fmt.Errorf("decode oid hex: %w", err) + } + if err := c.writer.WriteDataFixed(raw); err != nil { + return "", fmt.Errorf("sending oid failed: %w", err) + } + } + status, err := c.reader.ReadUint() + if err != nil { + return "", fmt.Errorf("reading status failed: %w", err) + } + if status != 0 { + return "", Perror(status) + } + id, err := c.reader.ReadData() + if err != nil { + return "", fmt.Errorf("reading oid failed: %w", err) + } + return hex.EncodeToString(id), nil +} + +func (c *Client) WriteBlob(repoPath string, content []byte) (string, error) { + if err := c.writer.WriteData([]byte(repoPath)); err != nil { + return "", fmt.Errorf("sending repo path failed: %w", err) + } + if err := c.writer.WriteUint(11); err != nil { + return "", fmt.Errorf("sending command failed: %w", err) + } + if err := c.writer.WriteData(content); err != nil { + return "", 
fmt.Errorf("sending blob content failed: %w", err) + } + status, err := c.reader.ReadUint() + if err != nil { + return "", fmt.Errorf("reading status failed: %w", err) + } + if status != 0 { + return "", Perror(status) + } + id, err := c.reader.ReadData() + if err != nil { + return "", fmt.Errorf("reading oid failed: %w", err) + } + return hex.EncodeToString(id), nil +} diff --git a/forged/internal/server/server.go b/forged/internal/server/server.go new file mode 100644 index 0000000..39a6823 --- /dev/null +++ b/forged/internal/server/server.go @@ -0,0 +1,87 @@ +package server + +import ( + "context" + "fmt" + + "go.lindenii.runxiyu.org/forge/forged/internal/config" + "go.lindenii.runxiyu.org/forge/forged/internal/database" + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + "go.lindenii.runxiyu.org/forge/forged/internal/global" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/hooks" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/lmtp" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/ssh" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web" + "golang.org/x/sync/errgroup" +) + +type Server struct { + config config.Config + + database database.Database + hookServer *hooks.Server + lmtpServer *lmtp.Server + webServer *web.Server + sshServer *ssh.Server + + global global.Global +} + +func New(configPath string) (server *Server, err error) { + server = &Server{} //exhaustruct:ignore + + server.config, err = config.Open(configPath) + if err != nil { + return server, fmt.Errorf("open config: %w", err) + } + + queries := queries.New(&server.database) + + server.global.ForgeVersion = "unknown" // TODO + server.global.ForgeTitle = server.config.General.Title + server.global.Config = &server.config + server.global.Queries = queries + + server.hookServer = hooks.New(&server.global) + server.lmtpServer = lmtp.New(&server.global) + server.webServer = web.New(&server.global) + server.sshServer, err = ssh.New(&server.global) 
+ if err != nil { + return server, fmt.Errorf("create SSH server: %w", err) + } + + return server, nil +} + +func (server *Server) Run(ctx context.Context) (err error) { + // TODO: Not running git2d because it should be run separately. + // This needs to be documented somewhere, hence a TODO here for now. + + g, gctx := errgroup.WithContext(ctx) + + server.database, err = database.Open(gctx, server.config.DB.Conn) + if err != nil { + return fmt.Errorf("open database: %w", err) + } + defer server.database.Close() + + // TODO: neater way to do this for transactions in querypool? + server.global.DB = &server.database + + g.Go(func() error { return server.hookServer.Run(gctx) }) + g.Go(func() error { return server.lmtpServer.Run(gctx) }) + g.Go(func() error { return server.webServer.Run(gctx) }) + g.Go(func() error { return server.sshServer.Run(gctx) }) + + err = g.Wait() + if err != nil { + return fmt.Errorf("server error: %w", err) + } + + err = ctx.Err() + if err != nil { + return fmt.Errorf("context exceeded: %w", err) + } + + return nil +} diff --git a/forged/main.go b/forged/main.go new file mode 100644 index 0000000..38e22ff --- /dev/null +++ b/forged/main.go @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +// The main entry point to the Lindenii Forge daemon. 
+package main + +import ( + "context" + "flag" + + "go.lindenii.runxiyu.org/forge/forged/internal/server" +) + +func main() { + configPath := flag.String( + "config", + "/etc/lindenii/forge.scfg", + "path to configuration file", + ) + flag.Parse() + + s, err := server.New(*configPath) + if err != nil { + panic(err) + } + + panic(s.Run(context.Background())) +} diff --git a/forged/sql/queries/groups.sql b/forged/sql/queries/groups.sql new file mode 100644 index 0000000..f067aeb --- /dev/null +++ b/forged/sql/queries/groups.sql @@ -0,0 +1,47 @@ +-- name: GetRootGroups :many +SELECT name, COALESCE(description, '') FROM groups WHERE parent_group IS NULL; + +-- name: GetGroupByPath :one +WITH RECURSIVE group_path_cte AS ( + SELECT + id, + parent_group, + name, + 1 AS depth + FROM groups + WHERE name = ($1::text[])[1] + AND parent_group IS NULL + + UNION ALL + + SELECT + g.id, + g.parent_group, + g.name, + group_path_cte.depth + 1 + FROM groups g + JOIN group_path_cte ON g.parent_group = group_path_cte.id + WHERE g.name = ($1::text[])[group_path_cte.depth + 1] + AND group_path_cte.depth + 1 <= cardinality($1::text[]) +) +SELECT + g.id, + g.name, + g.parent_group, + COALESCE(g.description, '') AS description, + EXISTS ( + SELECT 1 + FROM user_group_roles ugr + WHERE ugr.user_id = $2 + AND ugr.group_id = g.id + ) AS has_role +FROM group_path_cte c +JOIN groups g ON g.id = c.id +WHERE c.depth = cardinality($1::text[]); + + +-- name: GetReposInGroup :many +SELECT name, COALESCE(description, '') FROM repos WHERE group_id = $1; + +-- name: GetSubgroups :many +SELECT name, COALESCE(description, '') FROM groups WHERE parent_group = $1; diff --git a/forged/sql/queries/login.sql b/forged/sql/queries/login.sql new file mode 100644 index 0000000..ffc4026 --- /dev/null +++ b/forged/sql/queries/login.sql @@ -0,0 +1,8 @@ +-- name: GetUserCreds :one +SELECT id, COALESCE(password_hash, '') FROM users WHERE username = $1; + +-- name: InsertSession :exec +INSERT INTO sessions (user_id, 
token_hash, expires_at) VALUES ($1, $2, $3); + +-- name: GetUserFromSession :one +SELECT user_id, COALESCE(username, '') FROM users u JOIN sessions s ON u.id = s.user_id WHERE s.token_hash = $1; diff --git a/forged/sql/queries/repos.sql b/forged/sql/queries/repos.sql new file mode 100644 index 0000000..cacc5b8 --- /dev/null +++ b/forged/sql/queries/repos.sql @@ -0,0 +1,9 @@ +-- name: InsertRepo :one +INSERT INTO repos (group_id, name, description, contrib_requirements) +VALUES ($1, $2, $3, $4) +RETURNING id; + +-- name: GetRepoByGroupAndName :one +SELECT id, name, COALESCE(description, '') AS description +FROM repos +WHERE group_id = $1 AND name = $2; diff --git a/forged/sql/schema.sql b/forged/sql/schema.sql new file mode 100644 index 0000000..72327a9 --- /dev/null +++ b/forged/sql/schema.sql @@ -0,0 +1,221 @@ +-- SPDX-License-Identifier: AGPL-3.0-only +-- SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + +-- Currently, slugs accept arbitrary unicode text. We should +-- look into normalization options later. +-- May consider using citext and limiting it to safe characters. 
+ +CREATE TABLE groups ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + name TEXT NOT NULL, + parent_group BIGINT REFERENCES groups(id) ON DELETE RESTRICT, + description TEXT, + UNIQUE NULLS NOT DISTINCT (parent_group, name) +); +CREATE INDEX ggroups_parent_idx ON groups(parent_group); + +DO $$ BEGIN + CREATE TYPE contrib_requirement AS ENUM ('closed','registered_user','federated','ssh_pubkey','open'); + -- closed means only those with direct access; each layer adds that level of user +EXCEPTION WHEN duplicate_object THEN END $$; +CREATE TABLE repos ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE RESTRICT, -- I mean, should be CASCADE but deleting Git repos on disk also needs to be considered + name TEXT NOT NULL, + description TEXT, + contrib_requirements contrib_requirement NOT NULL, + UNIQUE(group_id, name) + -- The filesystem path can be derived from the repo ID. + -- The config has repo_dir, then we can do repo_dir/<id>.git +); +CREATE INDEX grepos_group_idx ON repos(group_id); + +CREATE TABLE mailing_lists ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE RESTRICT, + name TEXT NOT NULL, + description TEXT, + UNIQUE(group_id, name) +); +CREATE INDEX gmailing_lists_group_idx ON mailing_lists(group_id); + +CREATE TABLE mailing_list_emails ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + list_id BIGINT NOT NULL REFERENCES mailing_lists(id) ON DELETE CASCADE, + title TEXT NOT NULL, + sender TEXT NOT NULL, + date TIMESTAMPTZ NOT NULL, -- everything must be in UTC + message_id TEXT, -- no uniqueness guarantee as it's arbitrarily set by senders + content BYTEA NOT NULL +); + +DO $$ BEGIN + CREATE TYPE user_type AS ENUM ('pubkey_only','federated','registered','admin'); +EXCEPTION WHEN duplicate_object THEN END $$; +CREATE TABLE users ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + CONSTRAINT id_positive CHECK 
(id > 0), + username TEXT UNIQUE, -- NULL when, for example, pubkey_only + type user_type NOT NULL, + password_hash TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE TABLE ssh_public_keys ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + key_string TEXT NOT NULL, + CONSTRAINT unique_key_string EXCLUDE USING HASH (key_string WITH =) -- because apparently some haxxor like using rsa16384 keys which are too long for a simple UNIQUE constraint :D +); +CREATE INDEX gssh_keys_user_idx ON ssh_public_keys(user_id); + +CREATE TABLE sessions ( + session_id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + token_hash BYTEA UNIQUE NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + expires_at TIMESTAMPTZ NOT NULL +); +CREATE INDEX gsessions_user_idx ON sessions(user_id); + +DO $$ BEGIN + CREATE TYPE group_role AS ENUM ('owner'); -- just owner for now, might need to rethink ACL altogether later; might consider using a join table if we need it to be dynamic, but enum suffices for now +EXCEPTION WHEN duplicate_object THEN END $$; +CREATE TABLE user_group_roles ( + group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE CASCADE, + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + role group_role NOT NULL, + PRIMARY KEY(user_id, group_id) +); +CREATE INDEX gugr_group_idx ON user_group_roles(group_id); + +CREATE TABLE federated_identities ( + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE RESTRICT, + service TEXT NOT NULL, -- might need to constrain + remote_username TEXT NOT NULL, + PRIMARY KEY(user_id, service), + UNIQUE(service, remote_username) +); + +CREATE TABLE ticket_trackers ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE RESTRICT, + name TEXT NOT NULL, + description TEXT, + UNIQUE(group_id, name) +); + +CREATE TABLE 
tickets ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + tracker_id BIGINT NOT NULL REFERENCES ticket_trackers(id) ON DELETE CASCADE, + tracker_local_id BIGINT NOT NULL, + title TEXT NOT NULL, + description TEXT, + UNIQUE(tracker_id, tracker_local_id) +); + +CREATE FUNCTION create_tracker_ticket_sequence() +RETURNS TRIGGER AS $$ +DECLARE + seq_name TEXT := format('tracker_ticket_seq_%s', NEW.id); +BEGIN + EXECUTE format('CREATE SEQUENCE %I', seq_name); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +CREATE FUNCTION drop_tracker_ticket_sequence() +RETURNS TRIGGER AS $$ +DECLARE + seq_name TEXT := format('tracker_ticket_seq_%s', OLD.id); +BEGIN + EXECUTE format('DROP SEQUENCE IF EXISTS %I', seq_name); + RETURN OLD; +END; +$$ LANGUAGE plpgsql; +CREATE TRIGGER after_insert_ticket_tracker +AFTER INSERT ON ticket_trackers +FOR EACH ROW +EXECUTE FUNCTION create_tracker_ticket_sequence(); +CREATE TRIGGER before_delete_ticket_tracker +BEFORE DELETE ON ticket_trackers +FOR EACH ROW +EXECUTE FUNCTION drop_tracker_ticket_sequence(); +CREATE FUNCTION assign_tracker_local_id() +RETURNS TRIGGER AS $$ +DECLARE + seq_name TEXT := format('tracker_ticket_seq_%s', NEW.tracker_id); +BEGIN + IF NEW.tracker_local_id IS NULL THEN + EXECUTE format('SELECT nextval(%L)', seq_name) INTO NEW.tracker_local_id; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +CREATE TRIGGER before_insert_ticket +BEFORE INSERT ON tickets +FOR EACH ROW +EXECUTE FUNCTION assign_tracker_local_id(); +CREATE INDEX gtickets_tracker_idx ON tickets(tracker_id); + +DO $$ BEGIN + CREATE TYPE mr_status AS ENUM ('open','merged','closed'); +EXCEPTION WHEN duplicate_object THEN END $$; + +CREATE TABLE merge_requests ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + repo_id BIGINT NOT NULL REFERENCES repos(id) ON DELETE CASCADE, + repo_local_id BIGINT NOT NULL, + title TEXT NOT NULL, + creator BIGINT REFERENCES users(id) ON DELETE SET NULL, + source_repo BIGINT NOT NULL REFERENCES repos(id) ON DELETE RESTRICT, 
+ source_ref TEXT NOT NULL, + destination_branch TEXT, + status mr_status NOT NULL, + UNIQUE (repo_id, repo_local_id) +); +CREATE UNIQUE INDEX gmr_open_src_dst_uniq + ON merge_requests (repo_id, source_repo, source_ref, coalesce(destination_branch, '')) + WHERE status = 'open'; +CREATE INDEX gmr_repo_idx ON merge_requests(repo_id); +CREATE INDEX gmr_creator_idx ON merge_requests(creator); +CREATE FUNCTION create_repo_mr_sequence() +RETURNS TRIGGER AS $$ +DECLARE + seq_name TEXT := format('repo_mr_seq_%s', NEW.id); +BEGIN + EXECUTE format('CREATE SEQUENCE %I', seq_name); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +CREATE FUNCTION drop_repo_mr_sequence() +RETURNS TRIGGER AS $$ +DECLARE + seq_name TEXT := format('repo_mr_seq_%s', OLD.id); +BEGIN + EXECUTE format('DROP SEQUENCE IF EXISTS %I', seq_name); + RETURN OLD; +END; +$$ LANGUAGE plpgsql; +CREATE TRIGGER after_insert_repo +AFTER INSERT ON repos +FOR EACH ROW +EXECUTE FUNCTION create_repo_mr_sequence(); +CREATE TRIGGER before_delete_repo +BEFORE DELETE ON repos +FOR EACH ROW +EXECUTE FUNCTION drop_repo_mr_sequence(); +CREATE FUNCTION assign_repo_local_id() +RETURNS TRIGGER AS $$ +DECLARE + seq_name TEXT := format('repo_mr_seq_%s', NEW.repo_id); +BEGIN + IF NEW.repo_local_id IS NULL THEN + EXECUTE format('SELECT nextval(%L)', seq_name) INTO NEW.repo_local_id; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +CREATE TRIGGER before_insert_merge_request +BEFORE INSERT ON merge_requests +FOR EACH ROW +EXECUTE FUNCTION assign_repo_local_id(); diff --git a/forged/sqlc.yaml b/forged/sqlc.yaml new file mode 100644 index 0000000..2b6e035 --- /dev/null +++ b/forged/sqlc.yaml @@ -0,0 +1,15 @@ +version: "2" +sql: + - engine: "postgresql" + schema: "sql/schema.sql" + queries: "sql/queries" + gen: + go: + package: "queries" + out: "internal/database/queries" + sql_package: "pgx/v5" + emit_json_tags: true + emit_db_tags: true + emit_prepared_queries: true + emit_pointers_for_null_types: true + emit_enum_valid_method: true diff 
--git a/forged/static/.gitignore b/forged/static/.gitignore new file mode 100644 index 0000000..812b75f --- /dev/null +++ b/forged/static/.gitignore @@ -0,0 +1,2 @@ +/index.html +# used for testing css without recompiling the server diff --git a/forged/static/chroma.css b/forged/static/chroma.css new file mode 100644 index 0000000..1f7219a --- /dev/null +++ b/forged/static/chroma.css @@ -0,0 +1,152 @@ +/* + * SPDX-License-Identifier: MIT AND BSD-2-Clause + * SPDX-FileCopyrightText: Copyright (c) 2018-2025 Pygments and Chroma authors + */ + +@media (prefers-color-scheme: light) { + /* Background */ .bg { ; } + /* PreWrapper */ .chroma { ; } + /* Error */ .chroma .err { } + /* LineLink */ .chroma .lnlinks { outline: none; text-decoration: none; color: inherit } + /* LineTableTD */ .chroma .lntd { vertical-align: top; padding: 0; margin: 0; border: 0; } + /* LineTable */ .chroma .lntable { border-spacing: 0; padding: 0; margin: 0; border: 0; } + /* LineHighlight */ .chroma .hl { background-color: #e5e5e5 } + /* LineNumbersTable */ .chroma .lnt { white-space: pre; -webkit-user-select: none; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #7f7f7f } + /* LineNumbers */ .chroma .ln { white-space: pre; -webkit-user-select: none; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #7f7f7f } + /* Line */ .chroma .line { display: flex; } + /* Keyword */ .chroma .k { color: #008000; font-weight: bold } + /* KeywordConstant */ .chroma .kc { color: #008000; font-weight: bold } + /* KeywordDeclaration */ .chroma .kd { color: #008000; font-weight: bold } + /* KeywordNamespace */ .chroma .kn { color: #008000; font-weight: bold } + /* KeywordPseudo */ .chroma .kp { color: #008000 } + /* KeywordReserved */ .chroma .kr { color: #008000; font-weight: bold } + /* KeywordType */ .chroma .kt { color: #b00040 } + /* NameAttribute */ .chroma .na { color: #7d9029 } + /* NameBuiltin */ .chroma .nb { color: #008000 } + /* NameClass */ .chroma .nc { 
color: #0000ff; font-weight: bold } + /* NameConstant */ .chroma .no { color: #880000 } + /* NameDecorator */ .chroma .nd { color: #aa22ff } + /* NameEntity */ .chroma .ni { color: #999999; font-weight: bold } + /* NameException */ .chroma .ne { color: #d2413a; font-weight: bold } + /* NameFunction */ .chroma .nf { color: #0000ff } + /* NameLabel */ .chroma .nl { color: #a0a000 } + /* NameNamespace */ .chroma .nn { color: #0000ff; font-weight: bold } + /* NameTag */ .chroma .nt { color: #008000; font-weight: bold } + /* NameVariable */ .chroma .nv { color: #19177c } + /* LiteralString */ .chroma .s { color: #ba2121 } + /* LiteralStringAffix */ .chroma .sa { color: #ba2121 } + /* LiteralStringBacktick */ .chroma .sb { color: #ba2121 } + /* LiteralStringChar */ .chroma .sc { color: #ba2121 } + /* LiteralStringDelimiter */ .chroma .dl { color: #ba2121 } + /* LiteralStringDoc */ .chroma .sd { color: #ba2121; font-style: italic } + /* LiteralStringDouble */ .chroma .s2 { color: #ba2121 } + /* LiteralStringEscape */ .chroma .se { color: #bb6622; font-weight: bold } + /* LiteralStringHeredoc */ .chroma .sh { color: #ba2121 } + /* LiteralStringInterpol */ .chroma .si { color: #bb6688; font-weight: bold } + /* LiteralStringOther */ .chroma .sx { color: #008000 } + /* LiteralStringRegex */ .chroma .sr { color: #bb6688 } + /* LiteralStringSingle */ .chroma .s1 { color: #ba2121 } + /* LiteralStringSymbol */ .chroma .ss { color: #19177c } + /* LiteralNumber */ .chroma .m { color: #666666 } + /* LiteralNumberBin */ .chroma .mb { color: #666666 } + /* LiteralNumberFloat */ .chroma .mf { color: #666666 } + /* LiteralNumberHex */ .chroma .mh { color: #666666 } + /* LiteralNumberInteger */ .chroma .mi { color: #666666 } + /* LiteralNumberIntegerLong */ .chroma .il { color: #666666 } + /* LiteralNumberOct */ .chroma .mo { color: #666666 } + /* Operator */ .chroma .o { color: #666666 } + /* OperatorWord */ .chroma .ow { color: #aa22ff; font-weight: bold } + /* Comment */ .chroma .c { 
color: #408080; font-style: italic } + /* CommentHashbang */ .chroma .ch { color: #408080; font-style: italic } + /* CommentMultiline */ .chroma .cm { color: #408080; font-style: italic } + /* CommentSingle */ .chroma .c1 { color: #408080; font-style: italic } + /* CommentSpecial */ .chroma .cs { color: #408080; font-style: italic } + /* CommentPreproc */ .chroma .cp { color: #bc7a00 } + /* CommentPreprocFile */ .chroma .cpf { color: #bc7a00 } + /* GenericDeleted */ .chroma .gd { color: #a00000 } + /* GenericEmph */ .chroma .ge { font-style: italic } + /* GenericError */ .chroma .gr { color: #ff0000 } + /* GenericHeading */ .chroma .gh { color: #000080; font-weight: bold } + /* GenericInserted */ .chroma .gi { color: #00a000 } + /* GenericOutput */ .chroma .go { color: #888888 } + /* GenericPrompt */ .chroma .gp { color: #000080; font-weight: bold } + /* GenericStrong */ .chroma .gs { font-weight: bold } + /* GenericSubheading */ .chroma .gu { color: #800080; font-weight: bold } + /* GenericTraceback */ .chroma .gt { color: #0044dd } + /* GenericUnderline */ .chroma .gl { text-decoration: underline } + /* TextWhitespace */ .chroma .w { color: #bbbbbb } +} +@media (prefers-color-scheme: dark) { + /* Background */ .bg { color: #e6edf3; background-color: #000000; } + /* PreWrapper */ .chroma { color: #e6edf3; background-color: #000000; } + /* Error */ .chroma .err { color: #f85149 } + /* LineLink */ .chroma .lnlinks { outline: none; text-decoration: none; color: inherit } + /* LineTableTD */ .chroma .lntd { vertical-align: top; padding: 0; margin: 0; border: 0; } + /* LineTable */ .chroma .lntable { border-spacing: 0; padding: 0; margin: 0; border: 0; } + /* LineHighlight */ .chroma .hl { background-color: #6e7681 } + /* LineNumbersTable */ .chroma .lnt { white-space: pre; -webkit-user-select: none; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #737679 } + /* LineNumbers */ .chroma .ln { white-space: pre; -webkit-user-select: none; 
user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #6e7681 } + /* Line */ .chroma .line { display: flex; } + /* Keyword */ .chroma .k { color: #ff7b72 } + /* KeywordConstant */ .chroma .kc { color: #79c0ff } + /* KeywordDeclaration */ .chroma .kd { color: #ff7b72 } + /* KeywordNamespace */ .chroma .kn { color: #ff7b72 } + /* KeywordPseudo */ .chroma .kp { color: #79c0ff } + /* KeywordReserved */ .chroma .kr { color: #ff7b72 } + /* KeywordType */ .chroma .kt { color: #ff7b72 } + /* NameClass */ .chroma .nc { color: #f0883e; font-weight: bold } + /* NameConstant */ .chroma .no { color: #79c0ff; font-weight: bold } + /* NameDecorator */ .chroma .nd { color: #d2a8ff; font-weight: bold } + /* NameEntity */ .chroma .ni { color: #ffa657 } + /* NameException */ .chroma .ne { color: #f0883e; font-weight: bold } + /* NameFunction */ .chroma .nf { color: #d2a8ff; font-weight: bold } + /* NameLabel */ .chroma .nl { color: #79c0ff; font-weight: bold } + /* NameNamespace */ .chroma .nn { color: #ff7b72 } + /* NameProperty */ .chroma .py { color: #79c0ff } + /* NameTag */ .chroma .nt { color: #7ee787 } + /* NameVariable */ .chroma .nv { color: #79c0ff } + /* Literal */ .chroma .l { color: #a5d6ff } + /* LiteralDate */ .chroma .ld { color: #79c0ff } + /* LiteralString */ .chroma .s { color: #a5d6ff } + /* LiteralStringAffix */ .chroma .sa { color: #79c0ff } + /* LiteralStringBacktick */ .chroma .sb { color: #a5d6ff } + /* LiteralStringChar */ .chroma .sc { color: #a5d6ff } + /* LiteralStringDelimiter */ .chroma .dl { color: #79c0ff } + /* LiteralStringDoc */ .chroma .sd { color: #a5d6ff } + /* LiteralStringDouble */ .chroma .s2 { color: #a5d6ff } + /* LiteralStringEscape */ .chroma .se { color: #79c0ff } + /* LiteralStringHeredoc */ .chroma .sh { color: #79c0ff } + /* LiteralStringInterpol */ .chroma .si { color: #a5d6ff } + /* LiteralStringOther */ .chroma .sx { color: #a5d6ff } + /* LiteralStringRegex */ .chroma .sr { color: #79c0ff } + /* 
LiteralStringSingle */ .chroma .s1 { color: #a5d6ff } + /* LiteralStringSymbol */ .chroma .ss { color: #a5d6ff } + /* LiteralNumber */ .chroma .m { color: #a5d6ff } + /* LiteralNumberBin */ .chroma .mb { color: #a5d6ff } + /* LiteralNumberFloat */ .chroma .mf { color: #a5d6ff } + /* LiteralNumberHex */ .chroma .mh { color: #a5d6ff } + /* LiteralNumberInteger */ .chroma .mi { color: #a5d6ff } + /* LiteralNumberIntegerLong */ .chroma .il { color: #a5d6ff } + /* LiteralNumberOct */ .chroma .mo { color: #a5d6ff } + /* Operator */ .chroma .o { color: #ff7b72; font-weight: bold } + /* OperatorWord */ .chroma .ow { color: #ff7b72; font-weight: bold } + /* Comment */ .chroma .c { color: #8b949e; font-style: italic } + /* CommentHashbang */ .chroma .ch { color: #8b949e; font-style: italic } + /* CommentMultiline */ .chroma .cm { color: #8b949e; font-style: italic } + /* CommentSingle */ .chroma .c1 { color: #8b949e; font-style: italic } + /* CommentSpecial */ .chroma .cs { color: #8b949e; font-weight: bold; font-style: italic } + /* CommentPreproc */ .chroma .cp { color: #8b949e; font-weight: bold; font-style: italic } + /* CommentPreprocFile */ .chroma .cpf { color: #8b949e; font-weight: bold; font-style: italic } + /* GenericDeleted */ .chroma .gd { color: #ffa198; background-color: #490202 } + /* GenericEmph */ .chroma .ge { font-style: italic } + /* GenericError */ .chroma .gr { color: #ffa198 } + /* GenericHeading */ .chroma .gh { color: #79c0ff; font-weight: bold } + /* GenericInserted */ .chroma .gi { color: #56d364; background-color: #0f5323 } + /* GenericOutput */ .chroma .go { color: #8b949e } + /* GenericPrompt */ .chroma .gp { color: #8b949e } + /* GenericStrong */ .chroma .gs { font-weight: bold } + /* GenericSubheading */ .chroma .gu { color: #79c0ff } + /* GenericTraceback */ .chroma .gt { color: #ff7b72 } + /* GenericUnderline */ .chroma .gl { text-decoration: underline } + /* TextWhitespace */ .chroma .w { color: #6e7681 } +} diff --git a/static/style.css 
b/forged/static/style.css index e5398ce..f70fe69 100644 --- a/static/style.css +++ b/forged/static/style.css @@ -1,42 +1,50 @@ /* * SPDX-License-Identifier: AGPL-3.0-only - * SPDX-FileContributor: Runxi Yu <https://runxiyu.org> - * SPDX-FileContributor: luk3yx <https://luk3yx.github.io> + * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + * SPDX-FileCopyrightText: Copyright (c) 2025 luk3yx <https://luk3yx.github.io> + * SPDX-FileCopyrightText: Copyright (c) 2017-2025 Drew DeVault <https://drewdevault.com> + * + * Drew did not directly contribute here but we took significant portions of + * SourceHut's CSS. */ +* { + box-sizing: border-box; +} + /* Base styles and variables */ html { font-family: sans-serif; background-color: var(--background-color); color: var(--text-color); - --radius-1: 0.32rem; - --background-color: hsl(0, 0%, 100%); - --text-color: hsl(0, 0%, 0%); - --link-color: hsl(320, 50%, 36%); - --light-text-color: hsl(0, 0%, 45%); - --darker-border-color: hsl(0, 0%, 72%); - --lighter-border-color: hsl(0, 0%, 85%); - --text-decoration-color: hsl(0, 0%, 72%); - --darker-box-background-color: hsl(0, 0%, 92%); - --lighter-box-background-color: hsl(0, 0%, 95%); - --primary-color: hsl(320, 50%, 36%); - --primary-color-contrast: hsl(320, 0%, 100%); - --danger-color: #ff0000; - --danger-color-contrast: #ffffff; + font-size: 1rem; + --background-color: oklch(1.000 0.000 0.0); + --text-color: oklch(0.000 0.000 0.0); + --link-color: oklch(0.457 0.143 343.4); + --light-text-color: oklch(0.555 0.000 0.0); + --darker-border-color: oklch(0.781 0.000 0.0); + --lighter-border-color: oklch(0.885 0.000 0.0); + --text-decoration-color: oklch(0.781 0.000 0.0); + --darker-box-background-color: oklch(0.939 0.000 0.0); + --lighter-box-background-color: oklch(0.962 0.000 0.0); + --primary-color: oklch(0.457 0.143 343.4); + --primary-color-contrast: oklch(1.000 0.000 0.0); + --danger-color: oklch(0.628 0.258 29.2); + --danger-color-contrast: oklch(1.000 
0.000 0.0); } /* Dark mode overrides */ @media (prefers-color-scheme: dark) { html { - --background-color: hsl(0, 0%, 0%); - --text-color: hsl(0, 0%, 100%); - --link-color: hsl(320, 50%, 76%); - --light-text-color: hsl(0, 0%, 78%); - --darker-border-color: hsl(0, 0%, 35%); - --lighter-border-color: hsl(0, 0%, 25%); - --text-decoration-color: hsl(0, 0%, 30%); - --darker-box-background-color: hsl(0, 0%, 20%); - --lighter-box-background-color: hsl(0, 0%, 15%); + --background-color: oklch(0.000 0.000 0.0); + --text-color: oklch(1.000 0.000 0.0); + --link-color: oklch(0.786 0.089 339.4); + --light-text-color: oklch(0.829 0.000 0.0); + --darker-border-color: oklch(0.465 0.000 0.0); + --lighter-border-color: oklch(0.371 0.000 0.0); + --text-decoration-color: oklch(0.598 0.000 0.0); + --darker-box-background-color: oklch(0.321 0.000 0.0); + --lighter-box-background-color: oklch(0.270 0.000 0.0); } } @@ -76,46 +84,8 @@ html, code, pre { display: table-row-group; } -table.rounded, table.rounded-footed { - overflow: hidden; - border-spacing: 0; - border-collapse: separate; - border-radius: var(--radius-1); - border: var(--lighter-border-color) solid 1px; -} - -table.rounded th, table.rounded td, -table.rounded-footed th, table.rounded-footed td { - border: none; -} - -table.rounded th:not(:last-child), -table.rounded td:not(:last-child), -table.rounded-footed th:not(:last-child), -table.rounded-footed td:not(:last-child) { - border-right: var(--lighter-border-color) solid 1px; -} - -table.rounded>thead>tr>th, -table.rounded>thead>tr>td, -table.rounded>tbody>tr:not(:last-child)>th, -table.rounded>tbody>tr:not(:last-child)>td { - border-bottom: var(--lighter-border-color) solid 1px; -} - -table.rounded-footed>thead>tr>th, -table.rounded-footed>thead>tr>td, -table.rounded-footed>tbody>tr>th, -table.rounded-footed>tbody>tr>td, -table.rounded-footed>tfoot>tr:not(:last-child)>th, -table.rounded-footed>tfoot>tr:not(:last-child)>td { - border-bottom: var(--lighter-border-color) solid 
1px; -} - - /* Footer styles */ footer { - margin-top: 1rem; margin-left: auto; margin-right: auto; display: block; @@ -128,14 +98,23 @@ footer a:link, footer a:visited { color: inherit; } -/* Padding containers */ -.padding-wrapper { - margin: 1rem auto; - max-width: 60rem; - padding: 0 5px; -} .padding { - padding: 0 5px; + padding: 0 1rem; +} + +/* Sticky footer */ +body { + position: relative; + min-height: 100vh; +} +main { + padding-bottom: 2.5rem; +} +footer { + position: absolute; + bottom: 0; + width: 100%; + height: 2rem; } /* Link styles */ @@ -303,7 +282,6 @@ textarea, input[type=text], input[type=password] { font-family: sans-serif; - font-size: smaller; background-color: var(--lighter-box-background-color); color: var(--text-color); border: none; @@ -326,6 +304,7 @@ th.tdinput input[type=password] { td.tdinput select { position: absolute; background-color: var(--background-color); + color: var(--text-color); border: none; /* width: 100%; @@ -372,9 +351,7 @@ input[type=file]::file-selector-button { display: inline-block; width: auto; min-width: fit-content; - border-radius: var(--radius-1); padding: .1rem .75rem; - font-size: 0.9rem; transition: background .1s linear; cursor: pointer; } @@ -384,18 +361,50 @@ a.btn, a.btn-white, a.btn-danger, a.btn-normal, a.btn-primary { /* Header layout */ header#main-header { - background-color: var(--lighter-box-background-color); + /* background-color: var(--lighter-box-background-color); */ display: flex; + flex-direction: row; + align-items: center; justify-content: space-between; + flex-wrap: wrap; + padding-top: 1rem; + padding-bottom: 1rem; + gap: 0.5rem; +} +#main-header a, #main-header a:link, #main-header a:visited { + text-decoration: none; + color: inherit; +} +#main-header-forge-title { + white-space: nowrap; +} +#breadcrumb-nav { + display: flex; align-items: center; - padding: 10px; + flex: 1 1 auto; + min-width: 0; + overflow-x: auto; + gap: 0.25rem; + white-space: nowrap; } -header#main-header > 
div#main-header-forge-title { - flex-grow: 1; +.breadcrumb-separator { + margin: 0 0.25rem; } -header#main-header > div#main-header-user { +#main-header-user { display: flex; align-items: center; + white-space: nowrap; +} +@media (max-width: 37.5rem) { + header#main-header { + flex-direction: column; + align-items: flex-start; + } + + #breadcrumb-nav { + width: 100%; + overflow-x: auto; + } } /* Uncategorized */ @@ -408,3 +417,215 @@ td > ul { margin-top: 0; margin-bottom: 0; } + + + +.complete-error-page hr { + border: 0; + border-bottom: 1px dashed; +} + + + + + + +.key-val-grid { + display: grid; + grid-template-columns: auto 1fr; + gap: 0; + border: var(--lighter-border-color) 1px solid; + overflow: auto; +} + +.key-val-grid > .title-row { + grid-column: 1 / -1; + background-color: var(--lighter-box-background-color); + font-weight: bold; + padding: 3px 5px; + border-bottom: var(--lighter-border-color) 1px solid; +} + +.key-val-grid > .row-label { + background-color: var(--lighter-box-background-color); + padding: 3px 5px; + border-bottom: var(--lighter-border-color) 1px solid; + border-right: var(--lighter-border-color) 1px solid; + text-align: left; + font-weight: normal; +} + +.key-val-grid > .row-value { + padding: 3px 5px; + border-bottom: var(--lighter-border-color) 1px solid; + word-break: break-word; +} + +.key-val-grid code { + font-family: monospace; +} + +.key-val-grid ul { + margin: 0; + padding-left: 1.5rem; +} + +.key-val-grid > .row-label:nth-last-of-type(2), +.key-val-grid > .row-value:last-of-type { + border-bottom: none; +} + +@media (max-width: 37.5rem) { + .key-val-grid { + grid-template-columns: 1fr; + } + + .key-val-grid > .row-label { + border-right: none; + } +} +.key-val-grid > .title-row { + grid-column: 1 / -1; + background-color: var(--lighter-box-background-color); + font-weight: bold; + padding: 3px 5px; + border-bottom: var(--lighter-border-color) 1px solid; + margin: 0; + text-align: center; +} + +.key-val-grid-wrapper { + 
max-width: 100%; + width: fit-content; +} + +/* Tab navigation */ + +.nav-tabs-standalone { + border: none; + list-style: none; + margin: 0; + flex-grow: 1; + display: inline-flex; + flex-wrap: nowrap; + padding: 0; + border-bottom: 0.25rem var(--darker-box-background-color) solid; + width: 100%; + max-width: 100%; + min-width: 100%; +} + +.nav-tabs-standalone > li { + align-self: flex-end; +} +.nav-tabs-standalone > li > a { + padding: 0 0.75rem; +} + +.nav-item a.active { + background-color: var(--darker-box-background-color); +} + +.nav-item a, .nav-item a:link, .nav-item a:visited { + text-decoration: none; + color: inherit; +} + +.repo-header-extension { + margin-bottom: 1rem; + background-color: var(--darker-box-background-color); +} + +.repo-header > h2 { + display: inline; + margin: 0; + padding-right: 1rem; +} + +.repo-header > .nav-tabs-standalone { + border: none; + margin: 0; + flex-grow: 1; + display: inline-flex; + flex-wrap: nowrap; + padding: 0; +} + +.repo-header { + display: flex; + flex-wrap: nowrap; +} + +.repo-header-extension-content { + padding-top: 0.3rem; + padding-bottom: 0.2rem; +} + +.repo-header, .padding-wrapper, .repo-header-extension-content, #main-header, .readingwidth, .commit-list-small { + padding-left: 1rem; + padding-right: 1rem; + max-width: 60rem; + width: 100%; + margin-left: auto; + margin-right: auto; +} + +.padding-wrapper { + margin-bottom: 1rem; +} + +/* TODO */ + +.commit-list-small .event { + background-color: var(--lighter-box-background-color); + padding: 0.5rem; + margin-bottom: 1rem; + max-width: 30rem; +} + +.commit-list-small .event:last-child { + margin-bottom: 1rem; +} + +.commit-list-small a { + color: var(--link-color); + text-decoration: none; + font-weight: 500; +} + +.commit-list-small a:hover { + text-decoration: underline; + text-decoration-color: var(--text-decoration-color); +} + +.commit-list-small .event > div { + font-size: 0.95rem; +} + +.commit-list-small .pull-right { + float: right; + 
font-size: 0.85em; + margin-left: 1rem; +} + +.commit-list-small pre.commit { + margin: 0.25rem 0 0 0; + padding: 0; + font-family: inherit; + font-size: 0.95rem; + color: var(--text-color); + white-space: pre-wrap; +} + +.commit-list-small .commit-error { + color: var(--danger-color); + font-weight: bold; + margin-top: 1rem; +} + + +.breakable { + word-break: break-word; + /* overflow-wrap: break-word; + overflow: hidden; */ +} diff --git a/forged/templates/400.tmpl b/forged/templates/400.tmpl new file mode 100644 index 0000000..5bb2185 --- /dev/null +++ b/forged/templates/400.tmpl @@ -0,0 +1,25 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "400" -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>400 Bad Request – {{ .global.forge_title }}</title> + </head> + <body class="400"> + {{- template "header" . -}} + <main> + <div class="padding-wrapper complete-error-page"> + <h1>400 Bad Request</h1> + <p>{{- .complete_error_msg -}}</p> + </div> + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/400_colon.tmpl b/forged/templates/400_colon.tmpl new file mode 100644 index 0000000..6509c0f --- /dev/null +++ b/forged/templates/400_colon.tmpl @@ -0,0 +1,26 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "400_colon" -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>400 Bad Request – {{ .global.forge_title }}</title> + </head> + <body class="400-colon"> + {{- template "header" . -}} + <main> + <div class="padding-wrapper complete-error-page"> + <h1>400 Bad Request</h1> + <p>We recently switched URL schemes. 
Previously “<code>:</code>” was used as our URL group separator, but because OpenSMTPD does not implement local-part address quoting properly, we’re unable to include “<code>:</code>” in URLs properly, hence we use “<code>-</code>” now.</p> + <p>As a precaution in case visitors get confused, this page was set up. <strong>You should probably replace the “<code>:</code>”s with “<code>-</code>”s in the URL bar.</strong> If there are colons in the URL that <em>is not</em> the group separator—that’s an edge case that we’ll fix later.</p> + </div> + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/403.tmpl b/forged/templates/403.tmpl new file mode 100644 index 0000000..5090c60 --- /dev/null +++ b/forged/templates/403.tmpl @@ -0,0 +1,25 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "403" -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>403 Forbidden – {{ .global.forge_title }}</title> + </head> + <body class="403"> + {{- template "header" . -}} + <main> + <div class="padding-wrapper complete-error-page"> + <h1>403 Forbidden</h1> + <p>{{- .complete_error_msg -}}</p> + </div> + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/404.tmpl b/forged/templates/404.tmpl new file mode 100644 index 0000000..10ee1b3 --- /dev/null +++ b/forged/templates/404.tmpl @@ -0,0 +1,24 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "404" -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>404 Not Found – {{ .global.forge_title }}</title> + </head> + <body class="404"> + {{- template "header" . 
-}} + <main> + <div class="padding-wrapper complete-error-page"> + <h1>404 Not Found</h1> + </div> + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/451.tmpl b/forged/templates/451.tmpl new file mode 100644 index 0000000..962fd90 --- /dev/null +++ b/forged/templates/451.tmpl @@ -0,0 +1,25 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "451" -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>451 Unavailable For Legal Reasons – {{ .global.forge_title }}</title> + </head> + <body class="451"> + {{- template "header" . -}} + <main> + <div class="padding-wrapper complete-error-page"> + <h1>451 Unavailable For Legal Reasons</h1> + <p>{{- .complete_error_msg -}}</p> + </div> + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/500.tmpl b/forged/templates/500.tmpl new file mode 100644 index 0000000..38603d5 --- /dev/null +++ b/forged/templates/500.tmpl @@ -0,0 +1,25 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "500" -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>500 Internal Server Error – {{ .global.forge_title }}</title> + </head> + <body class="500"> + {{- template "header" . -}} + <main> + <div class="padding-wrapper complete-error-page"> + <h1>500 Internal Server Error</h1> + <p>{{- .complete_error_msg -}}</p> + </div> + </main> + <footer> + {{- template "footer" . 
-}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/501.tmpl b/forged/templates/501.tmpl new file mode 100644 index 0000000..f17b62e --- /dev/null +++ b/forged/templates/501.tmpl @@ -0,0 +1,24 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "501" -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>501 Not Implemented – {{ .global.forge_title }}</title> + </head> + <body class="501"> + {{- template "header" . -}} + <main> + <div class="padding-wrapper complete-error-page"> + <h1>501 Not Implemented</h1> + </div> + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/_footer.tmpl b/forged/templates/_footer.tmpl new file mode 100644 index 0000000..11e2365 --- /dev/null +++ b/forged/templates/_footer.tmpl @@ -0,0 +1,11 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "footer" -}} +<a href="https://lindenii.runxiyu.org/forge/">Lindenii Forge</a> +{{ .BaseData.Global.ForgeVersion }} +(<a href="https://forge.lindenii.runxiyu.org/forge/-/repos/server/">upstream</a>, +<a href="/-/source/LICENSE">license</a>, +<a href="https://webirc.runxiyu.org/kiwiirc/#lindenii">support</a>) +{{- end -}} diff --git a/forged/templates/_group_path.tmpl b/forged/templates/_group_path.tmpl new file mode 100644 index 0000000..f5d3bf8 --- /dev/null +++ b/forged/templates/_group_path.tmpl @@ -0,0 +1,8 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "group_path_plain" -}} +{{- $p := . -}} +{{- range $i, $s := . 
-}}{{- $s -}}{{- if ne $i (minus (len $p) 1) -}}/{{- end -}}{{- end -}} +{{- end -}} diff --git a/forged/templates/_group_view.tmpl b/forged/templates/_group_view.tmpl new file mode 100644 index 0000000..de5d45d --- /dev/null +++ b/forged/templates/_group_view.tmpl @@ -0,0 +1,56 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "group_view" -}} +{{- if .Subgroups -}} + <table class="wide"> + <thead> + <tr> + <th colspan="2" class="title-row">Subgroups</th> + </tr> + <tr> + <th scope="col">Name</th> + <th scope="col">Description</th> + </tr> + </thead> + <tbody> + {{- range .Subgroups -}} + <tr> + <td> + <a href="{{- .Name | path_escape -}}/">{{- .Name -}}</a> + </td> + <td> + {{- .Description -}} + </td> + </tr> + {{- end -}} + </tbody> + </table> +{{- end -}} +{{- if .Repos -}} +<table class="wide"> + <thead> + <tr> + <th colspan="2" class="title-row">Repos</th> + <tr> + <th scope="col">Name</th> + <th scope="col">Description</th> + </tr> + </tr> + </thead> + <tbody> + {{- range .Repos -}} + <tr> + <td> + <a href="-/repos/{{- .Name | path_escape -}}/">{{- .Name -}}</a> + </td> + <td> + {{- .Description -}} + </td> + </tr> + {{- end -}} + </tbody> +</table> +{{- end -}} +{{- end -}} diff --git a/forged/templates/_head.tmpl b/forged/templates/_head.tmpl new file mode 100644 index 0000000..d6d6571 --- /dev/null +++ b/forged/templates/_head.tmpl @@ -0,0 +1,9 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "head_common" -}} +<meta charset="utf-8" /> +<meta name="viewport" content="width=device-width, initial-scale=1" /> +<link rel="stylesheet" href="/-/static/style.css" /> +{{- end -}} diff --git a/forged/templates/_header.tmpl b/forged/templates/_header.tmpl new file mode 100644 index 0000000..39d3491 --- /dev/null +++ b/forged/templates/_header.tmpl @@ -0,0 +1,35 @@ +{{/* + 
SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "header" -}} +<header id="main-header"> + <div id="main-header-forge-title"> + <a href="/">{{- .BaseData.Global.ForgeTitle -}}</a> + </div> + <nav id="breadcrumb-nav"> + {{- $path := "" -}} + {{- $url_segments := .BaseData.URLSegments -}} + {{- $dir_mode := .BaseData.DirMode -}} + {{- $ref_type := .BaseData.RefType -}} + {{- $ref := .BaseData.RefName -}} + {{- $separator_index := .BaseData.SeparatorIndex -}} + {{- if eq $separator_index -1 -}} + {{- $separator_index = len $url_segments -}} + {{- end -}} + {{- range $i := $separator_index -}} + {{- $segment := index $url_segments $i -}} + {{- $path = printf "%s/%s" $path $segment -}} + <span class="breadcrumb-separator">/</span> + <a href="{{ $path }}{{ if or (ne $i (minus (len $url_segments) 1)) $dir_mode }}/{{ end }}{{- if $ref_type -}}?{{- $ref_type -}}={{- $ref -}}{{- end -}}">{{ $segment }}</a> + {{- end -}} + </nav> + <div id="main-header-user"> + {{- if ne .BaseData.UserID "" -}} + <a href="/-/users/{{- .BaseData.UserID -}}/">{{- .BaseData.Username -}}</a> + {{- else -}} + <a href="/-/login/">Login</a> + {{- end -}} + </div> +</header> +{{- end -}} diff --git a/forged/templates/_ref_query.tmpl b/forged/templates/_ref_query.tmpl new file mode 100644 index 0000000..2f78955 --- /dev/null +++ b/forged/templates/_ref_query.tmpl @@ -0,0 +1,3 @@ +{{- define "ref_query" -}} +{{- if .ref_type -}}?{{- .ref_type -}}={{- .ref_name -}}{{- end -}} +{{- end -}} diff --git a/forged/templates/group.tmpl b/forged/templates/group.tmpl new file mode 100644 index 0000000..1f9609e --- /dev/null +++ b/forged/templates/group.tmpl @@ -0,0 +1,82 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "group" -}} +{{- $group_path := .BaseData.GroupPath -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template 
"head_common" . -}} + <title>{{- range $i, $s := $group_path -}}{{- $s -}}{{- if ne $i (len $group_path) -}}/{{- end -}}{{- end }} – {{ .BaseData.Global.ForgeTitle -}}</title> + </head> + <body class="group"> + {{- template "header" . -}} + <main> + <div class="padding-wrapper"> + {{- if .Description -}} + <p>{{- .Description -}}</p> + {{- end -}} + {{- template "group_view" . -}} + </div> + {{- if .DirectAccess -}} + <div class="padding-wrapper"> + <form method="POST" enctype="application/x-www-form-urlencoded"> + <table> + <thead> + <tr> + <th class="title-row" colspan="2"> + Create repo + </th> + </tr> + </thead> + <tbody> + <tr> + <th scope="row">Name</th> + <td class="tdinput"> + <input id="repo-name-input" name="repo_name" type="text" /> + </td> + </tr> + <tr> + <th scope="row">Description</th> + <td class="tdinput"> + <input id="repo-desc-input" name="repo_desc" type="text" /> + </td> + </tr> + <tr> + <th scope="row">Contrib</th> + <td class="tdinput"> + <select id="repo-contrib-input" name="repo_contrib"> + <option value="open">Public</option> + <option value="ssh_pubkey">SSH public key</option> + <option value="federated">Federated service</option> + <option value="registered_user">Registered user</option> + <option value="closed">Closed</option> + </select> + </td> + </tr> + </tbody> + <tfoot> + <tr> + <td class="th-like" colspan="2"> + <div class="flex-justify"> + <div class="left"> + </div> + <div class="right"> + <input class="btn-primary" type="submit" value="Create" /> + </div> + </div> + </td> + </tr> + </tfoot> + </table> + </form> + </div> + {{- end -}} + </main> + <footer> + {{- template "footer" . 
-}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/index.tmpl b/forged/templates/index.tmpl new file mode 100644 index 0000000..fa9b6a0 --- /dev/null +++ b/forged/templates/index.tmpl @@ -0,0 +1,65 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "index" -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>Index – {{ .BaseData.Global.ForgeTitle -}}</title> + </head> + <body class="index"> + {{- template "header" . -}} + <main> + <div class="padding-wrapper"> + <table class="wide"> + <thead> + <tr> + <th colspan="2" class="title-row">Groups</th> + </tr> + <tr> + <th scope="col">Name</th> + <th scope="col">Description</th> + </tr> + </thead> + <tbody> + {{- range .Groups -}} + <tr> + <td> + <a href="{{- .Name | path_escape -}}/">{{- .Name -}}</a> + </td> + <td> + {{- .Description -}} + </td> + </tr> + {{- end -}} + </tbody> + </table> + <table class="wide"> + <thead> + <tr> + <th colspan="2" class="title-row"> + Info + </th> + </tr> + </thead> + <tbody> + <tr> + <th scope="row">SSH public key</th> + <td><code class="breakable">{{- .BaseData.Global.SSHPubkey -}}</code></td> + </tr> + <tr> + <th scope="row">SSH fingerprint</th> + <td><code class="breakable">{{- .BaseData.Global.SSHFingerprint -}}</code></td> + </tr> + </tbody> + </table> + </div> + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/login.tmpl b/forged/templates/login.tmpl new file mode 100644 index 0000000..09cbb61 --- /dev/null +++ b/forged/templates/login.tmpl @@ -0,0 +1,61 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "login" -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . 
-}} + <title>Login – {{ .BaseData.Global.ForgeTitle -}}</title> + </head> + <body class="index"> + <main> + {{- .LoginError -}} + <div class="padding-wrapper"> + <form method="POST" enctype="application/x-www-form-urlencoded"> + <table> + <thead> + <tr> + <th class="title-row" colspan="2"> + Password authentication + </th> + </tr> + </thead> + <tbody> + <tr> + <th scope="row">Username</th> + <td class="tdinput"> + <input id="usernameinput" name="username" type="text" /> + </td> + </tr> + <tr> + <th scope="row">Password</th> + <td class="tdinput"> + <input id="passwordinput" name="password" type="password" /> + </td> + </tr> + </tbody> + <tfoot> + <tr> + <td class="th-like" colspan="2"> + <div class="flex-justify"> + <div class="left"> + </div> + <div class="right"> + <input class="btn-primary" type="submit" value="Submit" /> + </div> + </div> + </td> + </tr> + </tfoot> + </table> + </form> + </div> + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/repo_branches.tmpl b/forged/templates/repo_branches.tmpl new file mode 100644 index 0000000..c32519d --- /dev/null +++ b/forged/templates/repo_branches.tmpl @@ -0,0 +1,76 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "repo_branches" -}} +{{- $root := . -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>{{ .repo_name }} – {{ template "group_path_plain" .group_path }} – {{ .global.forge_title -}}</title> + </head> + <body class="repo-branches"> + {{- template "header" . 
-}} + <main> + <div class="repo-header"> + <h2>{{- .repo_name -}}</h2> + <ul class="nav-tabs-standalone"> + <li class="nav-item"> + <a class="nav-link" href="../{{- template "ref_query" $root -}}">Summary</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../tree/{{- template "ref_query" $root -}}">Tree</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../log/{{- template "ref_query" $root -}}">Log</a> + </li> + <li class="nav-item"> + <a class="nav-link active" href="../branches/">Branches</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../tags/">Tags</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../contrib/">Merge requests</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../settings/">Settings</a> + </li> + </ul> + </div> + <div class="repo-header-extension"> + <div class="repo-header-extension-content"> + {{- .repo_description -}} + </div> + </div> + <div class="padding-wrapper"> + <p> + <strong> + Warning: Due to various recent migrations, viewing non-HEAD refs may be broken. + </strong> + </p> + <table id="branches"> + <thead> + <tr class="title-row"> + <th colspan="1">Branches</th> + </tr> + </thead> + <tbody> + {{- range .branches -}} + <tr> + <td> + <a href="../?branch={{ . }}">{{ . }}</a> + </td> + </tr> + {{- end -}} + </tbody> + </table> + </div> + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/repo_commit.tmpl b/forged/templates/repo_commit.tmpl new file mode 100644 index 0000000..42f2bcd --- /dev/null +++ b/forged/templates/repo_commit.tmpl @@ -0,0 +1,119 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "repo_commit" -}} +{{- $root := . -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . 
-}} + <title>Commit {{ .commit_id }} – {{ .repo_name }} – {{ template "group_path_plain" .group_path }} – {{ .global.forge_title -}}</title> + </head> + <body class="repo-commit"> + {{- template "header" . -}} + <main> + <div class="repo-header"> + <h2>{{- .repo_name -}}</h2> + <ul class="nav-tabs-standalone"> + <li class="nav-item"> + <a class="nav-link" href="../{{- template "ref_query" $root -}}">Summary</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../tree/{{- template "ref_query" $root -}}">Tree</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../log/{{- template "ref_query" $root -}}">Log</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../branches/">Branches</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../tags/">Tags</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../contrib/">Merge requests</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../settings/">Settings</a> + </li> + </ul> + </div> + <div class="repo-header-extension"> + <div class="repo-header-extension-content"> + {{- .repo_description -}} + </div> + </div> + <div class="padding-wrapper scroll"> + <div class="key-val-grid-wrapper"> + <section id="commit-info" class="key-val-grid"> + <div class="title-row">Commit info</div> + <div class="row-label">ID</div> + <div class="row-value">{{- .commit_id -}}</div> + <div class="row-label">Author</div> + <div class="row-value"> + <span>{{- .commit_object.Author.Name -}}</span> <span><<a href="mailto:{{- .commit_object.Author.Email -}}">{{- .commit_object.Author.Email -}}</a>></span> + </div> + <div class="row-label">Author date</div> + <div class="row-value">{{- .commit_object.Author.When.Format "Mon, 02 Jan 2006 15:04:05 -0700" -}}</div> + <div class="row-label">Committer</div> + <div class="row-value"> + <span>{{- .commit_object.Committer.Name -}}</span> <span><<a href="mailto:{{- .commit_object.Committer.Email -}}">{{- .commit_object.Committer.Email 
-}}</a>></span> + </div> + <div class="row-label">Committer date</div> + <div class="row-value">{{- .commit_object.Committer.When.Format "Mon, 02 Jan 2006 15:04:05 -0700" -}}</div> + <div class="row-label">Actions</div> + <div class="row-value"> + <a href="{{- .commit_object.Hash -}}.patch">Get patch</a> + </div> + </section> + </div> + </div> + + <div class="padding-wrapper scroll" id="this-commit-message"> + <pre>{{- .commit_object.Message -}}</pre> + </div> + <div class="padding-wrapper"> + {{- $parent_commit_hash := .parent_commit_hash -}} + {{- $commit_object := .commit_object -}} + {{- range .file_patches -}} + <div class="file-patch toggle-on-wrapper"> + <input type="checkbox" id="toggle-{{- .From.Hash -}}{{- .To.Hash -}}" class="file-toggle toggle-on-toggle"> + <label for="toggle-{{- .From.Hash -}}{{- .To.Hash -}}" class="file-header toggle-on-header"> + <div> + {{- if eq .From.Path "" -}} + --- /dev/null + {{- else -}} + --- a/<a href="../tree/{{- .From.Path -}}?commit={{- $parent_commit_hash -}}">{{- .From.Path -}}</a> {{ .From.Mode -}} + {{- end -}} + <br /> + {{- if eq .To.Path "" -}} + +++ /dev/null + {{- else -}} + +++ b/<a href="../tree/{{- .To.Path -}}?commit={{- $commit_object.Hash -}}">{{- .To.Path -}}</a> {{ .To.Mode -}} + {{- end -}} + </div> + </label> + <div class="file-content toggle-on-content scroll"> + {{- range .Chunks -}} + {{- if eq .Operation 0 -}} + <pre class="chunk chunk-unchanged">{{ .Content }}</pre> + {{- else if eq .Operation 1 -}} + <pre class="chunk chunk-addition">{{ .Content }}</pre> + {{- else if eq .Operation 2 -}} + <pre class="chunk chunk-deletion">{{ .Content }}</pre> + {{- else -}} + <pre class="chunk chunk-unknown">{{ .Content }}</pre> + {{- end -}} + {{- end -}} + </div> + </div> + {{- end -}} + </div> + </main> + <footer> + {{- template "footer" . 
-}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/repo_contrib_index.tmpl b/forged/templates/repo_contrib_index.tmpl new file mode 100644 index 0000000..7d2d474 --- /dev/null +++ b/forged/templates/repo_contrib_index.tmpl @@ -0,0 +1,84 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "repo_contrib_index" -}} +{{- $root := . -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>Merge requests – {{ .repo_name }} – {{ template "group_path_plain" .group_path }} – {{ .global.forge_title -}}</title> + </head> + <body class="repo-contrib-index"> + {{- template "header" . -}} + <main> + <div class="repo-header"> + <h2>{{- .repo_name -}}</h2> + <ul class="nav-tabs-standalone"> + <li class="nav-item"> + <a class="nav-link" href="../{{- template "ref_query" $root -}}">Summary</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../tree/{{- template "ref_query" $root -}}">Tree</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../log/{{- template "ref_query" $root -}}">Log</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../branches/">Branches</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../tags/">Tags</a> + </li> + <li class="nav-item"> + <a class="nav-link active" href="../contrib/">Merge requests</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../settings/">Settings</a> + </li> + </ul> + </div> + <div class="repo-header-extension"> + <div class="repo-header-extension-content"> + {{- .repo_description -}} + </div> + </div> + <div class="padding-wrapper"> + <h2>How to submit a merge request</h2> + <pre>git clone {{ .ssh_clone_url }} + cd {{ .repo_name }} + git checkout -b contrib/name_of_your_contribution + # edit and commit stuff + git push -u origin HEAD</pre> + <p>Pushes that update branches in other namespaces, or pushes to existing 
contribution branches belonging to other SSH keys, will be automatically + rejected, unless you are an authenticated maintainer. Otherwise, a merge request is automatically opened, and the maintainers are notified via IRC.</p> + <p>Alternatively, you may <a href="https://git-send-email.io">email patches</a> to <a href="mailto:{{ .repo_patch_mailing_list }}">{{ .repo_patch_mailing_list }}</a>.</p> + </div> + <div class="padding-wrapper"> + <table id="recent-merge_requests" class="wide"> + <thead> + <tr> + <th scope="col">ID</th> + <th scope="col">Title</th> + <th scope="col">Status</th> + </tr> + </thead> + <tbody> + {{- range .merge_requests -}} + <tr> + <td class="merge_request-id">{{- .ID -}}</td> + <td class="merge_request-title"><a href="{{- .ID -}}/">{{- .Title -}}</a></td> + <td class="merge_request-status">{{- .Status -}}</td> + </tr> + {{- end -}} + </tbody> + </table> + </div> + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/repo_contrib_one.tmpl b/forged/templates/repo_contrib_one.tmpl new file mode 100644 index 0000000..6556ea9 --- /dev/null +++ b/forged/templates/repo_contrib_one.tmpl @@ -0,0 +1,125 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "repo_contrib_one" -}} +{{- $root := . -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>Merge requests – {{ .repo_name }} – {{ template "group_path_plain" .group_path }} – {{ .global.forge_title -}}</title> + </head> + <body class="repo-contrib-one"> + {{- template "header" . 
-}} + <main> + <div class="repo-header"> + <h2>{{- .repo_name -}}</h2> + <ul class="nav-tabs-standalone"> + <li class="nav-item"> + <a class="nav-link" href="../{{- template "ref_query" $root -}}">Summary</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../tree/{{- template "ref_query" $root -}}">Tree</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../log/{{- template "ref_query" $root -}}">Log</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../branches/">Branches</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../tags/">Tags</a> + </li> + <li class="nav-item"> + <a class="nav-link active" href="../contrib/">Merge requests</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../settings/">Settings</a> + </li> + </ul> + </div> + <div class="repo-header-extension"> + <div class="repo-header-extension-content"> + {{- .repo_description -}} + </div> + </div> + <div class="padding-wrapper"> + <table id="mr-info-table"> + <thead> + <tr class="title-row"> + <th colspan="2">Merge request info</th> + </tr> + </thead> + <tbody> + <tr> + <th scope="row">ID</th> + <td>{{- .mr_id -}}</td> + </tr> + <tr> + <th scope="row">Status</th> + <td>{{- .mr_status -}}</td> + </tr> + <tr> + <th scope="row">Title</th> + <td>{{- .mr_title -}}</td> + </tr> + <tr> + <th scope="row">Source ref</th> + <td>{{- .mr_source_ref -}}</td> + </tr> + <tr> + <th scope="row">Destination branch</th> + <td>{{- .mr_destination_branch -}}</td> + </tr> + <tr> + <th scope="row">Merge base</th> + <td>{{- .merge_base.Hash.String -}}</td> + </tr> + </tbody> + </table> + </div> + <div class="padding-wrapper"> + {{- $merge_base := .merge_base -}} + {{- $source_commit := .source_commit -}} + {{- range .file_patches -}} + <div class="file-patch toggle-on-wrapper"> + <input type="checkbox" id="toggle-{{- .From.Hash -}}{{- .To.Hash -}}" class="file-toggle toggle-on-toggle"> + <label for="toggle-{{- .From.Hash -}}{{- .To.Hash -}}" class="file-header 
toggle-on-header"> + <div> + {{- if eq .From.Path "" -}} + --- /dev/null + {{- else -}} + --- a/<a href="../../tree/{{- .From.Path -}}?commit={{- $merge_base.Hash -}}">{{- .From.Path -}}</a> {{ .From.Mode -}} + {{- end -}} + <br /> + {{- if eq .To.Path "" -}} + +++ /dev/null + {{- else -}} + +++ b/<a href="../../tree/{{- .To.Path -}}?commit={{- $source_commit.Hash -}}">{{- .To.Path -}}</a> {{ .To.Mode -}} + {{- end -}} + </div> + </label> + <div class="file-content toggle-on-content scroll"> + {{- range .Chunks -}} + {{- if eq .Operation 0 -}} + <pre class="chunk chunk-unchanged">{{ .Content }}</pre> + {{- else if eq .Operation 1 -}} + <pre class="chunk chunk-addition">{{ .Content }}</pre> + {{- else if eq .Operation 2 -}} + <pre class="chunk chunk-deletion">{{ .Content }}</pre> + {{- else -}} + <pre class="chunk chunk-unknown">{{ .Content }}</pre> + {{- end -}} + {{- end -}} + </div> + </div> + {{- end -}} + </div> + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/repo_index.tmpl b/forged/templates/repo_index.tmpl new file mode 100644 index 0000000..a0d2b7b --- /dev/null +++ b/forged/templates/repo_index.tmpl @@ -0,0 +1,96 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "repo_index" -}} +{{- $root := . -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>{{ .repo_name }} – {{ template "group_path_plain" .group_path }} – {{ .global.forge_title -}}</title> + </head> + <body class="repo-index"> + {{- template "header" . 
-}} + <main> + <div class="repo-header"> + <h2>{{- .repo_name -}}</h2> + <ul class="nav-tabs-standalone"> + <li class="nav-item"> + <a class="nav-link active" href="./{{- template "ref_query" $root -}}">Summary</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="tree/{{- template "ref_query" $root -}}">Tree</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="log/{{- template "ref_query" $root -}}">Log</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="branches/">Branches</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="tags/">Tags</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="contrib/">Merge requests</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="settings/">Settings</a> + </li> + </ul> + </div> + <div class="repo-header-extension"> + <div class="repo-header-extension-content"> + {{- .repo_description -}} + </div> + </div> + {{- if .notes -}} + <div id="notes">Notes</div> + <ul> + {{- range .notes -}}<li>{{- . -}}</li>{{- end -}} + </ul> + </div> + {{- end -}} + <p class="readingwidth"><code>{{- .ssh_clone_url -}}</code></p> + {{- if .ref_name -}} + <p class="readingwidth"> + <strong> + Warning: Due to various recent migrations, viewing non-HEAD refs may be broken. 
+ </strong> + </p> + {{- end -}} + {{- if .commits -}} + <div class="commit-list-small"> + {{- range .commits -}} + <div class="event"> + <div> + <a href="commit/{{- .Hash -}}" title="{{- .Hash -}}" rel="nofollow"> + {{- .Hash | printf "%.8s" -}} + </a> + — <a href="mailto:{{- .Email -}}">{{- .Author -}}</a> + <small class="pull-right"> + <span title="{{- .Date -}}">{{- .Date -}}</span> + </small> + </div> + <pre class="commit">{{- .Message | first_line -}}</pre> + </div> + {{- end -}} + {{- if dereference_error .commits_err -}} + <div class="commit-error"> + Error while obtaining commit log: {{ .commits_err }} + </div> + {{- end -}} + </div> + {{- end -}} + {{- if .readme -}} + <div class="padding-wrapper" id="readme"> + {{- .readme -}} + </div> + {{- end -}} + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/repo_log.tmpl b/forged/templates/repo_log.tmpl new file mode 100644 index 0000000..28aeed0 --- /dev/null +++ b/forged/templates/repo_log.tmpl @@ -0,0 +1,92 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "repo_log" -}} +{{- $root := . -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>Log – {{ .repo_name }} – {{ template "group_path_plain" .group_path }} – {{ .global.forge_title -}}</title> + </head> + <body class="repo-log"> + {{- template "header" . 
-}} + <main> + <div class="repo-header"> + <h2>{{- .repo_name -}}</h2> + <ul class="nav-tabs-standalone"> + <li class="nav-item"> + <a class="nav-link" href="../{{- template "ref_query" $root -}}">Summary</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../tree/{{- template "ref_query" $root -}}">Tree</a> + </li> + <li class="nav-item"> + <a class="nav-link active" href="../log/{{- template "ref_query" $root -}}">Log</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../branches/">Branches</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../tags/">Tags</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../contrib/">Merge requests</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="../settings/">Settings</a> + </li> + </ul> + </div> + <div class="repo-header-extension"> + <div class="repo-header-extension-content"> + {{- .repo_description -}} + </div> + </div> + <div class="scroll"> + {{- if .ref_name -}} + <p> + <strong> + Warning: Due to various recent migrations, viewing non-HEAD refs may be broken. 
+ </strong> + </p> + {{- end -}} + <table id="commits" class="wide"> + <thead> + <tr class="title-row"> + <th colspan="4">Commits {{ if .ref_name }} on {{ .ref_name }}{{ end -}}</th> + </tr> + <tr> + <th scope="col">ID</th> + <th scope="col">Title</th> + <th scope="col">Author</th> + <th scope="col">Author date</th> + </tr> + </thead> + <tbody> + {{- range .commits -}} + <tr> + <td class="commit-id"><a href="../commit/{{- .Hash -}}">{{- .Hash -}}</a></td> + <td class="commit-title">{{- .Message | first_line -}}</td> + <td class="commit-author"> + <a class="email-name" href="mailto:{{- .Author.Email -}}">{{- .Author.Name -}}</a> + </td> + <td class="commit-time"> + {{- .Author.When.Format "2006-01-02 15:04:05 -0700" -}} + </td> + </tr> + {{- end -}} + {{- if dereference_error .commits_err -}} + Error while obtaining commit log: {{ .commits_err }} + {{- end -}} + </tbody> + </table> + </div> + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/repo_raw_dir.tmpl b/forged/templates/repo_raw_dir.tmpl new file mode 100644 index 0000000..d72a41f --- /dev/null +++ b/forged/templates/repo_raw_dir.tmpl @@ -0,0 +1,90 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "repo_raw_dir" -}} +{{- $root := . -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>/{{ .path_spec }}{{ if ne .path_spec "" }}/{{ end }} – {{ .repo_name }} – {{ template "group_path_plain" .group_path }} – {{ .global.forge_title -}}</title> + </head> + <body class="repo-raw-dir"> + {{- template "header" . 
-}} + <main> + <div class="repo-header"> + <h2>{{- .repo_name -}}</h2> + <ul class="nav-tabs-standalone"> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}{{- template "ref_query" $root -}}">Summary</a> + </li> + <li class="nav-item"> + <a class="nav-link active" href="{{- .repo_url_root -}}tree/{{- template "ref_query" $root -}}">Tree</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}log/{{- template "ref_query" $root -}}">Log</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}branches/">Branches</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}tags/">Tags</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}contrib/">Merge requests</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}settings/">Settings</a> + </li> + </ul> + </div> + <div class="repo-header-extension"> + <div class="repo-header-extension-content"> + {{- .repo_description -}} + </div> + </div> + <div class="padding-wrapper scroll"> + {{- if .ref_name -}} + <p> + <strong> + Warning: Due to various recent migrations, viewing non-HEAD refs may be broken. 
+ </strong> + </p> + {{- end -}} + <table id="file-tree" class="wide"> + <thead> + <tr class="title-row"> + <th colspan="3"> + (Raw) /{{ .path_spec }}{{ if ne .path_spec "" }}/{{ end }}{{ if .ref_name }} on {{ .ref_name }}{{ end -}} + </th> + </tr> + <tr> + <th scope="col">Mode</th> + <th scope="col">Filename</th> + <th scope="col">Size</th> + </tr> + </thead> + <tbody> + {{- $path_spec := .path_spec -}} + {{- range .files -}} + <tr> + <td class="file-mode">{{- .Mode -}}</td> + <td class="file-name"><a href="{{- .Name -}}{{- if not .IsFile -}}/{{- end -}}{{- template "ref_query" $root -}}">{{- .Name -}}</a>{{- if not .IsFile -}}/{{- end -}}</td> + <td class="file-size">{{- .Size -}}</td> + </tr> + {{- end -}} + </tbody> + </table> + </div> + <div class="padding-wrapper"> + <div id="refs"> + </div> + </div> + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/repo_tree_dir.tmpl b/forged/templates/repo_tree_dir.tmpl new file mode 100644 index 0000000..3d8425c --- /dev/null +++ b/forged/templates/repo_tree_dir.tmpl @@ -0,0 +1,95 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "repo_tree_dir" -}} +{{- $root := . -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <title>/{{ .path_spec }}{{ if ne .path_spec "" }}/{{ end }} – {{ .repo_name }} – {{ template "group_path_plain" .group_path }} – {{ .global.forge_title -}}</title> + </head> + <body class="repo-tree-dir"> + {{- template "header" . 
-}} + <main> + <div class="repo-header"> + <h2>{{- .repo_name -}}</h2> + <ul class="nav-tabs-standalone"> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}{{- template "ref_query" $root -}}">Summary</a> + </li> + <li class="nav-item"> + <a class="nav-link active" href="{{- .repo_url_root -}}tree/{{- template "ref_query" $root -}}">Tree</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}log/{{- template "ref_query" $root -}}">Log</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}branches/">Branches</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}tags/">Tags</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}contrib/">Merge requests</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}settings/">Settings</a> + </li> + </ul> + </div> + <div class="repo-header-extension"> + <div class="repo-header-extension-content"> + {{- .repo_description -}} + </div> + </div> + <div class="padding-wrapper scroll"> + {{- if .ref_name -}} + <p> + <strong> + Warning: Due to various recent migrations, viewing non-HEAD refs may be broken. 
+ </strong> + </p> + {{- end -}} + <table id="file-tree" class="wide"> + <thead> + <tr class="title-row"> + <th colspan="3"> + /{{ .path_spec }}{{ if ne .path_spec "" }}/{{ end }}{{ if .ref_name }} on {{ .ref_name }}{{ end -}} + </th> + </tr> + <tr> + <th scope="col">Mode</th> + <th scope="col">Filename</th> + <th scope="col">Size</th> + </tr> + </thead> + <tbody> + {{- $path_spec := .path_spec -}} + {{- range .files -}} + <tr> + <td class="file-mode">{{- .Mode -}}</td> + <td class="file-name"><a href="{{- .Name -}}{{- if not .IsFile -}}/{{- end -}}{{- template "ref_query" $root -}}">{{- .Name -}}</a>{{- if not .IsFile -}}/{{- end -}}</td> + <td class="file-size">{{- .Size -}}</td> + </tr> + {{- end -}} + </tbody> + </table> + </div> + <div class="padding-wrapper"> + <div id="refs"> + </div> + </div> + {{- if .readme -}} + <div class="padding-wrapper" id="readme"> + {{- .readme -}} + </div> + {{- end -}} + </main> + <footer> + {{- template "footer" . -}} + </footer> + </body> +</html> +{{- end -}} diff --git a/forged/templates/repo_tree_file.tmpl b/forged/templates/repo_tree_file.tmpl new file mode 100644 index 0000000..a462b04 --- /dev/null +++ b/forged/templates/repo_tree_file.tmpl @@ -0,0 +1,67 @@ +{{/* + SPDX-License-Identifier: AGPL-3.0-only + SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> +*/}} +{{- define "repo_tree_file" -}} +{{- $root := . -}} +<!DOCTYPE html> +<html lang="en"> + <head> + {{- template "head_common" . -}} + <link rel="stylesheet" href="/-/static/chroma.css" /> + <title>/{{ .path_spec }} – {{ .repo_name }} – {{ template "group_path_plain" .group_path }} – {{ .global.forge_title -}}</title> + </head> + <body class="repo-tree-file"> + {{- template "header" . 
-}} + <main> + <div class="repo-header"> + <h2>{{- .repo_name -}}</h2> + <ul class="nav-tabs-standalone"> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}{{- template "ref_query" $root -}}">Summary</a> + </li> + <li class="nav-item"> + <a class="nav-link active" href="{{- .repo_url_root -}}tree/{{- template "ref_query" $root -}}">Tree</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}log/{{- template "ref_query" $root -}}">Log</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}branches/">Branches</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}tags/">Tags</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}contrib/">Merge requests</a> + </li> + <li class="nav-item"> + <a class="nav-link" href="{{- .repo_url_root -}}settings/">Settings</a> + </li> + </ul> + </div> + <div class="repo-header-extension"> + <div class="repo-header-extension-content"> + {{- .repo_description -}} + </div> + </div> + <div class="padding"> + {{- if .ref_name -}} + <p> + <strong> + Warning: Due to various recent migrations, viewing non-HEAD refs may be broken. + </strong> + </p> + {{- end -}} + <p> + /{{ .path_spec }} (<a href="/{{ template "group_path_plain" .group_path }}/-/repos/{{ .repo_name }}/raw/{{ .path_spec }}{{- template "ref_query" $root -}}">raw</a>) + </p> + {{- .file_contents -}} + </div> + </main> + <footer> + {{- template "footer" . 
-}} + </footer> + </body> +</html> +{{- end -}} diff --git a/git2d/bare.c b/git2d/bare.c new file mode 100644 index 0000000..307f3d8 --- /dev/null +++ b/git2d/bare.c @@ -0,0 +1,300 @@ +/*- + * SPDX-License-Identifier: MIT + * SPDX-FileCopyrightText: Copyright (c) 2022 Frank Smit <https://61924.nl/> + */ + +#include <string.h> +#include <stdbool.h> + +#include "bare.h" + +#define UNUSED(x) (void)(x) + +enum { + U8SZ = 1, + U16SZ = 2, + U32SZ = 4, + U64SZ = 8, + MAXVARINTSZ = 10, +}; + +bare_error bare_put_uint(struct bare_writer *ctx, uint64_t x) +{ + uint64_t i = 0; + uint8_t b[MAXVARINTSZ]; + + while (x >= 0x80) { + b[i] = (uint8_t) x | 0x80; + x >>= 7; + i++; + } + + b[i] = (uint8_t) x; + i++; + + return ctx->write(ctx->buffer, b, i); +} + +bare_error bare_get_uint(struct bare_reader *ctx, uint64_t *x) +{ + bare_error err = BARE_ERROR_NONE; + + uint8_t shift = 0; + uint64_t result = 0; + + for (uint8_t i = 0; i < 10; i++) { + uint8_t b; + + err = ctx->read(ctx->buffer, &b, U8SZ); + if (err != BARE_ERROR_NONE) { + break; + } + + if (b < 0x80) { + result |= (uint64_t) b << shift; + break; + } else { + result |= ((uint64_t) b & 0x7f) << shift; + shift += 7; + } + } + + *x = result; + + return err; +} + +bare_error bare_put_int(struct bare_writer *ctx, int64_t x) +{ + uint64_t ux = (uint64_t) x << 1; + + if (x < 0) { + ux = ~ux; + } + + return bare_put_uint(ctx, ux); +} + +bare_error bare_get_int(struct bare_reader *ctx, int64_t *x) +{ + uint64_t ux; + + bare_error err = bare_get_uint(ctx, &ux); + + if (err == BARE_ERROR_NONE) { + *x = (int64_t) (ux >> 1); + + if ((ux & 1) != 0) { + *x = ~(*x); + } + } + + return err; +} + +bare_error bare_put_u8(struct bare_writer *ctx, uint8_t x) +{ + return ctx->write(ctx->buffer, &x, U8SZ); +} + +bare_error bare_get_u8(struct bare_reader *ctx, uint8_t *x) +{ + return ctx->read(ctx->buffer, x, U8SZ); +} + +bare_error bare_put_u16(struct bare_writer *ctx, uint16_t x) +{ + return ctx->write(ctx->buffer, (uint8_t[U16SZ]) { + x, x >> 
8} + , U16SZ); +} + +bare_error bare_get_u16(struct bare_reader *ctx, uint16_t *x) +{ + bare_error err = ctx->read(ctx->buffer, x, U16SZ); + + if (err == BARE_ERROR_NONE) { + *x = (uint16_t) ((uint8_t *) x)[0] + | (uint16_t) ((uint8_t *) x)[1] << 8; + } + + return err; +} + +bare_error bare_put_u32(struct bare_writer *ctx, uint32_t x) +{ + uint8_t buf[U32SZ]; + + buf[0] = (uint8_t) (x); + buf[1] = (uint8_t) (x >> 8); + buf[2] = (uint8_t) (x >> 16); + buf[3] = (uint8_t) (x >> 24); + + return ctx->write(ctx->buffer, buf, U32SZ); +} + +bare_error bare_get_u32(struct bare_reader *ctx, uint32_t *x) +{ + bare_error err = ctx->read(ctx->buffer, x, U32SZ); + + if (err == BARE_ERROR_NONE) { + *x = (uint32_t) (((uint8_t *) x)[0]) + | (uint32_t) (((uint8_t *) x)[1] << 8) + | (uint32_t) (((uint8_t *) x)[2] << 16) + | (uint32_t) (((uint8_t *) x)[3] << 24); + } + + return err; +} + +bare_error bare_put_u64(struct bare_writer *ctx, uint64_t x) +{ + uint8_t buf[U64SZ]; + + buf[0] = x; + buf[1] = x >> 8; + buf[2] = x >> 16; + buf[3] = x >> 24; + buf[4] = x >> 32; + buf[5] = x >> 40; + buf[6] = x >> 48; + buf[7] = x >> 56; + + return ctx->write(ctx->buffer, buf, U64SZ); +} + +bare_error bare_get_u64(struct bare_reader *ctx, uint64_t *x) +{ + bare_error err = ctx->read(ctx->buffer, x, U64SZ); + + if (err == BARE_ERROR_NONE) { + *x = (uint64_t) ((uint8_t *) x)[0] + | (uint64_t) ((uint8_t *) x)[1] << 8 | (uint64_t) ((uint8_t *) x)[2] << 16 | (uint64_t) ((uint8_t *) x)[3] << 24 | (uint64_t) ((uint8_t *) x)[4] << 32 | (uint64_t) ((uint8_t *) x)[5] << 40 | (uint64_t) ((uint8_t *) x)[6] << 48 | (uint64_t) ((uint8_t *) x)[7] << 56; + } + + return err; +} + +bare_error bare_put_i8(struct bare_writer *ctx, int8_t x) +{ + return bare_put_u8(ctx, x); +} + +bare_error bare_get_i8(struct bare_reader *ctx, int8_t *x) +{ + return bare_get_u8(ctx, (uint8_t *) x); +} + +bare_error bare_put_i16(struct bare_writer *ctx, int16_t x) +{ + return bare_put_u16(ctx, x); +} + +bare_error bare_get_i16(struct 
bare_reader *ctx, int16_t *x) +{ + return bare_get_u16(ctx, (uint16_t *) x); +} + +bare_error bare_put_i32(struct bare_writer *ctx, int32_t x) +{ + return bare_put_u32(ctx, x); +} + +bare_error bare_get_i32(struct bare_reader *ctx, int32_t *x) +{ + return bare_get_u32(ctx, (uint32_t *) x); +} + +bare_error bare_put_i64(struct bare_writer *ctx, int64_t x) +{ + return bare_put_u64(ctx, x); +} + +bare_error bare_get_i64(struct bare_reader *ctx, int64_t *x) +{ + return bare_get_u64(ctx, (uint64_t *) x); +} + +bare_error bare_put_f32(struct bare_writer *ctx, float x) +{ + uint32_t b; + memcpy(&b, &x, U32SZ); + + return bare_put_u32(ctx, b); +} + +bare_error bare_get_f32(struct bare_reader *ctx, float *x) +{ + return ctx->read(ctx->buffer, x, U32SZ); +} + +bare_error bare_put_f64(struct bare_writer *ctx, double x) +{ + uint64_t b; + memcpy(&b, &x, U64SZ); + + return bare_put_u64(ctx, b); +} + +bare_error bare_get_f64(struct bare_reader *ctx, double *x) +{ + return ctx->read(ctx->buffer, x, U64SZ); +} + +bare_error bare_put_bool(struct bare_writer *ctx, bool x) +{ + return bare_put_u8(ctx, (uint8_t) x); +} + +bare_error bare_get_bool(struct bare_reader *ctx, bool *x) +{ + return bare_get_u8(ctx, (uint8_t *) x); +} + +bare_error bare_put_fixed_data(struct bare_writer *ctx, const uint8_t *src, uint64_t sz) +{ + return ctx->write(ctx->buffer, (void *)src, sz); +} + +bare_error bare_get_fixed_data(struct bare_reader *ctx, uint8_t *dst, uint64_t sz) +{ + return ctx->read(ctx->buffer, dst, sz); +} + +bare_error bare_put_data(struct bare_writer *ctx, const uint8_t *src, uint64_t sz) +{ + bare_error err = BARE_ERROR_NONE; + + err = bare_put_uint(ctx, sz); + + if (err == BARE_ERROR_NONE) { + err = bare_put_fixed_data(ctx, src, sz); + } + + return err; +} + +bare_error bare_get_data(struct bare_reader *ctx, uint8_t *dst, uint64_t sz) +{ + bare_error err = BARE_ERROR_NONE; + uint64_t ssz = 0; + + err = bare_get_uint(ctx, &ssz); + + if (err == BARE_ERROR_NONE) { + err = ssz <= sz ? 
bare_get_fixed_data(ctx, dst, ssz) + : BARE_ERROR_BUFFER_TOO_SMALL; + } + + return err; +} + +bare_error bare_put_str(struct bare_writer *ctx, const char *src, uint64_t sz) +{ + return bare_put_data(ctx, (uint8_t *) src, sz); +} + +bare_error bare_get_str(struct bare_reader *ctx, char *dst, uint64_t sz) +{ + return bare_get_data(ctx, (uint8_t *) dst, sz); +} diff --git a/git2d/bare.h b/git2d/bare.h new file mode 100644 index 0000000..e049dd0 --- /dev/null +++ b/git2d/bare.h @@ -0,0 +1,72 @@ +/*- + * SPDX-License-Identifier: MIT + * SPDX-FileCopyrightText: Copyright (c) 2022 Frank Smit <https://61924.nl/> + */ + +#ifndef BARE_H +#define BARE_H + +#include <stdint.h> +#include <stdbool.h> + +typedef enum { + BARE_ERROR_NONE, + BARE_ERROR_WRITE_FAILED, + BARE_ERROR_READ_FAILED, + BARE_ERROR_BUFFER_TOO_SMALL, + BARE_ERROR_INVALID_UTF8, +} bare_error; + +typedef bare_error(*bare_write_func) (void *buffer, const void *src, uint64_t sz); +typedef bare_error(*bare_read_func) (void *buffer, void *dst, uint64_t sz); + +struct bare_writer { + void *buffer; + bare_write_func write; +}; + +struct bare_reader { + void *buffer; + bare_read_func read; +}; + +bare_error bare_put_uint(struct bare_writer *ctx, uint64_t x); /* varuint */ +bare_error bare_get_uint(struct bare_reader *ctx, uint64_t * x); /* varuint */ +bare_error bare_put_u8(struct bare_writer *ctx, uint8_t x); +bare_error bare_get_u8(struct bare_reader *ctx, uint8_t * x); +bare_error bare_put_u16(struct bare_writer *ctx, uint16_t x); +bare_error bare_get_u16(struct bare_reader *ctx, uint16_t * x); +bare_error bare_put_u32(struct bare_writer *ctx, uint32_t x); +bare_error bare_get_u32(struct bare_reader *ctx, uint32_t * x); +bare_error bare_put_u64(struct bare_writer *ctx, uint64_t x); +bare_error bare_get_u64(struct bare_reader *ctx, uint64_t * x); + +bare_error bare_put_int(struct bare_writer *ctx, int64_t x); /* varint */ +bare_error bare_get_int(struct bare_reader *ctx, int64_t * x); /* varint */ +bare_error 
bare_put_i8(struct bare_writer *ctx, int8_t x); +bare_error bare_get_i8(struct bare_reader *ctx, int8_t * x); +bare_error bare_put_i16(struct bare_writer *ctx, int16_t x); +bare_error bare_get_i16(struct bare_reader *ctx, int16_t * x); +bare_error bare_put_i32(struct bare_writer *ctx, int32_t x); +bare_error bare_get_i32(struct bare_reader *ctx, int32_t * x); +bare_error bare_put_i64(struct bare_writer *ctx, int64_t x); +bare_error bare_get_i64(struct bare_reader *ctx, int64_t * x); + +bare_error bare_put_f32(struct bare_writer *ctx, float x); +bare_error bare_get_f32(struct bare_reader *ctx, float *x); +bare_error bare_put_f64(struct bare_writer *ctx, double x); +bare_error bare_get_f64(struct bare_reader *ctx, double *x); + +bare_error bare_put_bool(struct bare_writer *ctx, bool x); +bare_error bare_get_bool(struct bare_reader *ctx, bool *x); + +bare_error bare_put_fixed_data(struct bare_writer *ctx, const uint8_t * src, uint64_t sz); +bare_error bare_get_fixed_data(struct bare_reader *ctx, uint8_t * dst, uint64_t sz); +bare_error bare_put_data(struct bare_writer *ctx, const uint8_t * src, uint64_t sz); +bare_error bare_get_data(struct bare_reader *ctx, uint8_t * dst, uint64_t sz); +bare_error bare_put_str(struct bare_writer *ctx, const char *src, uint64_t sz); +bare_error bare_get_str(struct bare_reader *ctx, char *dst, uint64_t sz); + +/* Note that the _str implementation here does not check for UTF-8 validity. 
*/ + +#endif /* BARE_H */ diff --git a/git2d/cmd1.c b/git2d/cmd1.c new file mode 100644 index 0000000..ec3d1ad --- /dev/null +++ b/git2d/cmd1.c @@ -0,0 +1,124 @@ +/*- + * SPDX-License-Identifier: AGPL-3.0-only + * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + */ + +#include "x.h" + +int cmd_index(git_repository *repo, struct bare_writer *writer) +{ + /* HEAD tree */ + + git_object *obj = NULL; + int err = git_revparse_single(&obj, repo, "HEAD^{tree}"); + if (err != 0) { + bare_put_uint(writer, 4); + return -1; + } + git_tree *tree = (git_tree *) obj; + + /* README */ + + git_tree_entry *entry = NULL; + err = git_tree_entry_bypath(&entry, tree, "README.md"); + if (err != 0) { + bare_put_uint(writer, 5); + git_tree_free(tree); + return -1; + } + git_otype objtype = git_tree_entry_type(entry); + if (objtype != GIT_OBJECT_BLOB) { + bare_put_uint(writer, 6); + git_tree_entry_free(entry); + git_tree_free(tree); + return -1; + } + git_object *obj2 = NULL; + err = git_tree_entry_to_object(&obj2, repo, entry); + if (err != 0) { + bare_put_uint(writer, 7); + git_tree_entry_free(entry); + git_tree_free(tree); + return -1; + } + git_blob *blob = (git_blob *) obj2; + const void *content = git_blob_rawcontent(blob); + if (content == NULL) { + bare_put_uint(writer, 8); + git_blob_free(blob); + git_tree_entry_free(entry); + git_tree_free(tree); + return -1; + } + bare_put_uint(writer, 0); + bare_put_data(writer, content, git_blob_rawsize(blob)); + + /* Commits */ + + /* TODO BUG: This might be a different commit from the displayed README due to races */ + + git_revwalk *walker = NULL; + if (git_revwalk_new(&walker, repo) != 0) { + bare_put_uint(writer, 9); + git_blob_free(blob); + git_tree_entry_free(entry); + git_tree_free(tree); + return -1; + } + + if (git_revwalk_push_head(walker) != 0) { + bare_put_uint(writer, 9); + git_revwalk_free(walker); + git_blob_free(blob); + git_tree_entry_free(entry); + git_tree_free(tree); + return -1; + } + + int 
count = 0; + git_oid oid; + while (count < 3 && git_revwalk_next(&oid, walker) == 0) { + git_commit *commit = NULL; + if (git_commit_lookup(&commit, repo, &oid) != 0) + break; + + const char *msg = git_commit_summary(commit); + const git_signature *author = git_commit_author(commit); + + /* ID */ + bare_put_data(writer, oid.id, GIT_OID_RAWSZ); + + /* Title */ + size_t msg_len = msg ? strlen(msg) : 0; + bare_put_data(writer, (const uint8_t *)(msg ? msg : ""), msg_len); + + /* Author's name */ + const char *author_name = author ? author->name : ""; + bare_put_data(writer, (const uint8_t *)author_name, strlen(author_name)); + + /* Author's email */ + const char *author_email = author ? author->email : ""; + bare_put_data(writer, (const uint8_t *)author_email, strlen(author_email)); + + /* Author's date */ + /* TODO: Pass the integer instead of a string */ + time_t time = git_commit_time(commit); + char timebuf[64]; + struct tm *tm = localtime(&time); + if (tm) + strftime(timebuf, sizeof(timebuf), "%Y-%m-%d %H:%M:%S", tm); + else + strcpy(timebuf, "unknown"); + bare_put_data(writer, (const uint8_t *)timebuf, strlen(timebuf)); + + git_commit_free(commit); + count++; + } + + git_revwalk_free(walker); + git_blob_free(blob); + git_tree_entry_free(entry); + git_tree_free(tree); + + return 0; +} diff --git a/git2d/cmd2.c b/git2d/cmd2.c new file mode 100644 index 0000000..33947c6 --- /dev/null +++ b/git2d/cmd2.c @@ -0,0 +1,121 @@ +/*- + * SPDX-License-Identifier: AGPL-3.0-only + * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + */ + +#include "x.h" + +int cmd_treeraw(git_repository *repo, struct bare_reader *reader, struct bare_writer *writer) +{ + /* Path */ + char path[4096] = { 0 }; + int err = bare_get_data(reader, (uint8_t *) path, sizeof(path) - 1); + if (err != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + path[sizeof(path) - 1] = '\0'; + + /* HEAD^{tree} */ + git_object *head_obj = NULL; + err = 
git_revparse_single(&head_obj, repo, "HEAD^{tree}"); + if (err != 0) { + bare_put_uint(writer, 4); + return -1; + } + git_tree *tree = (git_tree *) head_obj; + + /* Path in tree */ + git_tree_entry *entry = NULL; + git_otype objtype; + if (strlen(path) == 0) { + entry = NULL; + objtype = GIT_OBJECT_TREE; + } else { + err = git_tree_entry_bypath(&entry, tree, path); + if (err != 0) { + bare_put_uint(writer, 3); + git_tree_free(tree); + return 0; + } + objtype = git_tree_entry_type(entry); + } + + if (objtype == GIT_OBJECT_TREE) { + /* Tree */ + git_object *tree_obj = NULL; + if (entry == NULL) { + tree_obj = (git_object *) tree; + } else { + err = git_tree_entry_to_object(&tree_obj, repo, entry); + if (err != 0) { + bare_put_uint(writer, 7); + goto cleanup; + } + } + git_tree *subtree = (git_tree *) tree_obj; + + size_t count = git_tree_entrycount(subtree); + bare_put_uint(writer, 0); + bare_put_uint(writer, 1); + bare_put_uint(writer, count); + for (size_t i = 0; i < count; i++) { + const git_tree_entry *subentry = git_tree_entry_byindex(subtree, i); + const char *name = git_tree_entry_name(subentry); + git_otype type = git_tree_entry_type(subentry); + uint32_t mode = git_tree_entry_filemode(subentry); + + uint8_t entry_type = 0; + uint64_t size = 0; + + if (type == GIT_OBJECT_TREE) { + entry_type = 1; + } else if (type == GIT_OBJECT_BLOB) { + entry_type = 2; + + git_object *subobj = NULL; + if (git_tree_entry_to_object(&subobj, repo, subentry) == 0) { + git_blob *b = (git_blob *) subobj; + size = git_blob_rawsize(b); + git_blob_free(b); + } + } + + bare_put_uint(writer, entry_type); + bare_put_uint(writer, mode); + bare_put_uint(writer, size); + bare_put_data(writer, (const uint8_t *)name, strlen(name)); + } + if (entry != NULL) { + git_tree_free(subtree); + } + } else if (objtype == GIT_OBJECT_BLOB) { + /* Blob */ + git_object *blob_obj = NULL; + err = git_tree_entry_to_object(&blob_obj, repo, entry); + if (err != 0) { + bare_put_uint(writer, 7); + goto cleanup; 
+ } + git_blob *blob = (git_blob *) blob_obj; + const void *content = git_blob_rawcontent(blob); + if (content == NULL) { + bare_put_uint(writer, 8); + git_blob_free(blob); + goto cleanup; + } + bare_put_uint(writer, 0); + bare_put_uint(writer, 2); + bare_put_data(writer, content, git_blob_rawsize(blob)); + git_blob_free(blob); + } else { + /* Unknown */ + bare_put_uint(writer, -1); + } + + cleanup: + if (entry != NULL) + git_tree_entry_free(entry); + git_tree_free(tree); + return 0; +} diff --git a/git2d/cmd_commit.c b/git2d/cmd_commit.c new file mode 100644 index 0000000..4d4d0bf --- /dev/null +++ b/git2d/cmd_commit.c @@ -0,0 +1,403 @@ +/*- + * SPDX-License-Identifier: AGPL-3.0-only + * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + */ + +#include "x.h" + +static int append_buf(char **data, size_t *len, size_t *cap, const char *src, size_t n) +{ + if (n == 0) + return 0; + size_t need = *len + n; + if (need > *cap) { + size_t newcap = *cap ? *cap * 2 : 256; + while (newcap < need) + newcap *= 2; + char *p = (char *)realloc(*data, newcap); + if (!p) + return -1; + *data = p; + *cap = newcap; + } + memcpy(*data + *len, src, n); + *len += n; + return 0; +} + +int cmd_commit_tree_oid(git_repository *repo, struct bare_reader *reader, struct bare_writer *writer) +{ + char hex[64] = { 0 }; + if (bare_get_data(reader, (uint8_t *) hex, sizeof(hex) - 1) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + git_oid oid; + if (git_oid_fromstr(&oid, hex) != 0) { + bare_put_uint(writer, 14); + return -1; + } + git_commit *commit = NULL; + if (git_commit_lookup(&commit, repo, &oid) != 0) { + bare_put_uint(writer, 14); + return -1; + } + git_tree *tree = NULL; + if (git_commit_tree(&tree, commit) != 0) { + git_commit_free(commit); + bare_put_uint(writer, 14); + return -1; + } + const git_oid *toid = git_tree_id(tree); + bare_put_uint(writer, 0); + bare_put_data(writer, toid->id, GIT_OID_RAWSZ); + git_tree_free(tree); + 
git_commit_free(commit); + return 0; +} + +int cmd_commit_create(git_repository *repo, struct bare_reader *reader, struct bare_writer *writer) +{ + char treehex[64] = { 0 }; + if (bare_get_data(reader, (uint8_t *) treehex, sizeof(treehex) - 1) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + git_oid tree_oid; + if (git_oid_fromstr(&tree_oid, treehex) != 0) { + bare_put_uint(writer, 15); + return -1; + } + uint64_t pcnt = 0; + if (bare_get_uint(reader, &pcnt) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + git_commit **parents = NULL; + if (pcnt > 0) { + parents = (git_commit **) calloc(pcnt, sizeof(git_commit *)); + if (!parents) { + bare_put_uint(writer, 15); + return -1; + } + for (uint64_t i = 0; i < pcnt; i++) { + char phex[64] = { 0 }; + if (bare_get_data(reader, (uint8_t *) phex, sizeof(phex) - 1) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + goto fail; + } + git_oid poid; + if (git_oid_fromstr(&poid, phex) != 0) { + bare_put_uint(writer, 15); + goto fail; + } + if (git_commit_lookup(&parents[i], repo, &poid) != 0) { + bare_put_uint(writer, 15); + goto fail; + } + } + } + char aname[512] = { 0 }; + char aemail[512] = { 0 }; + if (bare_get_data(reader, (uint8_t *) aname, sizeof(aname) - 1) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + goto fail; + } + if (bare_get_data(reader, (uint8_t *) aemail, sizeof(aemail) - 1) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + goto fail; + } + int64_t when = 0; + int64_t tzoff = 0; + if (bare_get_i64(reader, &when) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + goto fail; + } + if (bare_get_i64(reader, &tzoff) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + goto fail; + } + char *message = NULL; + { + uint64_t msz = 0; + if (bare_get_uint(reader, &msz) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + goto fail; + } + message = (char *)malloc(msz + 1); + if (!message) { + bare_put_uint(writer, 15); + goto fail; + } + if (bare_get_fixed_data(reader, 
(uint8_t *) message, msz) != BARE_ERROR_NONE) { + free(message); + bare_put_uint(writer, 11); + goto fail; + } + message[msz] = '\0'; + } + git_signature *sig = NULL; + if (git_signature_new(&sig, aname, aemail, (git_time_t) when, (int)tzoff) != 0) { + free(message); + bare_put_uint(writer, 19); + goto fail; + } + git_tree *tree = NULL; + if (git_tree_lookup(&tree, repo, &tree_oid) != 0) { + git_signature_free(sig); + free(message); + bare_put_uint(writer, 19); + goto fail; + } + git_oid out; + int rc = git_commit_create(&out, repo, NULL, sig, sig, NULL, message, tree, + (int)pcnt, (const git_commit **)parents); + git_tree_free(tree); + git_signature_free(sig); + free(message); + if (rc != 0) { + bare_put_uint(writer, 19); + goto fail; + } + bare_put_uint(writer, 0); + bare_put_data(writer, out.id, GIT_OID_RAWSZ); + if (parents) { + for (uint64_t i = 0; i < pcnt; i++) + if (parents[i]) + git_commit_free(parents[i]); + free(parents); + } + return 0; + fail: + if (parents) { + for (uint64_t i = 0; i < pcnt; i++) + if (parents[i]) + git_commit_free(parents[i]); + free(parents); + } + return -1; +} + +int cmd_update_ref(git_repository *repo, struct bare_reader *reader, struct bare_writer *writer) +{ + char refname[4096] = { 0 }; + char commithex[64] = { 0 }; + if (bare_get_data(reader, (uint8_t *) refname, sizeof(refname) - 1) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + if (bare_get_data(reader, (uint8_t *) commithex, sizeof(commithex) - 1) + != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + git_oid oid; + if (git_oid_fromstr(&oid, commithex) != 0) { + bare_put_uint(writer, 18); + return -1; + } + git_reference *out = NULL; + int rc = git_reference_create(&out, repo, refname, &oid, 1, NULL); + if (rc != 0) { + bare_put_uint(writer, 18); + return -1; + } + git_reference_free(out); + bare_put_uint(writer, 0); + return 0; +} + +int cmd_commit_info(git_repository *repo, struct bare_reader *reader, struct bare_writer *writer) +{ 
+ char hex[64] = { 0 }; + if (bare_get_data(reader, (uint8_t *) hex, sizeof(hex) - 1) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + git_oid oid; + if (git_oid_fromstr(&oid, hex) != 0) { + bare_put_uint(writer, 14); + return -1; + } + git_commit *commit = NULL; + if (git_commit_lookup(&commit, repo, &oid) != 0) { + bare_put_uint(writer, 14); + return -1; + } + + const git_signature *author = git_commit_author(commit); + const git_signature *committer = git_commit_committer(commit); + + const char *aname = author && author->name ? author->name : ""; + const char *aemail = author && author->email ? author->email : ""; + git_time_t awhen = author ? author->when.time : 0; + int aoffset = author ? author->when.offset : 0; + + const char *cname = committer && committer->name ? committer->name : ""; + const char *cemail = committer && committer->email ? committer->email : ""; + git_time_t cwhen = committer ? committer->when.time : 0; + int coffset = committer ? committer->when.offset : 0; + + const char *message = git_commit_message(commit); + if (!message) message = ""; + + bare_put_uint(writer, 0); + /* Commit ID */ + const git_oid *cid = git_commit_id(commit); + bare_put_data(writer, cid->id, GIT_OID_RAWSZ); + /* Author */ + bare_put_data(writer, (const uint8_t *)aname, strlen(aname)); + bare_put_data(writer, (const uint8_t *)aemail, strlen(aemail)); + bare_put_i64(writer, (int64_t)awhen); + bare_put_i64(writer, (int64_t)aoffset); + /* Committer */ + bare_put_data(writer, (const uint8_t *)cname, strlen(cname)); + bare_put_data(writer, (const uint8_t *)cemail, strlen(cemail)); + bare_put_i64(writer, (int64_t)cwhen); + bare_put_i64(writer, (int64_t)coffset); + /* Message */ + bare_put_data(writer, (const uint8_t *)message, strlen(message)); + /* Parents */ + uint32_t pcnt = git_commit_parentcount(commit); + bare_put_uint(writer, (uint64_t)pcnt); + for (uint32_t i = 0; i < pcnt; i++) { + const git_commit *p = NULL; + if 
(git_commit_parent((git_commit **)&p, commit, i) == 0 && p) { + const git_oid *po = git_commit_id(p); + bare_put_data(writer, po->id, GIT_OID_RAWSZ); + git_commit_free((git_commit *)p); + } else { + uint8_t zero[GIT_OID_RAWSZ] = {0}; + bare_put_data(writer, zero, GIT_OID_RAWSZ); + } + } + + /* Structured diff */ + git_tree *tree = NULL; + if (git_commit_tree(&tree, commit) != 0) { + git_commit_free(commit); + bare_put_uint(writer, 15); + return -1; + } + git_diff *diff = NULL; + if (pcnt == 0) { + if (git_diff_tree_to_tree(&diff, repo, NULL, tree, NULL) != 0) { + git_tree_free(tree); + git_commit_free(commit); + bare_put_uint(writer, 15); + return -1; + } + } else { + git_commit *parent = NULL; + git_tree *ptree = NULL; + if (git_commit_parent(&parent, commit, 0) != 0 || git_commit_tree(&ptree, parent) != 0) { + if (parent) git_commit_free(parent); + git_tree_free(tree); + git_commit_free(commit); + bare_put_uint(writer, 15); + return -1; + } + if (git_diff_tree_to_tree(&diff, repo, ptree, tree, NULL) != 0) { + git_tree_free(ptree); + git_commit_free(parent); + git_tree_free(tree); + git_commit_free(commit); + bare_put_uint(writer, 15); + return -1; + } + git_tree_free(ptree); + git_commit_free(parent); + } + + size_t files = git_diff_num_deltas(diff); + bare_put_uint(writer, (uint64_t)files); + for (size_t i = 0; i < files; i++) { + git_patch *patch = NULL; + if (git_patch_from_diff(&patch, diff, i) != 0) { + /* empty diff */ + bare_put_uint(writer, 0); + bare_put_uint(writer, 0); + bare_put_data(writer, (const uint8_t *)"", 0); + bare_put_data(writer, (const uint8_t *)"", 0); + bare_put_uint(writer, 0); + continue; + } + const git_diff_delta *delta = git_patch_get_delta(patch); + uint32_t from_mode = delta ? delta->old_file.mode : 0; + uint32_t to_mode = delta ? delta->new_file.mode : 0; + const char *from_path = (delta && delta->old_file.path) ? delta->old_file.path : ""; + const char *to_path = (delta && delta->new_file.path) ? 
delta->new_file.path : ""; + bare_put_uint(writer, (uint64_t)from_mode); + bare_put_uint(writer, (uint64_t)to_mode); + bare_put_data(writer, (const uint8_t *)from_path, strlen(from_path)); + bare_put_data(writer, (const uint8_t *)to_path, strlen(to_path)); + + size_t hunks = git_patch_num_hunks(patch); + uint64_t chunk_count = 0; + for (size_t h = 0; h < hunks; h++) { + const git_diff_hunk *hunk = NULL; + size_t lines = 0; + if (git_patch_get_hunk(&hunk, &lines, patch, h) != 0) continue; + int prev = -2; + for (size_t ln = 0; ln < lines; ln++) { + const git_diff_line *line = NULL; + if (git_patch_get_line_in_hunk(&line, patch, h, ln) != 0 || !line) continue; + int op = 0; + if (line->origin == '+') op = 1; + else if (line->origin == '-') op = 2; + else op = 0; + if (op != prev) { chunk_count++; prev = op; } + } + } + bare_put_uint(writer, chunk_count); + for (size_t h = 0; h < hunks; h++) { + const git_diff_hunk *hunk = NULL; + size_t lines = 0; + if (git_patch_get_hunk(&hunk, &lines, patch, h) != 0) continue; + int prev = -2; + struct { + char *data; + size_t len; + size_t cap; + } buf = {0}; + for (size_t ln = 0; ln < lines; ln++) { + const git_diff_line *line = NULL; + if (git_patch_get_line_in_hunk(&line, patch, h, ln) != 0 || !line) continue; + int op = 0; + if (line->origin == '+') op = 1; + else if (line->origin == '-') op = 2; + else op = 0; + if (prev == -2) prev = op; + if (op != prev) { + bare_put_uint(writer, (uint64_t)prev); + bare_put_data(writer, (const uint8_t *)buf.data, buf.len); + free(buf.data); + buf.data = NULL; buf.len = 0; buf.cap = 0; + prev = op; + } + if (line->content && line->content_len > 0) { + if (append_buf(&buf.data, &buf.len, &buf.cap, line->content, line->content_len) != 0) { + free(buf.data); + git_patch_free(patch); + git_diff_free(diff); + git_tree_free(tree); + git_commit_free(commit); + bare_put_uint(writer, 15); + return -1; + } + } + } + if (prev != -2) { + bare_put_uint(writer, (uint64_t)prev); + bare_put_data(writer, 
(const uint8_t *)buf.data, buf.len); + free(buf.data); + } + } + git_patch_free(patch); + } + + git_diff_free(diff); + git_tree_free(tree); + git_commit_free(commit); + return 0; +} diff --git a/git2d/cmd_diff.c b/git2d/cmd_diff.c new file mode 100644 index 0000000..b32807e --- /dev/null +++ b/git2d/cmd_diff.c @@ -0,0 +1,276 @@ +/*- + * SPDX-License-Identifier: AGPL-3.0-only + * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + */ + +#include "x.h" + +static int diff_stats_to_string(git_diff *diff, git_buf *out) +{ + git_diff_stats *stats = NULL; + if (git_diff_get_stats(&stats, diff) != 0) { + return -1; + } + int rc = git_diff_stats_to_buf(out, stats, GIT_DIFF_STATS_FULL, 80); + git_diff_stats_free(stats); + return rc; +} + +static void split_message(const char *message, char **title_out, char **body_out) +{ + *title_out = NULL; + *body_out = NULL; + if (!message) + return; + const char *nl = strchr(message, '\n'); + if (!nl) { + *title_out = strdup(message); + *body_out = strdup(""); + return; + } + size_t title_len = (size_t)(nl - message); + *title_out = (char *)malloc(title_len + 1); + if (*title_out) { + memcpy(*title_out, message, title_len); + (*title_out)[title_len] = '\0'; + } + const char *rest = nl + 1; + if (*rest == '\n') + rest++; + *body_out = strdup(rest); +} + +int cmd_format_patch(git_repository *repo, struct bare_reader *reader, struct bare_writer *writer) +{ + char hex[64] = { 0 }; + if (bare_get_data(reader, (uint8_t *) hex, sizeof(hex) - 1) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + git_oid oid; + if (git_oid_fromstr(&oid, hex) != 0) { + bare_put_uint(writer, 14); + return -1; + } + + git_commit *commit = NULL; + if (git_commit_lookup(&commit, repo, &oid) != 0) { + bare_put_uint(writer, 14); + return -1; + } + + git_tree *tree = NULL; + if (git_commit_tree(&tree, commit) != 0) { + git_commit_free(commit); + bare_put_uint(writer, 14); + return -1; + } + + git_diff *diff = NULL; + if 
(git_commit_parentcount(commit) == 0) { + if (git_diff_tree_to_tree(&diff, repo, NULL, tree, NULL) != 0) { + git_tree_free(tree); + git_commit_free(commit); + bare_put_uint(writer, 15); + return -1; + } + } else { + git_commit *parent = NULL; + git_tree *ptree = NULL; + if (git_commit_parent(&parent, commit, 0) != 0 || git_commit_tree(&ptree, parent) != 0) { + if (parent) + git_commit_free(parent); + git_tree_free(tree); + git_commit_free(commit); + bare_put_uint(writer, 15); + return -1; + } + if (git_diff_tree_to_tree(&diff, repo, ptree, tree, NULL) != 0) { + git_tree_free(ptree); + git_commit_free(parent); + git_tree_free(tree); + git_commit_free(commit); + bare_put_uint(writer, 15); + return -1; + } + git_tree_free(ptree); + git_commit_free(parent); + } + + git_buf stats = { 0 }; + if (diff_stats_to_string(diff, &stats) != 0) { + git_diff_free(diff); + git_tree_free(tree); + git_commit_free(commit); + bare_put_uint(writer, 15); + return -1; + } + + git_buf patch = { 0 }; + if (git_diff_to_buf(&patch, diff, GIT_DIFF_FORMAT_PATCH) != 0) { + git_buf_dispose(&stats); + git_diff_free(diff); + git_tree_free(tree); + git_commit_free(commit); + bare_put_uint(writer, 15); + return -1; + } + + const git_signature *author = git_commit_author(commit); + char *title = NULL, *body = NULL; + split_message(git_commit_message(commit), &title, &body); + + char header[2048]; + char timebuf[64]; + { + time_t t = git_commit_time(commit); + struct tm *tm = localtime(&t); + if (tm) + strftime(timebuf, sizeof(timebuf), "%a, %d %b %Y %H:%M:%S %z", tm); + else + strcpy(timebuf, "unknown"); + } + snprintf(header, sizeof(header), "From %s Mon Sep 17 00:00:00 2001\nFrom: %s <%s>\nDate: %s\nSubject: [PATCH] %s\n\n", git_oid_tostr_s(&oid), author && author->name ? author->name : "", author && author->email ? author->email : "", timebuf, title ? title : ""); + + const char *trailer = "\n-- \n2.48.1\n"; + size_t header_len = strlen(header); + size_t body_len = body ? 
strlen(body) : 0; + size_t trailer_len = strlen(trailer); + size_t total = header_len + body_len + (body_len ? 1 : 0) + 4 + stats.size + 1 + patch.size + trailer_len; + + uint8_t *buf = (uint8_t *) malloc(total); + if (!buf) { + free(title); + free(body); + git_buf_dispose(&patch); + git_buf_dispose(&stats); + git_diff_free(diff); + git_tree_free(tree); + git_commit_free(commit); + bare_put_uint(writer, 15); + return -1; + } + size_t off = 0; + memcpy(buf + off, header, header_len); + off += header_len; + if (body_len) { + memcpy(buf + off, body, body_len); + off += body_len; + buf[off++] = '\n'; + } + memcpy(buf + off, "---\n", 4); + off += 4; + memcpy(buf + off, stats.ptr, stats.size); + off += stats.size; + buf[off++] = '\n'; + memcpy(buf + off, patch.ptr, patch.size); + off += patch.size; + memcpy(buf + off, trailer, trailer_len); + off += trailer_len; + + bare_put_uint(writer, 0); + bare_put_data(writer, buf, off); + + free(buf); + free(title); + free(body); + git_buf_dispose(&patch); + git_buf_dispose(&stats); + git_diff_free(diff); + git_tree_free(tree); + git_commit_free(commit); + return 0; +} + +int cmd_merge_base(git_repository *repo, struct bare_reader *reader, struct bare_writer *writer) +{ + char hex1[64] = { 0 }; + char hex2[64] = { 0 }; + if (bare_get_data(reader, (uint8_t *) hex1, sizeof(hex1) - 1) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + if (bare_get_data(reader, (uint8_t *) hex2, sizeof(hex2) - 1) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + git_oid a, b, out; + if (git_oid_fromstr(&a, hex1) != 0 || git_oid_fromstr(&b, hex2) != 0) { + bare_put_uint(writer, 17); + return -1; + } + int rc = git_merge_base(&out, repo, &a, &b); + if (rc == GIT_ENOTFOUND) { + bare_put_uint(writer, 16); + return -1; + } + if (rc != 0) { + bare_put_uint(writer, 17); + return -1; + } + bare_put_uint(writer, 0); + bare_put_data(writer, out.id, GIT_OID_RAWSZ); + return 0; +} + +int cmd_log(git_repository *repo, struct 
bare_reader *reader, struct bare_writer *writer) +{ + char spec[4096] = { 0 }; + uint64_t limit = 0; + if (bare_get_data(reader, (uint8_t *) spec, sizeof(spec) - 1) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + if (bare_get_uint(reader, &limit) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + + git_object *obj = NULL; + if (spec[0] == '\0') + strcpy(spec, "HEAD"); + if (git_revparse_single(&obj, repo, spec) != 0) { + bare_put_uint(writer, 4); + return -1; + } + git_commit *start = (git_commit *) obj; + + git_revwalk *walk = NULL; + if (git_revwalk_new(&walk, repo) != 0) { + git_commit_free(start); + bare_put_uint(writer, 9); + return -1; + } + git_revwalk_sorting(walk, GIT_SORT_TIME); + git_revwalk_push(walk, git_commit_id(start)); + git_commit_free(start); + + bare_put_uint(writer, 0); + git_oid oid; + uint64_t count = 0; + while ((limit == 0 || count < limit) + && git_revwalk_next(&oid, walk) == 0) { + git_commit *c = NULL; + if (git_commit_lookup(&c, repo, &oid) != 0) + break; + const char *msg = git_commit_summary(c); + const git_signature *author = git_commit_author(c); + time_t t = git_commit_time(c); + char timebuf[64]; + struct tm *tm = localtime(&t); + if (tm) + strftime(timebuf, sizeof(timebuf), "%Y-%m-%d %H:%M:%S", tm); + else + strcpy(timebuf, "unknown"); + + bare_put_data(writer, oid.id, GIT_OID_RAWSZ); + bare_put_data(writer, (const uint8_t *)(msg ? msg : ""), msg ? strlen(msg) : 0); + bare_put_data(writer, (const uint8_t *)(author && author->name ? author->name : ""), author && author->name ? strlen(author->name) : 0); + bare_put_data(writer, (const uint8_t *)(author && author->email ? author->email : ""), author && author->email ? 
strlen(author->email) : 0); + bare_put_data(writer, (const uint8_t *)timebuf, strlen(timebuf)); + git_commit_free(c); + count++; + } + git_revwalk_free(walk); + return 0; +} diff --git a/git2d/cmd_init.c b/git2d/cmd_init.c new file mode 100644 index 0000000..962d229 --- /dev/null +++ b/git2d/cmd_init.c @@ -0,0 +1,65 @@ +/*- + * SPDX-License-Identifier: AGPL-3.0-only + * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + */ + +#include "x.h" + +int cmd_init_repo(const char *path, struct bare_reader *reader, struct bare_writer *writer) +{ + char hooks[4096] = { 0 }; + if (bare_get_data(reader, (uint8_t *) hooks, sizeof(hooks) - 1) != BARE_ERROR_NONE) { + fprintf(stderr, "init_repo: protocol error reading hooks for path '%s'\n", path); + bare_put_uint(writer, 11); + return -1; + } + + fprintf(stderr, "init_repo: starting for path='%s' hooks='%s'\n", path, hooks); + + if (mkdir(path, 0700) != 0 && errno != EEXIST) { + fprintf(stderr, "init_repo: mkdir failed for '%s': %s\n", path, strerror(errno)); + bare_put_uint(writer, 24); + return -1; + } + + git_repository *repo = NULL; + git_repository_init_options opts; + git_repository_init_options_init(&opts, GIT_REPOSITORY_INIT_OPTIONS_VERSION); + opts.flags = GIT_REPOSITORY_INIT_BARE; + if (git_repository_init_ext(&repo, path, &opts) != 0) { + const git_error *ge = git_error_last(); + fprintf(stderr, "init_repo: git_repository_init_ext failed: %s (klass=%d)\n", ge && ge->message ? ge->message : "(no message)", ge ? ge->klass : 0); + bare_put_uint(writer, 20); + return -1; + } + git_config *cfg = NULL; + if (git_repository_config(&cfg, repo) != 0) { + git_repository_free(repo); + const git_error *ge = git_error_last(); + fprintf(stderr, "init_repo: open config failed: %s (klass=%d)\n", ge && ge->message ? ge->message : "(no message)", ge ? 
ge->klass : 0); + bare_put_uint(writer, 21); + return -1; + } + if (git_config_set_string(cfg, "core.hooksPath", hooks) != 0) { + git_config_free(cfg); + git_repository_free(repo); + const git_error *ge = git_error_last(); + fprintf(stderr, "init_repo: set hooksPath failed: %s (klass=%d) hooks='%s'\n", ge && ge->message ? ge->message : "(no message)", ge ? ge->klass : 0, hooks); + bare_put_uint(writer, 22); + return -1; + } + if (git_config_set_bool(cfg, "receive.advertisePushOptions", 1) != 0) { + git_config_free(cfg); + git_repository_free(repo); + const git_error *ge = git_error_last(); + fprintf(stderr, "init_repo: set advertisePushOptions failed: %s (klass=%d)\n", ge && ge->message ? ge->message : "(no message)", ge ? ge->klass : 0); + bare_put_uint(writer, 23); + return -1; + } + git_config_free(cfg); + + git_repository_free(repo); + fprintf(stderr, "init_repo: success for path='%s'\n", path); + bare_put_uint(writer, 0); + return 0; +} diff --git a/git2d/cmd_ref.c b/git2d/cmd_ref.c new file mode 100644 index 0000000..f4bae4a --- /dev/null +++ b/git2d/cmd_ref.c @@ -0,0 +1,113 @@ +/*- + * SPDX-License-Identifier: AGPL-3.0-only + * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + */ + +#include "x.h" + +static int write_oid(struct bare_writer *writer, const git_oid *oid) +{ + return bare_put_data(writer, oid->id, GIT_OID_RAWSZ) == BARE_ERROR_NONE ? 
0 : -1; +} + +int cmd_resolve_ref(git_repository *repo, struct bare_reader *reader, struct bare_writer *writer) +{ + char type[32] = { 0 }; + char name[4096] = { 0 }; + if (bare_get_data(reader, (uint8_t *) type, sizeof(type) - 1) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + if (bare_get_data(reader, (uint8_t *) name, sizeof(name) - 1) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + + git_oid oid = { 0 }; + int err = 0; + + if (type[0] == '\0') { + git_object *obj = NULL; + err = git_revparse_single(&obj, repo, "HEAD^{commit}"); + if (err != 0) { + bare_put_uint(writer, 12); + return -1; + } + git_commit *c = (git_commit *) obj; + git_oid_cpy(&oid, git_commit_id(c)); + git_commit_free(c); + } else if (strcmp(type, "commit") == 0) { + err = git_oid_fromstr(&oid, name); + if (err != 0) { + bare_put_uint(writer, 12); + return -1; + } + } else if (strcmp(type, "branch") == 0) { + char fullref[4608]; + snprintf(fullref, sizeof(fullref), "refs/heads/%s", name); + git_object *obj = NULL; + err = git_revparse_single(&obj, repo, fullref); + if (err != 0) { + bare_put_uint(writer, 12); + return -1; + } + git_commit *c = (git_commit *) obj; + git_oid_cpy(&oid, git_commit_id(c)); + git_commit_free(c); + } else if (strcmp(type, "tag") == 0) { + char spec[4608]; + snprintf(spec, sizeof(spec), "refs/tags/%s^{commit}", name); + git_object *obj = NULL; + err = git_revparse_single(&obj, repo, spec); + if (err != 0) { + bare_put_uint(writer, 12); + return -1; + } + git_commit *c = (git_commit *) obj; + git_oid_cpy(&oid, git_commit_id(c)); + git_commit_free(c); + } else { + bare_put_uint(writer, 12); + return -1; + } + + bare_put_uint(writer, 0); + return write_oid(writer, &oid); +} + +int cmd_list_branches(git_repository *repo, struct bare_writer *writer) +{ + git_branch_iterator *it = NULL; + int err = git_branch_iterator_new(&it, repo, GIT_BRANCH_LOCAL); + if (err != 0) { + bare_put_uint(writer, 13); + return -1; + } + size_t count = 
0; + git_reference *ref; + git_branch_t type; + while (git_branch_next(&ref, &type, it) == 0) { + count++; + git_reference_free(ref); + } + git_branch_iterator_free(it); + + err = git_branch_iterator_new(&it, repo, GIT_BRANCH_LOCAL); + if (err != 0) { + bare_put_uint(writer, 13); + return -1; + } + + bare_put_uint(writer, 0); + bare_put_uint(writer, count); + while (git_branch_next(&ref, &type, it) == 0) { + const char *name = NULL; + git_branch_name(&name, ref); + if (name == NULL) + name = ""; + bare_put_data(writer, (const uint8_t *)name, strlen(name)); + git_reference_free(ref); + } + git_branch_iterator_free(it); + return 0; +} diff --git a/git2d/cmd_tree.c b/git2d/cmd_tree.c new file mode 100644 index 0000000..d18e817 --- /dev/null +++ b/git2d/cmd_tree.c @@ -0,0 +1,120 @@ +/*- + * SPDX-License-Identifier: AGPL-3.0-only + * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + */ + +#include "x.h" + +int cmd_tree_list_by_oid(git_repository *repo, struct bare_reader *reader, struct bare_writer *writer) +{ + char hex[64] = { 0 }; + if (bare_get_data(reader, (uint8_t *) hex, sizeof(hex) - 1) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + git_oid oid; + if (git_oid_fromstr(&oid, hex) != 0) { + bare_put_uint(writer, 4); + return -1; + } + git_tree *tree = NULL; + if (git_tree_lookup(&tree, repo, &oid) != 0) { + bare_put_uint(writer, 4); + return -1; + } + size_t count = git_tree_entrycount(tree); + bare_put_uint(writer, 0); + bare_put_uint(writer, count); + for (size_t i = 0; i < count; i++) { + const git_tree_entry *e = git_tree_entry_byindex(tree, i); + const char *name = git_tree_entry_name(e); + uint32_t mode = git_tree_entry_filemode(e); + const git_oid *id = git_tree_entry_id(e); + bare_put_uint(writer, mode); + bare_put_data(writer, (const uint8_t *)name, strlen(name)); + bare_put_data(writer, id->id, GIT_OID_RAWSZ); + } + git_tree_free(tree); + return 0; +} + +int cmd_write_tree(git_repository *repo, struct 
bare_reader *reader, struct bare_writer *writer) +{ + uint64_t count = 0; + if (bare_get_uint(reader, &count) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + git_treebuilder *bld = NULL; + if (git_treebuilder_new(&bld, repo, NULL) != 0) { + bare_put_uint(writer, 15); + return -1; + } + for (uint64_t i = 0; i < count; i++) { + uint64_t mode = 0; + if (bare_get_uint(reader, &mode) != BARE_ERROR_NONE) { + git_treebuilder_free(bld); + bare_put_uint(writer, 11); + return -1; + } + char name[4096] = { 0 }; + if (bare_get_data(reader, (uint8_t *) name, sizeof(name) - 1) != BARE_ERROR_NONE) { + git_treebuilder_free(bld); + bare_put_uint(writer, 11); + return -1; + } + uint8_t idraw[GIT_OID_RAWSZ] = { 0 }; + if (bare_get_fixed_data(reader, idraw, GIT_OID_RAWSZ) != BARE_ERROR_NONE) { + git_treebuilder_free(bld); + bare_put_uint(writer, 11); + return -1; + } + git_oid id; + memcpy(id.id, idraw, GIT_OID_RAWSZ); + git_filemode_t fm = (git_filemode_t) mode; + if (git_treebuilder_insert(NULL, bld, name, &id, fm) != 0) { + git_treebuilder_free(bld); + bare_put_uint(writer, 15); + return -1; + } + } + git_oid out; + if (git_treebuilder_write(&out, bld) != 0) { + git_treebuilder_free(bld); + bare_put_uint(writer, 15); + return -1; + } + git_treebuilder_free(bld); + bare_put_uint(writer, 0); + bare_put_data(writer, out.id, GIT_OID_RAWSZ); + return 0; +} + +int cmd_blob_write(git_repository *repo, struct bare_reader *reader, struct bare_writer *writer) +{ + uint64_t sz = 0; + if (bare_get_uint(reader, &sz) != BARE_ERROR_NONE) { + bare_put_uint(writer, 11); + return -1; + } + uint8_t *data = (uint8_t *) malloc(sz); + if (!data) { + bare_put_uint(writer, 15); + return -1; + } + if (bare_get_fixed_data(reader, data, sz) != BARE_ERROR_NONE) { + free(data); + bare_put_uint(writer, 11); + return -1; + } + git_oid oid; + if (git_blob_create_frombuffer(&oid, repo, data, sz) != 0) { + free(data); + bare_put_uint(writer, 15); + return -1; + } + free(data); + 
bare_put_uint(writer, 0); + bare_put_data(writer, oid.id, GIT_OID_RAWSZ); + return 0; +} diff --git a/git2d/main.c b/git2d/main.c new file mode 100644 index 0000000..8518960 --- /dev/null +++ b/git2d/main.c @@ -0,0 +1,81 @@ +/*- + * SPDX-License-Identifier: AGPL-3.0-only + * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + */ + +/* + * TODO: Pool repositories (and take care of thread safety) + * libgit2 has a nice builtin per-repo cache that we could utilize this way. + */ + +#include "x.h" + +int main(int argc, char **argv) +{ + if (argc != 2) { + errx(1, "provide one argument: the socket path"); + } + + signal(SIGPIPE, SIG_IGN); + + git_libgit2_init(); + + int sock; + if ((sock = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0)) < 0) + err(1, "socket"); + + struct sockaddr_un addr; + memset(&addr, 0, sizeof(addr)); + addr.sun_family = AF_UNIX; + strcpy(addr.sun_path, argv[1]); + + umask(0077); + + if (bind(sock, (struct sockaddr *)&addr, sizeof(struct sockaddr_un))) { + if (errno == EADDRINUSE) { + unlink(argv[1]); + if (bind(sock, (struct sockaddr *)&addr, sizeof(struct sockaddr_un))) + err(1, "bind"); + } else { + err(1, "bind"); + } + } + + listen(sock, 128); + + pthread_attr_t pthread_attr; + + if (pthread_attr_init(&pthread_attr) != 0) + err(1, "pthread_attr_init"); + + if (pthread_attr_setdetachstate(&pthread_attr, PTHREAD_CREATE_DETACHED) + != 0) + err(1, "pthread_attr_setdetachstate"); + + for (;;) { + int *conn = malloc(sizeof(int)); + if (conn == NULL) { + warn("malloc"); + continue; + } + + *conn = accept(sock, 0, 0); + if (*conn == -1) { + free(conn); + warn("accept"); + continue; + } + + pthread_t thread; + + if (pthread_create(&thread, &pthread_attr, session, (void *)conn) != 0) { + close(*conn); + free(conn); + warn("pthread_create"); + } + } + + close(sock); + + git_libgit2_shutdown(); +} diff --git a/git2d/rw.c b/git2d/rw.c new file mode 100644 index 0000000..09398c2 --- /dev/null +++ b/git2d/rw.c @@ -0,0 +1,34 @@ +/*- + 
* SPDX-License-Identifier: AGPL-3.0-only + * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + */ + +#include "x.h" + +bare_error conn_read(void *buffer, void *dst, uint64_t sz) +{ + conn_io_t *io = buffer; + ssize_t rsz = read(io->fd, dst, sz); + return (rsz == (ssize_t) sz) ? BARE_ERROR_NONE : BARE_ERROR_READ_FAILED; +} + +bare_error conn_write(void *buffer, const void *src, uint64_t sz) +{ + conn_io_t *io = buffer; + const uint8_t *data = src; + uint64_t total = 0; + + while (total < sz) { + ssize_t written = write(io->fd, data + total, sz - total); + if (written < 0) { + if (errno == EINTR) + continue; + return BARE_ERROR_WRITE_FAILED; + } + if (written == 0) + break; + total += written; + } + + return (total == sz) ? BARE_ERROR_NONE : BARE_ERROR_WRITE_FAILED; +} diff --git a/git2d/session.c b/git2d/session.c new file mode 100644 index 0000000..c757640 --- /dev/null +++ b/git2d/session.c @@ -0,0 +1,143 @@ +/*- + * SPDX-License-Identifier: AGPL-3.0-only + * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + */ + +#include "x.h" + +void *session(void *_conn) +{ + int conn = *(int *)_conn; + free((int *)_conn); + + int err; + + conn_io_t io = {.fd = conn }; + struct bare_reader reader = { + .buffer = &io, + .read = conn_read, + }; + struct bare_writer writer = { + .buffer = &io, + .write = conn_write, + }; + + /* Repo path */ + char path[4096] = { 0 }; + err = bare_get_data(&reader, (uint8_t *) path, sizeof(path) - 1); + if (err != BARE_ERROR_NONE) { + goto close; + } + path[sizeof(path) - 1] = '\0'; + fprintf(stderr, "session: path='%s'\n", path); + + /* Command */ + uint64_t cmd = 0; + err = bare_get_uint(&reader, &cmd); + if (err != BARE_ERROR_NONE) { + bare_put_uint(&writer, 2); + goto close; + } + fprintf(stderr, "session: cmd=%llu\n", (unsigned long long)cmd); + + /* Repo init does not require opening an existing repo so let's just do it here */ + if (cmd == 15) { + fprintf(stderr, "session: handling init 
for '%s'\n", path); + if (cmd_init_repo(path, &reader, &writer) != 0) { + } + goto close; + } + + git_repository *repo = NULL; + err = git_repository_open_ext(&repo, path, GIT_REPOSITORY_OPEN_NO_SEARCH | GIT_REPOSITORY_OPEN_BARE | GIT_REPOSITORY_OPEN_NO_DOTGIT, NULL); + if (err != 0) { + bare_put_uint(&writer, 1); + goto close; + } + switch (cmd) { + case 1: + err = cmd_index(repo, &writer); + if (err != 0) + goto free_repo; + break; + case 2: + err = cmd_treeraw(repo, &reader, &writer); + if (err != 0) + goto free_repo; + break; + case 3: + err = cmd_resolve_ref(repo, &reader, &writer); + if (err != 0) + goto free_repo; + break; + case 4: + err = cmd_list_branches(repo, &writer); + if (err != 0) + goto free_repo; + break; + case 5: + err = cmd_format_patch(repo, &reader, &writer); + if (err != 0) + goto free_repo; + break; +case 6: + err = cmd_commit_info(repo, &reader, &writer); + if (err != 0) + goto free_repo; + break; + case 7: + err = cmd_merge_base(repo, &reader, &writer); + if (err != 0) + goto free_repo; + break; + case 8: + err = cmd_log(repo, &reader, &writer); + if (err != 0) + goto free_repo; + break; + case 9: + err = cmd_tree_list_by_oid(repo, &reader, &writer); + if (err != 0) + goto free_repo; + break; + case 10: + err = cmd_write_tree(repo, &reader, &writer); + if (err != 0) + goto free_repo; + break; + case 11: + err = cmd_blob_write(repo, &reader, &writer); + if (err != 0) + goto free_repo; + break; + case 12: + err = cmd_commit_tree_oid(repo, &reader, &writer); + if (err != 0) + goto free_repo; + break; + case 13: + err = cmd_commit_create(repo, &reader, &writer); + if (err != 0) + goto free_repo; + break; + case 14: + err = cmd_update_ref(repo, &reader, &writer); + if (err != 0) + goto free_repo; + break; + case 0: + bare_put_uint(&writer, 3); + goto free_repo; + default: + bare_put_uint(&writer, 3); + goto free_repo; + } + + free_repo: + git_repository_free(repo); + + close: + close(conn); + + return NULL; +} diff --git a/git2d/x.h 
b/git2d/x.h new file mode 100644 index 0000000..972e60b --- /dev/null +++ b/git2d/x.h @@ -0,0 +1,55 @@ +/*- + * SPDX-License-Identifier: AGPL-3.0-only + * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + */ + +#ifndef X_H +#define X_H + +#include <err.h> +#include <errno.h> +#include <git2.h> +#include <git2/buffer.h> +#include <pthread.h> +#include <signal.h> +#include <sys/socket.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <sys/un.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +#include "bare.h" + +typedef struct { + int fd; +} conn_io_t; + +bare_error conn_read(void *buffer, void *dst, uint64_t sz); +bare_error conn_write(void *buffer, const void *src, uint64_t sz); + +void *session(void *_conn); + +int cmd_index(git_repository * repo, struct bare_writer *writer); +int cmd_treeraw(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); + +int cmd_resolve_ref(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_list_branches(git_repository * repo, struct bare_writer *writer); +int cmd_format_patch(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_merge_base(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_log(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); + +int cmd_tree_list_by_oid(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_write_tree(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_blob_write(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); + +int cmd_commit_tree_oid(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_commit_create(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); +int cmd_update_ref(git_repository * repo, struct 
bare_reader *reader, struct bare_writer *writer); +int cmd_commit_info(git_repository * repo, struct bare_reader *reader, struct bare_writer *writer); + +int cmd_init_repo(const char *path, struct bare_reader *reader, struct bare_writer *writer); + +#endif // X_H diff --git a/global.ha b/global.ha deleted file mode 100644 index ac5ac14..0000000 --- a/global.ha +++ /dev/null @@ -1,11 +0,0 @@ -let global: struct { - title: str, - version: str, - ssh_pubkey: str, - ssh_fp: str, -} = struct { - title: str = "Test Forge", - version: str = VERSION, - ssh_pubkey: str = "pubkey", - ssh_fp: str = "fp", -}; @@ -0,0 +1,21 @@ +module go.lindenii.runxiyu.org/forge + +go 1.24.1 + +require ( + github.com/gliderlabs/ssh v0.3.8 + github.com/jackc/pgx/v5 v5.7.5 + github.com/yuin/goldmark v1.7.13 + golang.org/x/crypto v0.41.0 + golang.org/x/sync v0.16.0 +) + +require ( + github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/stretchr/testify v1.10.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect +) @@ -0,0 +1,38 @@ +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= +github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= +github.com/jackc/pgpassfile v1.0.0 
h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= +github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA= +github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 
+golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/hookc/hookc.c b/hookc/hookc.c new file mode 100644 index 0000000..15a36e3 --- /dev/null +++ b/hookc/hookc.c @@ -0,0 +1,310 @@ +/*- + * SPDX-License-Identifier: AGPL-3.0-only + * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> + * SPDX-FileCopyrightText: Copyright (c) 2025 Test_User <hax@runxiyu.org> + */ + +#include <errno.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <sys/socket.h> +#include <sys/un.h> +#include <sys/stat.h> +#include <string.h> +#include <fcntl.h> +#include <signal.h> +#ifdef __linux__ +#include <linux/limits.h> +#include <sys/sendfile.h> +#define USE_SPLICE 1 +#else +#define USE_SPLICE 0 +#endif + +int +main(int argc, char *argv[]) +{ + if (signal(SIGPIPE, SIG_IGN) == SIG_ERR) { + perror("signal"); + return EXIT_FAILURE; + } + + const char *socket_path = getenv("LINDENII_FORGE_HOOKS_SOCKET_PATH"); + if (socket_path == NULL) { + dprintf(STDERR_FILENO, "environment variable LINDENII_FORGE_HOOKS_SOCKET_PATH undefined\n"); + return EXIT_FAILURE; + } + + const char *cookie = getenv("LINDENII_FORGE_HOOKS_COOKIE"); + if (cookie == NULL) { + dprintf(STDERR_FILENO, "environment variable LINDENII_FORGE_HOOKS_COOKIE undefined\n"); + return EXIT_FAILURE; + } + if (strlen(cookie) != 64) { + 
dprintf(STDERR_FILENO, "environment variable LINDENII_FORGE_HOOKS_COOKIE is not 64 characters long\n"); + return EXIT_FAILURE; + } + + /* + * All hooks in git (see builtin/receive-pack.c) use a pipe by + * setting .in = -1 on the child_process struct, which enables us to + * use splice(2) to move the data to the UNIX domain socket. + */ + + struct stat stdin_stat; + if (fstat(STDIN_FILENO, &stdin_stat) == -1) { + perror("fstat on stdin"); + return EXIT_FAILURE; + } + + if (!S_ISFIFO(stdin_stat.st_mode)) { + dprintf(STDERR_FILENO, "stdin must be a pipe\n"); + return EXIT_FAILURE; + } + +#if USE_SPLICE + int stdin_pipe_size = fcntl(STDIN_FILENO, F_GETPIPE_SZ); + if (stdin_pipe_size == -1) { + perror("fcntl on stdin"); + return EXIT_FAILURE; + } +#else + int stdin_pipe_size = 65536; +#endif + + if (stdin_pipe_size == -1) { + perror("fcntl on stdin"); + return EXIT_FAILURE; + } + + /* + * Same for stderr. + */ + struct stat stderr_stat; + if (fstat(STDERR_FILENO, &stderr_stat) == -1) { + perror("fstat on stderr"); + return EXIT_FAILURE; + } + if (!S_ISFIFO(stderr_stat.st_mode)) { + dprintf(STDERR_FILENO, "stderr must be a pipe\n"); + return EXIT_FAILURE; + } + +#if USE_SPLICE + int stderr_pipe_size = fcntl(STDERR_FILENO, F_GETPIPE_SZ); + if (stderr_pipe_size == -1) { + perror("fcntl on stderr"); + return EXIT_FAILURE; + } +#else + int stderr_pipe_size = 65536; +#endif + + if (stderr_pipe_size == -1) { + perror("fcntl on stderr"); + return EXIT_FAILURE; + } + + /* Connecting back to the main daemon */ + int sock; + struct sockaddr_un addr; + sock = socket(AF_UNIX, SOCK_STREAM, 0); + if (sock == -1) { + perror("internal socket creation"); + return EXIT_FAILURE; + } + memset(&addr, 0, sizeof(struct sockaddr_un)); + addr.sun_family = AF_UNIX; + strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path) - 1); + if (connect(sock, (struct sockaddr *)&addr, sizeof(struct sockaddr_un)) == -1) { + perror("internal socket connect"); + close(sock); + return EXIT_FAILURE; + } + + /* 
+ * Send the 64-byte cookie back. + */ + ssize_t cookie_bytes_sent = send(sock, cookie, 64, 0); + switch (cookie_bytes_sent) { + case -1: + perror("send cookie"); + close(sock); + return EXIT_FAILURE; + case 64: + break; + default: + dprintf(STDERR_FILENO, "send returned unexpected value on internal socket\n"); + close(sock); + return EXIT_FAILURE; + } + + /* + * Report arguments. + */ + uint64_t argc64 = (uint64_t) argc; + ssize_t bytes_sent = send(sock, &argc64, sizeof(argc64), 0); + switch (bytes_sent) { + case -1: + perror("send argc"); + close(sock); + return EXIT_FAILURE; + case sizeof(argc64): + break; + default: + dprintf(STDERR_FILENO, "send returned unexpected value on internal socket\n"); + close(sock); + return EXIT_FAILURE; + } + for (int i = 0; i < argc; i++) { + unsigned long len = strlen(argv[i]) + 1; + bytes_sent = send(sock, argv[i], len, 0); + if (bytes_sent == -1) { + perror("send argv"); + close(sock); + exit(EXIT_FAILURE); + } else if ((unsigned long)bytes_sent == len) { + } else { + dprintf(STDERR_FILENO, "send returned unexpected value on internal socket\n"); + close(sock); + exit(EXIT_FAILURE); + } + } + + /* + * Report GIT_* environment. + */ + extern char **environ; + for (char **env = environ; *env != NULL; env++) { + if (strncmp(*env, "GIT_", 4) == 0) { + unsigned long len = strlen(*env) + 1; + bytes_sent = send(sock, *env, len, 0); + if (bytes_sent == -1) { + perror("send env"); + close(sock); + exit(EXIT_FAILURE); + } else if ((unsigned long)bytes_sent == len) { + } else { + dprintf(STDERR_FILENO, "send returned unexpected value on internal socket\n"); + close(sock); + exit(EXIT_FAILURE); + } + } + } + bytes_sent = send(sock, "", 1, 0); + if (bytes_sent == -1) { + perror("send env terminator"); + close(sock); + exit(EXIT_FAILURE); + } else if (bytes_sent == 1) { + } else { + dprintf(STDERR_FILENO, "send returned unexpected value on internal socket\n"); + close(sock); + exit(EXIT_FAILURE); + } + + /* + * Splice stdin to the daemon. 
For pre-receive it's just old/new/ref. + */ +#if USE_SPLICE + ssize_t stdin_bytes_spliced; + while ((stdin_bytes_spliced = splice(STDIN_FILENO, NULL, sock, NULL, stdin_pipe_size, SPLICE_F_MORE)) > 0) { + } + if (stdin_bytes_spliced == -1) { + perror("splice stdin to internal socket"); + close(sock); + return EXIT_FAILURE; + } +#else + char buf[65536]; + ssize_t n; + while ((n = read(STDIN_FILENO, buf, sizeof(buf))) > 0) { + if (write(sock, buf, n) != n) { + perror("write to internal socket"); + close(sock); + return EXIT_FAILURE; + } + } + if (n < 0) { + perror("read from stdin"); + close(sock); + return EXIT_FAILURE; + } +#endif + + /* + * The sending part of the UNIX socket should be shut down, to let + * io.Copy on the Go side return. + */ + if (shutdown(sock, SHUT_WR) == -1) { + perror("shutdown internal socket"); + close(sock); + return EXIT_FAILURE; + } + + /* + * The first byte of the response from the UNIX domain socket is the + * status code to return. + * + * FIXME: It doesn't make sense to require the return value to be + * sent before the log message. However, if we were to keep splicing, + * it's difficult to get the last byte before EOF. Perhaps we could + * hack together some sort of OOB message or ancillary data, or + * perhaps even use signals. + */ + char status_buf[1]; + ssize_t bytes_read = read(sock, status_buf, 1); + switch (bytes_read) { + case -1: + perror("read status code from internal socket"); + close(sock); + return EXIT_FAILURE; + case 0: + dprintf(STDERR_FILENO, "unexpected EOF on internal socket\n"); + close(sock); + return EXIT_FAILURE; + case 1: + break; + default: + dprintf(STDERR_FILENO, "read returned unexpected value on internal socket\n"); + close(sock); + return EXIT_FAILURE; + } + + /* + * Now we can splice data from the UNIX domain socket to stderr. This + * data is directly passed to the user (with "remote: " prepended). 
+ * + * We usually don't actually use this as the daemon could easily + * write to the SSH connection's stderr directly anyway. + */ + +#if USE_SPLICE + ssize_t stderr_bytes_spliced; + while ((stderr_bytes_spliced = splice(sock, NULL, STDERR_FILENO, NULL, stderr_pipe_size, SPLICE_F_MORE)) > 0) { + } + if (stderr_bytes_spliced == -1 && errno != ECONNRESET) { + perror("splice internal socket to stderr"); + close(sock); + return EXIT_FAILURE; + } +#else + while ((n = read(sock, buf, sizeof(buf))) > 0) { + if (write(STDERR_FILENO, buf, n) != n) { + perror("write to stderr"); + close(sock); + return EXIT_FAILURE; + } + } + if (n < 0 && errno != ECONNRESET) { + perror("read from internal socket"); + close(sock); + return EXIT_FAILURE; + } +#endif + + close(sock); + return *status_buf; +} diff --git a/main.ha b/main.ha deleted file mode 100644 index d133c7b..0000000 --- a/main.ha +++ /dev/null @@ -1,75 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> -// Adapted from template by Willow Barraco <contact@willowbarraco.fr> - -use fs; -use getopt; -use log; -use net; -use net::dial; -use net::http; -use net::ip; -use net::tcp; -use net::uri; -use os; -use memio; -use io; -use fmt; -use bufio; -use unix::signal; - -const usage: [_]getopt::help = [ - "Lindenii Forge Server", - ('c', "config", "path to configuration file") -]; - -let static_fs: nullable *fs::fs = null; - -let running: bool = true; - -export fn sigint_handler(sig: signal::sig, info: *signal::siginfo, ucontext: *opaque) void = { - running = false; -}; - -export fn main() void = { - signal::handle(signal::sig::INT, &sigint_handler, signal::flag::NONE, null); - - const cmd = getopt::parse(os::args, usage...); - defer getopt::finish(&cmd); - - let port: u16 = 8080; - let ip_addr: ip::addr4 = [127, 0, 0, 1]; - - for (let opt .. 
cmd.opts) { - switch (opt.0) { - case 'c' => yield; // TODO: actually handle the config - case => abort("unreachable"); - }; - }; - - static_fs = os::diropen("static")!; - defer fs::close(static_fs as *fs::fs); - - const server = match (http::listen(ip_addr, port, net::tcp::reuseport, net::tcp::reuseaddr)) { - case let this: *http::server => - yield this; - case => abort("failure while listening"); - }; - defer http::server_finish(server); - - for (running) { - const serv_req = match (http::serve(server)) { - case let this: *http::server_request => - yield this; - case => - log::println("failure while serving"); - continue; - }; - defer http::serve_finish(serv_req); - - match (handlereq(serv_req.socket, &serv_req.request)) { - case void => yield; - case => log::println("error while handling request"); - }; - }; -}; @@ -1,104 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> - -use fmt; -use fs; -use htmpl; -use io; -use mime; -use net::http; -use net::uri; -use strconv; -use strings; - -fn handlereq(conn: io::handle, request: *http::request) (void | io::error | nomem | fs::error) = { - let segments = match(segments_from_path(request.target.raw_path)) { - case let s: []str => - yield s; - case uri::invalid => - start_response(conn, 400, "text/plain")?; - fmt::fprintln(conn, "Invalid URI")?; - return void; - case nomem => - return nomem; - case => - abort("unreachable"); - }; - defer strings::freeall(segments); - - let trailing_slash: bool = false; - - if (segments[len(segments) - 1] == "") { - trailing_slash = true; - free(segments[len(segments) - 1]); - segments = segments[.. 
len(segments) - 1]; - }; - - if (len(segments) == 0) { - start_response(conn, 200, "text/html")?; - return tp_index(conn); - }; - - if (segments[0] == ":") { - if (len(segments) == 1) { - start_response(conn, 404, "text/plain")?; - fmt::fprintln(conn, "Error: Blank system endpoint")?; - return; - }; - - switch (segments[1]) { - case "static" => - if (len(segments) == 2) { - start_response(conn, 404, "text/plain")?; - fmt::fprintln(conn, "Error: Blank static endpoint")?; - return; - }; - - let fs_segments = segments[2 ..]; - for (let fs_segment .. fs_segments) { - if (strings::contains(fs_segment, "/")) { - start_response(conn, 400, "text/plain")?; - fmt::fprintln(conn, "Error: Slash found in filesystem path")?; - return; - }; - }; - let fs_segment_path = strings::join("/", fs_segments...)?; - defer free(fs_segment_path); - - let file = match (fs::open(static_fs as *fs::fs, fs_segment_path)) { - case let f: io::handle => yield f; - case fs::error => - start_response(conn, 500, "text/plain")?; - fmt::fprintln(conn, "Filesystem error")?; - return; - }; - defer io::close(file)!; - - let ext = strings::rcut(fs_segments[len(fs_segments) - 1], ".").1; - - let mimetype = match (mime::lookup_ext(ext)) { - case let m: *mime::mimetype => yield m.mime; - case null => yield "application/octet-stream"; - }; - - start_response(conn, 200, mimetype)?; - io::copy(conn, file)?; - - case => - start_response(conn, 404, "text/plain")?; - fmt::fprintln(conn, "Error: Unknown system endpoint")?; - }; - }; -}; - -fn start_response(conn: io::handle, status: uint, content_type: str) (void | io::error | nomem) = { // TODO: add len and other headers - fmt::fprint(conn, "HTTP/1.1 ")?; - fmt::fprint(conn, strconv::utos(status))?; - fmt::fprint(conn, " ")?; - fmt::fprint(conn, http::status_reason(status))?; - fmt::fprint(conn, "\r\n")?; - fmt::fprint(conn, "Content-Type: ")?; - fmt::fprint(conn, content_type)?; - fmt::fprint(conn, "\r\n")?; - fmt::fprint(conn, "\r\n")?; -}; diff --git 
a/templates/_footer.htmpl b/templates/_footer.htmpl deleted file mode 100644 index 71d5318..0000000 --- a/templates/_footer.htmpl +++ /dev/null @@ -1,9 +0,0 @@ -{{ define _tp_footer(handle: io::handle) (void | io::error | nomem) }} -<a href="https://lindenii.runxiyu.org/forge/">Lindenii Forge</a> -{{ " " }} -{{ global.version }} -{{ " " }} -(<a href="/:/source/">source</a>, -{{ " " }} -<a href="https://forge.lindenii.runxiyu.org/lindenii/forge/:/repos/server/">upstream</a>) -{{ end }} diff --git a/templates/_head_common.htmpl b/templates/_head_common.htmpl deleted file mode 100644 index bc14cb6..0000000 --- a/templates/_head_common.htmpl +++ /dev/null @@ -1,5 +0,0 @@ -{{ define _tp_head_common(handle: io::handle) (void | io::error | nomem) }} -<meta charset="utf-8" /> -<meta name="viewport" content="width=device-width, initial-scale=1" /> -<link rel="stylesheet" href="/:/static/style.css" /> -{{ end }} diff --git a/templates/_header.htmpl b/templates/_header.htmpl deleted file mode 100644 index 2eb8d19..0000000 --- a/templates/_header.htmpl +++ /dev/null @@ -1,14 +0,0 @@ -{{ define _tp_header(handle: io::handle, user_id_str: str, username: str) (void | io::error | nomem) }} -<header id="main-header"> - <div id="main-header-forge-title"> - <a href="/">{{ global.title }}</a> - </div> - <div id="main-header-user"> - {{ if user_id_str != "" }} - <a href="/:/users/{{ user_id_str }}">{{ username }}</a> - {{ else }} - <a href="/:/login/">Login</a> - {{ end }} - </div> -</header> -{{ end }} diff --git a/templates/index.htmpl b/templates/index.htmpl deleted file mode 100644 index e67cc09..0000000 --- a/templates/index.htmpl +++ /dev/null @@ -1,50 +0,0 @@ -{{ define tp_index(handle: io::handle) (void | io::error | nomem) }} -<!DOCTYPE html> -<html lang="en"> -<head> -{{ render _tp_head_common(handle) }} -<title>Index – {{ global.title }}</title> -</head> -<body> -{{ render _tp_header(handle, "test", "test") }} -<div class="padding-wrapper"> -<table class="wide rounded"> - 
<thead> - <tr> - <th colspan="2" class="title-row">Groups</th> - </tr> - <tr> - <th scope="col">Name</th> - <th scope="col">Description</th> - </tr> - </thead> - <tbody> - </tbody> -</table> -<div class="padding-wrapper"> - <table class="wide rounded"> - <thead> - <tr> - <th colspan="2" class="title-row"> - Info - </th> - </tr> - </thead> - <tbody> - <tr> - <th scope="row">SSH public key</th> - <td><code>{{ global.ssh_pubkey }}</code></td> - </tr> - <tr> - <th scope="row">SSH fingerprint</th> - <td><code>{{ global.ssh_fp }}</code></td> - </tr> - </tbody> - </table> -</div> -<footer> - {{ render _tp_footer(handle) }} -</footer> -</body> -</html> -{{ end }} @@ -1,20 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> - -use strings; -use net::uri; - -// The result, if not erroring out, must be freed with strings::freeall. -fn segments_from_path(s: str) ([]str | nomem | uri::invalid) = { - let sp: []str = strings::split(s, "/")?; - for (let i = 1z; i < len(sp); i += 1) { - match (uri::percent_decode(sp[i])) { - case let s: str => - sp[i - 1] = s; - case uri::invalid => - strings::freeall(sp[.. i - 1]); - return uri::invalid; - }; - }; - return sp[.. len(sp) - 1]; -}; |