From 5717faed659a9eeb86c528ab56822c42eca1ad3f Mon Sep 17 00:00:00 2001 From: Runxi Yu Date: Tue, 12 Aug 2025 11:01:07 +0800 Subject: Refactor --- forged/.golangci.yaml | 42 +- forged/internal/ansiec/ansiec.go | 5 - forged/internal/ansiec/colors.go | 26 - forged/internal/ansiec/reset.go | 6 - forged/internal/ansiec/style.go | 11 - forged/internal/argon2id/argon2id.go | 185 ------- forged/internal/bare/errors.go | 20 - forged/internal/bare/limit.go | 58 --- forged/internal/bare/marshal.go | 311 ------------ forged/internal/bare/package.go | 8 - forged/internal/bare/reader.go | 190 -------- forged/internal/bare/unions.go | 79 --- forged/internal/bare/unmarshal.go | 362 -------------- forged/internal/bare/varint.go | 30 -- forged/internal/bare/writer.go | 121 ----- forged/internal/cmap/comparable_map.go | 539 --------------------- forged/internal/cmap/map.go | 446 ----------------- forged/internal/common/ansiec/colors.go | 24 + forged/internal/common/ansiec/doc.go | 5 + forged/internal/common/ansiec/reset.go | 7 + forged/internal/common/ansiec/style.go | 12 + forged/internal/common/argon2id/LICENSE | 18 + forged/internal/common/argon2id/argon2id.go | 185 +++++++ forged/internal/common/bare/LICENSE | 203 ++++++++ forged/internal/common/bare/doc.go | 8 + forged/internal/common/bare/errors.go | 20 + forged/internal/common/bare/limit.go | 58 +++ forged/internal/common/bare/marshal.go | 311 ++++++++++++ forged/internal/common/bare/reader.go | 190 ++++++++ forged/internal/common/bare/unions.go | 81 ++++ forged/internal/common/bare/unmarshal.go | 362 ++++++++++++++ forged/internal/common/bare/varint.go | 30 ++ forged/internal/common/bare/writer.go | 121 +++++ forged/internal/common/cmap/LICENSE | 22 + forged/internal/common/cmap/comparable_map.go | 539 +++++++++++++++++++++ forged/internal/common/cmap/map.go | 446 +++++++++++++++++ forged/internal/common/humanize/bytes.go | 35 ++ forged/internal/common/misc/back.go | 11 + forged/internal/common/misc/iter.go | 23 + 
forged/internal/common/misc/misc.go | 5 + forged/internal/common/misc/net.go | 42 ++ forged/internal/common/misc/slices.go | 17 + forged/internal/common/misc/trivial.go | 48 ++ forged/internal/common/misc/unsafe.go | 20 + forged/internal/common/misc/url.go | 118 +++++ forged/internal/common/scfg/.golangci.yaml | 26 + forged/internal/common/scfg/LICENSE | 18 + forged/internal/common/scfg/reader.go | 157 ++++++ forged/internal/common/scfg/scfg.go | 59 +++ forged/internal/common/scfg/struct.go | 82 ++++ forged/internal/common/scfg/unmarshal.go | 375 ++++++++++++++ forged/internal/common/scfg/writer.go | 112 +++++ forged/internal/config/config.go | 61 +++ forged/internal/database/config.go | 5 + forged/internal/database/database.go | 8 +- forged/internal/database/queries/.gitignore | 1 + forged/internal/embed/.gitignore | 6 - forged/internal/embed/embed.go | 20 - forged/internal/git2c/client.go | 46 -- forged/internal/git2c/cmd_index.go | 65 --- forged/internal/git2c/cmd_treeraw.go | 94 ---- forged/internal/git2c/git_types.go | 28 -- forged/internal/git2c/perror.go | 48 -- forged/internal/global/global.go | 8 + forged/internal/humanize/bytes.go | 35 -- forged/internal/incoming/hooks/config.go | 6 + forged/internal/incoming/hooks/hooks.go | 80 +++ forged/internal/incoming/lmtp/config.go | 9 + forged/internal/incoming/lmtp/lmtp.go | 70 +++ forged/internal/incoming/ssh/config.go | 9 + forged/internal/incoming/ssh/ssh.go | 89 ++++ forged/internal/incoming/web/authn.go | 33 ++ forged/internal/incoming/web/config.go | 16 + forged/internal/incoming/web/handler.go | 77 +++ forged/internal/incoming/web/handlers/group.go | 92 ++++ forged/internal/incoming/web/handlers/index.go | 40 ++ .../incoming/web/handlers/not_implemented.go | 22 + .../internal/incoming/web/handlers/repo/handler.go | 15 + .../internal/incoming/web/handlers/repo/index.go | 20 + forged/internal/incoming/web/handlers/repo/raw.go | 19 + forged/internal/incoming/web/handlers/repo/tree.go | 19 + 
.../incoming/web/handlers/special/login.go | 115 +++++ forged/internal/incoming/web/router.go | 428 ++++++++++++++++ forged/internal/incoming/web/server.go | 70 +++ forged/internal/incoming/web/templates/load.go | 31 ++ forged/internal/incoming/web/templates/renderer.go | 23 + forged/internal/incoming/web/types/types.go | 45 ++ forged/internal/ipc/git2c/client.go | 50 ++ forged/internal/ipc/git2c/cmd_index.go | 67 +++ forged/internal/ipc/git2c/cmd_treeraw.go | 97 ++++ forged/internal/ipc/git2c/doc.go | 2 + forged/internal/ipc/git2c/git_types.go | 28 ++ forged/internal/ipc/git2c/perror.go | 47 ++ forged/internal/ipc/irc/bot.go | 170 +++++++ forged/internal/ipc/irc/config.go | 13 + forged/internal/ipc/irc/conn.go | 58 +++ forged/internal/ipc/irc/doc.go | 2 + forged/internal/ipc/irc/errors.go | 8 + forged/internal/ipc/irc/message.go | 126 +++++ forged/internal/ipc/irc/source.go | 51 ++ forged/internal/irc/bot.go | 176 ------- forged/internal/irc/conn.go | 49 -- forged/internal/irc/errors.go | 8 - forged/internal/irc/message.go | 126 ----- forged/internal/irc/source.go | 50 -- forged/internal/misc/back.go | 11 - forged/internal/misc/deploy.go | 22 - forged/internal/misc/iter.go | 23 - forged/internal/misc/misc.go | 18 - forged/internal/misc/panic.go | 19 - forged/internal/misc/trivial.go | 48 -- forged/internal/misc/unsafe.go | 20 - forged/internal/misc/url.go | 118 ----- forged/internal/oldgit/fmtpatch.go | 56 --- forged/internal/oldgit/oldgit.go | 5 - forged/internal/oldgit/patch.go | 43 -- forged/internal/render/chroma.go | 41 -- forged/internal/render/escape.go | 14 - forged/internal/render/readme.go | 34 -- forged/internal/render/render.go | 5 - forged/internal/scfg/.golangci.yaml | 26 - forged/internal/scfg/reader.go | 157 ------ forged/internal/scfg/scfg.go | 59 --- forged/internal/scfg/struct.go | 82 ---- forged/internal/scfg/unmarshal.go | 375 -------------- forged/internal/scfg/writer.go | 112 ----- forged/internal/server/server.go | 82 ++++ 
forged/internal/unsorted/acl.go | 59 --- forged/internal/unsorted/config.go | 94 ---- forged/internal/unsorted/database.go | 43 -- forged/internal/unsorted/fedauth.go | 97 ---- forged/internal/unsorted/git_hooks_handle_linux.go | 377 -------------- forged/internal/unsorted/git_hooks_handle_other.go | 336 ------------- forged/internal/unsorted/git_init.go | 34 -- forged/internal/unsorted/git_misc.go | 95 ---- forged/internal/unsorted/git_plumbing.go | 188 ------- forged/internal/unsorted/git_ref.go | 37 -- forged/internal/unsorted/http_auth.go | 26 - forged/internal/unsorted/http_handle_branches.go | 46 -- .../internal/unsorted/http_handle_group_index.go | 196 -------- forged/internal/unsorted/http_handle_index.go | 26 - forged/internal/unsorted/http_handle_login.go | 108 ----- .../internal/unsorted/http_handle_repo_commit.go | 146 ------ .../unsorted/http_handle_repo_contrib_index.go | 52 -- .../unsorted/http_handle_repo_contrib_one.go | 98 ---- forged/internal/unsorted/http_handle_repo_index.go | 41 -- forged/internal/unsorted/http_handle_repo_info.go | 107 ---- forged/internal/unsorted/http_handle_repo_log.go | 39 -- forged/internal/unsorted/http_handle_repo_raw.go | 56 --- forged/internal/unsorted/http_handle_repo_tree.go | 55 --- .../unsorted/http_handle_repo_upload_pack.go | 120 ----- forged/internal/unsorted/http_handle_users.go | 15 - forged/internal/unsorted/http_server.go | 276 ----------- forged/internal/unsorted/http_template.go | 18 - forged/internal/unsorted/lmtp_handle_patch.go | 133 ----- forged/internal/unsorted/lmtp_server.go | 204 -------- forged/internal/unsorted/remote_url.go | 25 - forged/internal/unsorted/resources.go | 56 --- forged/internal/unsorted/server.go | 236 --------- .../internal/unsorted/ssh_handle_receive_pack.go | 131 ----- forged/internal/unsorted/ssh_handle_upload_pack.go | 39 -- forged/internal/unsorted/ssh_server.go | 96 ---- forged/internal/unsorted/ssh_utils.go | 79 --- forged/internal/unsorted/unsorted.go | 5 - 
forged/internal/unsorted/users.go | 35 -- forged/internal/unsorted/version.go | 6 - forged/internal/web/error_pages.go | 60 --- forged/internal/web/web.go | 5 - forged/main.go | 7 +- forged/sql/queries/groups.sql | 47 ++ forged/sql/queries/login.sql | 8 + forged/sql/schema.sql | 226 +++++++++ forged/sqlc.yaml | 15 + forged/static/style.css | 44 +- forged/templates/_footer.tmpl | 2 +- forged/templates/_group_view.tmpl | 8 +- forged/templates/_header.tmpl | 16 +- forged/templates/group.tmpl | 10 +- forged/templates/index.tmpl | 8 +- forged/templates/login.tmpl | 4 +- 180 files changed, 6362 insertions(+), 8408 deletions(-) delete mode 100644 forged/internal/ansiec/ansiec.go delete mode 100644 forged/internal/ansiec/colors.go delete mode 100644 forged/internal/ansiec/reset.go delete mode 100644 forged/internal/ansiec/style.go delete mode 100644 forged/internal/argon2id/argon2id.go delete mode 100644 forged/internal/bare/errors.go delete mode 100644 forged/internal/bare/limit.go delete mode 100644 forged/internal/bare/marshal.go delete mode 100644 forged/internal/bare/package.go delete mode 100644 forged/internal/bare/reader.go delete mode 100644 forged/internal/bare/unions.go delete mode 100644 forged/internal/bare/unmarshal.go delete mode 100644 forged/internal/bare/varint.go delete mode 100644 forged/internal/bare/writer.go delete mode 100644 forged/internal/cmap/comparable_map.go delete mode 100644 forged/internal/cmap/map.go create mode 100644 forged/internal/common/ansiec/colors.go create mode 100644 forged/internal/common/ansiec/doc.go create mode 100644 forged/internal/common/ansiec/reset.go create mode 100644 forged/internal/common/ansiec/style.go create mode 100644 forged/internal/common/argon2id/LICENSE create mode 100644 forged/internal/common/argon2id/argon2id.go create mode 100644 forged/internal/common/bare/LICENSE create mode 100644 forged/internal/common/bare/doc.go create mode 100644 forged/internal/common/bare/errors.go create mode 100644 
forged/internal/common/bare/limit.go create mode 100644 forged/internal/common/bare/marshal.go create mode 100644 forged/internal/common/bare/reader.go create mode 100644 forged/internal/common/bare/unions.go create mode 100644 forged/internal/common/bare/unmarshal.go create mode 100644 forged/internal/common/bare/varint.go create mode 100644 forged/internal/common/bare/writer.go create mode 100644 forged/internal/common/cmap/LICENSE create mode 100644 forged/internal/common/cmap/comparable_map.go create mode 100644 forged/internal/common/cmap/map.go create mode 100644 forged/internal/common/humanize/bytes.go create mode 100644 forged/internal/common/misc/back.go create mode 100644 forged/internal/common/misc/iter.go create mode 100644 forged/internal/common/misc/misc.go create mode 100644 forged/internal/common/misc/net.go create mode 100644 forged/internal/common/misc/slices.go create mode 100644 forged/internal/common/misc/trivial.go create mode 100644 forged/internal/common/misc/unsafe.go create mode 100644 forged/internal/common/misc/url.go create mode 100644 forged/internal/common/scfg/.golangci.yaml create mode 100644 forged/internal/common/scfg/LICENSE create mode 100644 forged/internal/common/scfg/reader.go create mode 100644 forged/internal/common/scfg/scfg.go create mode 100644 forged/internal/common/scfg/struct.go create mode 100644 forged/internal/common/scfg/unmarshal.go create mode 100644 forged/internal/common/scfg/writer.go create mode 100644 forged/internal/config/config.go create mode 100644 forged/internal/database/config.go create mode 100644 forged/internal/database/queries/.gitignore delete mode 100644 forged/internal/embed/.gitignore delete mode 100644 forged/internal/embed/embed.go delete mode 100644 forged/internal/git2c/client.go delete mode 100644 forged/internal/git2c/cmd_index.go delete mode 100644 forged/internal/git2c/cmd_treeraw.go delete mode 100644 forged/internal/git2c/git_types.go delete mode 100644 
forged/internal/git2c/perror.go create mode 100644 forged/internal/global/global.go delete mode 100644 forged/internal/humanize/bytes.go create mode 100644 forged/internal/incoming/hooks/config.go create mode 100644 forged/internal/incoming/hooks/hooks.go create mode 100644 forged/internal/incoming/lmtp/config.go create mode 100644 forged/internal/incoming/lmtp/lmtp.go create mode 100644 forged/internal/incoming/ssh/config.go create mode 100644 forged/internal/incoming/ssh/ssh.go create mode 100644 forged/internal/incoming/web/authn.go create mode 100644 forged/internal/incoming/web/config.go create mode 100644 forged/internal/incoming/web/handler.go create mode 100644 forged/internal/incoming/web/handlers/group.go create mode 100644 forged/internal/incoming/web/handlers/index.go create mode 100644 forged/internal/incoming/web/handlers/not_implemented.go create mode 100644 forged/internal/incoming/web/handlers/repo/handler.go create mode 100644 forged/internal/incoming/web/handlers/repo/index.go create mode 100644 forged/internal/incoming/web/handlers/repo/raw.go create mode 100644 forged/internal/incoming/web/handlers/repo/tree.go create mode 100644 forged/internal/incoming/web/handlers/special/login.go create mode 100644 forged/internal/incoming/web/router.go create mode 100644 forged/internal/incoming/web/server.go create mode 100644 forged/internal/incoming/web/templates/load.go create mode 100644 forged/internal/incoming/web/templates/renderer.go create mode 100644 forged/internal/incoming/web/types/types.go create mode 100644 forged/internal/ipc/git2c/client.go create mode 100644 forged/internal/ipc/git2c/cmd_index.go create mode 100644 forged/internal/ipc/git2c/cmd_treeraw.go create mode 100644 forged/internal/ipc/git2c/doc.go create mode 100644 forged/internal/ipc/git2c/git_types.go create mode 100644 forged/internal/ipc/git2c/perror.go create mode 100644 forged/internal/ipc/irc/bot.go create mode 100644 forged/internal/ipc/irc/config.go create mode 100644 
forged/internal/ipc/irc/conn.go create mode 100644 forged/internal/ipc/irc/doc.go create mode 100644 forged/internal/ipc/irc/errors.go create mode 100644 forged/internal/ipc/irc/message.go create mode 100644 forged/internal/ipc/irc/source.go delete mode 100644 forged/internal/irc/bot.go delete mode 100644 forged/internal/irc/conn.go delete mode 100644 forged/internal/irc/errors.go delete mode 100644 forged/internal/irc/message.go delete mode 100644 forged/internal/irc/source.go delete mode 100644 forged/internal/misc/back.go delete mode 100644 forged/internal/misc/deploy.go delete mode 100644 forged/internal/misc/iter.go delete mode 100644 forged/internal/misc/misc.go delete mode 100644 forged/internal/misc/panic.go delete mode 100644 forged/internal/misc/trivial.go delete mode 100644 forged/internal/misc/unsafe.go delete mode 100644 forged/internal/misc/url.go delete mode 100644 forged/internal/oldgit/fmtpatch.go delete mode 100644 forged/internal/oldgit/oldgit.go delete mode 100644 forged/internal/oldgit/patch.go delete mode 100644 forged/internal/render/chroma.go delete mode 100644 forged/internal/render/escape.go delete mode 100644 forged/internal/render/readme.go delete mode 100644 forged/internal/render/render.go delete mode 100644 forged/internal/scfg/.golangci.yaml delete mode 100644 forged/internal/scfg/reader.go delete mode 100644 forged/internal/scfg/scfg.go delete mode 100644 forged/internal/scfg/struct.go delete mode 100644 forged/internal/scfg/unmarshal.go delete mode 100644 forged/internal/scfg/writer.go create mode 100644 forged/internal/server/server.go delete mode 100644 forged/internal/unsorted/acl.go delete mode 100644 forged/internal/unsorted/config.go delete mode 100644 forged/internal/unsorted/database.go delete mode 100644 forged/internal/unsorted/fedauth.go delete mode 100644 forged/internal/unsorted/git_hooks_handle_linux.go delete mode 100644 forged/internal/unsorted/git_hooks_handle_other.go delete mode 100644 
forged/internal/unsorted/git_init.go delete mode 100644 forged/internal/unsorted/git_misc.go delete mode 100644 forged/internal/unsorted/git_plumbing.go delete mode 100644 forged/internal/unsorted/git_ref.go delete mode 100644 forged/internal/unsorted/http_auth.go delete mode 100644 forged/internal/unsorted/http_handle_branches.go delete mode 100644 forged/internal/unsorted/http_handle_group_index.go delete mode 100644 forged/internal/unsorted/http_handle_index.go delete mode 100644 forged/internal/unsorted/http_handle_login.go delete mode 100644 forged/internal/unsorted/http_handle_repo_commit.go delete mode 100644 forged/internal/unsorted/http_handle_repo_contrib_index.go delete mode 100644 forged/internal/unsorted/http_handle_repo_contrib_one.go delete mode 100644 forged/internal/unsorted/http_handle_repo_index.go delete mode 100644 forged/internal/unsorted/http_handle_repo_info.go delete mode 100644 forged/internal/unsorted/http_handle_repo_log.go delete mode 100644 forged/internal/unsorted/http_handle_repo_raw.go delete mode 100644 forged/internal/unsorted/http_handle_repo_tree.go delete mode 100644 forged/internal/unsorted/http_handle_repo_upload_pack.go delete mode 100644 forged/internal/unsorted/http_handle_users.go delete mode 100644 forged/internal/unsorted/http_server.go delete mode 100644 forged/internal/unsorted/http_template.go delete mode 100644 forged/internal/unsorted/lmtp_handle_patch.go delete mode 100644 forged/internal/unsorted/lmtp_server.go delete mode 100644 forged/internal/unsorted/remote_url.go delete mode 100644 forged/internal/unsorted/resources.go delete mode 100644 forged/internal/unsorted/server.go delete mode 100644 forged/internal/unsorted/ssh_handle_receive_pack.go delete mode 100644 forged/internal/unsorted/ssh_handle_upload_pack.go delete mode 100644 forged/internal/unsorted/ssh_server.go delete mode 100644 forged/internal/unsorted/ssh_utils.go delete mode 100644 forged/internal/unsorted/unsorted.go delete mode 100644 
forged/internal/unsorted/users.go delete mode 100644 forged/internal/unsorted/version.go delete mode 100644 forged/internal/web/error_pages.go delete mode 100644 forged/internal/web/web.go create mode 100644 forged/sql/queries/groups.sql create mode 100644 forged/sql/queries/login.sql create mode 100644 forged/sql/schema.sql create mode 100644 forged/sqlc.yaml (limited to 'forged') diff --git a/forged/.golangci.yaml b/forged/.golangci.yaml index e475c41..499136b 100644 --- a/forged/.golangci.yaml +++ b/forged/.golangci.yaml @@ -4,32 +4,22 @@ linters: default: all disable: - depguard - - err113 # dynamically defined errors are fine for our purposes - - forcetypeassert # type assertion failures are usually programming errors - - gochecknoinits # we use inits sparingly for good reasons - - godox # they're just used as markers for where needs improvements - - ireturn # doesn't work well with how we use generics - - lll # long lines are acceptable - - mnd # it's a bit ridiculous to replace all of them - - nakedret # patterns should be consistent - - nonamedreturns # i like named returns - - wrapcheck # wrapping all errors is just not necessary - - varnamelen # "from" and "to" are very valid - - containedctx - - godot - - dogsled - - maintidx # e - - nestif # e - - gocognit # e - - gocyclo # e - - dupl # e - - cyclop # e - - goconst # e - - funlen # e - - wsl # e - - nlreturn # e - - unused # e - - exhaustruct # e + - wsl_v5 # tmp + - wsl # tmp + - unused # tmp + - nonamedreturns + - err113 # tmp + - gochecknoinits # tmp + - nlreturn # tmp + - cyclop # tmp + - gocognit # tmp + - varnamelen # tmp + - funlen # tmp + - lll + - mnd # tmp + - revive # tmp + - godox # tmp + - nestif # tmp linters-settings: revive: diff --git a/forged/internal/ansiec/ansiec.go b/forged/internal/ansiec/ansiec.go deleted file mode 100644 index 542c564..0000000 --- a/forged/internal/ansiec/ansiec.go +++ /dev/null @@ -1,5 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// 
SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -// Package ansiec provides definitions for ANSI escape sequences. -package ansiec diff --git a/forged/internal/ansiec/colors.go b/forged/internal/ansiec/colors.go deleted file mode 100644 index 8e5f54b..0000000 --- a/forged/internal/ansiec/colors.go +++ /dev/null @@ -1,26 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package ansiec - -const ( - Black = "\x1b[30m" - Red = "\x1b[31m" - Green = "\x1b[32m" - Yellow = "\x1b[33m" - Blue = "\x1b[34m" - Magenta = "\x1b[35m" - Cyan = "\x1b[36m" - White = "\x1b[37m" -) - -const ( - BrightBlack = "\x1b[30;1m" - BrightRed = "\x1b[31;1m" - BrightGreen = "\x1b[32;1m" - BrightYellow = "\x1b[33;1m" - BrightBlue = "\x1b[34;1m" - BrightMagenta = "\x1b[35;1m" - BrightCyan = "\x1b[36;1m" - BrightWhite = "\x1b[37;1m" -) diff --git a/forged/internal/ansiec/reset.go b/forged/internal/ansiec/reset.go deleted file mode 100644 index c5b6ba6..0000000 --- a/forged/internal/ansiec/reset.go +++ /dev/null @@ -1,6 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package ansiec - -const Reset = "\x1b[0m" diff --git a/forged/internal/ansiec/style.go b/forged/internal/ansiec/style.go deleted file mode 100644 index dd37344..0000000 --- a/forged/internal/ansiec/style.go +++ /dev/null @@ -1,11 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package ansiec - -const ( - Bold = "\x1b[1m" - Underline = "\x1b[4m" - Reversed = "\x1b[7m" - Italic = "\x1b[3m" -) diff --git a/forged/internal/argon2id/argon2id.go b/forged/internal/argon2id/argon2id.go deleted file mode 100644 index 88df8f6..0000000 --- a/forged/internal/argon2id/argon2id.go +++ /dev/null @@ -1,185 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: Copyright (c) 2018 Alex Edwards - -// Package argon2id provides a wrapper around Go's 
golang.org/x/crypto/argon2. -package argon2id - -import ( - "crypto/rand" - "crypto/subtle" - "encoding/base64" - "errors" - "fmt" - "runtime" - "strings" - - "golang.org/x/crypto/argon2" -) - -var ( - // ErrInvalidHash in returned by ComparePasswordAndHash if the provided - // hash isn't in the expected format. - ErrInvalidHash = errors.New("argon2id: hash is not in the correct format") - - // ErrIncompatibleVariant is returned by ComparePasswordAndHash if the - // provided hash was created using a unsupported variant of Argon2. - // Currently only argon2id is supported by this package. - ErrIncompatibleVariant = errors.New("argon2id: incompatible variant of argon2") - - // ErrIncompatibleVersion is returned by ComparePasswordAndHash if the - // provided hash was created using a different version of Argon2. - ErrIncompatibleVersion = errors.New("argon2id: incompatible version of argon2") -) - -// DefaultParams provides some sane default parameters for hashing passwords. -// -// Follows recommendations given by the Argon2 RFC: -// "The Argon2id variant with t=1 and maximum available memory is RECOMMENDED as a -// default setting for all environments. This setting is secure against side-channel -// attacks and maximizes adversarial costs on dedicated bruteforce hardware."" -// -// The default parameters should generally be used for development/testing purposes -// only. Custom parameters should be set for production applications depending on -// available memory/CPU resources and business requirements. -var DefaultParams = &Params{ - Memory: 64 * 1024, - Iterations: 1, - Parallelism: uint8(runtime.NumCPU()), - SaltLength: 16, - KeyLength: 32, -} - -// Params describes the input parameters used by the Argon2id algorithm. The -// Memory and Iterations parameters control the computational cost of hashing -// the password. The higher these figures are, the greater the cost of generating -// the hash and the longer the runtime. 
It also follows that the greater the cost -// will be for any attacker trying to guess the password. If the code is running -// on a machine with multiple cores, then you can decrease the runtime without -// reducing the cost by increasing the Parallelism parameter. This controls the -// number of threads that the work is spread across. Important note: Changing the -// value of the Parallelism parameter changes the hash output. -// -// For guidance and an outline process for choosing appropriate parameters see -// https://tools.ietf.org/html/draft-irtf-cfrg-argon2-04#section-4 -type Params struct { - // The amount of memory used by the algorithm (in kibibytes). - Memory uint32 - - // The number of iterations over the memory. - Iterations uint32 - - // The number of threads (or lanes) used by the algorithm. - // Recommended value is between 1 and runtime.NumCPU(). - Parallelism uint8 - - // Length of the random salt. 16 bytes is recommended for password hashing. - SaltLength uint32 - - // Length of the generated key. 16 bytes or more is recommended. - KeyLength uint32 -} - -// CreateHash returns an Argon2id hash of a plain-text password using the -// provided algorithm parameters. The returned hash follows the format used by -// the Argon2 reference C implementation and contains the base64-encoded Argon2id d -// derived key prefixed by the salt and parameters. 
It looks like this: -// -// $argon2id$v=19$m=65536,t=3,p=2$c29tZXNhbHQ$RdescudvJCsgt3ub+b+dWRWJTmaaJObG -func CreateHash(password string, params *Params) (hash string, err error) { - salt, err := generateRandomBytes(params.SaltLength) - if err != nil { - return "", err - } - - key := argon2.IDKey([]byte(password), salt, params.Iterations, params.Memory, params.Parallelism, params.KeyLength) - - b64Salt := base64.RawStdEncoding.EncodeToString(salt) - b64Key := base64.RawStdEncoding.EncodeToString(key) - - hash = fmt.Sprintf("$argon2id$v=%d$m=%d,t=%d,p=%d$%s$%s", argon2.Version, params.Memory, params.Iterations, params.Parallelism, b64Salt, b64Key) - return hash, nil -} - -// ComparePasswordAndHash performs a constant-time comparison between a -// plain-text password and Argon2id hash, using the parameters and salt -// contained in the hash. It returns true if they match, otherwise it returns -// false. -func ComparePasswordAndHash(password, hash string) (match bool, err error) { - match, _, err = CheckHash(password, hash) - return match, err -} - -// CheckHash is like ComparePasswordAndHash, except it also returns the params that the hash was -// created with. This can be useful if you want to update your hash params over time (which you -// should). 
-func CheckHash(password, hash string) (match bool, params *Params, err error) { - params, salt, key, err := DecodeHash(hash) - if err != nil { - return false, nil, err - } - - otherKey := argon2.IDKey([]byte(password), salt, params.Iterations, params.Memory, params.Parallelism, params.KeyLength) - - keyLen := int32(len(key)) - otherKeyLen := int32(len(otherKey)) - - if subtle.ConstantTimeEq(keyLen, otherKeyLen) == 0 { - return false, params, nil - } - if subtle.ConstantTimeCompare(key, otherKey) == 1 { - return true, params, nil - } - return false, params, nil -} - -func generateRandomBytes(n uint32) ([]byte, error) { - b := make([]byte, n) - _, err := rand.Read(b) - if err != nil { - return nil, err - } - - return b, nil -} - -// DecodeHash expects a hash created from this package, and parses it to return the params used to -// create it, as well as the salt and key (password hash). -func DecodeHash(hash string) (params *Params, salt, key []byte, err error) { - vals := strings.Split(hash, "$") - if len(vals) != 6 { - return nil, nil, nil, ErrInvalidHash - } - - if vals[1] != "argon2id" { - return nil, nil, nil, ErrIncompatibleVariant - } - - var version int - _, err = fmt.Sscanf(vals[2], "v=%d", &version) - if err != nil { - return nil, nil, nil, err - } - if version != argon2.Version { - return nil, nil, nil, ErrIncompatibleVersion - } - - params = &Params{} - _, err = fmt.Sscanf(vals[3], "m=%d,t=%d,p=%d", ¶ms.Memory, ¶ms.Iterations, ¶ms.Parallelism) - if err != nil { - return nil, nil, nil, err - } - - salt, err = base64.RawStdEncoding.Strict().DecodeString(vals[4]) - if err != nil { - return nil, nil, nil, err - } - params.SaltLength = uint32(len(salt)) - - key, err = base64.RawStdEncoding.Strict().DecodeString(vals[5]) - if err != nil { - return nil, nil, nil, err - } - params.KeyLength = uint32(len(key)) - - return params, salt, key, nil -} diff --git a/forged/internal/bare/errors.go b/forged/internal/bare/errors.go deleted file mode 100644 index 
39c951a..0000000 --- a/forged/internal/bare/errors.go +++ /dev/null @@ -1,20 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault - -package bare - -import ( - "errors" - "fmt" - "reflect" -) - -var ErrInvalidStr = errors.New("String contains invalid UTF-8 sequences") - -type UnsupportedTypeError struct { - Type reflect.Type -} - -func (e *UnsupportedTypeError) Error() string { - return fmt.Sprintf("Unsupported type for marshaling: %s\n", e.Type.String()) -} diff --git a/forged/internal/bare/limit.go b/forged/internal/bare/limit.go deleted file mode 100644 index 212bc05..0000000 --- a/forged/internal/bare/limit.go +++ /dev/null @@ -1,58 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault - -package bare - -import ( - "errors" - "io" -) - -var ( - maxUnmarshalBytes uint64 = 1024 * 1024 * 32 /* 32 MiB */ - maxArrayLength uint64 = 1024 * 4 /* 4096 elements */ - maxMapSize uint64 = 1024 -) - -// MaxUnmarshalBytes sets the maximum size of a message decoded by unmarshal. -// By default, this is set to 32 MiB. -func MaxUnmarshalBytes(bytes uint64) { - maxUnmarshalBytes = bytes -} - -// MaxArrayLength sets maximum number of elements in array. Defaults to 4096 elements -func MaxArrayLength(length uint64) { - maxArrayLength = length -} - -// MaxMapSize sets maximum size of map. Defaults to 1024 key/value pairs -func MaxMapSize(size uint64) { - maxMapSize = size -} - -// Use MaxUnmarshalBytes to prevent this error from occuring on messages which -// are large by design. -var ErrLimitExceeded = errors.New("Maximum message size exceeded") - -// Identical to io.LimitedReader, except it returns our custom error instead of -// EOF if the limit is reached. 
-type limitedReader struct { - R io.Reader - N uint64 -} - -func (l *limitedReader) Read(p []byte) (n int, err error) { - if l.N <= 0 { - return 0, ErrLimitExceeded - } - if uint64(len(p)) > l.N { - p = p[0:l.N] - } - n, err = l.R.Read(p) - l.N -= uint64(n) - return -} - -func newLimitedReader(r io.Reader) *limitedReader { - return &limitedReader{r, maxUnmarshalBytes} -} diff --git a/forged/internal/bare/marshal.go b/forged/internal/bare/marshal.go deleted file mode 100644 index 1ce942d..0000000 --- a/forged/internal/bare/marshal.go +++ /dev/null @@ -1,311 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault - -package bare - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "sync" -) - -// A type which implements this interface will be responsible for marshaling -// itself when encountered. -type Marshalable interface { - Marshal(w *Writer) error -} - -var encoderBufferPool = sync.Pool{ - New: func() interface{} { - buf := &bytes.Buffer{} - buf.Grow(32) - return buf - }, -} - -// Marshals a value (val, which must be a pointer) into a BARE message. -// -// The encoding of each struct field can be customized by the format string -// stored under the "bare" key in the struct field's tag. -// -// As a special case, if the field tag is "-", the field is always omitted. -func Marshal(val interface{}) ([]byte, error) { - // reuse buffers from previous serializations - b := encoderBufferPool.Get().(*bytes.Buffer) - defer func() { - b.Reset() - encoderBufferPool.Put(b) - }() - - w := NewWriter(b) - err := MarshalWriter(w, val) - - msg := make([]byte, b.Len()) - copy(msg, b.Bytes()) - - return msg, err -} - -// Marshals a value (val, which must be a pointer) into a BARE message and -// writes it to a Writer. See Marshal for details. 
-func MarshalWriter(w *Writer, val interface{}) error { - t := reflect.TypeOf(val) - v := reflect.ValueOf(val) - if t.Kind() != reflect.Ptr { - return errors.New("Expected val to be pointer type") - } - - return getEncoder(t.Elem())(w, v.Elem()) -} - -type encodeFunc func(w *Writer, v reflect.Value) error - -var encodeFuncCache sync.Map // map[reflect.Type]encodeFunc - -// get decoder from cache -func getEncoder(t reflect.Type) encodeFunc { - if f, ok := encodeFuncCache.Load(t); ok { - return f.(encodeFunc) - } - - f := encoderFunc(t) - encodeFuncCache.Store(t, f) - return f -} - -var marshalableInterface = reflect.TypeOf((*Unmarshalable)(nil)).Elem() - -func encoderFunc(t reflect.Type) encodeFunc { - if reflect.PointerTo(t).Implements(marshalableInterface) { - return func(w *Writer, v reflect.Value) error { - uv := v.Addr().Interface().(Marshalable) - return uv.Marshal(w) - } - } - - if t.Kind() == reflect.Interface && t.Implements(unionInterface) { - return encodeUnion(t) - } - - switch t.Kind() { - case reflect.Ptr: - return encodeOptional(t.Elem()) - case reflect.Struct: - return encodeStruct(t) - case reflect.Array: - return encodeArray(t) - case reflect.Slice: - return encodeSlice(t) - case reflect.Map: - return encodeMap(t) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return encodeUint - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return encodeInt - case reflect.Float32, reflect.Float64: - return encodeFloat - case reflect.Bool: - return encodeBool - case reflect.String: - return encodeString - } - - return func(w *Writer, v reflect.Value) error { - return &UnsupportedTypeError{v.Type()} - } -} - -func encodeOptional(t reflect.Type) encodeFunc { - return func(w *Writer, v reflect.Value) error { - if v.IsNil() { - return w.WriteBool(false) - } - - if err := w.WriteBool(true); err != nil { - return err - } - - return getEncoder(t)(w, v.Elem()) - } -} - -func encodeStruct(t 
reflect.Type) encodeFunc { - n := t.NumField() - encoders := make([]encodeFunc, n) - for i := 0; i < n; i++ { - field := t.Field(i) - if field.Tag.Get("bare") == "-" { - continue - } - encoders[i] = getEncoder(field.Type) - } - - return func(w *Writer, v reflect.Value) error { - for i := 0; i < n; i++ { - if encoders[i] == nil { - continue - } - err := encoders[i](w, v.Field(i)) - if err != nil { - return err - } - } - return nil - } -} - -func encodeArray(t reflect.Type) encodeFunc { - f := getEncoder(t.Elem()) - len := t.Len() - - return func(w *Writer, v reflect.Value) error { - for i := 0; i < len; i++ { - if err := f(w, v.Index(i)); err != nil { - return err - } - } - return nil - } -} - -func encodeSlice(t reflect.Type) encodeFunc { - elem := t.Elem() - f := getEncoder(elem) - - return func(w *Writer, v reflect.Value) error { - if err := w.WriteUint(uint64(v.Len())); err != nil { - return err - } - - for i := 0; i < v.Len(); i++ { - if err := f(w, v.Index(i)); err != nil { - return err - } - } - return nil - } -} - -func encodeMap(t reflect.Type) encodeFunc { - keyType := t.Key() - keyf := getEncoder(keyType) - - valueType := t.Elem() - valf := getEncoder(valueType) - - return func(w *Writer, v reflect.Value) error { - if err := w.WriteUint(uint64(v.Len())); err != nil { - return err - } - - iter := v.MapRange() - for iter.Next() { - if err := keyf(w, iter.Key()); err != nil { - return err - } - if err := valf(w, iter.Value()); err != nil { - return err - } - } - return nil - } -} - -func encodeUnion(t reflect.Type) encodeFunc { - ut, ok := unionRegistry[t] - if !ok { - return func(w *Writer, v reflect.Value) error { - return fmt.Errorf("Union type %s is not registered", t.Name()) - } - } - - encoders := make(map[uint64]encodeFunc) - for tag, t := range ut.types { - encoders[tag] = getEncoder(t) - } - - return func(w *Writer, v reflect.Value) error { - t := v.Elem().Type() - if t.Kind() == reflect.Ptr { - // If T is a valid union value type, *T is valid too. 
- t = t.Elem() - v = v.Elem() - } - tag, ok := ut.tags[t] - if !ok { - return fmt.Errorf("Invalid union value: %s", v.Elem().String()) - } - - if err := w.WriteUint(tag); err != nil { - return err - } - - return encoders[tag](w, v.Elem()) - } -} - -func encodeUint(w *Writer, v reflect.Value) error { - switch getIntKind(v.Type()) { - case reflect.Uint: - return w.WriteUint(v.Uint()) - - case reflect.Uint8: - return w.WriteU8(uint8(v.Uint())) - - case reflect.Uint16: - return w.WriteU16(uint16(v.Uint())) - - case reflect.Uint32: - return w.WriteU32(uint32(v.Uint())) - - case reflect.Uint64: - return w.WriteU64(uint64(v.Uint())) - } - - panic("not uint") -} - -func encodeInt(w *Writer, v reflect.Value) error { - switch getIntKind(v.Type()) { - case reflect.Int: - return w.WriteInt(v.Int()) - - case reflect.Int8: - return w.WriteI8(int8(v.Int())) - - case reflect.Int16: - return w.WriteI16(int16(v.Int())) - - case reflect.Int32: - return w.WriteI32(int32(v.Int())) - - case reflect.Int64: - return w.WriteI64(int64(v.Int())) - } - - panic("not int") -} - -func encodeFloat(w *Writer, v reflect.Value) error { - switch v.Type().Kind() { - case reflect.Float32: - return w.WriteF32(float32(v.Float())) - case reflect.Float64: - return w.WriteF64(v.Float()) - } - - panic("not float") -} - -func encodeBool(w *Writer, v reflect.Value) error { - return w.WriteBool(v.Bool()) -} - -func encodeString(w *Writer, v reflect.Value) error { - if v.Kind() != reflect.String { - panic("not string") - } - return w.WriteString(v.String()) -} diff --git a/forged/internal/bare/package.go b/forged/internal/bare/package.go deleted file mode 100644 index 2f12f55..0000000 --- a/forged/internal/bare/package.go +++ /dev/null @@ -1,8 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -// Package bare provides primitives to encode and decode BARE messages. 
-// -// There is no guarantee that this is compatible with the upstream -// implementation at https://git.sr.ht/~sircmpwn/go-bare. -package bare diff --git a/forged/internal/bare/reader.go b/forged/internal/bare/reader.go deleted file mode 100644 index 58325e3..0000000 --- a/forged/internal/bare/reader.go +++ /dev/null @@ -1,190 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault - -package bare - -import ( - "encoding/binary" - "fmt" - "io" - "math" - "unicode/utf8" - - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -type byteReader interface { - io.Reader - io.ByteReader -} - -// A Reader for BARE primitive types. -type Reader struct { - base byteReader - scratch [8]byte -} - -type simpleByteReader struct { - io.Reader - scratch [1]byte -} - -func (r simpleByteReader) ReadByte() (byte, error) { - // using reference type here saves us allocations - _, err := r.Read(r.scratch[:]) - return r.scratch[0], err -} - -// Returns a new BARE primitive reader wrapping the given io.Reader. 
-func NewReader(base io.Reader) *Reader { - br, ok := base.(byteReader) - if !ok { - br = simpleByteReader{Reader: base} - } - return &Reader{base: br} -} - -func (r *Reader) ReadUint() (uint64, error) { - x, err := binary.ReadUvarint(r.base) - if err != nil { - return x, err - } - return x, nil -} - -func (r *Reader) ReadU8() (uint8, error) { - return r.base.ReadByte() -} - -func (r *Reader) ReadU16() (uint16, error) { - var i uint16 - if _, err := io.ReadAtLeast(r.base, r.scratch[:2], 2); err != nil { - return i, err - } - return binary.LittleEndian.Uint16(r.scratch[:]), nil -} - -func (r *Reader) ReadU32() (uint32, error) { - var i uint32 - if _, err := io.ReadAtLeast(r.base, r.scratch[:4], 4); err != nil { - return i, err - } - return binary.LittleEndian.Uint32(r.scratch[:]), nil -} - -func (r *Reader) ReadU64() (uint64, error) { - var i uint64 - if _, err := io.ReadAtLeast(r.base, r.scratch[:8], 8); err != nil { - return i, err - } - return binary.LittleEndian.Uint64(r.scratch[:]), nil -} - -func (r *Reader) ReadInt() (int64, error) { - return binary.ReadVarint(r.base) -} - -func (r *Reader) ReadI8() (int8, error) { - b, err := r.base.ReadByte() - return int8(b), err -} - -func (r *Reader) ReadI16() (int16, error) { - var i int16 - if _, err := io.ReadAtLeast(r.base, r.scratch[:2], 2); err != nil { - return i, err - } - return int16(binary.LittleEndian.Uint16(r.scratch[:])), nil -} - -func (r *Reader) ReadI32() (int32, error) { - var i int32 - if _, err := io.ReadAtLeast(r.base, r.scratch[:4], 4); err != nil { - return i, err - } - return int32(binary.LittleEndian.Uint32(r.scratch[:])), nil -} - -func (r *Reader) ReadI64() (int64, error) { - var i int64 - if _, err := io.ReadAtLeast(r.base, r.scratch[:], 8); err != nil { - return i, err - } - return int64(binary.LittleEndian.Uint64(r.scratch[:])), nil -} - -func (r *Reader) ReadF32() (float32, error) { - u, err := r.ReadU32() - f := math.Float32frombits(u) - if math.IsNaN(float64(f)) { - return 0.0, 
fmt.Errorf("NaN is not permitted in BARE floats") - } - return f, err -} - -func (r *Reader) ReadF64() (float64, error) { - u, err := r.ReadU64() - f := math.Float64frombits(u) - if math.IsNaN(f) { - return 0.0, fmt.Errorf("NaN is not permitted in BARE floats") - } - return f, err -} - -func (r *Reader) ReadBool() (bool, error) { - b, err := r.ReadU8() - if err != nil { - return false, err - } - - if b > 1 { - return false, fmt.Errorf("Invalid bool value: %#x", b) - } - - return b == 1, nil -} - -func (r *Reader) ReadString() (string, error) { - buf, err := r.ReadData() - if err != nil { - return "", err - } - if !utf8.Valid(buf) { - return "", ErrInvalidStr - } - return misc.BytesToString(buf), nil -} - -// Reads a fixed amount of arbitrary data, defined by the length of the slice. -func (r *Reader) ReadDataFixed(dest []byte) error { - var amt int = 0 - for amt < len(dest) { - n, err := r.base.Read(dest[amt:]) - if err != nil { - return err - } - amt += n - } - return nil -} - -// Reads arbitrary data whose length is read from the message. -func (r *Reader) ReadData() ([]byte, error) { - l, err := r.ReadUint() - if err != nil { - return nil, err - } - if l >= maxUnmarshalBytes { - return nil, ErrLimitExceeded - } - buf := make([]byte, l) - var amt uint64 = 0 - for amt < l { - n, err := r.base.Read(buf[amt:]) - if err != nil { - return nil, err - } - amt += uint64(n) - } - return buf, nil -} diff --git a/forged/internal/bare/unions.go b/forged/internal/bare/unions.go deleted file mode 100644 index 0270a5f..0000000 --- a/forged/internal/bare/unions.go +++ /dev/null @@ -1,79 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault - -package bare - -import ( - "fmt" - "reflect" -) - -// Any type which is a union member must implement this interface. You must -// also call RegisterUnion for go-bare to marshal or unmarshal messages which -// utilize your union type. 
-type Union interface { - IsUnion() -} - -type UnionTags struct { - iface reflect.Type - tags map[reflect.Type]uint64 - types map[uint64]reflect.Type -} - -var unionInterface = reflect.TypeOf((*Union)(nil)).Elem() -var unionRegistry map[reflect.Type]*UnionTags - -func init() { - unionRegistry = make(map[reflect.Type]*UnionTags) -} - -// Registers a union type in this context. Pass the union interface and the -// list of types associated with it, sorted ascending by their union tag. -func RegisterUnion(iface interface{}) *UnionTags { - ity := reflect.TypeOf(iface).Elem() - if _, ok := unionRegistry[ity]; ok { - panic(fmt.Errorf("Type %s has already been registered", ity.Name())) - } - - if !ity.Implements(reflect.TypeOf((*Union)(nil)).Elem()) { - panic(fmt.Errorf("Type %s does not implement bare.Union", ity.Name())) - } - - utypes := &UnionTags{ - iface: ity, - tags: make(map[reflect.Type]uint64), - types: make(map[uint64]reflect.Type), - } - unionRegistry[ity] = utypes - return utypes -} - -func (ut *UnionTags) Member(t interface{}, tag uint64) *UnionTags { - ty := reflect.TypeOf(t) - if !ty.AssignableTo(ut.iface) { - panic(fmt.Errorf("Type %s does not implement interface %s", - ty.Name(), ut.iface.Name())) - } - if _, ok := ut.tags[ty]; ok { - panic(fmt.Errorf("Type %s is already registered for union %s", - ty.Name(), ut.iface.Name())) - } - if _, ok := ut.types[tag]; ok { - panic(fmt.Errorf("Tag %d is already registered for union %s", - tag, ut.iface.Name())) - } - ut.tags[ty] = tag - ut.types[tag] = ty - return ut -} - -func (ut *UnionTags) TagFor(v interface{}) (uint64, bool) { - tag, ok := ut.tags[reflect.TypeOf(v)] - return tag, ok -} - -func (ut *UnionTags) TypeFor(tag uint64) (reflect.Type, bool) { - t, ok := ut.types[tag] - return t, ok -} diff --git a/forged/internal/bare/unmarshal.go b/forged/internal/bare/unmarshal.go deleted file mode 100644 index d55f32c..0000000 --- a/forged/internal/bare/unmarshal.go +++ /dev/null @@ -1,362 +0,0 @@ -// 
SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault - -package bare - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "sync" -) - -// A type which implements this interface will be responsible for unmarshaling -// itself when encountered. -type Unmarshalable interface { - Unmarshal(r *Reader) error -} - -// Unmarshals a BARE message into val, which must be a pointer to a value of -// the message type. -func Unmarshal(data []byte, val interface{}) error { - b := bytes.NewReader(data) - r := NewReader(b) - return UnmarshalBareReader(r, val) -} - -// Unmarshals a BARE message into value (val, which must be a pointer), from a -// reader. See Unmarshal for details. -func UnmarshalReader(r io.Reader, val interface{}) error { - r = newLimitedReader(r) - return UnmarshalBareReader(NewReader(r), val) -} - -type decodeFunc func(r *Reader, v reflect.Value) error - -var decodeFuncCache sync.Map // map[reflect.Type]decodeFunc - -func UnmarshalBareReader(r *Reader, val interface{}) error { - t := reflect.TypeOf(val) - v := reflect.ValueOf(val) - if t.Kind() != reflect.Ptr { - return errors.New("Expected val to be pointer type") - } - - return getDecoder(t.Elem())(r, v.Elem()) -} - -// get decoder from cache -func getDecoder(t reflect.Type) decodeFunc { - if f, ok := decodeFuncCache.Load(t); ok { - return f.(decodeFunc) - } - - f := decoderFunc(t) - decodeFuncCache.Store(t, f) - return f -} - -var unmarshalableInterface = reflect.TypeOf((*Unmarshalable)(nil)).Elem() - -func decoderFunc(t reflect.Type) decodeFunc { - if reflect.PointerTo(t).Implements(unmarshalableInterface) { - return func(r *Reader, v reflect.Value) error { - uv := v.Addr().Interface().(Unmarshalable) - return uv.Unmarshal(r) - } - } - - if t.Kind() == reflect.Interface && t.Implements(unionInterface) { - return decodeUnion(t) - } - - switch t.Kind() { - case reflect.Ptr: - return decodeOptional(t.Elem()) - case reflect.Struct: - return decodeStruct(t) - 
case reflect.Array: - return decodeArray(t) - case reflect.Slice: - return decodeSlice(t) - case reflect.Map: - return decodeMap(t) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return decodeUint - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return decodeInt - case reflect.Float32, reflect.Float64: - return decodeFloat - case reflect.Bool: - return decodeBool - case reflect.String: - return decodeString - } - - return func(r *Reader, v reflect.Value) error { - return &UnsupportedTypeError{v.Type()} - } -} - -func decodeOptional(t reflect.Type) decodeFunc { - return func(r *Reader, v reflect.Value) error { - s, err := r.ReadU8() - if err != nil { - return err - } - - if s > 1 { - return fmt.Errorf("Invalid optional value: %#x", s) - } - - if s == 0 { - return nil - } - - v.Set(reflect.New(t)) - return getDecoder(t)(r, v.Elem()) - } -} - -func decodeStruct(t reflect.Type) decodeFunc { - n := t.NumField() - decoders := make([]decodeFunc, n) - for i := 0; i < n; i++ { - field := t.Field(i) - if field.Tag.Get("bare") == "-" { - continue - } - decoders[i] = getDecoder(field.Type) - } - - return func(r *Reader, v reflect.Value) error { - for i := 0; i < n; i++ { - if decoders[i] == nil { - continue - } - err := decoders[i](r, v.Field(i)) - if err != nil { - return err - } - } - return nil - } -} - -func decodeArray(t reflect.Type) decodeFunc { - f := getDecoder(t.Elem()) - len := t.Len() - - return func(r *Reader, v reflect.Value) error { - for i := 0; i < len; i++ { - err := f(r, v.Index(i)) - if err != nil { - return err - } - } - return nil - } -} - -func decodeSlice(t reflect.Type) decodeFunc { - elem := t.Elem() - f := getDecoder(elem) - - return func(r *Reader, v reflect.Value) error { - len, err := r.ReadUint() - if err != nil { - return err - } - - if len > maxArrayLength { - return fmt.Errorf("Array length %d exceeds configured limit of %d", len, maxArrayLength) - } - - 
v.Set(reflect.MakeSlice(t, int(len), int(len))) - - for i := 0; i < int(len); i++ { - if err := f(r, v.Index(i)); err != nil { - return err - } - } - return nil - } -} - -func decodeMap(t reflect.Type) decodeFunc { - keyType := t.Key() - keyf := getDecoder(keyType) - - valueType := t.Elem() - valf := getDecoder(valueType) - - return func(r *Reader, v reflect.Value) error { - size, err := r.ReadUint() - if err != nil { - return err - } - - if size > maxMapSize { - return fmt.Errorf("Map size %d exceeds configured limit of %d", size, maxMapSize) - } - - v.Set(reflect.MakeMapWithSize(t, int(size))) - - key := reflect.New(keyType).Elem() - value := reflect.New(valueType).Elem() - - for i := uint64(0); i < size; i++ { - if err := keyf(r, key); err != nil { - return err - } - - if v.MapIndex(key).Kind() > reflect.Invalid { - return fmt.Errorf("Encountered duplicate map key: %v", key.Interface()) - } - - if err := valf(r, value); err != nil { - return err - } - - v.SetMapIndex(key, value) - } - return nil - } -} - -func decodeUnion(t reflect.Type) decodeFunc { - ut, ok := unionRegistry[t] - if !ok { - return func(r *Reader, v reflect.Value) error { - return fmt.Errorf("Union type %s is not registered", t.Name()) - } - } - - decoders := make(map[uint64]decodeFunc) - for tag, t := range ut.types { - t := t - f := getDecoder(t) - - decoders[tag] = func(r *Reader, v reflect.Value) error { - nv := reflect.New(t) - if err := f(r, nv.Elem()); err != nil { - return err - } - - v.Set(nv) - return nil - } - } - - return func(r *Reader, v reflect.Value) error { - tag, err := r.ReadUint() - if err != nil { - return err - } - - if f, ok := decoders[tag]; ok { - return f(r, v) - } - - return fmt.Errorf("Invalid union tag %d for type %s", tag, t.Name()) - } -} - -func decodeUint(r *Reader, v reflect.Value) error { - var err error - switch getIntKind(v.Type()) { - case reflect.Uint: - var u uint64 - u, err = r.ReadUint() - v.SetUint(u) - - case reflect.Uint8: - var u uint8 - u, err = 
r.ReadU8() - v.SetUint(uint64(u)) - - case reflect.Uint16: - var u uint16 - u, err = r.ReadU16() - v.SetUint(uint64(u)) - case reflect.Uint32: - var u uint32 - u, err = r.ReadU32() - v.SetUint(uint64(u)) - - case reflect.Uint64: - var u uint64 - u, err = r.ReadU64() - v.SetUint(uint64(u)) - - default: - panic("not an uint") - } - - return err -} - -func decodeInt(r *Reader, v reflect.Value) error { - var err error - switch getIntKind(v.Type()) { - case reflect.Int: - var i int64 - i, err = r.ReadInt() - v.SetInt(i) - - case reflect.Int8: - var i int8 - i, err = r.ReadI8() - v.SetInt(int64(i)) - - case reflect.Int16: - var i int16 - i, err = r.ReadI16() - v.SetInt(int64(i)) - case reflect.Int32: - var i int32 - i, err = r.ReadI32() - v.SetInt(int64(i)) - - case reflect.Int64: - var i int64 - i, err = r.ReadI64() - v.SetInt(int64(i)) - - default: - panic("not an int") - } - - return err -} - -func decodeFloat(r *Reader, v reflect.Value) error { - var err error - switch v.Type().Kind() { - case reflect.Float32: - var f float32 - f, err = r.ReadF32() - v.SetFloat(float64(f)) - case reflect.Float64: - var f float64 - f, err = r.ReadF64() - v.SetFloat(f) - default: - panic("not a float") - } - return err -} - -func decodeBool(r *Reader, v reflect.Value) error { - b, err := r.ReadBool() - v.SetBool(b) - return err -} - -func decodeString(r *Reader, v reflect.Value) error { - s, err := r.ReadString() - v.SetString(s) - return err -} diff --git a/forged/internal/bare/varint.go b/forged/internal/bare/varint.go deleted file mode 100644 index a185ac8..0000000 --- a/forged/internal/bare/varint.go +++ /dev/null @@ -1,30 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault - -package bare - -import ( - "reflect" -) - -// Int is a variable-length encoded signed integer. -type Int int64 - -// Uint is a variable-length encoded unsigned integer. 
-type Uint uint64 - -var ( - intType = reflect.TypeOf(Int(0)) - uintType = reflect.TypeOf(Uint(0)) -) - -func getIntKind(t reflect.Type) reflect.Kind { - switch t { - case intType: - return reflect.Int - case uintType: - return reflect.Uint - default: - return t.Kind() - } -} diff --git a/forged/internal/bare/writer.go b/forged/internal/bare/writer.go deleted file mode 100644 index bada045..0000000 --- a/forged/internal/bare/writer.go +++ /dev/null @@ -1,121 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault - -package bare - -import ( - "encoding/binary" - "fmt" - "io" - "math" - - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -// A Writer for BARE primitive types. -type Writer struct { - base io.Writer - scratch [binary.MaxVarintLen64]byte -} - -// Returns a new BARE primitive writer wrapping the given io.Writer. -func NewWriter(base io.Writer) *Writer { - return &Writer{base: base} -} - -func (w *Writer) WriteUint(i uint64) error { - n := binary.PutUvarint(w.scratch[:], i) - _, err := w.base.Write(w.scratch[:n]) - return err -} - -func (w *Writer) WriteU8(i uint8) error { - return binary.Write(w.base, binary.LittleEndian, i) -} - -func (w *Writer) WriteU16(i uint16) error { - return binary.Write(w.base, binary.LittleEndian, i) -} - -func (w *Writer) WriteU32(i uint32) error { - return binary.Write(w.base, binary.LittleEndian, i) -} - -func (w *Writer) WriteU64(i uint64) error { - return binary.Write(w.base, binary.LittleEndian, i) -} - -func (w *Writer) WriteInt(i int64) error { - var buf [binary.MaxVarintLen64]byte - n := binary.PutVarint(buf[:], i) - _, err := w.base.Write(buf[:n]) - return err -} - -func (w *Writer) WriteI8(i int8) error { - return binary.Write(w.base, binary.LittleEndian, i) -} - -func (w *Writer) WriteI16(i int16) error { - return binary.Write(w.base, binary.LittleEndian, i) -} - -func (w *Writer) WriteI32(i int32) error { - return binary.Write(w.base, binary.LittleEndian, 
i) -} - -func (w *Writer) WriteI64(i int64) error { - return binary.Write(w.base, binary.LittleEndian, i) -} - -func (w *Writer) WriteF32(f float32) error { - if math.IsNaN(float64(f)) { - return fmt.Errorf("NaN is not permitted in BARE floats") - } - return binary.Write(w.base, binary.LittleEndian, f) -} - -func (w *Writer) WriteF64(f float64) error { - if math.IsNaN(f) { - return fmt.Errorf("NaN is not permitted in BARE floats") - } - return binary.Write(w.base, binary.LittleEndian, f) -} - -func (w *Writer) WriteBool(b bool) error { - return binary.Write(w.base, binary.LittleEndian, b) -} - -func (w *Writer) WriteString(str string) error { - return w.WriteData(misc.StringToBytes(str)) -} - -// Writes a fixed amount of arbitrary data, defined by the length of the slice. -func (w *Writer) WriteDataFixed(data []byte) error { - var amt int = 0 - for amt < len(data) { - n, err := w.base.Write(data[amt:]) - if err != nil { - return err - } - amt += n - } - return nil -} - -// Writes arbitrary data whose length is encoded into the message. -func (w *Writer) WriteData(data []byte) error { - err := w.WriteUint(uint64(len(data))) - if err != nil { - return err - } - var amt int = 0 - for amt < len(data) { - n, err := w.base.Write(data[amt:]) - if err != nil { - return err - } - amt += n - } - return nil -} diff --git a/forged/internal/cmap/comparable_map.go b/forged/internal/cmap/comparable_map.go deleted file mode 100644 index cd9d4ce..0000000 --- a/forged/internal/cmap/comparable_map.go +++ /dev/null @@ -1,539 +0,0 @@ -// Inspired by github.com/SaveTheRbtz/generic-sync-map-go but technically -// written from scratch with Go 1.23's sync.Map. -// Copyright 2024 Runxi Yu (porting it to generics) -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.BSD file. 
- -package cmap - -import ( - "sync" - "sync/atomic" - "unsafe" -) - -// ComparableMap[K comparable, V comparable] is like a Go map[K]V but is safe for concurrent use -// by multiple goroutines without additional locking or coordination. Loads, -// stores, and deletes run in amortized constant time. -// -// The ComparableMap type is optimized for two common use cases: (1) when the comparableEntry for a given -// key is only ever written once but read many times, as in caches that only grow, -// or (2) when multiple goroutines read, write, and overwrite entries for disjoint -// sets of keys. In these two cases, use of a ComparableMap may significantly reduce lock -// contention compared to a Go map paired with a separate [Mutex] or [RWMutex]. -// -// The zero ComparableMap is empty and ready for use. A ComparableMap must not be copied after first use. -// -// In the terminology of [the Go memory model], ComparableMap arranges that a write operation -// “synchronizes before” any read operation that observes the effect of the write, where -// read and write operations are defined as follows. -// [ComparableMap.Load], [ComparableMap.LoadAndDelete], [ComparableMap.LoadOrStore], [ComparableMap.Swap], [ComparableMap.CompareAndSwap], -// and [ComparableMap.CompareAndDelete] are read operations; -// [ComparableMap.Delete], [ComparableMap.LoadAndDelete], [ComparableMap.Store], and [ComparableMap.Swap] are write operations; -// [ComparableMap.LoadOrStore] is a write operation when it returns loaded set to false; -// [ComparableMap.CompareAndSwap] is a write operation when it returns swapped set to true; -// and [ComparableMap.CompareAndDelete] is a write operation when it returns deleted set to true. -// -// [the Go memory model]: https://go.dev/ref/mem -type ComparableMap[K comparable, V comparable] struct { - mu sync.Mutex - - // read contains the portion of the map's contents that are safe for - // concurrent access (with or without mu held). 
- // - // The read field itself is always safe to load, but must only be stored with - // mu held. - // - // Entries stored in read may be updated concurrently without mu, but updating - // a previously-comparableExpunged comparableEntry requires that the comparableEntry be copied to the dirty - // map and uncomparableExpunged with mu held. - read atomic.Pointer[comparableReadOnly[K, V]] - - // dirty contains the portion of the map's contents that require mu to be - // held. To ensure that the dirty map can be promoted to the read map quickly, - // it also includes all of the non-comparableExpunged entries in the read map. - // - // Expunged entries are not stored in the dirty map. An comparableExpunged comparableEntry in the - // clean map must be uncomparableExpunged and added to the dirty map before a new value - // can be stored to it. - // - // If the dirty map is nil, the next write to the map will initialize it by - // making a shallow copy of the clean map, omitting stale entries. - dirty map[K]*comparableEntry[V] - - // misses counts the number of loads since the read map was last updated that - // needed to lock mu to determine whether the key was present. - // - // Once enough misses have occurred to cover the cost of copying the dirty - // map, the dirty map will be promoted to the read map (in the unamended - // state) and the next store to the map will make a new dirty copy. - misses int -} - -// comparableReadOnly is an immutable struct stored atomically in the ComparableMap.read field. -type comparableReadOnly[K comparable, V comparable] struct { - m map[K]*comparableEntry[V] - amended bool // true if the dirty map contains some key not in m. -} - -// comparableExpunged is an arbitrary pointer that marks entries which have been deleted -// from the dirty map. -var comparableExpunged = unsafe.Pointer(new(any)) - -// An comparableEntry is a slot in the map corresponding to a particular key. 
-type comparableEntry[V comparable] struct { - // p points to the value stored for the comparableEntry. - // - // If p == nil, the comparableEntry has been deleted, and either m.dirty == nil or - // m.dirty[key] is e. - // - // If p == comparableExpunged, the comparableEntry has been deleted, m.dirty != nil, and the comparableEntry - // is missing from m.dirty. - // - // Otherwise, the comparableEntry is valid and recorded in m.read.m[key] and, if m.dirty - // != nil, in m.dirty[key]. - // - // An comparableEntry can be deleted by atomic replacement with nil: when m.dirty is - // next created, it will atomically replace nil with comparableExpunged and leave - // m.dirty[key] unset. - // - // An comparableEntry's associated value can be updated by atomic replacement, provided - // p != comparableExpunged. If p == comparableExpunged, an comparableEntry's associated value can be updated - // only after first setting m.dirty[key] = e so that lookups using the dirty - // map find the comparableEntry. - p unsafe.Pointer -} - -func newComparableEntry[V comparable](i V) *comparableEntry[V] { - return &comparableEntry[V]{p: unsafe.Pointer(&i)} -} - -func (m *ComparableMap[K, V]) loadReadOnly() comparableReadOnly[K, V] { - if p := m.read.Load(); p != nil { - return *p - } - return comparableReadOnly[K, V]{} -} - -// Load returns the value stored in the map for a key, or nil if no -// value is present. -// The ok result indicates whether value was found in the map. -func (m *ComparableMap[K, V]) Load(key K) (value V, ok bool) { - read := m.loadReadOnly() - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - // Avoid reporting a spurious miss if m.dirty got promoted while we were - // blocked on m.mu. (If further loads of the same key will not miss, it's - // not worth copying the dirty map for this key.) 
- read = m.loadReadOnly() - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - // Regardless of whether the comparableEntry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if !ok { - return *new(V), false - } - return e.load() -} - -func (e *comparableEntry[V]) load() (value V, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == nil || p == comparableExpunged { - return value, false - } - return *(*V)(p), true -} - -// Store sets the value for a key. -func (m *ComparableMap[K, V]) Store(key K, value V) { - _, _ = m.Swap(key, value) -} - -// Clear deletes all the entries, resulting in an empty ComparableMap. -func (m *ComparableMap[K, V]) Clear() { - read := m.loadReadOnly() - if len(read.m) == 0 && !read.amended { - // Avoid allocating a new comparableReadOnly when the map is already clear. - return - } - - m.mu.Lock() - defer m.mu.Unlock() - - read = m.loadReadOnly() - if len(read.m) > 0 || read.amended { - m.read.Store(&comparableReadOnly[K, V]{}) - } - - clear(m.dirty) - // Don't immediately promote the newly-cleared dirty map on the next operation. - m.misses = 0 -} - -// tryCompareAndSwap compare the comparableEntry with the given old value and swaps -// it with a new value if the comparableEntry is equal to the old value, and the comparableEntry -// has not been comparableExpunged. -// -// If the comparableEntry is comparableExpunged, tryCompareAndSwap returns false and leaves -// the comparableEntry unchanged. -func (e *comparableEntry[V]) tryCompareAndSwap(old V, new V) bool { - p := atomic.LoadPointer(&e.p) - if p == nil || p == comparableExpunged || *(*V)(p) != old { // XXX - return false - } - - // Copy the pointer after the first load to make this method more amenable - // to escape analysis: if the comparison fails from the start, we shouldn't - // bother heap-allocating a pointer to store. 
- nc := new - for { - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(&nc)) { - return true - } - p = atomic.LoadPointer(&e.p) - if p == nil || p == comparableExpunged || *(*V)(p) != old { - return false - } - } -} - -// unexpungeLocked ensures that the comparableEntry is not marked as comparableExpunged. -// -// If the comparableEntry was previously comparableExpunged, it must be added to the dirty map -// before m.mu is unlocked. -func (e *comparableEntry[V]) unexpungeLocked() (wasExpunged bool) { - return atomic.CompareAndSwapPointer(&e.p, comparableExpunged, nil) -} - -// swapLocked unconditionally swaps a value into the comparableEntry. -// -// The comparableEntry must be known not to be comparableExpunged. -func (e *comparableEntry[V]) swapLocked(i *V) *V { - return (*V)(atomic.SwapPointer(&e.p, unsafe.Pointer(i))) -} - -// LoadOrStore returns the existing value for the key if present. -// Otherwise, it stores and returns the given value. -// The loaded result is true if the value was loaded, false if stored. -func (m *ComparableMap[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { - // Avoid locking if it's a clean hit. - read := m.loadReadOnly() - if e, ok := read.m[key]; ok { - actual, loaded, ok := e.tryLoadOrStore(value) - if ok { - return actual, loaded - } - } - - m.mu.Lock() - read = m.loadReadOnly() - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - m.dirty[key] = e - } - actual, loaded, _ = e.tryLoadOrStore(value) - } else if e, ok := m.dirty[key]; ok { - actual, loaded, _ = e.tryLoadOrStore(value) - m.missLocked() - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. 
- m.dirtyLocked() - m.read.Store(&comparableReadOnly[K, V]{m: read.m, amended: true}) - } - m.dirty[key] = newComparableEntry(value) - actual, loaded = value, false - } - m.mu.Unlock() - - return actual, loaded -} - -// tryLoadOrStore atomically loads or stores a value if the comparableEntry is not -// comparableExpunged. -// -// If the comparableEntry is comparableExpunged, tryLoadOrStore leaves the comparableEntry unchanged and -// returns with ok==false. -func (e *comparableEntry[V]) tryLoadOrStore(i V) (actual V, loaded, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == comparableExpunged { - return actual, false, false - } - if p != nil { - return *(*V)(p), true, true - } - - // Copy the pointer after the first load to make this method more amenable - // to escape analysis: if we hit the "load" path or the comparableEntry is comparableExpunged, we - // shouldn't bother heap-allocating. - ic := i - for { - if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { - return i, false, true - } - p = atomic.LoadPointer(&e.p) - if p == comparableExpunged { - return actual, false, false - } - if p != nil { - return *(*V)(p), true, true - } - } -} - -// LoadAndDelete deletes the value for a key, returning the previous value if any. -// The loaded result reports whether the key was present. -func (m *ComparableMap[K, V]) LoadAndDelete(key K) (value V, loaded bool) { - read := m.loadReadOnly() - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - read = m.loadReadOnly() - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - delete(m.dirty, key) - // Regardless of whether the comparableEntry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if ok { - return e.delete() - } - return value, false -} - -// Delete deletes the value for a key. 
-func (m *ComparableMap[K, V]) Delete(key K) { - m.LoadAndDelete(key) -} - -func (e *comparableEntry[V]) delete() (value V, ok bool) { - for { - p := atomic.LoadPointer(&e.p) - if p == nil || p == comparableExpunged { - return value, false - } - if atomic.CompareAndSwapPointer(&e.p, p, nil) { - return *(*V)(p), true - } - } -} - -// trySwap swaps a value if the comparableEntry has not been comparableExpunged. -// -// If the comparableEntry is comparableExpunged, trySwap returns false and leaves the comparableEntry -// unchanged. -func (e *comparableEntry[V]) trySwap(i *V) (*V, bool) { - for { - p := atomic.LoadPointer(&e.p) - if p == comparableExpunged { - return nil, false - } - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { - return (*V)(p), true - } - } -} - -// Swap swaps the value for a key and returns the previous value if any. -// The loaded result reports whether the key was present. -func (m *ComparableMap[K, V]) Swap(key K, value V) (previous V, loaded bool) { - read := m.loadReadOnly() - if e, ok := read.m[key]; ok { - if v, ok := e.trySwap(&value); ok { - if v == nil { - return previous, false - } - return *v, true - } - } - - m.mu.Lock() - read = m.loadReadOnly() - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - // The comparableEntry was previously comparableExpunged, which implies that there is a - // non-nil dirty map and this comparableEntry is not in it. - m.dirty[key] = e - } - if v := e.swapLocked(&value); v != nil { - loaded = true - previous = *v - } - } else if e, ok := m.dirty[key]; ok { - if v := e.swapLocked(&value); v != nil { - loaded = true - previous = *v - } - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. 
- m.dirtyLocked() - m.read.Store(&comparableReadOnly[K, V]{m: read.m, amended: true}) - } - m.dirty[key] = newComparableEntry(value) - } - m.mu.Unlock() - return previous, loaded -} - -// CompareAndSwap swaps the old and new values for key -// if the value stored in the map is equal to old. -// The old value must be of a comparable type. -func (m *ComparableMap[K, V]) CompareAndSwap(key K, old, new V) (swapped bool) { - read := m.loadReadOnly() - if e, ok := read.m[key]; ok { - return e.tryCompareAndSwap(old, new) - } else if !read.amended { - return false // No existing value for key. - } - - m.mu.Lock() - defer m.mu.Unlock() - read = m.loadReadOnly() - swapped = false - if e, ok := read.m[key]; ok { - swapped = e.tryCompareAndSwap(old, new) - } else if e, ok := m.dirty[key]; ok { - swapped = e.tryCompareAndSwap(old, new) - // We needed to lock mu in order to load the comparableEntry for key, - // and the operation didn't change the set of keys in the map - // (so it would be made more efficient by promoting the dirty - // map to read-only). - // Count it as a miss so that we will eventually switch to the - // more efficient steady state. - m.missLocked() - } - return swapped -} - -// CompareAndDelete deletes the comparableEntry for key if its value is equal to old. -// The old value must be of a comparable type. -// -// If there is no current value for key in the map, CompareAndDelete -// returns false (even if the old value is a nil pointer). -func (m *ComparableMap[K, V]) CompareAndDelete(key K, old V) (deleted bool) { - read := m.loadReadOnly() - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - read = m.loadReadOnly() - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - // Don't delete key from m.dirty: we still need to do the “compare” part - // of the operation. The comparableEntry will eventually be comparableExpunged when the - // dirty map is promoted to the read map. 
- // - // Regardless of whether the comparableEntry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - for ok { - p := atomic.LoadPointer(&e.p) - if p == nil || p == comparableExpunged || *(*V)(p) != old { - return false - } - if atomic.CompareAndSwapPointer(&e.p, p, nil) { - return true - } - } - return false -} - -// Range calls f sequentially for each key and value present in the map. -// If f returns false, range stops the iteration. -// -// Range does not necessarily correspond to any consistent snapshot of the ComparableMap's -// contents: no key will be visited more than once, but if the value for any key -// is stored or deleted concurrently (including by f), Range may reflect any -// mapping for that key from any point during the Range call. Range does not -// block other methods on the receiver; even f itself may call any method on m. -// -// Range may be O(N) with the number of elements in the map even if f returns -// false after a constant number of calls. -func (m *ComparableMap[K, V]) Range(f func(key K, value V) bool) { - // We need to be able to iterate over all of the keys that were already - // present at the start of the call to Range. - // If read.amended is false, then read.m satisfies that property without - // requiring us to hold m.mu for a long time. - read := m.loadReadOnly() - if read.amended { - // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) - // (assuming the caller does not break out early), so a call to Range - // amortizes an entire copy of the map: we can promote the dirty copy - // immediately! 
- m.mu.Lock() - read = m.loadReadOnly() - if read.amended { - read = comparableReadOnly[K, V]{m: m.dirty} - copyRead := read - m.read.Store(©Read) - m.dirty = nil - m.misses = 0 - } - m.mu.Unlock() - } - - for k, e := range read.m { - v, ok := e.load() - if !ok { - continue - } - if !f(k, v) { - break - } - } -} - -func (m *ComparableMap[K, V]) missLocked() { - m.misses++ - if m.misses < len(m.dirty) { - return - } - m.read.Store(&comparableReadOnly[K, V]{m: m.dirty}) - m.dirty = nil - m.misses = 0 -} - -func (m *ComparableMap[K, V]) dirtyLocked() { - if m.dirty != nil { - return - } - - read := m.loadReadOnly() - m.dirty = make(map[K]*comparableEntry[V], len(read.m)) - for k, e := range read.m { - if !e.tryExpungeLocked() { - m.dirty[k] = e - } - } -} - -func (e *comparableEntry[V]) tryExpungeLocked() (isExpunged bool) { - p := atomic.LoadPointer(&e.p) - for p == nil { - if atomic.CompareAndSwapPointer(&e.p, nil, comparableExpunged) { - return true - } - p = atomic.LoadPointer(&e.p) - } - return p == comparableExpunged -} diff --git a/forged/internal/cmap/map.go b/forged/internal/cmap/map.go deleted file mode 100644 index 4f43627..0000000 --- a/forged/internal/cmap/map.go +++ /dev/null @@ -1,446 +0,0 @@ -// Inspired by github.com/SaveTheRbtz/generic-sync-map-go but technically -// written from scratch with Go 1.23's sync.Map. -// Copyright 2024 Runxi Yu (porting it to generics) -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.BSD file. - -// Package cmap provides a generic Map safe for concurrent use. -package cmap - -import ( - "sync" - "sync/atomic" - "unsafe" -) - -// Map[K comparable, V any] is like a Go map[K]V but is safe for concurrent use -// by multiple goroutines without additional locking or coordination. Loads, -// stores, and deletes run in amortized constant time. 
-// -// The Map type is optimized for two common use cases: (1) when the entry for a given -// key is only ever written once but read many times, as in caches that only grow, -// or (2) when multiple goroutines read, write, and overwrite entries for disjoint -// sets of keys. In these two cases, use of a Map may significantly reduce lock -// contention compared to a Go map paired with a separate [Mutex] or [RWMutex]. -// -// The zero Map is empty and ready for use. A Map must not be copied after first use. -// -// In the terminology of [the Go memory model], Map arranges that a write operation -// “synchronizes before” any read operation that observes the effect of the write, where -// read and write operations are defined as follows. -// [Map.Load], [Map.LoadAndDelete], [Map.LoadOrStore], [Map.Swap], [Map.CompareAndSwap], -// and [Map.CompareAndDelete] are read operations; -// [Map.Delete], [Map.LoadAndDelete], [Map.Store], and [Map.Swap] are write operations; -// [Map.LoadOrStore] is a write operation when it returns loaded set to false; -// [Map.CompareAndSwap] is a write operation when it returns swapped set to true; -// and [Map.CompareAndDelete] is a write operation when it returns deleted set to true. -// -// [the Go memory model]: https://go.dev/ref/mem -type Map[K comparable, V any] struct { - mu sync.Mutex - - // read contains the portion of the map's contents that are safe for - // concurrent access (with or without mu held). - // - // The read field itself is always safe to load, but must only be stored with - // mu held. - // - // Entries stored in read may be updated concurrently without mu, but updating - // a previously-expunged entry requires that the entry be copied to the dirty - // map and unexpunged with mu held. - read atomic.Pointer[readOnly[K, V]] - - // dirty contains the portion of the map's contents that require mu to be - // held. 
To ensure that the dirty map can be promoted to the read map quickly, - // it also includes all of the non-expunged entries in the read map. - // - // Expunged entries are not stored in the dirty map. An expunged entry in the - // clean map must be unexpunged and added to the dirty map before a new value - // can be stored to it. - // - // If the dirty map is nil, the next write to the map will initialize it by - // making a shallow copy of the clean map, omitting stale entries. - dirty map[K]*entry[V] - - // misses counts the number of loads since the read map was last updated that - // needed to lock mu to determine whether the key was present. - // - // Once enough misses have occurred to cover the cost of copying the dirty - // map, the dirty map will be promoted to the read map (in the unamended - // state) and the next store to the map will make a new dirty copy. - misses int -} - -// readOnly is an immutable struct stored atomically in the Map.read field. -type readOnly[K comparable, V any] struct { - m map[K]*entry[V] - amended bool // true if the dirty map contains some key not in m. -} - -// expunged is an arbitrary pointer that marks entries which have been deleted -// from the dirty map. -var expunged = unsafe.Pointer(new(any)) - -// An entry is a slot in the map corresponding to a particular key. -type entry[V any] struct { - // p points to the value stored for the entry. - // - // If p == nil, the entry has been deleted, and either m.dirty == nil or - // m.dirty[key] is e. - // - // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry - // is missing from m.dirty. - // - // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty - // != nil, in m.dirty[key]. - // - // An entry can be deleted by atomic replacement with nil: when m.dirty is - // next created, it will atomically replace nil with expunged and leave - // m.dirty[key] unset. 
- // - // An entry's associated value can be updated by atomic replacement, provided - // p != expunged. If p == expunged, an entry's associated value can be updated - // only after first setting m.dirty[key] = e so that lookups using the dirty - // map find the entry. - p unsafe.Pointer -} - -func newEntry[V any](i V) *entry[V] { - return &entry[V]{p: unsafe.Pointer(&i)} -} - -func (m *Map[K, V]) loadReadOnly() readOnly[K, V] { - if p := m.read.Load(); p != nil { - return *p - } - return readOnly[K, V]{} -} - -// Load returns the value stored in the map for a key, or nil if no -// value is present. -// The ok result indicates whether value was found in the map. -func (m *Map[K, V]) Load(key K) (value V, ok bool) { - read := m.loadReadOnly() - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - // Avoid reporting a spurious miss if m.dirty got promoted while we were - // blocked on m.mu. (If further loads of the same key will not miss, it's - // not worth copying the dirty map for this key.) - read = m.loadReadOnly() - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if !ok { - return *new(V), false - } - return e.load() -} - -func (e *entry[V]) load() (value V, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expunged { - return value, false - } - return *(*V)(p), true -} - -// Store sets the value for a key. -func (m *Map[K, V]) Store(key K, value V) { - _, _ = m.Swap(key, value) -} - -// Clear deletes all the entries, resulting in an empty Map. -func (m *Map[K, V]) Clear() { - read := m.loadReadOnly() - if len(read.m) == 0 && !read.amended { - // Avoid allocating a new readOnly when the map is already clear. 
- return - } - - m.mu.Lock() - defer m.mu.Unlock() - - read = m.loadReadOnly() - if len(read.m) > 0 || read.amended { - m.read.Store(&readOnly[K, V]{}) - } - - clear(m.dirty) - // Don't immediately promote the newly-cleared dirty map on the next operation. - m.misses = 0 -} - -// unexpungeLocked ensures that the entry is not marked as expunged. -// -// If the entry was previously expunged, it must be added to the dirty map -// before m.mu is unlocked. -func (e *entry[V]) unexpungeLocked() (wasExpunged bool) { - return atomic.CompareAndSwapPointer(&e.p, expunged, nil) -} - -// swapLocked unconditionally swaps a value into the entry. -// -// The entry must be known not to be expunged. -func (e *entry[V]) swapLocked(i *V) *V { - return (*V)(atomic.SwapPointer(&e.p, unsafe.Pointer(i))) -} - -// LoadOrStore returns the existing value for the key if present. -// Otherwise, it stores and returns the given value. -// The loaded result is true if the value was loaded, false if stored. -func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { - // Avoid locking if it's a clean hit. - read := m.loadReadOnly() - if e, ok := read.m[key]; ok { - actual, loaded, ok := e.tryLoadOrStore(value) - if ok { - return actual, loaded - } - } - - m.mu.Lock() - read = m.loadReadOnly() - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - m.dirty[key] = e - } - actual, loaded, _ = e.tryLoadOrStore(value) - } else if e, ok := m.dirty[key]; ok { - actual, loaded, _ = e.tryLoadOrStore(value) - m.missLocked() - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(&readOnly[K, V]{m: read.m, amended: true}) - } - m.dirty[key] = newEntry(value) - actual, loaded = value, false - } - m.mu.Unlock() - - return actual, loaded -} - -// tryLoadOrStore atomically loads or stores a value if the entry is not -// expunged. 
-// -// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and -// returns with ok==false. -func (e *entry[V]) tryLoadOrStore(i V) (actual V, loaded, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == expunged { - return actual, false, false - } - if p != nil { - return *(*V)(p), true, true - } - - // Copy the pointer after the first load to make this method more amenable - // to escape analysis: if we hit the "load" path or the entry is expunged, we - // shouldn't bother heap-allocating. - ic := i - for { - if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { - return i, false, true - } - p = atomic.LoadPointer(&e.p) - if p == expunged { - return actual, false, false - } - if p != nil { - return *(*V)(p), true, true - } - } -} - -// LoadAndDelete deletes the value for a key, returning the previous value if any. -// The loaded result reports whether the key was present. -func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) { - read := m.loadReadOnly() - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - read = m.loadReadOnly() - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - delete(m.dirty, key) - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if ok { - return e.delete() - } - return value, false -} - -// Delete deletes the value for a key. -func (m *Map[K, V]) Delete(key K) { - m.LoadAndDelete(key) -} - -func (e *entry[V]) delete() (value V, ok bool) { - for { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expunged { - return value, false - } - if atomic.CompareAndSwapPointer(&e.p, p, nil) { - return *(*V)(p), true - } - } -} - -// trySwap swaps a value if the entry has not been expunged. -// -// If the entry is expunged, trySwap returns false and leaves the entry -// unchanged. 
-func (e *entry[V]) trySwap(i *V) (*V, bool) { - for { - p := atomic.LoadPointer(&e.p) - if p == expunged { - return nil, false - } - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { - return (*V)(p), true - } - } -} - -// Swap swaps the value for a key and returns the previous value if any. -// The loaded result reports whether the key was present. -func (m *Map[K, V]) Swap(key K, value V) (previous V, loaded bool) { - read := m.loadReadOnly() - if e, ok := read.m[key]; ok { - if v, ok := e.trySwap(&value); ok { - if v == nil { - return previous, false - } - return *v, true - } - } - - m.mu.Lock() - read = m.loadReadOnly() - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - // The entry was previously expunged, which implies that there is a - // non-nil dirty map and this entry is not in it. - m.dirty[key] = e - } - if v := e.swapLocked(&value); v != nil { - loaded = true - previous = *v - } - } else if e, ok := m.dirty[key]; ok { - if v := e.swapLocked(&value); v != nil { - loaded = true - previous = *v - } - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(&readOnly[K, V]{m: read.m, amended: true}) - } - m.dirty[key] = newEntry(value) - } - m.mu.Unlock() - return previous, loaded -} - -// Range calls f sequentially for each key and value present in the map. -// If f returns false, range stops the iteration. -// -// Range does not necessarily correspond to any consistent snapshot of the Map's -// contents: no key will be visited more than once, but if the value for any key -// is stored or deleted concurrently (including by f), Range may reflect any -// mapping for that key from any point during the Range call. Range does not -// block other methods on the receiver; even f itself may call any method on m. 
-// -// Range may be O(N) with the number of elements in the map even if f returns -// false after a constant number of calls. -func (m *Map[K, V]) Range(f func(key K, value V) bool) { - // We need to be able to iterate over all of the keys that were already - // present at the start of the call to Range. - // If read.amended is false, then read.m satisfies that property without - // requiring us to hold m.mu for a long time. - read := m.loadReadOnly() - if read.amended { - // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) - // (assuming the caller does not break out early), so a call to Range - // amortizes an entire copy of the map: we can promote the dirty copy - // immediately! - m.mu.Lock() - read = m.loadReadOnly() - if read.amended { - read = readOnly[K, V]{m: m.dirty} - copyRead := read - m.read.Store(©Read) - m.dirty = nil - m.misses = 0 - } - m.mu.Unlock() - } - - for k, e := range read.m { - v, ok := e.load() - if !ok { - continue - } - if !f(k, v) { - break - } - } -} - -func (m *Map[K, V]) missLocked() { - m.misses++ - if m.misses < len(m.dirty) { - return - } - m.read.Store(&readOnly[K, V]{m: m.dirty}) - m.dirty = nil - m.misses = 0 -} - -func (m *Map[K, V]) dirtyLocked() { - if m.dirty != nil { - return - } - - read := m.loadReadOnly() - m.dirty = make(map[K]*entry[V], len(read.m)) - for k, e := range read.m { - if !e.tryExpungeLocked() { - m.dirty[k] = e - } - } -} - -func (e *entry[V]) tryExpungeLocked() (isExpunged bool) { - p := atomic.LoadPointer(&e.p) - for p == nil { - if atomic.CompareAndSwapPointer(&e.p, nil, expunged) { - return true - } - p = atomic.LoadPointer(&e.p) - } - return p == expunged -} diff --git a/forged/internal/common/ansiec/colors.go b/forged/internal/common/ansiec/colors.go new file mode 100644 index 0000000..8be2a0c --- /dev/null +++ b/forged/internal/common/ansiec/colors.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + 
+package ansiec + +// ANSI color codes +const ( + Black = "\x1b[30m" + Red = "\x1b[31m" + Green = "\x1b[32m" + Yellow = "\x1b[33m" + Blue = "\x1b[34m" + Magenta = "\x1b[35m" + Cyan = "\x1b[36m" + White = "\x1b[37m" + BrightBlack = "\x1b[30;1m" + BrightRed = "\x1b[31;1m" + BrightGreen = "\x1b[32;1m" + BrightYellow = "\x1b[33;1m" + BrightBlue = "\x1b[34;1m" + BrightMagenta = "\x1b[35;1m" + BrightCyan = "\x1b[36;1m" + BrightWhite = "\x1b[37;1m" +) diff --git a/forged/internal/common/ansiec/doc.go b/forged/internal/common/ansiec/doc.go new file mode 100644 index 0000000..542c564 --- /dev/null +++ b/forged/internal/common/ansiec/doc.go @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +// Package ansiec provides definitions for ANSI escape sequences. +package ansiec diff --git a/forged/internal/common/ansiec/reset.go b/forged/internal/common/ansiec/reset.go new file mode 100644 index 0000000..51bb312 --- /dev/null +++ b/forged/internal/common/ansiec/reset.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +package ansiec + +// Reset the colors and styles +const Reset = "\x1b[0m" diff --git a/forged/internal/common/ansiec/style.go b/forged/internal/common/ansiec/style.go new file mode 100644 index 0000000..95edbbe --- /dev/null +++ b/forged/internal/common/ansiec/style.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +package ansiec + +// ANSI text styles +const ( + Bold = "\x1b[1m" + Underline = "\x1b[4m" + Reversed = "\x1b[7m" + Italic = "\x1b[3m" +) diff --git a/forged/internal/common/argon2id/LICENSE b/forged/internal/common/argon2id/LICENSE new file mode 100644 index 0000000..3649823 --- /dev/null +++ b/forged/internal/common/argon2id/LICENSE @@ -0,0 +1,18 @@ +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and 
associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/forged/internal/common/argon2id/argon2id.go b/forged/internal/common/argon2id/argon2id.go new file mode 100644 index 0000000..88df8f6 --- /dev/null +++ b/forged/internal/common/argon2id/argon2id.go @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: Copyright (c) 2018 Alex Edwards + +// Package argon2id provides a wrapper around Go's golang.org/x/crypto/argon2. +package argon2id + +import ( + "crypto/rand" + "crypto/subtle" + "encoding/base64" + "errors" + "fmt" + "runtime" + "strings" + + "golang.org/x/crypto/argon2" +) + +var ( + // ErrInvalidHash in returned by ComparePasswordAndHash if the provided + // hash isn't in the expected format. + ErrInvalidHash = errors.New("argon2id: hash is not in the correct format") + + // ErrIncompatibleVariant is returned by ComparePasswordAndHash if the + // provided hash was created using a unsupported variant of Argon2. + // Currently only argon2id is supported by this package. 
+ ErrIncompatibleVariant = errors.New("argon2id: incompatible variant of argon2") + + // ErrIncompatibleVersion is returned by ComparePasswordAndHash if the + // provided hash was created using a different version of Argon2. + ErrIncompatibleVersion = errors.New("argon2id: incompatible version of argon2") +) + +// DefaultParams provides some sane default parameters for hashing passwords. +// +// Follows recommendations given by the Argon2 RFC: +// "The Argon2id variant with t=1 and maximum available memory is RECOMMENDED as a +// default setting for all environments. This setting is secure against side-channel +// attacks and maximizes adversarial costs on dedicated bruteforce hardware."" +// +// The default parameters should generally be used for development/testing purposes +// only. Custom parameters should be set for production applications depending on +// available memory/CPU resources and business requirements. +var DefaultParams = &Params{ + Memory: 64 * 1024, + Iterations: 1, + Parallelism: uint8(runtime.NumCPU()), + SaltLength: 16, + KeyLength: 32, +} + +// Params describes the input parameters used by the Argon2id algorithm. The +// Memory and Iterations parameters control the computational cost of hashing +// the password. The higher these figures are, the greater the cost of generating +// the hash and the longer the runtime. It also follows that the greater the cost +// will be for any attacker trying to guess the password. If the code is running +// on a machine with multiple cores, then you can decrease the runtime without +// reducing the cost by increasing the Parallelism parameter. This controls the +// number of threads that the work is spread across. Important note: Changing the +// value of the Parallelism parameter changes the hash output. 
+// +// For guidance and an outline process for choosing appropriate parameters see +// https://tools.ietf.org/html/draft-irtf-cfrg-argon2-04#section-4 +type Params struct { + // The amount of memory used by the algorithm (in kibibytes). + Memory uint32 + + // The number of iterations over the memory. + Iterations uint32 + + // The number of threads (or lanes) used by the algorithm. + // Recommended value is between 1 and runtime.NumCPU(). + Parallelism uint8 + + // Length of the random salt. 16 bytes is recommended for password hashing. + SaltLength uint32 + + // Length of the generated key. 16 bytes or more is recommended. + KeyLength uint32 +} + +// CreateHash returns an Argon2id hash of a plain-text password using the +// provided algorithm parameters. The returned hash follows the format used by +// the Argon2 reference C implementation and contains the base64-encoded Argon2id d +// derived key prefixed by the salt and parameters. It looks like this: +// +// $argon2id$v=19$m=65536,t=3,p=2$c29tZXNhbHQ$RdescudvJCsgt3ub+b+dWRWJTmaaJObG +func CreateHash(password string, params *Params) (hash string, err error) { + salt, err := generateRandomBytes(params.SaltLength) + if err != nil { + return "", err + } + + key := argon2.IDKey([]byte(password), salt, params.Iterations, params.Memory, params.Parallelism, params.KeyLength) + + b64Salt := base64.RawStdEncoding.EncodeToString(salt) + b64Key := base64.RawStdEncoding.EncodeToString(key) + + hash = fmt.Sprintf("$argon2id$v=%d$m=%d,t=%d,p=%d$%s$%s", argon2.Version, params.Memory, params.Iterations, params.Parallelism, b64Salt, b64Key) + return hash, nil +} + +// ComparePasswordAndHash performs a constant-time comparison between a +// plain-text password and Argon2id hash, using the parameters and salt +// contained in the hash. It returns true if they match, otherwise it returns +// false. 
+func ComparePasswordAndHash(password, hash string) (match bool, err error) { + match, _, err = CheckHash(password, hash) + return match, err +} + +// CheckHash is like ComparePasswordAndHash, except it also returns the params that the hash was +// created with. This can be useful if you want to update your hash params over time (which you +// should). +func CheckHash(password, hash string) (match bool, params *Params, err error) { + params, salt, key, err := DecodeHash(hash) + if err != nil { + return false, nil, err + } + + otherKey := argon2.IDKey([]byte(password), salt, params.Iterations, params.Memory, params.Parallelism, params.KeyLength) + + keyLen := int32(len(key)) + otherKeyLen := int32(len(otherKey)) + + if subtle.ConstantTimeEq(keyLen, otherKeyLen) == 0 { + return false, params, nil + } + if subtle.ConstantTimeCompare(key, otherKey) == 1 { + return true, params, nil + } + return false, params, nil +} + +func generateRandomBytes(n uint32) ([]byte, error) { + b := make([]byte, n) + _, err := rand.Read(b) + if err != nil { + return nil, err + } + + return b, nil +} + +// DecodeHash expects a hash created from this package, and parses it to return the params used to +// create it, as well as the salt and key (password hash). 
+func DecodeHash(hash string) (params *Params, salt, key []byte, err error) { + vals := strings.Split(hash, "$") + if len(vals) != 6 { + return nil, nil, nil, ErrInvalidHash + } + + if vals[1] != "argon2id" { + return nil, nil, nil, ErrIncompatibleVariant + } + + var version int + _, err = fmt.Sscanf(vals[2], "v=%d", &version) + if err != nil { + return nil, nil, nil, err + } + if version != argon2.Version { + return nil, nil, nil, ErrIncompatibleVersion + } + + params = &Params{} + _, err = fmt.Sscanf(vals[3], "m=%d,t=%d,p=%d", ¶ms.Memory, ¶ms.Iterations, ¶ms.Parallelism) + if err != nil { + return nil, nil, nil, err + } + + salt, err = base64.RawStdEncoding.Strict().DecodeString(vals[4]) + if err != nil { + return nil, nil, nil, err + } + params.SaltLength = uint32(len(salt)) + + key, err = base64.RawStdEncoding.Strict().DecodeString(vals[5]) + if err != nil { + return nil, nil, nil, err + } + params.KeyLength = uint32(len(key)) + + return params, salt, key, nil +} diff --git a/forged/internal/common/bare/LICENSE b/forged/internal/common/bare/LICENSE new file mode 100644 index 0000000..6b0b127 --- /dev/null +++ b/forged/internal/common/bare/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/forged/internal/common/bare/doc.go b/forged/internal/common/bare/doc.go new file mode 100644 index 0000000..2f12f55 --- /dev/null +++ b/forged/internal/common/bare/doc.go @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +// Package bare provides primitives to encode and decode BARE messages. +// +// There is no guarantee that this is compatible with the upstream +// implementation at https://git.sr.ht/~sircmpwn/go-bare. 
+package bare diff --git a/forged/internal/common/bare/errors.go b/forged/internal/common/bare/errors.go new file mode 100644 index 0000000..39c951a --- /dev/null +++ b/forged/internal/common/bare/errors.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault + +package bare + +import ( + "errors" + "fmt" + "reflect" +) + +var ErrInvalidStr = errors.New("String contains invalid UTF-8 sequences") + +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return fmt.Sprintf("Unsupported type for marshaling: %s\n", e.Type.String()) +} diff --git a/forged/internal/common/bare/limit.go b/forged/internal/common/bare/limit.go new file mode 100644 index 0000000..212bc05 --- /dev/null +++ b/forged/internal/common/bare/limit.go @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault + +package bare + +import ( + "errors" + "io" +) + +var ( + maxUnmarshalBytes uint64 = 1024 * 1024 * 32 /* 32 MiB */ + maxArrayLength uint64 = 1024 * 4 /* 4096 elements */ + maxMapSize uint64 = 1024 +) + +// MaxUnmarshalBytes sets the maximum size of a message decoded by unmarshal. +// By default, this is set to 32 MiB. +func MaxUnmarshalBytes(bytes uint64) { + maxUnmarshalBytes = bytes +} + +// MaxArrayLength sets maximum number of elements in array. Defaults to 4096 elements +func MaxArrayLength(length uint64) { + maxArrayLength = length +} + +// MaxMapSize sets maximum size of map. Defaults to 1024 key/value pairs +func MaxMapSize(size uint64) { + maxMapSize = size +} + +// Use MaxUnmarshalBytes to prevent this error from occuring on messages which +// are large by design. +var ErrLimitExceeded = errors.New("Maximum message size exceeded") + +// Identical to io.LimitedReader, except it returns our custom error instead of +// EOF if the limit is reached. 
+type limitedReader struct { + R io.Reader + N uint64 +} + +func (l *limitedReader) Read(p []byte) (n int, err error) { + if l.N <= 0 { + return 0, ErrLimitExceeded + } + if uint64(len(p)) > l.N { + p = p[0:l.N] + } + n, err = l.R.Read(p) + l.N -= uint64(n) + return +} + +func newLimitedReader(r io.Reader) *limitedReader { + return &limitedReader{r, maxUnmarshalBytes} +} diff --git a/forged/internal/common/bare/marshal.go b/forged/internal/common/bare/marshal.go new file mode 100644 index 0000000..1ce942d --- /dev/null +++ b/forged/internal/common/bare/marshal.go @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault + +package bare + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sync" +) + +// A type which implements this interface will be responsible for marshaling +// itself when encountered. +type Marshalable interface { + Marshal(w *Writer) error +} + +var encoderBufferPool = sync.Pool{ + New: func() interface{} { + buf := &bytes.Buffer{} + buf.Grow(32) + return buf + }, +} + +// Marshals a value (val, which must be a pointer) into a BARE message. +// +// The encoding of each struct field can be customized by the format string +// stored under the "bare" key in the struct field's tag. +// +// As a special case, if the field tag is "-", the field is always omitted. +func Marshal(val interface{}) ([]byte, error) { + // reuse buffers from previous serializations + b := encoderBufferPool.Get().(*bytes.Buffer) + defer func() { + b.Reset() + encoderBufferPool.Put(b) + }() + + w := NewWriter(b) + err := MarshalWriter(w, val) + + msg := make([]byte, b.Len()) + copy(msg, b.Bytes()) + + return msg, err +} + +// Marshals a value (val, which must be a pointer) into a BARE message and +// writes it to a Writer. See Marshal for details. 
+func MarshalWriter(w *Writer, val interface{}) error { + t := reflect.TypeOf(val) + v := reflect.ValueOf(val) + if t.Kind() != reflect.Ptr { + return errors.New("Expected val to be pointer type") + } + + return getEncoder(t.Elem())(w, v.Elem()) +} + +type encodeFunc func(w *Writer, v reflect.Value) error + +var encodeFuncCache sync.Map // map[reflect.Type]encodeFunc + +// get decoder from cache +func getEncoder(t reflect.Type) encodeFunc { + if f, ok := encodeFuncCache.Load(t); ok { + return f.(encodeFunc) + } + + f := encoderFunc(t) + encodeFuncCache.Store(t, f) + return f +} + +var marshalableInterface = reflect.TypeOf((*Unmarshalable)(nil)).Elem() + +func encoderFunc(t reflect.Type) encodeFunc { + if reflect.PointerTo(t).Implements(marshalableInterface) { + return func(w *Writer, v reflect.Value) error { + uv := v.Addr().Interface().(Marshalable) + return uv.Marshal(w) + } + } + + if t.Kind() == reflect.Interface && t.Implements(unionInterface) { + return encodeUnion(t) + } + + switch t.Kind() { + case reflect.Ptr: + return encodeOptional(t.Elem()) + case reflect.Struct: + return encodeStruct(t) + case reflect.Array: + return encodeArray(t) + case reflect.Slice: + return encodeSlice(t) + case reflect.Map: + return encodeMap(t) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return encodeUint + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return encodeInt + case reflect.Float32, reflect.Float64: + return encodeFloat + case reflect.Bool: + return encodeBool + case reflect.String: + return encodeString + } + + return func(w *Writer, v reflect.Value) error { + return &UnsupportedTypeError{v.Type()} + } +} + +func encodeOptional(t reflect.Type) encodeFunc { + return func(w *Writer, v reflect.Value) error { + if v.IsNil() { + return w.WriteBool(false) + } + + if err := w.WriteBool(true); err != nil { + return err + } + + return getEncoder(t)(w, v.Elem()) + } +} + +func encodeStruct(t 
reflect.Type) encodeFunc { + n := t.NumField() + encoders := make([]encodeFunc, n) + for i := 0; i < n; i++ { + field := t.Field(i) + if field.Tag.Get("bare") == "-" { + continue + } + encoders[i] = getEncoder(field.Type) + } + + return func(w *Writer, v reflect.Value) error { + for i := 0; i < n; i++ { + if encoders[i] == nil { + continue + } + err := encoders[i](w, v.Field(i)) + if err != nil { + return err + } + } + return nil + } +} + +func encodeArray(t reflect.Type) encodeFunc { + f := getEncoder(t.Elem()) + len := t.Len() + + return func(w *Writer, v reflect.Value) error { + for i := 0; i < len; i++ { + if err := f(w, v.Index(i)); err != nil { + return err + } + } + return nil + } +} + +func encodeSlice(t reflect.Type) encodeFunc { + elem := t.Elem() + f := getEncoder(elem) + + return func(w *Writer, v reflect.Value) error { + if err := w.WriteUint(uint64(v.Len())); err != nil { + return err + } + + for i := 0; i < v.Len(); i++ { + if err := f(w, v.Index(i)); err != nil { + return err + } + } + return nil + } +} + +func encodeMap(t reflect.Type) encodeFunc { + keyType := t.Key() + keyf := getEncoder(keyType) + + valueType := t.Elem() + valf := getEncoder(valueType) + + return func(w *Writer, v reflect.Value) error { + if err := w.WriteUint(uint64(v.Len())); err != nil { + return err + } + + iter := v.MapRange() + for iter.Next() { + if err := keyf(w, iter.Key()); err != nil { + return err + } + if err := valf(w, iter.Value()); err != nil { + return err + } + } + return nil + } +} + +func encodeUnion(t reflect.Type) encodeFunc { + ut, ok := unionRegistry[t] + if !ok { + return func(w *Writer, v reflect.Value) error { + return fmt.Errorf("Union type %s is not registered", t.Name()) + } + } + + encoders := make(map[uint64]encodeFunc) + for tag, t := range ut.types { + encoders[tag] = getEncoder(t) + } + + return func(w *Writer, v reflect.Value) error { + t := v.Elem().Type() + if t.Kind() == reflect.Ptr { + // If T is a valid union value type, *T is valid too. 
+ t = t.Elem() + v = v.Elem() + } + tag, ok := ut.tags[t] + if !ok { + return fmt.Errorf("Invalid union value: %s", v.Elem().String()) + } + + if err := w.WriteUint(tag); err != nil { + return err + } + + return encoders[tag](w, v.Elem()) + } +} + +func encodeUint(w *Writer, v reflect.Value) error { + switch getIntKind(v.Type()) { + case reflect.Uint: + return w.WriteUint(v.Uint()) + + case reflect.Uint8: + return w.WriteU8(uint8(v.Uint())) + + case reflect.Uint16: + return w.WriteU16(uint16(v.Uint())) + + case reflect.Uint32: + return w.WriteU32(uint32(v.Uint())) + + case reflect.Uint64: + return w.WriteU64(uint64(v.Uint())) + } + + panic("not uint") +} + +func encodeInt(w *Writer, v reflect.Value) error { + switch getIntKind(v.Type()) { + case reflect.Int: + return w.WriteInt(v.Int()) + + case reflect.Int8: + return w.WriteI8(int8(v.Int())) + + case reflect.Int16: + return w.WriteI16(int16(v.Int())) + + case reflect.Int32: + return w.WriteI32(int32(v.Int())) + + case reflect.Int64: + return w.WriteI64(int64(v.Int())) + } + + panic("not int") +} + +func encodeFloat(w *Writer, v reflect.Value) error { + switch v.Type().Kind() { + case reflect.Float32: + return w.WriteF32(float32(v.Float())) + case reflect.Float64: + return w.WriteF64(v.Float()) + } + + panic("not float") +} + +func encodeBool(w *Writer, v reflect.Value) error { + return w.WriteBool(v.Bool()) +} + +func encodeString(w *Writer, v reflect.Value) error { + if v.Kind() != reflect.String { + panic("not string") + } + return w.WriteString(v.String()) +} diff --git a/forged/internal/common/bare/reader.go b/forged/internal/common/bare/reader.go new file mode 100644 index 0000000..028a7aa --- /dev/null +++ b/forged/internal/common/bare/reader.go @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault + +package bare + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "unicode/utf8" + + 
"go.lindenii.runxiyu.org/forge/forged/internal/common/misc" +) + +type byteReader interface { + io.Reader + io.ByteReader +} + +// A Reader for BARE primitive types. +type Reader struct { + base byteReader + scratch [8]byte +} + +type simpleByteReader struct { + io.Reader + scratch [1]byte +} + +func (r simpleByteReader) ReadByte() (byte, error) { + // using reference type here saves us allocations + _, err := r.Read(r.scratch[:]) + return r.scratch[0], err +} + +// Returns a new BARE primitive reader wrapping the given io.Reader. +func NewReader(base io.Reader) *Reader { + br, ok := base.(byteReader) + if !ok { + br = simpleByteReader{Reader: base} + } + return &Reader{base: br} +} + +func (r *Reader) ReadUint() (uint64, error) { + x, err := binary.ReadUvarint(r.base) + if err != nil { + return x, err + } + return x, nil +} + +func (r *Reader) ReadU8() (uint8, error) { + return r.base.ReadByte() +} + +func (r *Reader) ReadU16() (uint16, error) { + var i uint16 + if _, err := io.ReadAtLeast(r.base, r.scratch[:2], 2); err != nil { + return i, err + } + return binary.LittleEndian.Uint16(r.scratch[:]), nil +} + +func (r *Reader) ReadU32() (uint32, error) { + var i uint32 + if _, err := io.ReadAtLeast(r.base, r.scratch[:4], 4); err != nil { + return i, err + } + return binary.LittleEndian.Uint32(r.scratch[:]), nil +} + +func (r *Reader) ReadU64() (uint64, error) { + var i uint64 + if _, err := io.ReadAtLeast(r.base, r.scratch[:8], 8); err != nil { + return i, err + } + return binary.LittleEndian.Uint64(r.scratch[:]), nil +} + +func (r *Reader) ReadInt() (int64, error) { + return binary.ReadVarint(r.base) +} + +func (r *Reader) ReadI8() (int8, error) { + b, err := r.base.ReadByte() + return int8(b), err +} + +func (r *Reader) ReadI16() (int16, error) { + var i int16 + if _, err := io.ReadAtLeast(r.base, r.scratch[:2], 2); err != nil { + return i, err + } + return int16(binary.LittleEndian.Uint16(r.scratch[:])), nil +} + +func (r *Reader) ReadI32() (int32, error) { + var 
i int32 + if _, err := io.ReadAtLeast(r.base, r.scratch[:4], 4); err != nil { + return i, err + } + return int32(binary.LittleEndian.Uint32(r.scratch[:])), nil +} + +func (r *Reader) ReadI64() (int64, error) { + var i int64 + if _, err := io.ReadAtLeast(r.base, r.scratch[:], 8); err != nil { + return i, err + } + return int64(binary.LittleEndian.Uint64(r.scratch[:])), nil +} + +func (r *Reader) ReadF32() (float32, error) { + u, err := r.ReadU32() + f := math.Float32frombits(u) + if math.IsNaN(float64(f)) { + return 0.0, fmt.Errorf("NaN is not permitted in BARE floats") + } + return f, err +} + +func (r *Reader) ReadF64() (float64, error) { + u, err := r.ReadU64() + f := math.Float64frombits(u) + if math.IsNaN(f) { + return 0.0, fmt.Errorf("NaN is not permitted in BARE floats") + } + return f, err +} + +func (r *Reader) ReadBool() (bool, error) { + b, err := r.ReadU8() + if err != nil { + return false, err + } + + if b > 1 { + return false, fmt.Errorf("Invalid bool value: %#x", b) + } + + return b == 1, nil +} + +func (r *Reader) ReadString() (string, error) { + buf, err := r.ReadData() + if err != nil { + return "", err + } + if !utf8.Valid(buf) { + return "", ErrInvalidStr + } + return misc.BytesToString(buf), nil +} + +// Reads a fixed amount of arbitrary data, defined by the length of the slice. +func (r *Reader) ReadDataFixed(dest []byte) error { + var amt int = 0 + for amt < len(dest) { + n, err := r.base.Read(dest[amt:]) + if err != nil { + return err + } + amt += n + } + return nil +} + +// Reads arbitrary data whose length is read from the message. 
+func (r *Reader) ReadData() ([]byte, error) { + l, err := r.ReadUint() + if err != nil { + return nil, err + } + if l >= maxUnmarshalBytes { + return nil, ErrLimitExceeded + } + buf := make([]byte, l) + var amt uint64 = 0 + for amt < l { + n, err := r.base.Read(buf[amt:]) + if err != nil { + return nil, err + } + amt += uint64(n) + } + return buf, nil +} diff --git a/forged/internal/common/bare/unions.go b/forged/internal/common/bare/unions.go new file mode 100644 index 0000000..1020fa0 --- /dev/null +++ b/forged/internal/common/bare/unions.go @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault + +package bare + +import ( + "fmt" + "reflect" +) + +// Any type which is a union member must implement this interface. You must +// also call RegisterUnion for go-bare to marshal or unmarshal messages which +// utilize your union type. +type Union interface { + IsUnion() +} + +type UnionTags struct { + iface reflect.Type + tags map[reflect.Type]uint64 + types map[uint64]reflect.Type +} + +var ( + unionInterface = reflect.TypeOf((*Union)(nil)).Elem() + unionRegistry map[reflect.Type]*UnionTags +) + +func init() { + unionRegistry = make(map[reflect.Type]*UnionTags) +} + +// Registers a union type in this context. Pass the union interface and the +// list of types associated with it, sorted ascending by their union tag. 
+func RegisterUnion(iface interface{}) *UnionTags { + ity := reflect.TypeOf(iface).Elem() + if _, ok := unionRegistry[ity]; ok { + panic(fmt.Errorf("Type %s has already been registered", ity.Name())) + } + + if !ity.Implements(reflect.TypeOf((*Union)(nil)).Elem()) { + panic(fmt.Errorf("Type %s does not implement bare.Union", ity.Name())) + } + + utypes := &UnionTags{ + iface: ity, + tags: make(map[reflect.Type]uint64), + types: make(map[uint64]reflect.Type), + } + unionRegistry[ity] = utypes + return utypes +} + +func (ut *UnionTags) Member(t interface{}, tag uint64) *UnionTags { + ty := reflect.TypeOf(t) + if !ty.AssignableTo(ut.iface) { + panic(fmt.Errorf("Type %s does not implement interface %s", + ty.Name(), ut.iface.Name())) + } + if _, ok := ut.tags[ty]; ok { + panic(fmt.Errorf("Type %s is already registered for union %s", + ty.Name(), ut.iface.Name())) + } + if _, ok := ut.types[tag]; ok { + panic(fmt.Errorf("Tag %d is already registered for union %s", + tag, ut.iface.Name())) + } + ut.tags[ty] = tag + ut.types[tag] = ty + return ut +} + +func (ut *UnionTags) TagFor(v interface{}) (uint64, bool) { + tag, ok := ut.tags[reflect.TypeOf(v)] + return tag, ok +} + +func (ut *UnionTags) TypeFor(tag uint64) (reflect.Type, bool) { + t, ok := ut.types[tag] + return t, ok +} diff --git a/forged/internal/common/bare/unmarshal.go b/forged/internal/common/bare/unmarshal.go new file mode 100644 index 0000000..d55f32c --- /dev/null +++ b/forged/internal/common/bare/unmarshal.go @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault + +package bare + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sync" +) + +// A type which implements this interface will be responsible for unmarshaling +// itself when encountered. +type Unmarshalable interface { + Unmarshal(r *Reader) error +} + +// Unmarshals a BARE message into val, which must be a pointer to a value of +// the message type. 
+func Unmarshal(data []byte, val interface{}) error { + b := bytes.NewReader(data) + r := NewReader(b) + return UnmarshalBareReader(r, val) +} + +// Unmarshals a BARE message into value (val, which must be a pointer), from a +// reader. See Unmarshal for details. +func UnmarshalReader(r io.Reader, val interface{}) error { + r = newLimitedReader(r) + return UnmarshalBareReader(NewReader(r), val) +} + +type decodeFunc func(r *Reader, v reflect.Value) error + +var decodeFuncCache sync.Map // map[reflect.Type]decodeFunc + +func UnmarshalBareReader(r *Reader, val interface{}) error { + t := reflect.TypeOf(val) + v := reflect.ValueOf(val) + if t.Kind() != reflect.Ptr { + return errors.New("Expected val to be pointer type") + } + + return getDecoder(t.Elem())(r, v.Elem()) +} + +// get decoder from cache +func getDecoder(t reflect.Type) decodeFunc { + if f, ok := decodeFuncCache.Load(t); ok { + return f.(decodeFunc) + } + + f := decoderFunc(t) + decodeFuncCache.Store(t, f) + return f +} + +var unmarshalableInterface = reflect.TypeOf((*Unmarshalable)(nil)).Elem() + +func decoderFunc(t reflect.Type) decodeFunc { + if reflect.PointerTo(t).Implements(unmarshalableInterface) { + return func(r *Reader, v reflect.Value) error { + uv := v.Addr().Interface().(Unmarshalable) + return uv.Unmarshal(r) + } + } + + if t.Kind() == reflect.Interface && t.Implements(unionInterface) { + return decodeUnion(t) + } + + switch t.Kind() { + case reflect.Ptr: + return decodeOptional(t.Elem()) + case reflect.Struct: + return decodeStruct(t) + case reflect.Array: + return decodeArray(t) + case reflect.Slice: + return decodeSlice(t) + case reflect.Map: + return decodeMap(t) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return decodeUint + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return decodeInt + case reflect.Float32, reflect.Float64: + return decodeFloat + case reflect.Bool: + return decodeBool + case reflect.String: + 
return decodeString + } + + return func(r *Reader, v reflect.Value) error { + return &UnsupportedTypeError{v.Type()} + } +} + +func decodeOptional(t reflect.Type) decodeFunc { + return func(r *Reader, v reflect.Value) error { + s, err := r.ReadU8() + if err != nil { + return err + } + + if s > 1 { + return fmt.Errorf("Invalid optional value: %#x", s) + } + + if s == 0 { + return nil + } + + v.Set(reflect.New(t)) + return getDecoder(t)(r, v.Elem()) + } +} + +func decodeStruct(t reflect.Type) decodeFunc { + n := t.NumField() + decoders := make([]decodeFunc, n) + for i := 0; i < n; i++ { + field := t.Field(i) + if field.Tag.Get("bare") == "-" { + continue + } + decoders[i] = getDecoder(field.Type) + } + + return func(r *Reader, v reflect.Value) error { + for i := 0; i < n; i++ { + if decoders[i] == nil { + continue + } + err := decoders[i](r, v.Field(i)) + if err != nil { + return err + } + } + return nil + } +} + +func decodeArray(t reflect.Type) decodeFunc { + f := getDecoder(t.Elem()) + len := t.Len() + + return func(r *Reader, v reflect.Value) error { + for i := 0; i < len; i++ { + err := f(r, v.Index(i)) + if err != nil { + return err + } + } + return nil + } +} + +func decodeSlice(t reflect.Type) decodeFunc { + elem := t.Elem() + f := getDecoder(elem) + + return func(r *Reader, v reflect.Value) error { + len, err := r.ReadUint() + if err != nil { + return err + } + + if len > maxArrayLength { + return fmt.Errorf("Array length %d exceeds configured limit of %d", len, maxArrayLength) + } + + v.Set(reflect.MakeSlice(t, int(len), int(len))) + + for i := 0; i < int(len); i++ { + if err := f(r, v.Index(i)); err != nil { + return err + } + } + return nil + } +} + +func decodeMap(t reflect.Type) decodeFunc { + keyType := t.Key() + keyf := getDecoder(keyType) + + valueType := t.Elem() + valf := getDecoder(valueType) + + return func(r *Reader, v reflect.Value) error { + size, err := r.ReadUint() + if err != nil { + return err + } + + if size > maxMapSize { + return 
fmt.Errorf("Map size %d exceeds configured limit of %d", size, maxMapSize) + } + + v.Set(reflect.MakeMapWithSize(t, int(size))) + + key := reflect.New(keyType).Elem() + value := reflect.New(valueType).Elem() + + for i := uint64(0); i < size; i++ { + if err := keyf(r, key); err != nil { + return err + } + + if v.MapIndex(key).Kind() > reflect.Invalid { + return fmt.Errorf("Encountered duplicate map key: %v", key.Interface()) + } + + if err := valf(r, value); err != nil { + return err + } + + v.SetMapIndex(key, value) + } + return nil + } +} + +func decodeUnion(t reflect.Type) decodeFunc { + ut, ok := unionRegistry[t] + if !ok { + return func(r *Reader, v reflect.Value) error { + return fmt.Errorf("Union type %s is not registered", t.Name()) + } + } + + decoders := make(map[uint64]decodeFunc) + for tag, t := range ut.types { + t := t + f := getDecoder(t) + + decoders[tag] = func(r *Reader, v reflect.Value) error { + nv := reflect.New(t) + if err := f(r, nv.Elem()); err != nil { + return err + } + + v.Set(nv) + return nil + } + } + + return func(r *Reader, v reflect.Value) error { + tag, err := r.ReadUint() + if err != nil { + return err + } + + if f, ok := decoders[tag]; ok { + return f(r, v) + } + + return fmt.Errorf("Invalid union tag %d for type %s", tag, t.Name()) + } +} + +func decodeUint(r *Reader, v reflect.Value) error { + var err error + switch getIntKind(v.Type()) { + case reflect.Uint: + var u uint64 + u, err = r.ReadUint() + v.SetUint(u) + + case reflect.Uint8: + var u uint8 + u, err = r.ReadU8() + v.SetUint(uint64(u)) + + case reflect.Uint16: + var u uint16 + u, err = r.ReadU16() + v.SetUint(uint64(u)) + case reflect.Uint32: + var u uint32 + u, err = r.ReadU32() + v.SetUint(uint64(u)) + + case reflect.Uint64: + var u uint64 + u, err = r.ReadU64() + v.SetUint(uint64(u)) + + default: + panic("not an uint") + } + + return err +} + +func decodeInt(r *Reader, v reflect.Value) error { + var err error + switch getIntKind(v.Type()) { + case reflect.Int: + var i 
int64 + i, err = r.ReadInt() + v.SetInt(i) + + case reflect.Int8: + var i int8 + i, err = r.ReadI8() + v.SetInt(int64(i)) + + case reflect.Int16: + var i int16 + i, err = r.ReadI16() + v.SetInt(int64(i)) + case reflect.Int32: + var i int32 + i, err = r.ReadI32() + v.SetInt(int64(i)) + + case reflect.Int64: + var i int64 + i, err = r.ReadI64() + v.SetInt(int64(i)) + + default: + panic("not an int") + } + + return err +} + +func decodeFloat(r *Reader, v reflect.Value) error { + var err error + switch v.Type().Kind() { + case reflect.Float32: + var f float32 + f, err = r.ReadF32() + v.SetFloat(float64(f)) + case reflect.Float64: + var f float64 + f, err = r.ReadF64() + v.SetFloat(f) + default: + panic("not a float") + } + return err +} + +func decodeBool(r *Reader, v reflect.Value) error { + b, err := r.ReadBool() + v.SetBool(b) + return err +} + +func decodeString(r *Reader, v reflect.Value) error { + s, err := r.ReadString() + v.SetString(s) + return err +} diff --git a/forged/internal/common/bare/varint.go b/forged/internal/common/bare/varint.go new file mode 100644 index 0000000..a185ac8 --- /dev/null +++ b/forged/internal/common/bare/varint.go @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault + +package bare + +import ( + "reflect" +) + +// Int is a variable-length encoded signed integer. +type Int int64 + +// Uint is a variable-length encoded unsigned integer. 
+type Uint uint64 + +var ( + intType = reflect.TypeOf(Int(0)) + uintType = reflect.TypeOf(Uint(0)) +) + +func getIntKind(t reflect.Type) reflect.Kind { + switch t { + case intType: + return reflect.Int + case uintType: + return reflect.Uint + default: + return t.Kind() + } +} diff --git a/forged/internal/common/bare/writer.go b/forged/internal/common/bare/writer.go new file mode 100644 index 0000000..80cd7e2 --- /dev/null +++ b/forged/internal/common/bare/writer.go @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault + +package bare + +import ( + "encoding/binary" + "fmt" + "io" + "math" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" +) + +// A Writer for BARE primitive types. +type Writer struct { + base io.Writer + scratch [binary.MaxVarintLen64]byte +} + +// Returns a new BARE primitive writer wrapping the given io.Writer. +func NewWriter(base io.Writer) *Writer { + return &Writer{base: base} +} + +func (w *Writer) WriteUint(i uint64) error { + n := binary.PutUvarint(w.scratch[:], i) + _, err := w.base.Write(w.scratch[:n]) + return err +} + +func (w *Writer) WriteU8(i uint8) error { + return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteU16(i uint16) error { + return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteU32(i uint32) error { + return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteU64(i uint64) error { + return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteInt(i int64) error { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], i) + _, err := w.base.Write(buf[:n]) + return err +} + +func (w *Writer) WriteI8(i int8) error { + return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteI16(i int16) error { + return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteI32(i int32) error { + return 
binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteI64(i int64) error { + return binary.Write(w.base, binary.LittleEndian, i) +} + +func (w *Writer) WriteF32(f float32) error { + if math.IsNaN(float64(f)) { + return fmt.Errorf("NaN is not permitted in BARE floats") + } + return binary.Write(w.base, binary.LittleEndian, f) +} + +func (w *Writer) WriteF64(f float64) error { + if math.IsNaN(f) { + return fmt.Errorf("NaN is not permitted in BARE floats") + } + return binary.Write(w.base, binary.LittleEndian, f) +} + +func (w *Writer) WriteBool(b bool) error { + return binary.Write(w.base, binary.LittleEndian, b) +} + +func (w *Writer) WriteString(str string) error { + return w.WriteData(misc.StringToBytes(str)) +} + +// Writes a fixed amount of arbitrary data, defined by the length of the slice. +func (w *Writer) WriteDataFixed(data []byte) error { + var amt int = 0 + for amt < len(data) { + n, err := w.base.Write(data[amt:]) + if err != nil { + return err + } + amt += n + } + return nil +} + +// Writes arbitrary data whose length is encoded into the message. +func (w *Writer) WriteData(data []byte) error { + err := w.WriteUint(uint64(len(data))) + if err != nil { + return err + } + var amt int = 0 + for amt < len(data) { + n, err := w.base.Write(data[amt:]) + if err != nil { + return err + } + amt += n + } + return nil +} diff --git a/forged/internal/common/cmap/LICENSE b/forged/internal/common/cmap/LICENSE new file mode 100644 index 0000000..d5dfee8 --- /dev/null +++ b/forged/internal/common/cmap/LICENSE @@ -0,0 +1,22 @@ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/forged/internal/common/cmap/comparable_map.go b/forged/internal/common/cmap/comparable_map.go new file mode 100644 index 0000000..e89175c --- /dev/null +++ b/forged/internal/common/cmap/comparable_map.go @@ -0,0 +1,539 @@ +// Inspired by github.com/SaveTheRbtz/generic-sync-map-go but technically +// written from scratch with Go 1.23's sync.Map. +// Copyright 2024 Runxi Yu (porting it to generics) +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmap + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// ComparableMap[K comparable, V comparable] is like a Go map[K]V but is safe for concurrent use +// by multiple goroutines without additional locking or coordination. Loads, +// stores, and deletes run in amortized constant time. 
+// +// The ComparableMap type is optimized for two common use cases: (1) when the comparableEntry for a given +// key is only ever written once but read many times, as in caches that only grow, +// or (2) when multiple goroutines read, write, and overwrite entries for disjoint +// sets of keys. In these two cases, use of a ComparableMap may significantly reduce lock +// contention compared to a Go map paired with a separate [Mutex] or [RWMutex]. +// +// The zero ComparableMap is empty and ready for use. A ComparableMap must not be copied after first use. +// +// In the terminology of [the Go memory model], ComparableMap arranges that a write operation +// “synchronizes before” any read operation that observes the effect of the write, where +// read and write operations are defined as follows. +// [ComparableMap.Load], [ComparableMap.LoadAndDelete], [ComparableMap.LoadOrStore], [ComparableMap.Swap], [ComparableMap.CompareAndSwap], +// and [ComparableMap.CompareAndDelete] are read operations; +// [ComparableMap.Delete], [ComparableMap.LoadAndDelete], [ComparableMap.Store], and [ComparableMap.Swap] are write operations; +// [ComparableMap.LoadOrStore] is a write operation when it returns loaded set to false; +// [ComparableMap.CompareAndSwap] is a write operation when it returns swapped set to true; +// and [ComparableMap.CompareAndDelete] is a write operation when it returns deleted set to true. +// +// [the Go memory model]: https://go.dev/ref/mem +type ComparableMap[K comparable, V comparable] struct { + mu sync.Mutex + + // read contains the portion of the map's contents that are safe for + // concurrent access (with or without mu held). + // + // The read field itself is always safe to load, but must only be stored with + // mu held. 
+ // + // Entries stored in read may be updated concurrently without mu, but updating + // a previously-comparableExpunged comparableEntry requires that the comparableEntry be copied to the dirty + // map and uncomparableExpunged with mu held. + read atomic.Pointer[comparableReadOnly[K, V]] + + // dirty contains the portion of the map's contents that require mu to be + // held. To ensure that the dirty map can be promoted to the read map quickly, + // it also includes all of the non-comparableExpunged entries in the read map. + // + // Expunged entries are not stored in the dirty map. An comparableExpunged comparableEntry in the + // clean map must be uncomparableExpunged and added to the dirty map before a new value + // can be stored to it. + // + // If the dirty map is nil, the next write to the map will initialize it by + // making a shallow copy of the clean map, omitting stale entries. + dirty map[K]*comparableEntry[V] + + // misses counts the number of loads since the read map was last updated that + // needed to lock mu to determine whether the key was present. + // + // Once enough misses have occurred to cover the cost of copying the dirty + // map, the dirty map will be promoted to the read map (in the unamended + // state) and the next store to the map will make a new dirty copy. + misses int +} + +// comparableReadOnly is an immutable struct stored atomically in the ComparableMap.read field. +type comparableReadOnly[K comparable, V comparable] struct { + m map[K]*comparableEntry[V] + amended bool // true if the dirty map contains some key not in m. +} + +// comparableExpunged is an arbitrary pointer that marks entries which have been deleted +// from the dirty map. +var comparableExpunged = unsafe.Pointer(new(any)) + +// An comparableEntry is a slot in the map corresponding to a particular key. +type comparableEntry[V comparable] struct { + // p points to the value stored for the comparableEntry. 
+ // + // If p == nil, the comparableEntry has been deleted, and either m.dirty == nil or + // m.dirty[key] is e. + // + // If p == comparableExpunged, the comparableEntry has been deleted, m.dirty != nil, and the comparableEntry + // is missing from m.dirty. + // + // Otherwise, the comparableEntry is valid and recorded in m.read.m[key] and, if m.dirty + // != nil, in m.dirty[key]. + // + // An comparableEntry can be deleted by atomic replacement with nil: when m.dirty is + // next created, it will atomically replace nil with comparableExpunged and leave + // m.dirty[key] unset. + // + // An comparableEntry's associated value can be updated by atomic replacement, provided + // p != comparableExpunged. If p == comparableExpunged, an comparableEntry's associated value can be updated + // only after first setting m.dirty[key] = e so that lookups using the dirty + // map find the comparableEntry. + p unsafe.Pointer +} + +func newComparableEntry[V comparable](i V) *comparableEntry[V] { + return &comparableEntry[V]{p: unsafe.Pointer(&i)} +} + +func (m *ComparableMap[K, V]) loadReadOnly() comparableReadOnly[K, V] { + if p := m.read.Load(); p != nil { + return *p + } + return comparableReadOnly[K, V]{} +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (m *ComparableMap[K, V]) Load(key K) (value V, ok bool) { + read := m.loadReadOnly() + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + // Avoid reporting a spurious miss if m.dirty got promoted while we were + // blocked on m.mu. (If further loads of the same key will not miss, it's + // not worth copying the dirty map for this key.) + read = m.loadReadOnly() + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Regardless of whether the comparableEntry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. 
+ m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return *new(V), false + } + return e.load() +} + +func (e *comparableEntry[V]) load() (value V, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil || p == comparableExpunged { + return value, false + } + return *(*V)(p), true +} + +// Store sets the value for a key. +func (m *ComparableMap[K, V]) Store(key K, value V) { + _, _ = m.Swap(key, value) +} + +// Clear deletes all the entries, resulting in an empty ComparableMap. +func (m *ComparableMap[K, V]) Clear() { + read := m.loadReadOnly() + if len(read.m) == 0 && !read.amended { + // Avoid allocating a new comparableReadOnly when the map is already clear. + return + } + + m.mu.Lock() + defer m.mu.Unlock() + + read = m.loadReadOnly() + if len(read.m) > 0 || read.amended { + m.read.Store(&comparableReadOnly[K, V]{}) + } + + clear(m.dirty) + // Don't immediately promote the newly-cleared dirty map on the next operation. + m.misses = 0 +} + +// tryCompareAndSwap compare the comparableEntry with the given old value and swaps +// it with a new value if the comparableEntry is equal to the old value, and the comparableEntry +// has not been comparableExpunged. +// +// If the comparableEntry is comparableExpunged, tryCompareAndSwap returns false and leaves +// the comparableEntry unchanged. +func (e *comparableEntry[V]) tryCompareAndSwap(old V, new V) bool { + p := atomic.LoadPointer(&e.p) + if p == nil || p == comparableExpunged || *(*V)(p) != old { // XXX + return false + } + + // Copy the pointer after the first load to make this method more amenable + // to escape analysis: if the comparison fails from the start, we shouldn't + // bother heap-allocating a pointer to store. 
+ nc := new + for { + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(&nc)) { + return true + } + p = atomic.LoadPointer(&e.p) + if p == nil || p == comparableExpunged || *(*V)(p) != old { + return false + } + } +} + +// unexpungeLocked ensures that the comparableEntry is not marked as comparableExpunged. +// +// If the comparableEntry was previously comparableExpunged, it must be added to the dirty map +// before m.mu is unlocked. +func (e *comparableEntry[V]) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, comparableExpunged, nil) +} + +// swapLocked unconditionally swaps a value into the comparableEntry. +// +// The comparableEntry must be known not to be comparableExpunged. +func (e *comparableEntry[V]) swapLocked(i *V) *V { + return (*V)(atomic.SwapPointer(&e.p, unsafe.Pointer(i))) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *ComparableMap[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { + // Avoid locking if it's a clean hit. + read := m.loadReadOnly() + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read = m.loadReadOnly() + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. 
+ m.dirtyLocked() + m.read.Store(&comparableReadOnly[K, V]{m: read.m, amended: true}) + } + m.dirty[key] = newComparableEntry(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +// tryLoadOrStore atomically loads or stores a value if the comparableEntry is not +// comparableExpunged. +// +// If the comparableEntry is comparableExpunged, tryLoadOrStore leaves the comparableEntry unchanged and +// returns with ok==false. +func (e *comparableEntry[V]) tryLoadOrStore(i V) (actual V, loaded, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == comparableExpunged { + return actual, false, false + } + if p != nil { + return *(*V)(p), true, true + } + + // Copy the pointer after the first load to make this method more amenable + // to escape analysis: if we hit the "load" path or the comparableEntry is comparableExpunged, we + // shouldn't bother heap-allocating. + ic := i + for { + if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { + return i, false, true + } + p = atomic.LoadPointer(&e.p) + if p == comparableExpunged { + return actual, false, false + } + if p != nil { + return *(*V)(p), true, true + } + } +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. +func (m *ComparableMap[K, V]) LoadAndDelete(key K) (value V, loaded bool) { + read := m.loadReadOnly() + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read = m.loadReadOnly() + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + delete(m.dirty, key) + // Regardless of whether the comparableEntry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if ok { + return e.delete() + } + return value, false +} + +// Delete deletes the value for a key. 
+func (m *ComparableMap[K, V]) Delete(key K) { + m.LoadAndDelete(key) +} + +func (e *comparableEntry[V]) delete() (value V, ok bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == comparableExpunged { + return value, false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return *(*V)(p), true + } + } +} + +// trySwap swaps a value if the comparableEntry has not been comparableExpunged. +// +// If the comparableEntry is comparableExpunged, trySwap returns false and leaves the comparableEntry +// unchanged. +func (e *comparableEntry[V]) trySwap(i *V) (*V, bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == comparableExpunged { + return nil, false + } + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return (*V)(p), true + } + } +} + +// Swap swaps the value for a key and returns the previous value if any. +// The loaded result reports whether the key was present. +func (m *ComparableMap[K, V]) Swap(key K, value V) (previous V, loaded bool) { + read := m.loadReadOnly() + if e, ok := read.m[key]; ok { + if v, ok := e.trySwap(&value); ok { + if v == nil { + return previous, false + } + return *v, true + } + } + + m.mu.Lock() + read = m.loadReadOnly() + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + // The comparableEntry was previously comparableExpunged, which implies that there is a + // non-nil dirty map and this comparableEntry is not in it. + m.dirty[key] = e + } + if v := e.swapLocked(&value); v != nil { + loaded = true + previous = *v + } + } else if e, ok := m.dirty[key]; ok { + if v := e.swapLocked(&value); v != nil { + loaded = true + previous = *v + } + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. 
+ m.dirtyLocked() + m.read.Store(&comparableReadOnly[K, V]{m: read.m, amended: true}) + } + m.dirty[key] = newComparableEntry(value) + } + m.mu.Unlock() + return previous, loaded +} + +// CompareAndSwap swaps the old and new values for key +// if the value stored in the map is equal to old. +// The old value must be of a comparable type. +func (m *ComparableMap[K, V]) CompareAndSwap(key K, old, new V) (swapped bool) { + read := m.loadReadOnly() + if e, ok := read.m[key]; ok { + return e.tryCompareAndSwap(old, new) + } else if !read.amended { + return false // No existing value for key. + } + + m.mu.Lock() + defer m.mu.Unlock() + read = m.loadReadOnly() + swapped = false + if e, ok := read.m[key]; ok { + swapped = e.tryCompareAndSwap(old, new) + } else if e, ok := m.dirty[key]; ok { + swapped = e.tryCompareAndSwap(old, new) + // We needed to lock mu in order to load the comparableEntry for key, + // and the operation didn't change the set of keys in the map + // (so it would be made more efficient by promoting the dirty + // map to read-only). + // Count it as a miss so that we will eventually switch to the + // more efficient steady state. + m.missLocked() + } + return swapped +} + +// CompareAndDelete deletes the comparableEntry for key if its value is equal to old. +// The old value must be of a comparable type. +// +// If there is no current value for key in the map, CompareAndDelete +// returns false (even if the old value is a nil pointer). +func (m *ComparableMap[K, V]) CompareAndDelete(key K, old V) (deleted bool) { + read := m.loadReadOnly() + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read = m.loadReadOnly() + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Don't delete key from m.dirty: we still need to do the “compare” part + // of the operation. The comparableEntry will eventually be comparableExpunged when the + // dirty map is promoted to the read map. 
+ // + // Regardless of whether the comparableEntry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + for ok { + p := atomic.LoadPointer(&e.p) + if p == nil || p == comparableExpunged || *(*V)(p) != old { + return false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return true + } + } + return false +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the ComparableMap's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently (including by f), Range may reflect any +// mapping for that key from any point during the Range call. Range does not +// block other methods on the receiver; even f itself may call any method on m. +// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *ComparableMap[K, V]) Range(f func(key K, value V) bool) { + // We need to be able to iterate over all of the keys that were already + // present at the start of the call to Range. + // If read.amended is false, then read.m satisfies that property without + // requiring us to hold m.mu for a long time. + read := m.loadReadOnly() + if read.amended { + // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) + // (assuming the caller does not break out early), so a call to Range + // amortizes an entire copy of the map: we can promote the dirty copy + // immediately! 
+ m.mu.Lock() + read = m.loadReadOnly() + if read.amended { + read = comparableReadOnly[K, V]{m: m.dirty} + copyRead := read + m.read.Store(©Read) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *ComparableMap[K, V]) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(&comparableReadOnly[K, V]{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *ComparableMap[K, V]) dirtyLocked() { + if m.dirty != nil { + return + } + + read := m.loadReadOnly() + m.dirty = make(map[K]*comparableEntry[V], len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } + } +} + +func (e *comparableEntry[V]) tryExpungeLocked() (isExpunged bool) { + p := atomic.LoadPointer(&e.p) + for p == nil { + if atomic.CompareAndSwapPointer(&e.p, nil, comparableExpunged) { + return true + } + p = atomic.LoadPointer(&e.p) + } + return p == comparableExpunged +} diff --git a/forged/internal/common/cmap/map.go b/forged/internal/common/cmap/map.go new file mode 100644 index 0000000..7a1fe5b --- /dev/null +++ b/forged/internal/common/cmap/map.go @@ -0,0 +1,446 @@ +// Inspired by github.com/SaveTheRbtz/generic-sync-map-go but technically +// written from scratch with Go 1.23's sync.Map. +// Copyright 2024 Runxi Yu (porting it to generics) +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cmap provides a generic Map safe for concurrent use. +package cmap + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// Map[K comparable, V any] is like a Go map[K]V but is safe for concurrent use +// by multiple goroutines without additional locking or coordination. Loads, +// stores, and deletes run in amortized constant time. 
+// +// The Map type is optimized for two common use cases: (1) when the entry for a given +// key is only ever written once but read many times, as in caches that only grow, +// or (2) when multiple goroutines read, write, and overwrite entries for disjoint +// sets of keys. In these two cases, use of a Map may significantly reduce lock +// contention compared to a Go map paired with a separate [Mutex] or [RWMutex]. +// +// The zero Map is empty and ready for use. A Map must not be copied after first use. +// +// In the terminology of [the Go memory model], Map arranges that a write operation +// “synchronizes before” any read operation that observes the effect of the write, where +// read and write operations are defined as follows. +// [Map.Load], [Map.LoadAndDelete], [Map.LoadOrStore], [Map.Swap], [Map.CompareAndSwap], +// and [Map.CompareAndDelete] are read operations; +// [Map.Delete], [Map.LoadAndDelete], [Map.Store], and [Map.Swap] are write operations; +// [Map.LoadOrStore] is a write operation when it returns loaded set to false; +// [Map.CompareAndSwap] is a write operation when it returns swapped set to true; +// and [Map.CompareAndDelete] is a write operation when it returns deleted set to true. +// +// [the Go memory model]: https://go.dev/ref/mem +type Map[K comparable, V any] struct { + mu sync.Mutex + + // read contains the portion of the map's contents that are safe for + // concurrent access (with or without mu held). + // + // The read field itself is always safe to load, but must only be stored with + // mu held. + // + // Entries stored in read may be updated concurrently without mu, but updating + // a previously-expunged entry requires that the entry be copied to the dirty + // map and unexpunged with mu held. + read atomic.Pointer[readOnly[K, V]] + + // dirty contains the portion of the map's contents that require mu to be + // held. 
To ensure that the dirty map can be promoted to the read map quickly, + // it also includes all of the non-expunged entries in the read map. + // + // Expunged entries are not stored in the dirty map. An expunged entry in the + // clean map must be unexpunged and added to the dirty map before a new value + // can be stored to it. + // + // If the dirty map is nil, the next write to the map will initialize it by + // making a shallow copy of the clean map, omitting stale entries. + dirty map[K]*entry[V] + + // misses counts the number of loads since the read map was last updated that + // needed to lock mu to determine whether the key was present. + // + // Once enough misses have occurred to cover the cost of copying the dirty + // map, the dirty map will be promoted to the read map (in the unamended + // state) and the next store to the map will make a new dirty copy. + misses int +} + +// readOnly is an immutable struct stored atomically in the Map.read field. +type readOnly[K comparable, V any] struct { + m map[K]*entry[V] + amended bool // true if the dirty map contains some key not in m. +} + +// expunged is an arbitrary pointer that marks entries which have been deleted +// from the dirty map. +var expunged = unsafe.Pointer(new(any)) + +// An entry is a slot in the map corresponding to a particular key. +type entry[V any] struct { + // p points to the value stored for the entry. + // + // If p == nil, the entry has been deleted, and either m.dirty == nil or + // m.dirty[key] is e. + // + // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry + // is missing from m.dirty. + // + // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty + // != nil, in m.dirty[key]. + // + // An entry can be deleted by atomic replacement with nil: when m.dirty is + // next created, it will atomically replace nil with expunged and leave + // m.dirty[key] unset. 
+ // + // An entry's associated value can be updated by atomic replacement, provided + // p != expunged. If p == expunged, an entry's associated value can be updated + // only after first setting m.dirty[key] = e so that lookups using the dirty + // map find the entry. + p unsafe.Pointer +} + +func newEntry[V any](i V) *entry[V] { + return &entry[V]{p: unsafe.Pointer(&i)} +} + +func (m *Map[K, V]) loadReadOnly() readOnly[K, V] { + if p := m.read.Load(); p != nil { + return *p + } + return readOnly[K, V]{} +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (m *Map[K, V]) Load(key K) (value V, ok bool) { + read := m.loadReadOnly() + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + // Avoid reporting a spurious miss if m.dirty got promoted while we were + // blocked on m.mu. (If further loads of the same key will not miss, it's + // not worth copying the dirty map for this key.) + read = m.loadReadOnly() + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return *new(V), false + } + return e.load() +} + +func (e *entry[V]) load() (value V, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expunged { + return value, false + } + return *(*V)(p), true +} + +// Store sets the value for a key. +func (m *Map[K, V]) Store(key K, value V) { + _, _ = m.Swap(key, value) +} + +// Clear deletes all the entries, resulting in an empty Map. +func (m *Map[K, V]) Clear() { + read := m.loadReadOnly() + if len(read.m) == 0 && !read.amended { + // Avoid allocating a new readOnly when the map is already clear. 
+ return + } + + m.mu.Lock() + defer m.mu.Unlock() + + read = m.loadReadOnly() + if len(read.m) > 0 || read.amended { + m.read.Store(&readOnly[K, V]{}) + } + + clear(m.dirty) + // Don't immediately promote the newly-cleared dirty map on the next operation. + m.misses = 0 +} + +// unexpungeLocked ensures that the entry is not marked as expunged. +// +// If the entry was previously expunged, it must be added to the dirty map +// before m.mu is unlocked. +func (e *entry[V]) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, expunged, nil) +} + +// swapLocked unconditionally swaps a value into the entry. +// +// The entry must be known not to be expunged. +func (e *entry[V]) swapLocked(i *V) *V { + return (*V)(atomic.SwapPointer(&e.p, unsafe.Pointer(i))) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { + // Avoid locking if it's a clean hit. + read := m.loadReadOnly() + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read = m.loadReadOnly() + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(&readOnly[K, V]{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +// tryLoadOrStore atomically loads or stores a value if the entry is not +// expunged. 
+// +// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and +// returns with ok==false. +func (e *entry[V]) tryLoadOrStore(i V) (actual V, loaded, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == expunged { + return actual, false, false + } + if p != nil { + return *(*V)(p), true, true + } + + // Copy the pointer after the first load to make this method more amenable + // to escape analysis: if we hit the "load" path or the entry is expunged, we + // shouldn't bother heap-allocating. + ic := i + for { + if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { + return i, false, true + } + p = atomic.LoadPointer(&e.p) + if p == expunged { + return actual, false, false + } + if p != nil { + return *(*V)(p), true, true + } + } +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. +func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) { + read := m.loadReadOnly() + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read = m.loadReadOnly() + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + delete(m.dirty, key) + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if ok { + return e.delete() + } + return value, false +} + +// Delete deletes the value for a key. +func (m *Map[K, V]) Delete(key K) { + m.LoadAndDelete(key) +} + +func (e *entry[V]) delete() (value V, ok bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expunged { + return value, false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return *(*V)(p), true + } + } +} + +// trySwap swaps a value if the entry has not been expunged. +// +// If the entry is expunged, trySwap returns false and leaves the entry +// unchanged. 
+func (e *entry[V]) trySwap(i *V) (*V, bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == expunged { + return nil, false + } + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return (*V)(p), true + } + } +} + +// Swap swaps the value for a key and returns the previous value if any. +// The loaded result reports whether the key was present. +func (m *Map[K, V]) Swap(key K, value V) (previous V, loaded bool) { + read := m.loadReadOnly() + if e, ok := read.m[key]; ok { + if v, ok := e.trySwap(&value); ok { + if v == nil { + return previous, false + } + return *v, true + } + } + + m.mu.Lock() + read = m.loadReadOnly() + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + // The entry was previously expunged, which implies that there is a + // non-nil dirty map and this entry is not in it. + m.dirty[key] = e + } + if v := e.swapLocked(&value); v != nil { + loaded = true + previous = *v + } + } else if e, ok := m.dirty[key]; ok { + if v := e.swapLocked(&value); v != nil { + loaded = true + previous = *v + } + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(&readOnly[K, V]{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + } + m.mu.Unlock() + return previous, loaded +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently (including by f), Range may reflect any +// mapping for that key from any point during the Range call. Range does not +// block other methods on the receiver; even f itself may call any method on m. 
+//
+// Range may be O(N) with the number of elements in the map even if f returns
+// false after a constant number of calls.
+func (m *Map[K, V]) Range(f func(key K, value V) bool) {
+	// We need to be able to iterate over all of the keys that were already
+	// present at the start of the call to Range.
+	// If read.amended is false, then read.m satisfies that property without
+	// requiring us to hold m.mu for a long time.
+	read := m.loadReadOnly()
+	if read.amended {
+		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
+		// (assuming the caller does not break out early), so a call to Range
+		// amortizes an entire copy of the map: we can promote the dirty copy
+		// immediately!
+		m.mu.Lock()
+		read = m.loadReadOnly()
+		if read.amended {
+			read = readOnly[K, V]{m: m.dirty}
+			copyRead := read
+			m.read.Store(&copyRead)
+			m.dirty = nil
+			m.misses = 0
+		}
+		m.mu.Unlock()
+	}
+
+	for k, e := range read.m {
+		v, ok := e.load()
+		if !ok {
+			continue
+		}
+		if !f(k, v) {
+			break
+		}
+	}
+}
+
+func (m *Map[K, V]) missLocked() {
+	m.misses++
+	if m.misses < len(m.dirty) {
+		return
+	}
+	m.read.Store(&readOnly[K, V]{m: m.dirty})
+	m.dirty = nil
+	m.misses = 0
+}
+
+func (m *Map[K, V]) dirtyLocked() {
+	if m.dirty != nil {
+		return
+	}
+
+	read := m.loadReadOnly()
+	m.dirty = make(map[K]*entry[V], len(read.m))
+	for k, e := range read.m {
+		if !e.tryExpungeLocked() {
+			m.dirty[k] = e
+		}
+	}
+}
+
+func (e *entry[V]) tryExpungeLocked() (isExpunged bool) {
+	p := atomic.LoadPointer(&e.p)
+	for p == nil {
+		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
+			return true
+		}
+		p = atomic.LoadPointer(&e.p)
+	}
+	return p == expunged
+}
diff --git a/forged/internal/common/humanize/bytes.go b/forged/internal/common/humanize/bytes.go
new file mode 100644
index 0000000..bea504c
--- /dev/null
+++ b/forged/internal/common/humanize/bytes.go
@@ -0,0 +1,35 @@
+// SPDX-FileCopyrightText: Copyright (c) 2005-2008 Dustin Sallings
+// SPDX-FileCopyrightText: Copyright 
(c) 2025 Runxi Yu
+
+// Package humanize provides functions to convert numbers into human-readable formats.
+package humanize
+
+import (
+	"fmt"
+	"math"
+)
+
+// IBytes produces a human readable representation of an IEC size.
+func IBytes(s uint64) string {
+	sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
+	return humanateBytes(s, 1024, sizes)
+}
+
+func humanateBytes(s uint64, base float64, sizes []string) string {
+	if s < 10 {
+		return fmt.Sprintf("%d B", s)
+	}
+	e := math.Floor(logn(float64(s), base))
+	suffix := sizes[int(e)]
+	val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
+	f := "%.0f %s"
+	if val < 10 {
+		f = "%.1f %s"
+	}
+
+	return fmt.Sprintf(f, val, suffix)
+}
+
+func logn(n, b float64) float64 {
+	return math.Log(n) / math.Log(b)
+}
diff --git a/forged/internal/common/misc/back.go b/forged/internal/common/misc/back.go
new file mode 100644
index 0000000..5351359
--- /dev/null
+++ b/forged/internal/common/misc/back.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu
+
+package misc
+
+// ErrorBack wraps a value and a channel for communicating an associated error.
+// Typically used to get an error response after sending data across a channel.
+type ErrorBack[T any] struct {
+	Content T
+	ErrorChan chan error
+}
diff --git a/forged/internal/common/misc/iter.go b/forged/internal/common/misc/iter.go
new file mode 100644
index 0000000..61a96f4
--- /dev/null
+++ b/forged/internal/common/misc/iter.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu
+
+package misc
+
+import "iter"
+
+// IterSeqLimit returns an iterator equivalent to the supplied one, but stops
+// after n iterations.
+func IterSeqLimit[T any](s iter.Seq[T], n uint) iter.Seq[T] {
+	return func(yield func(T) bool) {
+		var iterations uint
+		for v := range s {
+			if iterations >= n {
+				return
+			}
+			if !yield(v) {
+				return
+			}
+			iterations++
+		}
+	}
+}
diff --git a/forged/internal/common/misc/misc.go b/forged/internal/common/misc/misc.go
new file mode 100644
index 0000000..e9e10ab
--- /dev/null
+++ b/forged/internal/common/misc/misc.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu
+
+// Package misc provides miscellaneous functions and other definitions.
+package misc
diff --git a/forged/internal/common/misc/net.go b/forged/internal/common/misc/net.go
new file mode 100644
index 0000000..967ea77
--- /dev/null
+++ b/forged/internal/common/misc/net.go
@@ -0,0 +1,42 @@
+package misc
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"syscall"
+)
+
+func ListenUnixSocket(ctx context.Context, path string) (listener net.Listener, replaced bool, err error) {
+	listenConfig := net.ListenConfig{} //exhaustruct:ignore
+	listener, err = listenConfig.Listen(ctx, "unix", path)
+	if errors.Is(err, syscall.EADDRINUSE) {
+		replaced = true
+		unlinkErr := syscall.Unlink(path)
+		if unlinkErr != nil {
+			return listener, false, fmt.Errorf("remove existing socket %q: %w", path, unlinkErr)
+		}
+		listener, err = listenConfig.Listen(ctx, "unix", path)
+	}
+	if err != nil {
+		return listener, replaced, fmt.Errorf("listen on unix socket %q: %w", path, err)
+	}
+	return listener, replaced, nil
+}
+
+func Listen(ctx context.Context, net_, addr string) (listener net.Listener, err error) {
+	if net_ == "unix" {
+		listener, _, err = ListenUnixSocket(ctx, addr)
+		if err != nil {
+			return listener, fmt.Errorf("listen unix socket for web: %w", err)
+		}
+	} else {
+		listenConfig := net.ListenConfig{} //exhaustruct:ignore
+		listener, err = listenConfig.Listen(ctx, net_, addr)
+		if err != nil {
+			return listener, fmt.Errorf("listen %s for web: %w", 
net_, err)
+		}
+	}
+	return listener, nil
+}
diff --git a/forged/internal/common/misc/slices.go b/forged/internal/common/misc/slices.go
new file mode 100644
index 0000000..3ad0211
--- /dev/null
+++ b/forged/internal/common/misc/slices.go
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu
+
+package misc
+
+import "strings"
+
+// SliceContainsNewlines returns true if and only if the given slice contains
+// one or more strings that contains newlines.
+func SliceContainsNewlines(s []string) bool {
+	for _, v := range s {
+		if strings.Contains(v, "\n") {
+			return true
+		}
+	}
+	return false
+}
diff --git a/forged/internal/common/misc/trivial.go b/forged/internal/common/misc/trivial.go
new file mode 100644
index 0000000..83901e0
--- /dev/null
+++ b/forged/internal/common/misc/trivial.go
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu
+
+package misc
+
+import (
+	"net/url"
+	"strings"
+)
+
+// These are all trivial functions that are intended to be used in HTML
+// templates.
+
+// FirstLine returns the first line of a string.
+func FirstLine(s string) string {
+	before, _, _ := strings.Cut(s, "\n")
+	return before
+}
+
+// PathEscape escapes the input as an URL path segment.
+func PathEscape(s string) string {
+	return url.PathEscape(s)
+}
+
+// QueryEscape escapes the input as an URL query segment.
+func QueryEscape(s string) string {
+	return url.QueryEscape(s)
+}
+
+// Dereference dereferences a pointer.
+func Dereference[T any](p *T) T { //nolint:ireturn
+	return *p
+}
+
+// DereferenceOrZero dereferences a pointer. If the pointer is nil, the zero
+// value of its associated type is returned instead.
+func DereferenceOrZero[T any](p *T) T { //nolint:ireturn
+	if p != nil {
+		return *p
+	}
+	var z T
+	return z
+}
+
+// Minus subtracts two numbers.
+func Minus(a, b int) int { + return a - b +} diff --git a/forged/internal/common/misc/unsafe.go b/forged/internal/common/misc/unsafe.go new file mode 100644 index 0000000..d827e7f --- /dev/null +++ b/forged/internal/common/misc/unsafe.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +package misc + +import "unsafe" + +// StringToBytes converts a string to a byte slice without copying the string. +// Memory is borrowed from the string. +// The resulting byte slice must not be modified in any form. +func StringToBytes(s string) (bytes []byte) { + return unsafe.Slice(unsafe.StringData(s), len(s)) //#nosec G103 +} + +// BytesToString converts a byte slice to a string without copying the bytes. +// Memory is borrowed from the byte slice. +// The source byte slice must not be modified. +func BytesToString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) //#nosec G103 +} diff --git a/forged/internal/common/misc/url.go b/forged/internal/common/misc/url.go new file mode 100644 index 0000000..346ff76 --- /dev/null +++ b/forged/internal/common/misc/url.go @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +package misc + +import ( + "net/http" + "net/url" + "strings" +) + +// ParseReqURI parses an HTTP request URL, and returns a slice of path segments +// and the query parameters. It handles %2F correctly. +func ParseReqURI(requestURI string) (segments []string, params url.Values, err error) { + path, paramsStr, _ := strings.Cut(requestURI, "?") + + segments, err = PathToSegments(path) + if err != nil { + return + } + + params, err = url.ParseQuery(paramsStr) + return +} + +// PathToSegments splits a path into unescaped segments. It handles %2F correctly. 
+func PathToSegments(path string) (segments []string, err error) { + segments = strings.Split(strings.TrimPrefix(path, "/"), "/") + + for i, segment := range segments { + segments[i], err = url.PathUnescape(segment) + if err != nil { + return + } + } + + return +} + +// RedirectDir returns true and redirects the user to a version of the URL with +// a trailing slash, if and only if the request URL does not already have a +// trailing slash. +func RedirectDir(writer http.ResponseWriter, request *http.Request) bool { + requestURI := request.RequestURI + + pathEnd := strings.IndexAny(requestURI, "?#") + var path, rest string + if pathEnd == -1 { + path = requestURI + } else { + path = requestURI[:pathEnd] + rest = requestURI[pathEnd:] + } + + if !strings.HasSuffix(path, "/") { + http.Redirect(writer, request, path+"/"+rest, http.StatusSeeOther) + return true + } + return false +} + +// RedirectNoDir returns true and redirects the user to a version of the URL +// without a trailing slash, if and only if the request URL has a trailing +// slash. +func RedirectNoDir(writer http.ResponseWriter, request *http.Request) bool { + requestURI := request.RequestURI + + pathEnd := strings.IndexAny(requestURI, "?#") + var path, rest string + if pathEnd == -1 { + path = requestURI + } else { + path = requestURI[:pathEnd] + rest = requestURI[pathEnd:] + } + + if strings.HasSuffix(path, "/") { + http.Redirect(writer, request, strings.TrimSuffix(path, "/")+rest, http.StatusSeeOther) + return true + } + return false +} + +// RedirectUnconditionally unconditionally redirects the user back to the +// current page while preserving query parameters. 
+func RedirectUnconditionally(writer http.ResponseWriter, request *http.Request) { + requestURI := request.RequestURI + + pathEnd := strings.IndexAny(requestURI, "?#") + var path, rest string + if pathEnd == -1 { + path = requestURI + } else { + path = requestURI[:pathEnd] + rest = requestURI[pathEnd:] + } + + http.Redirect(writer, request, path+rest, http.StatusSeeOther) +} + +// SegmentsToURL joins URL segments to the path component of a URL. +// Each segment is escaped properly first. +func SegmentsToURL(segments []string) string { + for i, segment := range segments { + segments[i] = url.PathEscape(segment) + } + return strings.Join(segments, "/") +} + +// AnyContain returns true if and only if ss contains a string that contains c. +func AnyContain(ss []string, c string) bool { + for _, s := range ss { + if strings.Contains(s, c) { + return true + } + } + return false +} diff --git a/forged/internal/common/scfg/.golangci.yaml b/forged/internal/common/scfg/.golangci.yaml new file mode 100644 index 0000000..59f1970 --- /dev/null +++ b/forged/internal/common/scfg/.golangci.yaml @@ -0,0 +1,26 @@ +linters: + enable-all: true + disable: + - perfsprint + - wsl + - varnamelen + - nlreturn + - exhaustruct + - wrapcheck + - lll + - exhaustive + - intrange + - godox + - nestif + - err113 + - staticcheck + - errorlint + - cyclop + - nonamedreturns + - funlen + - gochecknoglobals + - tenv + +issues: + max-issues-per-linter: 0 + max-same-issues: 0 diff --git a/forged/internal/common/scfg/LICENSE b/forged/internal/common/scfg/LICENSE new file mode 100644 index 0000000..3649823 --- /dev/null +++ b/forged/internal/common/scfg/LICENSE @@ -0,0 +1,18 @@ +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, 
and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/forged/internal/common/scfg/reader.go b/forged/internal/common/scfg/reader.go new file mode 100644 index 0000000..6a2bedc --- /dev/null +++ b/forged/internal/common/scfg/reader.go @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser + +package scfg + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +// This limits the max block nesting depth to prevent stack overflows. +const maxNestingDepth = 1000 + +// Load loads a configuration file. +func Load(path string) (Block, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + return Read(f) +} + +// Read parses a configuration file from an io.Reader. +func Read(r io.Reader) (Block, error) { + scanner := bufio.NewScanner(r) + + dec := decoder{scanner: scanner} + block, closingBrace, err := dec.readBlock() + if err != nil { + return nil, err + } else if closingBrace { + return nil, fmt.Errorf("line %v: unexpected '}'", dec.lineno) + } + + return block, scanner.Err() +} + +type decoder struct { + scanner *bufio.Scanner + lineno int + blockDepth int +} + +// readBlock reads a block. closingBrace is true if parsing stopped on '}' +// (otherwise, it stopped on Scanner.Scan). 
+func (dec *decoder) readBlock() (block Block, closingBrace bool, err error) { + dec.blockDepth++ + defer func() { + dec.blockDepth-- + }() + + if dec.blockDepth >= maxNestingDepth { + return nil, false, fmt.Errorf("exceeded max block depth") + } + + for dec.scanner.Scan() { + dec.lineno++ + + l := dec.scanner.Text() + words, err := splitWords(l) + if err != nil { + return nil, false, fmt.Errorf("line %v: %v", dec.lineno, err) + } else if len(words) == 0 { + continue + } + + if len(words) == 1 && l[len(l)-1] == '}' { + closingBrace = true + break + } + + var d *Directive + if words[len(words)-1] == "{" && l[len(l)-1] == '{' { + words = words[:len(words)-1] + + var name string + params := words + if len(words) > 0 { + name, params = words[0], words[1:] + } + + startLineno := dec.lineno + childBlock, childClosingBrace, err := dec.readBlock() + if err != nil { + return nil, false, err + } else if !childClosingBrace { + return nil, false, fmt.Errorf("line %v: unterminated block", startLineno) + } + + // Allows callers to tell apart "no block" and "empty block" + if childBlock == nil { + childBlock = Block{} + } + + d = &Directive{Name: name, Params: params, Children: childBlock, lineno: dec.lineno} + } else { + d = &Directive{Name: words[0], Params: words[1:], lineno: dec.lineno} + } + block = append(block, d) + } + + return block, closingBrace, nil +} + +func splitWords(l string) ([]string, error) { + var ( + words []string + sb strings.Builder + escape bool + quote rune + wantWSP bool + ) + for _, ch := range l { + switch { + case escape: + sb.WriteRune(ch) + escape = false + case wantWSP && (ch != ' ' && ch != '\t'): + return words, fmt.Errorf("atom not allowed after quoted string") + case ch == '\\': + escape = true + case quote != 0 && ch == quote: + quote = 0 + wantWSP = true + if sb.Len() == 0 { + words = append(words, "") + } + case quote == 0 && len(words) == 0 && sb.Len() == 0 && ch == '#': + return nil, nil + case quote == 0 && (ch == '\'' || ch == '"'): + 
if sb.Len() > 0 { + return words, fmt.Errorf("quoted string not allowed after atom") + } + quote = ch + case quote == 0 && (ch == ' ' || ch == '\t'): + if sb.Len() > 0 { + words = append(words, sb.String()) + } + sb.Reset() + wantWSP = false + default: + sb.WriteRune(ch) + } + } + if quote != 0 { + return words, fmt.Errorf("unterminated quoted string") + } + if sb.Len() > 0 { + words = append(words, sb.String()) + } + return words, nil +} diff --git a/forged/internal/common/scfg/scfg.go b/forged/internal/common/scfg/scfg.go new file mode 100644 index 0000000..4533e63 --- /dev/null +++ b/forged/internal/common/scfg/scfg.go @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser + +// Package scfg parses and formats configuration files. +// Note that this fork of scfg behaves differently from upstream scfg. +package scfg + +import ( + "fmt" +) + +// Block is a list of directives. +type Block []*Directive + +// GetAll returns a list of directives with the provided name. +func (blk Block) GetAll(name string) []*Directive { + l := make([]*Directive, 0, len(blk)) + for _, child := range blk { + if child.Name == name { + l = append(l, child) + } + } + return l +} + +// Get returns the first directive with the provided name. +func (blk Block) Get(name string) *Directive { + for _, child := range blk { + if child.Name == name { + return child + } + } + return nil +} + +// Directive is a configuration directive. +type Directive struct { + Name string + Params []string + + Children Block + + lineno int +} + +// ParseParams extracts parameters from the directive. It errors out if the +// user hasn't provided enough parameters. 
+func (d *Directive) ParseParams(params ...*string) error { + if len(d.Params) < len(params) { + return fmt.Errorf("directive %q: want %v params, got %v", d.Name, len(params), len(d.Params)) + } + for i, ptr := range params { + if ptr == nil { + continue + } + *ptr = d.Params[i] + } + return nil +} diff --git a/forged/internal/common/scfg/struct.go b/forged/internal/common/scfg/struct.go new file mode 100644 index 0000000..98ec943 --- /dev/null +++ b/forged/internal/common/scfg/struct.go @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser + +package scfg + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// structInfo contains scfg metadata for structs. +type structInfo struct { + param int // index of field storing parameters + children map[string]int // indices of fields storing child directives +} + +var ( + structCacheMutex sync.Mutex + structCache = make(map[reflect.Type]*structInfo) +) + +func getStructInfo(t reflect.Type) (*structInfo, error) { + structCacheMutex.Lock() + defer structCacheMutex.Unlock() + + if info := structCache[t]; info != nil { + return info, nil + } + + info := &structInfo{ + param: -1, + children: make(map[string]int), + } + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Anonymous { + return nil, fmt.Errorf("scfg: anonymous struct fields are not supported") + } else if !f.IsExported() { + continue + } + + tag := f.Tag.Get("scfg") + parts := strings.Split(tag, ",") + k, options := parts[0], parts[1:] + if k == "-" { + continue + } else if k == "" { + k = f.Name + } + + isParam := false + for _, opt := range options { + switch opt { + case "param": + isParam = true + default: + return nil, fmt.Errorf("scfg: invalid option %q in struct tag", opt) + } + } + + if isParam { + if info.param >= 0 { + return nil, fmt.Errorf("scfg: param option specified multiple times in struct tag in %v", t) + } + if parts[0] != "" { + return nil, fmt.Errorf("scfg: name must be empty 
when param option is specified in struct tag in %v", t) + } + info.param = i + } else { + if _, ok := info.children[k]; ok { + return nil, fmt.Errorf("scfg: key %q specified multiple times in struct tag in %v", k, t) + } + info.children[k] = i + } + } + + structCache[t] = info + return info, nil +} diff --git a/forged/internal/common/scfg/unmarshal.go b/forged/internal/common/scfg/unmarshal.go new file mode 100644 index 0000000..8befc10 --- /dev/null +++ b/forged/internal/common/scfg/unmarshal.go @@ -0,0 +1,375 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +package scfg + +import ( + "encoding" + "fmt" + "io" + "reflect" + "strconv" +) + +// Decoder reads and decodes an scfg document from an input stream. +type Decoder struct { + r io.Reader + unknownDirectives []*Directive +} + +// NewDecoder returns a new decoder which reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +// UnknownDirectives returns a slice of all unknown directives encountered +// during Decode. +func (dec *Decoder) UnknownDirectives() []*Directive { + return dec.unknownDirectives +} + +// Decode reads scfg document from the input and stores it in the value pointed +// to by v. +// +// If v is nil or not a pointer, Decode returns an error. +// +// Blocks can be unmarshaled to: +// +// - Maps. Each directive is unmarshaled into a map entry. The map key must +// be a string. +// - Structs. Each directive is unmarshaled into a struct field. +// +// Duplicate directives are not allowed, unless the struct field or map value +// is a slice of values representing a directive: structs or maps. +// +// Directives can be unmarshaled to: +// +// - Maps. The children block is unmarshaled into the map. Parameters are not +// allowed. +// - Structs. The children block is unmarshaled into the struct. 
Parameters +// are allowed if one of the struct fields contains the "param" option in +// its tag. +// - Slices. Parameters are unmarshaled into the slice. Children blocks are +// not allowed. +// - Arrays. Parameters are unmarshaled into the array. The number of +// parameters must match exactly the length of the array. Children blocks +// are not allowed. +// - Strings, booleans, integers, floating-point values, values implementing +// encoding.TextUnmarshaler. Only a single parameter is allowed and is +// unmarshaled into the value. Children blocks are not allowed. +// +// The decoding of each struct field can be customized by the format string +// stored under the "scfg" key in the struct field's tag. The tag contains the +// name of the field possibly followed by a comma-separated list of options. +// The name may be empty in order to specify options without overriding the +// default field name. As a special case, if the field name is "-", the field +// is ignored. The "param" option specifies that directive parameters are +// stored in this field (the name must be empty). 
+func (dec *Decoder) Decode(v interface{}) error { + block, err := Read(dec.r) + if err != nil { + return err + } + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return fmt.Errorf("scfg: invalid value for unmarshaling") + } + + return dec.unmarshalBlock(block, rv) +} + +func (dec *Decoder) unmarshalBlock(block Block, v reflect.Value) error { + v = unwrapPointers(v) + t := v.Type() + + dirsByName := make(map[string][]*Directive, len(block)) + for _, dir := range block { + dirsByName[dir.Name] = append(dirsByName[dir.Name], dir) + } + + switch v.Kind() { + case reflect.Map: + if t.Key().Kind() != reflect.String { + return fmt.Errorf("scfg: map key type must be string") + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } else if v.Len() > 0 { + clearMap(v) + } + + for name, dirs := range dirsByName { + mv := reflect.New(t.Elem()).Elem() + if err := dec.unmarshalDirectiveList(dirs, mv); err != nil { + return err + } + v.SetMapIndex(reflect.ValueOf(name), mv) + } + + case reflect.Struct: + si, err := getStructInfo(t) + if err != nil { + return err + } + + seen := make(map[int]bool) + + for name, dirs := range dirsByName { + fieldIndex, ok := si.children[name] + if !ok { + dec.unknownDirectives = append(dec.unknownDirectives, dirs...) 
+ continue + } + fv := v.Field(fieldIndex) + if err := dec.unmarshalDirectiveList(dirs, fv); err != nil { + return err + } + seen[fieldIndex] = true + } + + for name, fieldIndex := range si.children { + if fieldIndex == si.param { + continue + } + if _, ok := seen[fieldIndex]; !ok { + return fmt.Errorf("scfg: missing required directive %q", name) + } + } + + default: + return fmt.Errorf("scfg: unsupported type for unmarshaling blocks: %v", t) + } + + return nil +} + +func (dec *Decoder) unmarshalDirectiveList(dirs []*Directive, v reflect.Value) error { + v = unwrapPointers(v) + t := v.Type() + + if v.Kind() != reflect.Slice || !isDirectiveType(t.Elem()) { + if len(dirs) > 1 { + return newUnmarshalDirectiveError(dirs[1], "directive must not be specified more than once") + } + return dec.unmarshalDirective(dirs[0], v) + } + + sv := reflect.MakeSlice(t, len(dirs), len(dirs)) + for i, dir := range dirs { + if err := dec.unmarshalDirective(dir, sv.Index(i)); err != nil { + return err + } + } + v.Set(sv) + return nil +} + +// isDirectiveType checks whether a type can only be unmarshaled as a +// directive, not as a parameter. 
Accepting too many types here would result in +// ambiguities, see: +// https://lists.sr.ht/~emersion/public-inbox/%3C20230629132458.152205-1-contact%40emersion.fr%3E#%3Ch4Y2peS_YBqY3ar4XlmPDPiNBFpYGns3EBYUx3_6zWEhV2o8_-fBQveRujGADWYhVVCucHBEryFGoPtpC3d3mQ-x10pWnFogfprbQTSvtxc=@emersion.fr%3E +func isDirectiveType(t reflect.Type) bool { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + textUnmarshalerType := reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + if reflect.PointerTo(t).Implements(textUnmarshalerType) { + return false + } + + switch t.Kind() { + case reflect.Struct, reflect.Map: + return true + default: + return false + } +} + +func (dec *Decoder) unmarshalDirective(dir *Directive, v reflect.Value) error { + v = unwrapPointers(v) + t := v.Type() + + if v.CanAddr() { + if _, ok := v.Addr().Interface().(encoding.TextUnmarshaler); ok { + if len(dir.Children) != 0 { + return newUnmarshalDirectiveError(dir, "directive requires zero children") + } + return unmarshalParamList(dir, v) + } + } + + switch v.Kind() { + case reflect.Map: + if len(dir.Params) > 0 { + return newUnmarshalDirectiveError(dir, "directive requires zero parameters") + } + if err := dec.unmarshalBlock(dir.Children, v); err != nil { + return err + } + case reflect.Struct: + si, err := getStructInfo(t) + if err != nil { + return err + } + + if si.param >= 0 { + if err := unmarshalParamList(dir, v.Field(si.param)); err != nil { + return err + } + } else { + if len(dir.Params) > 0 { + return newUnmarshalDirectiveError(dir, "directive requires zero parameters") + } + } + + if err := dec.unmarshalBlock(dir.Children, v); err != nil { + return err + } + default: + if len(dir.Children) != 0 { + return newUnmarshalDirectiveError(dir, "directive requires zero children") + } + if err := unmarshalParamList(dir, v); err != nil { + return err + } + } + return nil +} + +func unmarshalParamList(dir *Directive, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice: + t := v.Type() + 
sv := reflect.MakeSlice(t, len(dir.Params), len(dir.Params)) + for i, param := range dir.Params { + if err := unmarshalParam(param, sv.Index(i)); err != nil { + return newUnmarshalParamError(dir, i, err) + } + } + v.Set(sv) + case reflect.Array: + if len(dir.Params) != v.Len() { + return newUnmarshalDirectiveError(dir, fmt.Sprintf("directive requires exactly %v parameters", v.Len())) + } + for i, param := range dir.Params { + if err := unmarshalParam(param, v.Index(i)); err != nil { + return newUnmarshalParamError(dir, i, err) + } + } + default: + if len(dir.Params) != 1 { + return newUnmarshalDirectiveError(dir, "directive requires exactly one parameter") + } + if err := unmarshalParam(dir.Params[0], v); err != nil { + return newUnmarshalParamError(dir, 0, err) + } + } + + return nil +} + +func unmarshalParam(param string, v reflect.Value) error { + v = unwrapPointers(v) + t := v.Type() + + // TODO: improve our logic following: + // https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/encoding/json/decode.go;drc=b9b8cecbfc72168ca03ad586cc2ed52b0e8db409;l=421 + if v.CanAddr() { + if v, ok := v.Addr().Interface().(encoding.TextUnmarshaler); ok { + return v.UnmarshalText([]byte(param)) + } + } + + switch v.Kind() { + case reflect.String: + v.Set(reflect.ValueOf(param)) + case reflect.Bool: + switch param { + case "true": + v.Set(reflect.ValueOf(true)) + case "false": + v.Set(reflect.ValueOf(false)) + default: + return fmt.Errorf("invalid bool parameter %q", param) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i, err := strconv.ParseInt(param, 10, t.Bits()) + if err != nil { + return fmt.Errorf("invalid %v parameter: %v", t, err) + } + v.Set(reflect.ValueOf(i).Convert(t)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + u, err := strconv.ParseUint(param, 10, t.Bits()) + if err != nil { + return fmt.Errorf("invalid %v parameter: %v", t, err) + } + v.Set(reflect.ValueOf(u).Convert(t)) + 
case reflect.Float32, reflect.Float64: + f, err := strconv.ParseFloat(param, t.Bits()) + if err != nil { + return fmt.Errorf("invalid %v parameter: %v", t, err) + } + v.Set(reflect.ValueOf(f).Convert(t)) + default: + return fmt.Errorf("unsupported type for unmarshaling parameter: %v", t) + } + + return nil +} + +func unwrapPointers(v reflect.Value) reflect.Value { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v +} + +func clearMap(v reflect.Value) { + for _, k := range v.MapKeys() { + v.SetMapIndex(k, reflect.Value{}) + } +} + +type unmarshalDirectiveError struct { + lineno int + name string + msg string +} + +func newUnmarshalDirectiveError(dir *Directive, msg string) *unmarshalDirectiveError { + return &unmarshalDirectiveError{ + name: dir.Name, + lineno: dir.lineno, + msg: msg, + } +} + +func (err *unmarshalDirectiveError) Error() string { + return fmt.Sprintf("line %v, directive %q: %v", err.lineno, err.name, err.msg) +} + +type unmarshalParamError struct { + lineno int + directive string + paramIndex int + err error +} + +func newUnmarshalParamError(dir *Directive, paramIndex int, err error) *unmarshalParamError { + return &unmarshalParamError{ + directive: dir.Name, + lineno: dir.lineno, + paramIndex: paramIndex, + err: err, + } +} + +func (err *unmarshalParamError) Error() string { + return fmt.Sprintf("line %v, directive %q, parameter %v: %v", err.lineno, err.directive, err.paramIndex+1, err.err) +} diff --git a/forged/internal/common/scfg/writer.go b/forged/internal/common/scfg/writer.go new file mode 100644 index 0000000..02a07fe --- /dev/null +++ b/forged/internal/common/scfg/writer.go @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser + +package scfg + +import ( + "errors" + "io" + "strings" +) + +var errDirEmptyName = errors.New("scfg: directive with empty name") + +// Write writes a parsed configuration to the provided 
io.Writer. +func Write(w io.Writer, blk Block) error { + enc := newEncoder(w) + err := enc.encodeBlock(blk) + return err +} + +// encoder write SCFG directives to an output stream. +type encoder struct { + w io.Writer + lvl int + err error +} + +// newEncoder returns a new encoder that writes to w. +func newEncoder(w io.Writer) *encoder { + return &encoder{w: w} +} + +func (enc *encoder) push() { + enc.lvl++ +} + +func (enc *encoder) pop() { + enc.lvl-- +} + +func (enc *encoder) writeIndent() { + for i := 0; i < enc.lvl; i++ { + enc.write([]byte("\t")) + } +} + +func (enc *encoder) write(p []byte) { + if enc.err != nil { + return + } + _, enc.err = enc.w.Write(p) +} + +func (enc *encoder) encodeBlock(blk Block) error { + for _, dir := range blk { + if err := enc.encodeDir(*dir); err != nil { + return err + } + } + return enc.err +} + +func (enc *encoder) encodeDir(dir Directive) error { + if enc.err != nil { + return enc.err + } + + if dir.Name == "" { + enc.err = errDirEmptyName + return enc.err + } + + enc.writeIndent() + enc.write([]byte(maybeQuote(dir.Name))) + for _, p := range dir.Params { + enc.write([]byte(" ")) + enc.write([]byte(maybeQuote(p))) + } + + if len(dir.Children) > 0 { + enc.write([]byte(" {\n")) + enc.push() + if err := enc.encodeBlock(dir.Children); err != nil { + return err + } + enc.pop() + + enc.writeIndent() + enc.write([]byte("}")) + } + enc.write([]byte("\n")) + + return enc.err +} + +const specialChars = "\"\\\r\n'{} \t" + +func maybeQuote(s string) string { + if s == "" || strings.ContainsAny(s, specialChars) { + var sb strings.Builder + sb.WriteByte('"') + for _, ch := range s { + if strings.ContainsRune(`"\`, ch) { + sb.WriteByte('\\') + } + sb.WriteRune(ch) + } + sb.WriteByte('"') + return sb.String() + } + return s +} diff --git a/forged/internal/config/config.go b/forged/internal/config/config.go new file mode 100644 index 0000000..da28e05 --- /dev/null +++ b/forged/internal/config/config.go @@ -0,0 +1,61 @@ +package config + 
+import ( + "bufio" + "fmt" + "log/slog" + "os" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/scfg" + "go.lindenii.runxiyu.org/forge/forged/internal/database" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/hooks" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/lmtp" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/ssh" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web" + "go.lindenii.runxiyu.org/forge/forged/internal/ipc/irc" +) + +type Config struct { + DB database.Config `scfg:"db"` + Web web.Config `scfg:"web"` + Hooks hooks.Config `scfg:"hooks"` + LMTP lmtp.Config `scfg:"lmtp"` + SSH ssh.Config `scfg:"ssh"` + IRC irc.Config `scfg:"irc"` + Git struct { + RepoDir string `scfg:"repo_dir"` + Socket string `scfg:"socket"` + } `scfg:"git"` + General struct { + Title string `scfg:"title"` + } `scfg:"general"` + Pprof struct { + Net string `scfg:"net"` + Addr string `scfg:"addr"` + } `scfg:"pprof"` +} + +func Open(path string) (config Config, err error) { + var configFile *os.File + + configFile, err = os.Open(path) //#nosec G304 + if err != nil { + err = fmt.Errorf("open config file: %w", err) + return config, err + } + defer func() { + _ = configFile.Close() + }() + + decoder := scfg.NewDecoder(bufio.NewReader(configFile)) + err = decoder.Decode(&config) + if err != nil { + err = fmt.Errorf("decode config file: %w", err) + return config, err + } + for _, u := range decoder.UnknownDirectives() { + slog.Warn("unknown configuration directive", "directive", u) + } + + return config, err +} diff --git a/forged/internal/database/config.go b/forged/internal/database/config.go new file mode 100644 index 0000000..3697693 --- /dev/null +++ b/forged/internal/database/config.go @@ -0,0 +1,5 @@ +package database + +type Config struct { + Conn string `scfg:"conn"` +} diff --git a/forged/internal/database/database.go b/forged/internal/database/database.go index b995adc..093ed8f 100644 --- a/forged/internal/database/database.go 
+++ b/forged/internal/database/database.go @@ -6,6 +6,7 @@ package database import ( "context" + "fmt" "github.com/jackc/pgx/v5/pgxpool" ) @@ -19,7 +20,10 @@ type Database struct { // Open opens a new database connection pool using the provided connection // string. It returns a Database instance and an error if any occurs. // It is run indefinitely in the background. -func Open(connString string) (Database, error) { - db, err := pgxpool.New(context.Background(), connString) +func Open(ctx context.Context, config Config) (Database, error) { + db, err := pgxpool.New(ctx, config.Conn) + if err != nil { + err = fmt.Errorf("create pgxpool: %w", err) + } return Database{db}, err } diff --git a/forged/internal/database/queries/.gitignore b/forged/internal/database/queries/.gitignore new file mode 100644 index 0000000..1307f6d --- /dev/null +++ b/forged/internal/database/queries/.gitignore @@ -0,0 +1 @@ +/*.go diff --git a/forged/internal/embed/.gitignore b/forged/internal/embed/.gitignore deleted file mode 100644 index 36bd410..0000000 --- a/forged/internal/embed/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -/hookc/hookc -/git2d/git2d -/static -/templates -/LICENSE* -/forged diff --git a/forged/internal/embed/embed.go b/forged/internal/embed/embed.go deleted file mode 100644 index f731538..0000000 --- a/forged/internal/embed/embed.go +++ /dev/null @@ -1,20 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -// Package embed provides embedded filesystems created in build-time. -package embed - -import "embed" - -// Source contains the licenses collected at build time. -// It is intended to be served to the user. -// -//go:embed LICENSE* -var Source embed.FS - -// Resources contains the templates and static files used by the web interface, -// as well as the git backend daemon and the hookc helper. 
-// -//go:embed forged/templates/* forged/static/* -//go:embed hookc/hookc git2d/git2d -var Resources embed.FS diff --git a/forged/internal/git2c/client.go b/forged/internal/git2c/client.go deleted file mode 100644 index ed9390c..0000000 --- a/forged/internal/git2c/client.go +++ /dev/null @@ -1,46 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -// Package git2c provides routines to interact with the git2d backend daemon. -package git2c - -import ( - "fmt" - "net" - - "go.lindenii.runxiyu.org/forge/forged/internal/bare" -) - -// Client represents a connection to the git2d backend daemon. -type Client struct { - socketPath string - conn net.Conn - writer *bare.Writer - reader *bare.Reader -} - -// NewClient establishes a connection to a git2d socket and returns a new Client. -func NewClient(socketPath string) (*Client, error) { - conn, err := net.Dial("unix", socketPath) - if err != nil { - return nil, fmt.Errorf("git2d connection failed: %w", err) - } - - writer := bare.NewWriter(conn) - reader := bare.NewReader(conn) - - return &Client{ - socketPath: socketPath, - conn: conn, - writer: writer, - reader: reader, - }, nil -} - -// Close terminates the underlying socket connection. -func (c *Client) Close() error { - if c.conn != nil { - return c.conn.Close() - } - return nil -} diff --git a/forged/internal/git2c/cmd_index.go b/forged/internal/git2c/cmd_index.go deleted file mode 100644 index 8862b2c..0000000 --- a/forged/internal/git2c/cmd_index.go +++ /dev/null @@ -1,65 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package git2c - -import ( - "encoding/hex" - "errors" - "fmt" - "io" -) - -// CmdIndex requests a repository index from git2d and returns the list of commits -// and the contents of a README file if available. 
-func (c *Client) CmdIndex(repoPath string) ([]Commit, *FilenameContents, error) { - if err := c.writer.WriteData([]byte(repoPath)); err != nil { - return nil, nil, fmt.Errorf("sending repo path failed: %w", err) - } - if err := c.writer.WriteUint(1); err != nil { - return nil, nil, fmt.Errorf("sending command failed: %w", err) - } - - status, err := c.reader.ReadUint() - if err != nil { - return nil, nil, fmt.Errorf("reading status failed: %w", err) - } - if status != 0 { - return nil, nil, fmt.Errorf("git2d error: %d", status) - } - - // README - readmeRaw, err := c.reader.ReadData() - if err != nil { - readmeRaw = nil - } - - readmeFilename := "README.md" // TODO - readme := &FilenameContents{Filename: readmeFilename, Content: readmeRaw} - - // Commits - var commits []Commit - for { - id, err := c.reader.ReadData() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return nil, nil, fmt.Errorf("reading commit ID failed: %w", err) - } - title, _ := c.reader.ReadData() - authorName, _ := c.reader.ReadData() - authorEmail, _ := c.reader.ReadData() - authorDate, _ := c.reader.ReadData() - - commits = append(commits, Commit{ - Hash: hex.EncodeToString(id), - Author: string(authorName), - Email: string(authorEmail), - Date: string(authorDate), - Message: string(title), - }) - } - - return commits, readme, nil -} diff --git a/forged/internal/git2c/cmd_treeraw.go b/forged/internal/git2c/cmd_treeraw.go deleted file mode 100644 index 492cb84..0000000 --- a/forged/internal/git2c/cmd_treeraw.go +++ /dev/null @@ -1,94 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package git2c - -import ( - "errors" - "fmt" - "io" -) - -// CmdTreeRaw queries git2d for a tree or blob object at the given path within the repository. -// It returns either a directory listing or the contents of a file. 
-func (c *Client) CmdTreeRaw(repoPath, pathSpec string) ([]TreeEntry, string, error) { - if err := c.writer.WriteData([]byte(repoPath)); err != nil { - return nil, "", fmt.Errorf("sending repo path failed: %w", err) - } - if err := c.writer.WriteUint(2); err != nil { - return nil, "", fmt.Errorf("sending command failed: %w", err) - } - if err := c.writer.WriteData([]byte(pathSpec)); err != nil { - return nil, "", fmt.Errorf("sending path failed: %w", err) - } - - status, err := c.reader.ReadUint() - if err != nil { - return nil, "", fmt.Errorf("reading status failed: %w", err) - } - - switch status { - case 0: - kind, err := c.reader.ReadUint() - if err != nil { - return nil, "", fmt.Errorf("reading object kind failed: %w", err) - } - - switch kind { - case 1: - // Tree - count, err := c.reader.ReadUint() - if err != nil { - return nil, "", fmt.Errorf("reading entry count failed: %w", err) - } - - var files []TreeEntry - for range count { - typeCode, err := c.reader.ReadUint() - if err != nil { - return nil, "", fmt.Errorf("error reading entry type: %w", err) - } - mode, err := c.reader.ReadUint() - if err != nil { - return nil, "", fmt.Errorf("error reading entry mode: %w", err) - } - size, err := c.reader.ReadUint() - if err != nil { - return nil, "", fmt.Errorf("error reading entry size: %w", err) - } - name, err := c.reader.ReadData() - if err != nil { - return nil, "", fmt.Errorf("error reading entry name: %w", err) - } - - files = append(files, TreeEntry{ - Name: string(name), - Mode: fmt.Sprintf("%06o", mode), - Size: size, - IsFile: typeCode == 2, - IsSubtree: typeCode == 1, - }) - } - - return files, "", nil - - case 2: - // Blob - content, err := c.reader.ReadData() - if err != nil && !errors.Is(err, io.EOF) { - return nil, "", fmt.Errorf("error reading file content: %w", err) - } - - return nil, string(content), nil - - default: - return nil, "", fmt.Errorf("unknown kind: %d", kind) - } - - case 3: - return nil, "", fmt.Errorf("path not found: %s", 
pathSpec) - - default: - return nil, "", fmt.Errorf("unknown status code: %d", status) - } -} diff --git a/forged/internal/git2c/git_types.go b/forged/internal/git2c/git_types.go deleted file mode 100644 index bf13f05..0000000 --- a/forged/internal/git2c/git_types.go +++ /dev/null @@ -1,28 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package git2c - -// Commit represents a single commit object retrieved from the git2d daemon. -type Commit struct { - Hash string - Author string - Email string - Date string - Message string -} - -// FilenameContents holds the filename and byte contents of a file, such as a README. -type FilenameContents struct { - Filename string - Content []byte -} - -// TreeEntry represents a file or directory entry within a Git tree object. -type TreeEntry struct { - Name string - Mode string - Size uint64 - IsFile bool - IsSubtree bool -} diff --git a/forged/internal/git2c/perror.go b/forged/internal/git2c/perror.go deleted file mode 100644 index 96bffd5..0000000 --- a/forged/internal/git2c/perror.go +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -// TODO: Make the C part report detailed error messages too - -package git2c - -import "errors" - -var ( - Success error - ErrUnknown = errors.New("git2c: unknown error") - ErrPath = errors.New("git2c: get tree entry by path failed") - ErrRevparse = errors.New("git2c: revparse failed") - ErrReadme = errors.New("git2c: no readme") - ErrBlobExpected = errors.New("git2c: blob expected") - ErrEntryToObject = errors.New("git2c: tree entry to object conversion failed") - ErrBlobRawContent = errors.New("git2c: get blob raw content failed") - ErrRevwalk = errors.New("git2c: revwalk failed") - ErrRevwalkPushHead = errors.New("git2c: revwalk push head failed") - ErrBareProto = errors.New("git2c: bare protocol error") -) - -func Perror(errno uint) error { - switch 
errno { - case 0: - return Success - case 3: - return ErrPath - case 4: - return ErrRevparse - case 5: - return ErrReadme - case 6: - return ErrBlobExpected - case 7: - return ErrEntryToObject - case 8: - return ErrBlobRawContent - case 9: - return ErrRevwalk - case 10: - return ErrRevwalkPushHead - case 11: - return ErrBareProto - } - return ErrUnknown -} diff --git a/forged/internal/global/global.go b/forged/internal/global/global.go new file mode 100644 index 0000000..2aa8049 --- /dev/null +++ b/forged/internal/global/global.go @@ -0,0 +1,8 @@ +package global + +type Global struct { + ForgeTitle string + ForgeVersion string + SSHPubkey string + SSHFingerprint string +} diff --git a/forged/internal/humanize/bytes.go b/forged/internal/humanize/bytes.go deleted file mode 100644 index bea504c..0000000 --- a/forged/internal/humanize/bytes.go +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-FileCopyrightText: Copyright (c) 2005-2008 Dustin Sallings -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -// Package humanize provides functions to convert numbers into human-readable formats. -package humanize - -import ( - "fmt" - "math" -) - -// IBytes produces a human readable representation of an IEC size. 
-func IBytes(s uint64) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} - return humanateBytes(s, 1024, sizes) -} - -func humanateBytes(s uint64, base float64, sizes []string) string { - if s < 10 { - return fmt.Sprintf("%d B", s) - } - e := math.Floor(logn(float64(s), base)) - suffix := sizes[int(e)] - val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 - f := "%.0f %s" - if val < 10 { - f = "%.1f %s" - } - - return fmt.Sprintf(f, val, suffix) -} - -func logn(n, b float64) float64 { - return math.Log(n) / math.Log(b) -} diff --git a/forged/internal/incoming/hooks/config.go b/forged/internal/incoming/hooks/config.go new file mode 100644 index 0000000..0d23dc0 --- /dev/null +++ b/forged/internal/incoming/hooks/config.go @@ -0,0 +1,6 @@ +package hooks + +type Config struct { + Socket string `scfg:"socket"` + Execs string `scfg:"execs"` +} diff --git a/forged/internal/incoming/hooks/hooks.go b/forged/internal/incoming/hooks/hooks.go new file mode 100644 index 0000000..dfdf172 --- /dev/null +++ b/forged/internal/incoming/hooks/hooks.go @@ -0,0 +1,80 @@ +package hooks + +import ( + "context" + "errors" + "fmt" + "net" + "time" + + "github.com/gliderlabs/ssh" + "go.lindenii.runxiyu.org/forge/forged/internal/common/cmap" + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/global" +) + +type Server struct { + hookMap cmap.Map[string, hookInfo] + socketPath string + executablesPath string + global *global.Global +} +type hookInfo struct { + session ssh.Session + pubkey string + directAccess bool + repoPath string + userID int + userType string + repoID int + groupPath []string + repoName string + contribReq string +} + +func New(config Config, global *global.Global) (server *Server) { + return &Server{ + socketPath: config.Socket, + executablesPath: config.Execs, + hookMap: cmap.Map[string, hookInfo]{}, + global: global, + } +} + +func (server *Server) Run(ctx context.Context) 
error { + listener, _, err := misc.ListenUnixSocket(ctx, server.socketPath) + if err != nil { + return fmt.Errorf("listen unix socket for hooks: %w", err) + } + defer func() { + _ = listener.Close() + }() + + stop := context.AfterFunc(ctx, func() { + _ = listener.Close() + }) + defer stop() + + for { + conn, err := listener.Accept() + if err != nil { + if errors.Is(err, net.ErrClosed) || ctx.Err() != nil { + return nil + } + return fmt.Errorf("accept conn: %w", err) + } + + go server.handleConn(ctx, conn) + } +} + +func (server *Server) handleConn(ctx context.Context, conn net.Conn) { + defer func() { + _ = conn.Close() + }() + unblock := context.AfterFunc(ctx, func() { + _ = conn.SetDeadline(time.Now()) + _ = conn.Close() + }) + defer unblock() +} diff --git a/forged/internal/incoming/lmtp/config.go b/forged/internal/incoming/lmtp/config.go new file mode 100644 index 0000000..6241608 --- /dev/null +++ b/forged/internal/incoming/lmtp/config.go @@ -0,0 +1,9 @@ +package lmtp + +type Config struct { + Socket string `scfg:"socket"` + Domain string `scfg:"domain"` + MaxSize int64 `scfg:"max_size"` + WriteTimeout uint32 `scfg:"write_timeout"` + ReadTimeout uint32 `scfg:"read_timeout"` +} diff --git a/forged/internal/incoming/lmtp/lmtp.go b/forged/internal/incoming/lmtp/lmtp.go new file mode 100644 index 0000000..a7782a2 --- /dev/null +++ b/forged/internal/incoming/lmtp/lmtp.go @@ -0,0 +1,70 @@ +package lmtp + +import ( + "context" + "errors" + "fmt" + "net" + "time" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/global" +) + +type Server struct { + socket string + domain string + maxSize int64 + writeTimeout uint32 + readTimeout uint32 + global *global.Global +} + +func New(config Config, global *global.Global) (server *Server) { + return &Server{ + socket: config.Socket, + domain: config.Domain, + maxSize: config.MaxSize, + writeTimeout: config.WriteTimeout, + readTimeout: config.ReadTimeout, + global: 
global, + } +} + +func (server *Server) Run(ctx context.Context) error { + listener, _, err := misc.ListenUnixSocket(ctx, server.socket) + if err != nil { + return fmt.Errorf("listen unix socket for LMTP: %w", err) + } + defer func() { + _ = listener.Close() + }() + + stop := context.AfterFunc(ctx, func() { + _ = listener.Close() + }) + defer stop() + + for { + conn, err := listener.Accept() + if err != nil { + if errors.Is(err, net.ErrClosed) || ctx.Err() != nil { + return nil + } + return fmt.Errorf("accept conn: %w", err) + } + + go server.handleConn(ctx, conn) + } +} + +func (server *Server) handleConn(ctx context.Context, conn net.Conn) { + defer func() { + _ = conn.Close() + }() + unblock := context.AfterFunc(ctx, func() { + _ = conn.SetDeadline(time.Now()) + _ = conn.Close() + }) + defer unblock() +} diff --git a/forged/internal/incoming/ssh/config.go b/forged/internal/incoming/ssh/config.go new file mode 100644 index 0000000..7d22cc1 --- /dev/null +++ b/forged/internal/incoming/ssh/config.go @@ -0,0 +1,9 @@ +package ssh + +type Config struct { + Net string `scfg:"net"` + Addr string `scfg:"addr"` + Key string `scfg:"key"` + Root string `scfg:"root"` + ShutdownTimeout uint32 `scfg:"shutdown_timeout"` +} diff --git a/forged/internal/incoming/ssh/ssh.go b/forged/internal/incoming/ssh/ssh.go new file mode 100644 index 0000000..527cd28 --- /dev/null +++ b/forged/internal/incoming/ssh/ssh.go @@ -0,0 +1,89 @@ +package ssh + +import ( + "context" + "errors" + "fmt" + "os" + "time" + + gliderssh "github.com/gliderlabs/ssh" + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/global" + gossh "golang.org/x/crypto/ssh" +) + +type Server struct { + gliderServer *gliderssh.Server + privkey gossh.Signer + net string + addr string + root string + shutdownTimeout uint32 + global *global.Global +} + +func New(config Config, global *global.Global) (server *Server, err error) { + server = &Server{ + net: config.Net, + 
addr: config.Addr, + root: config.Root, + shutdownTimeout: config.ShutdownTimeout, + global: global, + } //exhaustruct:ignore + + var privkeyBytes []byte + + privkeyBytes, err = os.ReadFile(config.Key) + if err != nil { + return server, fmt.Errorf("read SSH private key: %w", err) + } + + server.privkey, err = gossh.ParsePrivateKey(privkeyBytes) + if err != nil { + return server, fmt.Errorf("parse SSH private key: %w", err) + } + + server.global.SSHPubkey = misc.BytesToString(gossh.MarshalAuthorizedKey(server.privkey.PublicKey())) + server.global.SSHFingerprint = gossh.FingerprintSHA256(server.privkey.PublicKey()) + + server.gliderServer = &gliderssh.Server{ + Handler: handle, + PublicKeyHandler: func(ctx gliderssh.Context, key gliderssh.PublicKey) bool { return true }, + KeyboardInteractiveHandler: func(ctx gliderssh.Context, challenge gossh.KeyboardInteractiveChallenge) bool { return true }, + } //exhaustruct:ignore + server.gliderServer.AddHostKey(server.privkey) + + return server, nil +} + +func (server *Server) Run(ctx context.Context) (err error) { + listener, err := misc.Listen(ctx, server.net, server.addr) + if err != nil { + return fmt.Errorf("listen for SSH: %w", err) + } + defer func() { + _ = listener.Close() + }() + + stop := context.AfterFunc(ctx, func() { + shCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), time.Duration(server.shutdownTimeout)*time.Second) + defer cancel() + _ = server.gliderServer.Shutdown(shCtx) + _ = listener.Close() + }) + defer stop() + + err = server.gliderServer.Serve(listener) + if err != nil { + if errors.Is(err, gliderssh.ErrServerClosed) || ctx.Err() != nil { + return nil + } + return fmt.Errorf("serve SSH: %w", err) + } + panic("unreachable") +} + +func handle(session gliderssh.Session) { + panic("SSH server handler not implemented yet") +} diff --git a/forged/internal/incoming/web/authn.go b/forged/internal/incoming/web/authn.go new file mode 100644 index 0000000..46263ee --- /dev/null +++ 
b/forged/internal/incoming/web/authn.go @@ -0,0 +1,33 @@ +package web + +import ( + "crypto/sha256" + "errors" + "fmt" + "net/http" + + "github.com/jackc/pgx/v5" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" +) + +func userResolver(r *http.Request) (string, string, error) { + cookie, err := r.Cookie("session") + if err != nil { + if errors.Is(err, http.ErrNoCookie) { + return "", "", nil + } + return "", "", err + } + + tokenHash := sha256.Sum256([]byte(cookie.Value)) + + session, err := types.Base(r).Queries.GetUserFromSession(r.Context(), tokenHash[:]) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return "", "", nil + } + return "", "", err + } + + return fmt.Sprint(session.UserID), session.Username, nil +} diff --git a/forged/internal/incoming/web/config.go b/forged/internal/incoming/web/config.go new file mode 100644 index 0000000..8d32b34 --- /dev/null +++ b/forged/internal/incoming/web/config.go @@ -0,0 +1,16 @@ +package web + +type Config struct { + Net string `scfg:"net"` + Addr string `scfg:"addr"` + Root string `scfg:"root"` + CookieExpiry int `scfg:"cookie_expiry"` + ReadTimeout uint32 `scfg:"read_timeout"` + WriteTimeout uint32 `scfg:"write_timeout"` + IdleTimeout uint32 `scfg:"idle_timeout"` + MaxHeaderBytes int `scfg:"max_header_bytes"` + ReverseProxy bool `scfg:"reverse_proxy"` + ShutdownTimeout uint32 `scfg:"shutdown_timeout"` + TemplatesPath string `scfg:"templates_path"` + StaticPath string `scfg:"static_path"` +} diff --git a/forged/internal/incoming/web/handler.go b/forged/internal/incoming/web/handler.go new file mode 100644 index 0000000..20f7e79 --- /dev/null +++ b/forged/internal/incoming/web/handler.go @@ -0,0 +1,77 @@ +package web + +import ( + "html/template" + "net/http" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + "go.lindenii.runxiyu.org/forge/forged/internal/global" + handlers 
"go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/handlers" + repoHandlers "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/handlers/repo" + specialHandlers "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/handlers/special" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/templates" +) + +type handler struct { + r *Router +} + +func NewHandler(cfg Config, global *global.Global, queries *queries.Queries) *handler { + h := &handler{r: NewRouter().ReverseProxy(cfg.ReverseProxy).Global(global).Queries(queries).UserResolver(userResolver)} + + staticFS := http.FileServer(http.Dir(cfg.StaticPath)) + h.r.ANYHTTP("-/static/*rest", + http.StripPrefix("/-/static/", staticFS), + WithDirIfEmpty("rest"), + ) + + funcs := template.FuncMap{ + "path_escape": misc.PathEscape, + "query_escape": misc.QueryEscape, + "minus": misc.Minus, + "first_line": misc.FirstLine, + "dereference_error": misc.DereferenceOrZero[error], + } + t := templates.MustParseDir(cfg.TemplatesPath, funcs) + renderer := templates.New(t) + + indexHTTP := handlers.NewIndexHTTP(renderer) + loginHTTP := specialHandlers.NewLoginHTTP(renderer, cfg.CookieExpiry) + groupHTTP := handlers.NewGroupHTTP(renderer) + repoHTTP := repoHandlers.NewHTTP(renderer) + notImpl := handlers.NewNotImplementedHTTP(renderer) + + // Index + h.r.GET("/", indexHTTP.Index) + + // Top-level utilities + h.r.ANY("-/login", loginHTTP.Login) + h.r.ANY("-/users", notImpl.Handle) + + // Group index + h.r.GET("@group/", groupHTTP.Index) + h.r.POST("@group/", groupHTTP.Post) + + // Repo index + h.r.GET("@group/-/repos/:repo/", repoHTTP.Index) + + // Repo (not implemented yet) + h.r.ANY("@group/-/repos/:repo/info", notImpl.Handle) + h.r.ANY("@group/-/repos/:repo/git-upload-pack", notImpl.Handle) + + // Repo features + h.r.GET("@group/-/repos/:repo/branches/", notImpl.Handle) + h.r.GET("@group/-/repos/:repo/log/", notImpl.Handle) + h.r.GET("@group/-/repos/:repo/commit/:commit", notImpl.Handle) + 
h.r.GET("@group/-/repos/:repo/tree/*rest", repoHTTP.Tree, WithDirIfEmpty("rest")) + h.r.GET("@group/-/repos/:repo/raw/*rest", repoHTTP.Raw, WithDirIfEmpty("rest")) + h.r.GET("@group/-/repos/:repo/contrib/", notImpl.Handle) + h.r.GET("@group/-/repos/:repo/contrib/:mr", notImpl.Handle) + + return h +} + +func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.r.ServeHTTP(w, r) +} diff --git a/forged/internal/incoming/web/handlers/group.go b/forged/internal/incoming/web/handlers/group.go new file mode 100644 index 0000000..3201491 --- /dev/null +++ b/forged/internal/incoming/web/handlers/group.go @@ -0,0 +1,92 @@ +package handlers + +import ( + "log/slog" + "net/http" + "strconv" + + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/templates" + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" +) + +type GroupHTTP struct { + r templates.Renderer +} + +func NewGroupHTTP(r templates.Renderer) *GroupHTTP { + return &GroupHTTP{ + r: r, + } +} + +func (h *GroupHTTP) Index(w http.ResponseWriter, r *http.Request, _ wtypes.Vars) { + base := wtypes.Base(r) + userID, err := strconv.ParseInt(base.UserID, 10, 64) + if err != nil { + userID = 0 + } + + queryParams := queries.GetGroupByPathParams{ + Column1: base.URLSegments, + UserID: userID, + } + p, err := base.Queries.GetGroupByPath(r.Context(), queryParams) + if err != nil { + slog.Error("failed to get group ID by path", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + return + } + subgroups, err := base.Queries.GetSubgroups(r.Context(), &p.ID) + if err != nil { + slog.Error("failed to get subgroups", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + // TODO: gracefully fail this part of the page + } + repos, err := base.Queries.GetReposInGroup(r.Context(), p.ID) + if err != nil { + slog.Error("failed to get repos in group", 
"error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + // TODO: gracefully fail this part of the page + } + err = h.r.Render(w, "group", struct { + BaseData *wtypes.BaseData + Subgroups []queries.GetSubgroupsRow + Repos []queries.GetReposInGroupRow + Description string + DirectAccess bool + }{ + BaseData: base, + Subgroups: subgroups, + Repos: repos, + Description: p.Description, + DirectAccess: p.HasRole, + }) + if err != nil { + slog.Error("failed to render index page", "error", err) + } +} + +func (h *GroupHTTP) Post(w http.ResponseWriter, r *http.Request, _ wtypes.Vars) { + base := wtypes.Base(r) + userID, err := strconv.ParseInt(base.UserID, 10, 64) + if err != nil { + userID = 0 + } + + queryParams := queries.GetGroupByPathParams{ + Column1: base.URLSegments, + UserID: userID, + } + p, err := base.Queries.GetGroupByPath(r.Context(), queryParams) + if err != nil { + slog.Error("failed to get group ID by path", "error", err) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + return + } + + if !p.HasRole { + http.Error(w, "You do not have the necessary permissions to create repositories in this group.", http.StatusForbidden) + return + } +} diff --git a/forged/internal/incoming/web/handlers/index.go b/forged/internal/incoming/web/handlers/index.go new file mode 100644 index 0000000..22e6201 --- /dev/null +++ b/forged/internal/incoming/web/handlers/index.go @@ -0,0 +1,40 @@ +package handlers + +import ( + "log" + "net/http" + + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/templates" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" +) + +type IndexHTTP struct { + r templates.Renderer +} + +func NewIndexHTTP(r templates.Renderer) *IndexHTTP { + return &IndexHTTP{ + r: r, + } +} + +func (h *IndexHTTP) Index(w http.ResponseWriter, r 
*http.Request, _ wtypes.Vars) { + groups, err := types.Base(r).Queries.GetRootGroups(r.Context()) + if err != nil { + http.Error(w, "failed to get root groups", http.StatusInternalServerError) + log.Println("failed to get root groups", "error", err) + return + } + err = h.r.Render(w, "index", struct { + BaseData *types.BaseData + Groups []queries.GetRootGroupsRow + }{ + BaseData: types.Base(r), + Groups: groups, + }) + if err != nil { + log.Println("failed to render index page", "error", err) + } +} diff --git a/forged/internal/incoming/web/handlers/not_implemented.go b/forged/internal/incoming/web/handlers/not_implemented.go new file mode 100644 index 0000000..6813c88 --- /dev/null +++ b/forged/internal/incoming/web/handlers/not_implemented.go @@ -0,0 +1,22 @@ +package handlers + +import ( + "net/http" + + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/templates" + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" +) + +type NotImplementedHTTP struct { + r templates.Renderer +} + +func NewNotImplementedHTTP(r templates.Renderer) *NotImplementedHTTP { + return &NotImplementedHTTP{ + r: r, + } +} + +func (h *NotImplementedHTTP) Handle(w http.ResponseWriter, _ *http.Request, _ wtypes.Vars) { + http.Error(w, "not implemented", http.StatusNotImplemented) +} diff --git a/forged/internal/incoming/web/handlers/repo/handler.go b/forged/internal/incoming/web/handlers/repo/handler.go new file mode 100644 index 0000000..2881d7d --- /dev/null +++ b/forged/internal/incoming/web/handlers/repo/handler.go @@ -0,0 +1,15 @@ +package repo + +import ( + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/templates" +) + +type HTTP struct { + r templates.Renderer +} + +func NewHTTP(r templates.Renderer) *HTTP { + return &HTTP{ + r: r, + } +} diff --git a/forged/internal/incoming/web/handlers/repo/index.go b/forged/internal/incoming/web/handlers/repo/index.go new file mode 100644 index 0000000..1a804b2 --- /dev/null +++ 
b/forged/internal/incoming/web/handlers/repo/index.go @@ -0,0 +1,20 @@ +package repo + +import ( + "net/http" + "strings" + + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" +) + +func (h *HTTP) Index(w http.ResponseWriter, r *http.Request, v wtypes.Vars) { + base := wtypes.Base(r) + repo := v["repo"] + _ = h.r.Render(w, "repo/index.html", struct { + Group string + Repo string + }{ + Group: "/" + strings.Join(base.GroupPath, "/") + "/", + Repo: repo, + }) +} diff --git a/forged/internal/incoming/web/handlers/repo/raw.go b/forged/internal/incoming/web/handlers/repo/raw.go new file mode 100644 index 0000000..e421f45 --- /dev/null +++ b/forged/internal/incoming/web/handlers/repo/raw.go @@ -0,0 +1,19 @@ +package repo + +import ( + "fmt" + "net/http" + "strings" + + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" +) + +func (h *HTTP) Raw(w http.ResponseWriter, r *http.Request, v wtypes.Vars) { + base := wtypes.Base(r) + repo := v["repo"] + rest := v["rest"] + if base.DirMode && rest != "" && !strings.HasSuffix(rest, "/") { + rest += "/" + } + _, _ = w.Write([]byte(fmt.Sprintf("raw: repo=%q path=%q", repo, rest))) +} diff --git a/forged/internal/incoming/web/handlers/repo/tree.go b/forged/internal/incoming/web/handlers/repo/tree.go new file mode 100644 index 0000000..3432244 --- /dev/null +++ b/forged/internal/incoming/web/handlers/repo/tree.go @@ -0,0 +1,19 @@ +package repo + +import ( + "fmt" + "net/http" + "strings" + + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" +) + +func (h *HTTP) Tree(w http.ResponseWriter, r *http.Request, v wtypes.Vars) { + base := wtypes.Base(r) + repo := v["repo"] + rest := v["rest"] // may be "" + if base.DirMode && rest != "" && !strings.HasSuffix(rest, "/") { + rest += "/" + } + _, _ = w.Write([]byte(fmt.Sprintf("tree: repo=%q path=%q", repo, rest))) +} diff --git a/forged/internal/incoming/web/handlers/special/login.go 
b/forged/internal/incoming/web/handlers/special/login.go new file mode 100644 index 0000000..0287c47 --- /dev/null +++ b/forged/internal/incoming/web/handlers/special/login.go @@ -0,0 +1,115 @@ +package handlers + +import ( + "crypto/rand" + "crypto/sha256" + "errors" + "log" + "net/http" + "time" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" + "go.lindenii.runxiyu.org/forge/forged/internal/common/argon2id" + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/templates" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" +) + +type LoginHTTP struct { + r templates.Renderer + cookieExpiry int +} + +func NewLoginHTTP(r templates.Renderer, cookieExpiry int) *LoginHTTP { + return &LoginHTTP{ + r: r, + cookieExpiry: cookieExpiry, + } +} + +func (h *LoginHTTP) Login(w http.ResponseWriter, r *http.Request, _ wtypes.Vars) { + renderLoginPage := func(loginError string) bool { + err := h.r.Render(w, "login", struct { + BaseData *types.BaseData + LoginError string + }{ + BaseData: types.Base(r), + LoginError: loginError, + }) + if err != nil { + log.Println("failed to render login page", "error", err) + http.Error(w, "Failed to render login page", http.StatusInternalServerError) + return true + } + return false + } + + if r.Method == http.MethodGet { + renderLoginPage("") + return + } + + username := r.PostFormValue("username") + password := r.PostFormValue("password") + + userCreds, err := types.Base(r).Queries.GetUserCreds(r.Context(), &username) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + renderLoginPage("User not found") + return + } + log.Println("failed to get user credentials", "error", err) + http.Error(w, "Failed to get user credentials", http.StatusInternalServerError) + return + } + + if 
userCreds.PasswordHash == "" { + renderLoginPage("No password set for this user") + return + } + + passwordMatches, err := argon2id.ComparePasswordAndHash(password, userCreds.PasswordHash) + if err != nil { + log.Println("failed to compare password and hash", "error", err) + http.Error(w, "Failed to verify password", http.StatusInternalServerError) + return + } + + if !passwordMatches { + renderLoginPage("Invalid password") + return + } + + cookieValue := rand.Text() + + now := time.Now() + expiry := now.Add(time.Duration(h.cookieExpiry) * time.Second) + + cookie := &http.Cookie{ + Name: "session", + Value: cookieValue, + SameSite: http.SameSiteLaxMode, + HttpOnly: true, + Secure: false, // TODO + Expires: expiry, + Path: "/", + } //exhaustruct:ignore + + http.SetCookie(w, cookie) + + tokenHash := sha256.Sum256(misc.StringToBytes(cookieValue)) + + err = types.Base(r).Queries.InsertSession(r.Context(), queries.InsertSessionParams{ + UserID: userCreds.ID, + TokenHash: tokenHash[:], + ExpiresAt: pgtype.Timestamptz{ + Time: expiry, + Valid: true, + }, + }) + + http.Redirect(w, r, "/", http.StatusSeeOther) +} diff --git a/forged/internal/incoming/web/router.go b/forged/internal/incoming/web/router.go new file mode 100644 index 0000000..8356191 --- /dev/null +++ b/forged/internal/incoming/web/router.go @@ -0,0 +1,428 @@ +package web + +import ( + "fmt" + "net/http" + "net/url" + "sort" + "strings" + + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + "go.lindenii.runxiyu.org/forge/forged/internal/global" + wtypes "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web/types" +) + +type UserResolver func(*http.Request) (id string, username string, err error) + +type ErrorRenderers struct { + BadRequest func(http.ResponseWriter, *wtypes.BaseData, string) + BadRequestColon func(http.ResponseWriter, *wtypes.BaseData) + NotFound func(http.ResponseWriter, *wtypes.BaseData) + ServerError func(http.ResponseWriter, *wtypes.BaseData, string) +} + +type 
dirPolicy int + +const ( + dirIgnore dirPolicy = iota + dirRequire + dirForbid + dirRequireIfEmpty +) + +type patKind uint8 + +const ( + lit patKind = iota + param + splat + group // @group, must be first token +) + +type patSeg struct { + kind patKind + lit string + key string +} + +type route struct { + method string + rawPattern string + wantDir dirPolicy + ifEmptyKey string + segs []patSeg + h wtypes.HandlerFunc + hh http.Handler + priority int +} + +type Router struct { + routes []route + errors ErrorRenderers + user UserResolver + global *global.Global + reverseProxy bool + queries *queries.Queries +} + +func NewRouter() *Router { return &Router{} } + +func (r *Router) Global(g *global.Global) *Router { + r.global = g + return r +} +func (r *Router) Queries(q *queries.Queries) *Router { + r.queries = q + return r +} +func (r *Router) ReverseProxy(enabled bool) *Router { r.reverseProxy = enabled; return r } +func (r *Router) Errors(e ErrorRenderers) *Router { r.errors = e; return r } +func (r *Router) UserResolver(u UserResolver) *Router { r.user = u; return r } + +type RouteOption func(*route) + +func WithDir() RouteOption { return func(rt *route) { rt.wantDir = dirRequire } } +func WithoutDir() RouteOption { return func(rt *route) { rt.wantDir = dirForbid } } +func WithDirIfEmpty(param string) RouteOption { + return func(rt *route) { rt.wantDir = dirRequireIfEmpty; rt.ifEmptyKey = param } +} + +func (r *Router) GET(pattern string, f wtypes.HandlerFunc, opts ...RouteOption) { + r.handle("GET", pattern, f, nil, opts...) +} + +func (r *Router) POST(pattern string, f wtypes.HandlerFunc, opts ...RouteOption) { + r.handle("POST", pattern, f, nil, opts...) +} + +func (r *Router) ANY(pattern string, f wtypes.HandlerFunc, opts ...RouteOption) { + r.handle("", pattern, f, nil, opts...) +} + +func (r *Router) ANYHTTP(pattern string, hh http.Handler, opts ...RouteOption) { + r.handle("", pattern, nil, hh, opts...) 
+} + +func (r *Router) handle(method, pattern string, f wtypes.HandlerFunc, hh http.Handler, opts ...RouteOption) { + want := dirIgnore + if strings.HasSuffix(pattern, "/") { + want = dirRequire + pattern = strings.TrimSuffix(pattern, "/") + } else if pattern != "" { + want = dirForbid + } + segs, prio := compilePattern(pattern) + rt := route{ + method: method, + rawPattern: pattern, + wantDir: want, + segs: segs, + h: f, + hh: hh, + priority: prio, + } + for _, o := range opts { + o(&rt) + } + r.routes = append(r.routes, rt) + + sort.SliceStable(r.routes, func(i, j int) bool { + return r.routes[i].priority > r.routes[j].priority + }) +} + +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + segments, dirMode, err := splitAndUnescapePath(req.URL.EscapedPath()) + if err != nil { + r.err400(w, &wtypes.BaseData{Global: r.global}, "Error parsing request URI: "+err.Error()) + return + } + for _, s := range segments { + if strings.Contains(s, ":") { + r.err400Colon(w, &wtypes.BaseData{Global: r.global}) + return + } + } + + bd := &wtypes.BaseData{ + Global: r.global, + URLSegments: segments, + DirMode: dirMode, + Queries: r.queries, + } + req = req.WithContext(wtypes.WithBaseData(req.Context(), bd)) + + bd.RefType, bd.RefName, err = GetParamRefTypeName(req) + if err != nil { + r.err400(w, bd, "Error parsing ref query parameters: "+err.Error()) + return + } + + if r.user != nil { + uid, uname, uerr := r.user(req) + if uerr != nil { + r.err500(w, bd, "Error getting user info from request: "+uerr.Error()) + return + } + bd.UserID = uid + bd.Username = uname + } + + method := req.Method + var pathMatched bool + var matchedRaw string + + for _, rt := range r.routes { + ok, vars, sepIdx := match(rt.segs, segments) + if !ok { + continue + } + pathMatched = true + matchedRaw = rt.rawPattern + + switch rt.wantDir { + case dirRequire: + if !dirMode && redirectAddSlash(w, req) { + return + } + case dirForbid: + if dirMode && redirectDropSlash(w, req) { + return 
+ } + case dirRequireIfEmpty: + if v := vars[rt.ifEmptyKey]; v == "" && !dirMode && redirectAddSlash(w, req) { + return + } + } + + bd.SeparatorIndex = sepIdx + if g := vars["group"]; g == "" { + bd.GroupPath = []string{} + } else { + bd.GroupPath = strings.Split(g, "/") + } + + if rt.method != "" && !(rt.method == method || (method == http.MethodHead && rt.method == http.MethodGet)) { + continue + } + + if rt.h != nil { + rt.h(w, req, wtypes.Vars(vars)) + } else if rt.hh != nil { + rt.hh.ServeHTTP(w, req) + } else { + r.err500(w, bd, "route has no handler") + } + return + } + + if pathMatched { + w.Header().Set("Allow", allowForPattern(r.routes, matchedRaw)) + http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + return + } + r.err404(w, bd) +} + +func compilePattern(pat string) ([]patSeg, int) { + if pat == "" || pat == "/" { + return nil, 1000 + } + pat = strings.Trim(pat, "/") + raw := strings.Split(pat, "/") + + segs := make([]patSeg, 0, len(raw)) + prio := 0 + for i, t := range raw { + switch { + case t == "@group": + if i != 0 { + segs = append(segs, patSeg{kind: lit, lit: t}) + prio += 10 + continue + } + segs = append(segs, patSeg{kind: group}) + prio += 1 + case strings.HasPrefix(t, ":"): + segs = append(segs, patSeg{kind: param, key: t[1:]}) + prio += 5 + case strings.HasPrefix(t, "*"): + segs = append(segs, patSeg{kind: splat, key: t[1:]}) + default: + segs = append(segs, patSeg{kind: lit, lit: t}) + prio += 10 + } + } + return segs, prio +} + +func match(pat []patSeg, segs []string) (bool, map[string]string, int) { + vars := make(map[string]string) + i := 0 + sepIdx := -1 + for pi := 0; pi < len(pat); pi++ { + ps := pat[pi] + switch ps.kind { + case group: + start := i + for i < len(segs) && segs[i] != "-" { + i++ + } + if start < i { + vars["group"] = strings.Join(segs[start:i], "/") + } else { + vars["group"] = "" + } + if i < len(segs) && segs[i] == "-" { + sepIdx = i + } + case lit: + if i >= len(segs) || 
segs[i] != ps.lit { + return false, nil, -1 + } + i++ + case param: + if i >= len(segs) { + return false, nil, -1 + } + vars[ps.key] = segs[i] + i++ + case splat: + if i < len(segs) { + vars[ps.key] = strings.Join(segs[i:], "/") + i = len(segs) + } else { + vars[ps.key] = "" + } + pi = len(pat) + } + } + if i != len(segs) { + return false, nil, -1 + } + return true, vars, sepIdx +} + +func splitAndUnescapePath(escaped string) ([]string, bool, error) { + if escaped == "" { + return nil, false, nil + } + dir := strings.HasSuffix(escaped, "/") + path := strings.Trim(escaped, "/") + if path == "" { + return []string{}, dir, nil + } + raw := strings.Split(path, "/") + out := make([]string, 0, len(raw)) + for _, seg := range raw { + u, err := url.PathUnescape(seg) + if err != nil { + return nil, dir, err + } + if u != "" { + out = append(out, u) + } + } + return out, dir, nil +} + +func redirectAddSlash(w http.ResponseWriter, r *http.Request) bool { + u := *r.URL + u.Path = u.EscapedPath() + "/" + http.Redirect(w, r, u.String(), http.StatusTemporaryRedirect) + return true +} + +func redirectDropSlash(w http.ResponseWriter, r *http.Request) bool { + u := *r.URL + u.Path = strings.TrimRight(u.EscapedPath(), "/") + if u.Path == "" { + u.Path = "/" + } + http.Redirect(w, r, u.String(), http.StatusTemporaryRedirect) + return true +} + +func allowForPattern(routes []route, raw string) string { + seen := map[string]struct{}{} + out := make([]string, 0, 4) + for _, rt := range routes { + if rt.rawPattern != raw || rt.method == "" { + continue + } + if _, ok := seen[rt.method]; ok { + continue + } + seen[rt.method] = struct{}{} + out = append(out, rt.method) + } + sort.Strings(out) + return strings.Join(out, ", ") +} + +func (r *Router) err400(w http.ResponseWriter, b *wtypes.BaseData, msg string) { + if r.errors.BadRequest != nil { + r.errors.BadRequest(w, b, msg) + return + } + http.Error(w, msg, http.StatusBadRequest) +} + +func (r *Router) err400Colon(w http.ResponseWriter, b 
*wtypes.BaseData) { + if r.errors.BadRequestColon != nil { + r.errors.BadRequestColon(w, b) + return + } + http.Error(w, "bad request", http.StatusBadRequest) +} + +func (r *Router) err404(w http.ResponseWriter, b *wtypes.BaseData) { + if r.errors.NotFound != nil { + r.errors.NotFound(w, b) + return + } + http.NotFound(w, nil) +} + +func (r *Router) err500(w http.ResponseWriter, b *wtypes.BaseData, msg string) { + if r.errors.ServerError != nil { + r.errors.ServerError(w, b, msg) + return + } + http.Error(w, msg, http.StatusInternalServerError) +} + +func GetParamRefTypeName(request *http.Request) (retRefType, retRefName string, err error) { + rawQuery := request.URL.RawQuery + queryValues, err := url.ParseQuery(rawQuery) + if err != nil { + return + } + done := false + for _, refType := range []string{"commit", "branch", "tag"} { + refName, ok := queryValues[refType] + if ok { + if done { + err = errDupRefSpec + return + } + done = true + if len(refName) != 1 { + err = errDupRefSpec + return + } + retRefName = refName[0] + retRefType = refType + } + } + if !done { + retRefType = "" + retRefName = "" + err = nil + } + return +} + +var ( + errDupRefSpec = fmt.Errorf("duplicate ref specifications") +) diff --git a/forged/internal/incoming/web/server.go b/forged/internal/incoming/web/server.go new file mode 100644 index 0000000..465657c --- /dev/null +++ b/forged/internal/incoming/web/server.go @@ -0,0 +1,70 @@ +package web + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "time" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + "go.lindenii.runxiyu.org/forge/forged/internal/global" +) + +type Server struct { + net string + addr string + root string + httpServer *http.Server + shutdownTimeout uint32 + global *global.Global +} + +func New(config Config, global *global.Global, queries *queries.Queries) *Server { + httpServer := &http.Server{ + Handler: NewHandler(config, 
global, queries), + ReadTimeout: time.Duration(config.ReadTimeout) * time.Second, + WriteTimeout: time.Duration(config.WriteTimeout) * time.Second, + IdleTimeout: time.Duration(config.IdleTimeout) * time.Second, + MaxHeaderBytes: config.MaxHeaderBytes, + } //exhaustruct:ignore + return &Server{ + net: config.Net, + addr: config.Addr, + root: config.Root, + shutdownTimeout: config.ShutdownTimeout, + httpServer: httpServer, + global: global, + } +} + +func (server *Server) Run(ctx context.Context) (err error) { + server.httpServer.BaseContext = func(_ net.Listener) context.Context { return ctx } + + listener, err := misc.Listen(ctx, server.net, server.addr) + if err != nil { + return fmt.Errorf("listen for web: %w", err) + } + defer func() { + _ = listener.Close() + }() + + stop := context.AfterFunc(ctx, func() { + shCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), time.Duration(server.shutdownTimeout)*time.Second) + defer cancel() + _ = server.httpServer.Shutdown(shCtx) + _ = listener.Close() + }) + defer stop() + + err = server.httpServer.Serve(listener) + if err != nil { + if errors.Is(err, http.ErrServerClosed) || ctx.Err() != nil { + return nil + } + return fmt.Errorf("serve web: %w", err) + } + panic("unreachable") +} diff --git a/forged/internal/incoming/web/templates/load.go b/forged/internal/incoming/web/templates/load.go new file mode 100644 index 0000000..4a6fc49 --- /dev/null +++ b/forged/internal/incoming/web/templates/load.go @@ -0,0 +1,31 @@ +package templates + +import ( + "html/template" + "io/fs" + "os" + "path/filepath" +) + +func MustParseDir(dir string, funcs template.FuncMap) *template.Template { + base := template.New("").Funcs(funcs) + + err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return nil + } + b, err := os.ReadFile(path) + if err != nil { + return err + } + _, err = base.Parse(string(b)) + return err + }) + if err != nil { + panic(err) 
+ } + return base +} diff --git a/forged/internal/incoming/web/templates/renderer.go b/forged/internal/incoming/web/templates/renderer.go new file mode 100644 index 0000000..1e2f325 --- /dev/null +++ b/forged/internal/incoming/web/templates/renderer.go @@ -0,0 +1,23 @@ +package templates + +import ( + "html/template" + "net/http" +) + +type Renderer interface { + Render(w http.ResponseWriter, name string, data any) error +} + +type tmplRenderer struct { + t *template.Template +} + +func New(t *template.Template) Renderer { + return &tmplRenderer{t: t} +} + +func (r *tmplRenderer) Render(w http.ResponseWriter, name string, data any) error { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + return r.t.ExecuteTemplate(w, name, data) +} diff --git a/forged/internal/incoming/web/types/types.go b/forged/internal/incoming/web/types/types.go new file mode 100644 index 0000000..bacce24 --- /dev/null +++ b/forged/internal/incoming/web/types/types.go @@ -0,0 +1,45 @@ +package types + +import ( + "context" + "net/http" + + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + "go.lindenii.runxiyu.org/forge/forged/internal/global" +) + +// BaseData is per-request context computed by the router and read by handlers. +// Keep it small and stable; page-specific data should live in view models. +type BaseData struct { + UserID string + Username string + URLSegments []string + DirMode bool + GroupPath []string + SeparatorIndex int + RefType string + RefName string + Global *global.Global + Queries *queries.Queries +} + +type ctxKey struct{} + +// WithBaseData attaches BaseData to a context. +func WithBaseData(ctx context.Context, b *BaseData) context.Context { + return context.WithValue(ctx, ctxKey{}, b) +} + +// Base retrieves BaseData from the request (never nil). 
+func Base(r *http.Request) *BaseData { + if v, ok := r.Context().Value(ctxKey{}).(*BaseData); ok && v != nil { + return v + } + return &BaseData{} +} + +// Vars are route variables captured by the router (e.g., :repo, *rest). +type Vars map[string]string + +// HandlerFunc is the router↔handler function contract. +type HandlerFunc func(http.ResponseWriter, *http.Request, Vars) diff --git a/forged/internal/ipc/git2c/client.go b/forged/internal/ipc/git2c/client.go new file mode 100644 index 0000000..8b11035 --- /dev/null +++ b/forged/internal/ipc/git2c/client.go @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +package git2c + +import ( + "context" + "fmt" + "net" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/bare" +) + +// Client represents a connection to the git2d backend daemon. +type Client struct { + socketPath string + conn net.Conn + writer *bare.Writer + reader *bare.Reader +} + +// NewClient establishes a connection to a git2d socket and returns a new Client. +func NewClient(ctx context.Context, socketPath string) (*Client, error) { + dialer := &net.Dialer{} //exhaustruct:ignore + conn, err := dialer.DialContext(ctx, "unix", socketPath) + if err != nil { + return nil, fmt.Errorf("git2d connection failed: %w", err) + } + + writer := bare.NewWriter(conn) + reader := bare.NewReader(conn) + + return &Client{ + socketPath: socketPath, + conn: conn, + writer: writer, + reader: reader, + }, nil +} + +// Close terminates the underlying socket connection. 
+func (c *Client) Close() (err error) { + if c.conn != nil { + err = c.conn.Close() + if err != nil { + return fmt.Errorf("close underlying socket: %w", err) + } + } + return nil +} diff --git a/forged/internal/ipc/git2c/cmd_index.go b/forged/internal/ipc/git2c/cmd_index.go new file mode 100644 index 0000000..e9fc435 --- /dev/null +++ b/forged/internal/ipc/git2c/cmd_index.go @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +package git2c + +import ( + "encoding/hex" + "errors" + "fmt" + "io" +) + +// CmdIndex requests a repository index from git2d and returns the list of commits +// and the contents of a README file if available. +func (c *Client) CmdIndex(repoPath string) ([]Commit, *FilenameContents, error) { + err := c.writer.WriteData([]byte(repoPath)) + if err != nil { + return nil, nil, fmt.Errorf("sending repo path failed: %w", err) + } + err = c.writer.WriteUint(1) + if err != nil { + return nil, nil, fmt.Errorf("sending command failed: %w", err) + } + + status, err := c.reader.ReadUint() + if err != nil { + return nil, nil, fmt.Errorf("reading status failed: %w", err) + } + if status != 0 { + return nil, nil, fmt.Errorf("git2d error: %d", status) + } + + // README + readmeRaw, err := c.reader.ReadData() + if err != nil { + readmeRaw = nil + } + + readmeFilename := "README.md" // TODO + readme := &FilenameContents{Filename: readmeFilename, Content: readmeRaw} + + // Commits + var commits []Commit + for { + id, err := c.reader.ReadData() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return nil, nil, fmt.Errorf("reading commit ID failed: %w", err) + } + title, _ := c.reader.ReadData() + authorName, _ := c.reader.ReadData() + authorEmail, _ := c.reader.ReadData() + authorDate, _ := c.reader.ReadData() + + commits = append(commits, Commit{ + Hash: hex.EncodeToString(id), + Author: string(authorName), + Email: string(authorEmail), + Date: string(authorDate), + Message: 
string(title), + }) + } + + return commits, readme, nil +} diff --git a/forged/internal/ipc/git2c/cmd_treeraw.go b/forged/internal/ipc/git2c/cmd_treeraw.go new file mode 100644 index 0000000..89b702c --- /dev/null +++ b/forged/internal/ipc/git2c/cmd_treeraw.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +package git2c + +import ( + "errors" + "fmt" + "io" +) + +// CmdTreeRaw queries git2d for a tree or blob object at the given path within the repository. +// It returns either a directory listing or the contents of a file. +func (c *Client) CmdTreeRaw(repoPath, pathSpec string) ([]TreeEntry, string, error) { + err := c.writer.WriteData([]byte(repoPath)) + if err != nil { + return nil, "", fmt.Errorf("sending repo path failed: %w", err) + } + err = c.writer.WriteUint(2) + if err != nil { + return nil, "", fmt.Errorf("sending command failed: %w", err) + } + err = c.writer.WriteData([]byte(pathSpec)) + if err != nil { + return nil, "", fmt.Errorf("sending path failed: %w", err) + } + + status, err := c.reader.ReadUint() + if err != nil { + return nil, "", fmt.Errorf("reading status failed: %w", err) + } + + switch status { + case 0: + kind, err := c.reader.ReadUint() + if err != nil { + return nil, "", fmt.Errorf("reading object kind failed: %w", err) + } + + switch kind { + case 1: + // Tree + count, err := c.reader.ReadUint() + if err != nil { + return nil, "", fmt.Errorf("reading entry count failed: %w", err) + } + + var files []TreeEntry + for range count { + typeCode, err := c.reader.ReadUint() + if err != nil { + return nil, "", fmt.Errorf("error reading entry type: %w", err) + } + mode, err := c.reader.ReadUint() + if err != nil { + return nil, "", fmt.Errorf("error reading entry mode: %w", err) + } + size, err := c.reader.ReadUint() + if err != nil { + return nil, "", fmt.Errorf("error reading entry size: %w", err) + } + name, err := c.reader.ReadData() + if err != nil { + return nil, "", 
fmt.Errorf("error reading entry name: %w", err) + } + + files = append(files, TreeEntry{ + Name: string(name), + Mode: fmt.Sprintf("%06o", mode), + Size: size, + IsFile: typeCode == 2, + IsSubtree: typeCode == 1, + }) + } + + return files, "", nil + + case 2: + // Blob + content, err := c.reader.ReadData() + if err != nil && !errors.Is(err, io.EOF) { + return nil, "", fmt.Errorf("error reading file content: %w", err) + } + + return nil, string(content), nil + + default: + return nil, "", fmt.Errorf("unknown kind: %d", kind) + } + + case 3: + return nil, "", fmt.Errorf("path not found: %s", pathSpec) + + default: + return nil, "", fmt.Errorf("unknown status code: %d", status) + } +} diff --git a/forged/internal/ipc/git2c/doc.go b/forged/internal/ipc/git2c/doc.go new file mode 100644 index 0000000..e14dae0 --- /dev/null +++ b/forged/internal/ipc/git2c/doc.go @@ -0,0 +1,2 @@ +// Package git2c provides routines to interact with the git2d backend daemon. +package git2c diff --git a/forged/internal/ipc/git2c/git_types.go b/forged/internal/ipc/git2c/git_types.go new file mode 100644 index 0000000..bf13f05 --- /dev/null +++ b/forged/internal/ipc/git2c/git_types.go @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +package git2c + +// Commit represents a single commit object retrieved from the git2d daemon. +type Commit struct { + Hash string + Author string + Email string + Date string + Message string +} + +// FilenameContents holds the filename and byte contents of a file, such as a README. +type FilenameContents struct { + Filename string + Content []byte +} + +// TreeEntry represents a file or directory entry within a Git tree object. 
+type TreeEntry struct { + Name string + Mode string + Size uint64 + IsFile bool + IsSubtree bool +} diff --git a/forged/internal/ipc/git2c/perror.go b/forged/internal/ipc/git2c/perror.go new file mode 100644 index 0000000..6bc7595 --- /dev/null +++ b/forged/internal/ipc/git2c/perror.go @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +// TODO: Make the C part report detailed error messages too + +package git2c + +import "errors" + +var ( + ErrUnknown = errors.New("git2c: unknown error") + ErrPath = errors.New("git2c: get tree entry by path failed") + ErrRevparse = errors.New("git2c: revparse failed") + ErrReadme = errors.New("git2c: no readme") + ErrBlobExpected = errors.New("git2c: blob expected") + ErrEntryToObject = errors.New("git2c: tree entry to object conversion failed") + ErrBlobRawContent = errors.New("git2c: get blob raw content failed") + ErrRevwalk = errors.New("git2c: revwalk failed") + ErrRevwalkPushHead = errors.New("git2c: revwalk push head failed") + ErrBareProto = errors.New("git2c: bare protocol error") +) + +func Perror(errno uint) error { + switch errno { + case 0: + return nil + case 3: + return ErrPath + case 4: + return ErrRevparse + case 5: + return ErrReadme + case 6: + return ErrBlobExpected + case 7: + return ErrEntryToObject + case 8: + return ErrBlobRawContent + case 9: + return ErrRevwalk + case 10: + return ErrRevwalkPushHead + case 11: + return ErrBareProto + } + return ErrUnknown +} diff --git a/forged/internal/ipc/irc/bot.go b/forged/internal/ipc/irc/bot.go new file mode 100644 index 0000000..07008ae --- /dev/null +++ b/forged/internal/ipc/irc/bot.go @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +package irc + +import ( + "context" + "crypto/tls" + "fmt" + "log/slog" + "net" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" +) + +// Bot represents an IRC bot client that 
handles events and allows for sending messages. +type Bot struct { + // TODO: Use each config field instead of embedding Config here. + config *Config + ircSendBuffered chan string + ircSendDirectChan chan misc.ErrorBack[string] +} + +// NewBot creates a new Bot instance using the provided configuration. +func NewBot(c *Config) (b *Bot) { + b = &Bot{ + config: c, + } //exhaustruct:ignore + return +} + +// Connect establishes a new IRC session and starts handling incoming and outgoing messages. +// This method blocks until an error occurs or the connection is closed. +func (b *Bot) Connect(ctx context.Context) error { + var err error + var underlyingConn net.Conn + if b.config.TLS { + dialer := tls.Dialer{} //exhaustruct:ignore + underlyingConn, err = dialer.DialContext(ctx, b.config.Net, b.config.Addr) + } else { + dialer := net.Dialer{} //exhaustruct:ignore + underlyingConn, err = dialer.DialContext(ctx, b.config.Net, b.config.Addr) + } + if err != nil { + return fmt.Errorf("dialing irc: %w", err) + } + defer func() { + _ = underlyingConn.Close() + }() + + conn := NewConn(underlyingConn) + + logAndWriteLn := func(s string) (n int, err error) { + slog.Debug("irc tx", "line", s) + return conn.WriteString(s + "\r\n") + } + + _, err = logAndWriteLn("NICK " + b.config.Nick) + if err != nil { + return err + } + _, err = logAndWriteLn("USER " + b.config.User + " 0 * :" + b.config.Gecos) + if err != nil { + return err + } + + readLoopError := make(chan error) + writeLoopAbort := make(chan struct{}) + go func() { + for { + select { + case <-writeLoopAbort: + return + default: + } + + msg, line, err := conn.ReadMessage() + if err != nil { + readLoopError <- err + return + } + + slog.Debug("irc rx", "line", line) + + switch msg.Command { + case "001": + _, err = logAndWriteLn("JOIN #chat") + if err != nil { + readLoopError <- err + return + } + case "PING": + _, err = logAndWriteLn("PONG :" + msg.Args[0]) + if err != nil { + readLoopError <- err + return + } + case "JOIN": + 
c, ok := msg.Source.(Client) + if !ok { + slog.Error("unable to convert source of JOIN to client") + } + if c.Nick != b.config.Nick { + continue + } + default: + } + } + }() + + for { + select { + case err = <-readLoopError: + return err + case line := <-b.ircSendBuffered: + _, err = logAndWriteLn(line) + if err != nil { + select { + case b.ircSendBuffered <- line: + default: + slog.Error("unable to requeue message", "line", line) + } + writeLoopAbort <- struct{}{} + return err + } + case lineErrorBack := <-b.ircSendDirectChan: + _, err = logAndWriteLn(lineErrorBack.Content) + lineErrorBack.ErrorChan <- err + if err != nil { + writeLoopAbort <- struct{}{} + return err + } + } + } +} + +// SendDirect sends an IRC message directly to the connection and bypasses +// the buffering system. +func (b *Bot) SendDirect(line string) error { + ech := make(chan error, 1) + + b.ircSendDirectChan <- misc.ErrorBack[string]{ + Content: line, + ErrorChan: ech, + } + + return <-ech +} + +// Send queues a message to be sent asynchronously via the buffered send queue. +// If the queue is full, the message is dropped and an error is logged. +func (b *Bot) Send(line string) { + select { + case b.ircSendBuffered <- line: + default: + slog.Error("irc sendq full", "line", line) + } +} + +// ConnectLoop continuously attempts to maintain an IRC session. +// If the connection drops, it automatically retries with no delay. +func (b *Bot) ConnectLoop(ctx context.Context) { + b.ircSendBuffered = make(chan string, b.config.SendQ) + b.ircSendDirectChan = make(chan misc.ErrorBack[string]) + + for { + err := b.Connect(ctx) + slog.Error("irc session error", "error", err) + } +} diff --git a/forged/internal/ipc/irc/config.go b/forged/internal/ipc/irc/config.go new file mode 100644 index 0000000..b1b5703 --- /dev/null +++ b/forged/internal/ipc/irc/config.go @@ -0,0 +1,13 @@ +package irc + +// Config contains IRC connection and identity settings for the bot. 
+// This should usually be a part of the primary config struct. +type Config struct { + Net string `scfg:"net"` + Addr string `scfg:"addr"` + TLS bool `scfg:"tls"` + SendQ uint `scfg:"sendq"` + Nick string `scfg:"nick"` + User string `scfg:"user"` + Gecos string `scfg:"gecos"` +} diff --git a/forged/internal/ipc/irc/conn.go b/forged/internal/ipc/irc/conn.go new file mode 100644 index 0000000..b9b208c --- /dev/null +++ b/forged/internal/ipc/irc/conn.go @@ -0,0 +1,58 @@ +package irc + +import ( + "bufio" + "fmt" + "net" + "slices" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" +) + +type Conn struct { + netConn net.Conn + bufReader *bufio.Reader +} + +func NewConn(netConn net.Conn) Conn { + return Conn{ + netConn: netConn, + bufReader: bufio.NewReader(netConn), + } +} + +func (c *Conn) ReadMessage() (msg Message, line string, err error) { + raw, err := c.bufReader.ReadSlice('\n') + if err != nil { + return + } + + if raw[len(raw)-1] == '\n' { + raw = raw[:len(raw)-1] + } + if raw[len(raw)-1] == '\r' { + raw = raw[:len(raw)-1] + } + + lineBytes := slices.Clone(raw) + line = misc.BytesToString(lineBytes) + msg, err = Parse(lineBytes) + + return +} + +func (c *Conn) Write(p []byte) (n int, err error) { + n, err = c.netConn.Write(p) + if err != nil { + err = fmt.Errorf("write to connection: %w", err) + } + return n, err +} + +func (c *Conn) WriteString(s string) (n int, err error) { + n, err = c.netConn.Write(misc.StringToBytes(s)) + if err != nil { + err = fmt.Errorf("write to connection: %w", err) + } + return n, err +} diff --git a/forged/internal/ipc/irc/doc.go b/forged/internal/ipc/irc/doc.go new file mode 100644 index 0000000..dcfca82 --- /dev/null +++ b/forged/internal/ipc/irc/doc.go @@ -0,0 +1,2 @@ +// Package irc provides basic IRC bot functionality. 
+package irc diff --git a/forged/internal/ipc/irc/errors.go b/forged/internal/ipc/irc/errors.go new file mode 100644 index 0000000..3506c70 --- /dev/null +++ b/forged/internal/ipc/irc/errors.go @@ -0,0 +1,8 @@ +package irc + +import "errors" + +var ( + ErrInvalidIRCv3Tag = errors.New("invalid ircv3 tag") + ErrMalformedMsg = errors.New("malformed irc message") +) diff --git a/forged/internal/ipc/irc/message.go b/forged/internal/ipc/irc/message.go new file mode 100644 index 0000000..3387bec --- /dev/null +++ b/forged/internal/ipc/irc/message.go @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: Copyright (c) 2018-2024 luk3yx +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +package irc + +import ( + "bytes" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" +) + +type Message struct { + Command string + Source Source + Tags map[string]string + Args []string +} + +// All strings returned are borrowed from the input byte slice. +func Parse(raw []byte) (msg Message, err error) { + sp := bytes.Split(raw, []byte{' '}) // TODO: Use bytes.Cut instead here + + if bytes.HasPrefix(sp[0], []byte{'@'}) { // TODO: Check size manually + if len(sp[0]) < 2 { + err = ErrMalformedMsg + return msg, err + } + sp[0] = sp[0][1:] + + msg.Tags, err = tagsToMap(sp[0]) + if err != nil { + return msg, err + } + + if len(sp) < 2 { + err = ErrMalformedMsg + return msg, err + } + sp = sp[1:] + } else { + msg.Tags = nil // TODO: Is a nil map the correct thing to use here? 
+ } + + if bytes.HasPrefix(sp[0], []byte{':'}) { // TODO: Check size manually + if len(sp[0]) < 2 { + err = ErrMalformedMsg + return msg, err + } + sp[0] = sp[0][1:] + + msg.Source = parseSource(sp[0]) + + if len(sp) < 2 { + err = ErrMalformedMsg + return msg, err + } + sp = sp[1:] + } + + msg.Command = misc.BytesToString(sp[0]) + if len(sp) < 2 { + return msg, err + } + sp = sp[1:] + + for i := 0; i < len(sp); i++ { + if len(sp[i]) == 0 { + continue + } + if sp[i][0] == ':' { + if len(sp[i]) < 2 { + sp[i] = []byte{} + } else { + sp[i] = sp[i][1:] + } + msg.Args = append(msg.Args, misc.BytesToString(bytes.Join(sp[i:], []byte{' '}))) + // TODO: Avoid Join by not using sp in the first place + break + } + msg.Args = append(msg.Args, misc.BytesToString(sp[i])) + } + + return msg, err +} + +var ircv3TagEscapes = map[byte]byte{ //nolint:gochecknoglobals + ':': ';', + 's': ' ', + 'r': '\r', + 'n': '\n', +} + +func tagsToMap(raw []byte) (tags map[string]string, err error) { + tags = make(map[string]string) + for rawTag := range bytes.SplitSeq(raw, []byte{';'}) { + key, value, found := bytes.Cut(rawTag, []byte{'='}) + if !found { + err = ErrInvalidIRCv3Tag + return tags, err + } + if len(value) == 0 { + tags[misc.BytesToString(key)] = "" + } else { + if !bytes.Contains(value, []byte{'\\'}) { + tags[misc.BytesToString(key)] = misc.BytesToString(value) + } else { + valueUnescaped := bytes.NewBuffer(make([]byte, 0, len(value))) + for i := 0; i < len(value); i++ { + if value[i] == '\\' { + i++ + byteUnescaped, ok := ircv3TagEscapes[value[i]] + if !ok { + byteUnescaped = value[i] + } + valueUnescaped.WriteByte(byteUnescaped) + } else { + valueUnescaped.WriteByte(value[i]) + } + } + tags[misc.BytesToString(key)] = misc.BytesToString(valueUnescaped.Bytes()) + } + } + } + return tags, err +} diff --git a/forged/internal/ipc/irc/source.go b/forged/internal/ipc/irc/source.go new file mode 100644 index 0000000..938751f --- /dev/null +++ b/forged/internal/ipc/irc/source.go @@ -0,0 
+1,51 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +package irc + +import ( + "bytes" + + "go.lindenii.runxiyu.org/forge/forged/internal/common/misc" +) + +type Source interface { + AsSourceString() string +} + +//nolint:ireturn +func parseSource(s []byte) Source { + nick, userhost, found := bytes.Cut(s, []byte{'!'}) + if !found { + return Server{name: misc.BytesToString(s)} + } + + user, host, found := bytes.Cut(userhost, []byte{'@'}) + if !found { + return Server{name: misc.BytesToString(s)} + } + + return Client{ + Nick: misc.BytesToString(nick), + User: misc.BytesToString(user), + Host: misc.BytesToString(host), + } +} + +type Server struct { + name string +} + +func (s Server) AsSourceString() string { + return s.name +} + +type Client struct { + Nick string + User string + Host string +} + +func (c Client) AsSourceString() string { + return c.Nick + "!" + c.User + "@" + c.Host +} diff --git a/forged/internal/irc/bot.go b/forged/internal/irc/bot.go deleted file mode 100644 index 1c6d32f..0000000 --- a/forged/internal/irc/bot.go +++ /dev/null @@ -1,176 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -// Package irc provides basic IRC bot functionality. -package irc - -import ( - "crypto/tls" - "log/slog" - "net" - - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -// Config contains IRC connection and identity settings for the bot. -// This should usually be a part of the primary config struct. -type Config struct { - Net string `scfg:"net"` - Addr string `scfg:"addr"` - TLS bool `scfg:"tls"` - SendQ uint `scfg:"sendq"` - Nick string `scfg:"nick"` - User string `scfg:"user"` - Gecos string `scfg:"gecos"` -} - -// Bot represents an IRC bot client that handles events and allows for sending messages. 
-type Bot struct { - config *Config - ircSendBuffered chan string - ircSendDirectChan chan misc.ErrorBack[string] -} - -// NewBot creates a new Bot instance using the provided configuration. -func NewBot(c *Config) (b *Bot) { - b = &Bot{ - config: c, - } - return -} - -// Connect establishes a new IRC session and starts handling incoming and outgoing messages. -// This method blocks until an error occurs or the connection is closed. -func (b *Bot) Connect() error { - var err error - var underlyingConn net.Conn - if b.config.TLS { - underlyingConn, err = tls.Dial(b.config.Net, b.config.Addr, nil) - } else { - underlyingConn, err = net.Dial(b.config.Net, b.config.Addr) - } - if err != nil { - return err - } - defer underlyingConn.Close() - - conn := NewConn(underlyingConn) - - logAndWriteLn := func(s string) (n int, err error) { - slog.Debug("irc tx", "line", s) - return conn.WriteString(s + "\r\n") - } - - _, err = logAndWriteLn("NICK " + b.config.Nick) - if err != nil { - return err - } - _, err = logAndWriteLn("USER " + b.config.User + " 0 * :" + b.config.Gecos) - if err != nil { - return err - } - - readLoopError := make(chan error) - writeLoopAbort := make(chan struct{}) - go func() { - for { - select { - case <-writeLoopAbort: - return - default: - } - - msg, line, err := conn.ReadMessage() - if err != nil { - readLoopError <- err - return - } - - slog.Debug("irc rx", "line", line) - - switch msg.Command { - case "001": - _, err = logAndWriteLn("JOIN #chat") - if err != nil { - readLoopError <- err - return - } - case "PING": - _, err = logAndWriteLn("PONG :" + msg.Args[0]) - if err != nil { - readLoopError <- err - return - } - case "JOIN": - c, ok := msg.Source.(Client) - if !ok { - slog.Error("unable to convert source of JOIN to client") - } - if c.Nick != b.config.Nick { - continue - } - default: - } - } - }() - - for { - select { - case err = <-readLoopError: - return err - case line := <-b.ircSendBuffered: - _, err = logAndWriteLn(line) - if err != nil { 
- select { - case b.ircSendBuffered <- line: - default: - slog.Error("unable to requeue message", "line", line) - } - writeLoopAbort <- struct{}{} - return err - } - case lineErrorBack := <-b.ircSendDirectChan: - _, err = logAndWriteLn(lineErrorBack.Content) - lineErrorBack.ErrorChan <- err - if err != nil { - writeLoopAbort <- struct{}{} - return err - } - } - } -} - -// SendDirect sends an IRC message directly to the connection and bypasses -// the buffering system. -func (b *Bot) SendDirect(line string) error { - ech := make(chan error, 1) - - b.ircSendDirectChan <- misc.ErrorBack[string]{ - Content: line, - ErrorChan: ech, - } - - return <-ech -} - -// Send queues a message to be sent asynchronously via the buffered send queue. -// If the queue is full, the message is dropped and an error is logged. -func (b *Bot) Send(line string) { - select { - case b.ircSendBuffered <- line: - default: - slog.Error("irc sendq full", "line", line) - } -} - -// ConnectLoop continuously attempts to maintain an IRC session. -// If the connection drops, it automatically retries with no delay. 
-func (b *Bot) ConnectLoop() { - b.ircSendBuffered = make(chan string, b.config.SendQ) - b.ircSendDirectChan = make(chan misc.ErrorBack[string]) - - for { - err := b.Connect() - slog.Error("irc session error", "error", err) - } -} diff --git a/forged/internal/irc/conn.go b/forged/internal/irc/conn.go deleted file mode 100644 index b975b72..0000000 --- a/forged/internal/irc/conn.go +++ /dev/null @@ -1,49 +0,0 @@ -package irc - -import ( - "bufio" - "net" - "slices" - - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -type Conn struct { - netConn net.Conn - bufReader *bufio.Reader -} - -func NewConn(netConn net.Conn) Conn { - return Conn{ - netConn: netConn, - bufReader: bufio.NewReader(netConn), - } -} - -func (c *Conn) ReadMessage() (msg Message, line string, err error) { - raw, err := c.bufReader.ReadSlice('\n') - if err != nil { - return - } - - if raw[len(raw)-1] == '\n' { - raw = raw[:len(raw)-1] - } - if raw[len(raw)-1] == '\r' { - raw = raw[:len(raw)-1] - } - - lineBytes := slices.Clone(raw) - line = misc.BytesToString(lineBytes) - msg, err = Parse(lineBytes) - - return -} - -func (c *Conn) Write(p []byte) (n int, err error) { - return c.netConn.Write(p) -} - -func (c *Conn) WriteString(s string) (n int, err error) { - return c.netConn.Write(misc.StringToBytes(s)) -} diff --git a/forged/internal/irc/errors.go b/forged/internal/irc/errors.go deleted file mode 100644 index 3506c70..0000000 --- a/forged/internal/irc/errors.go +++ /dev/null @@ -1,8 +0,0 @@ -package irc - -import "errors" - -var ( - ErrInvalidIRCv3Tag = errors.New("invalid ircv3 tag") - ErrMalformedMsg = errors.New("malformed irc message") -) diff --git a/forged/internal/irc/message.go b/forged/internal/irc/message.go deleted file mode 100644 index 84b6867..0000000 --- a/forged/internal/irc/message.go +++ /dev/null @@ -1,126 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: Copyright (c) 2018-2024 luk3yx -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - 
-package irc - -import ( - "bytes" - - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -type Message struct { - Command string - Source Source - Tags map[string]string - Args []string -} - -// All strings returned are borrowed from the input byte slice. -func Parse(raw []byte) (msg Message, err error) { - sp := bytes.Split(raw, []byte{' '}) // TODO: Use bytes.Cut instead here - - if bytes.HasPrefix(sp[0], []byte{'@'}) { // TODO: Check size manually - if len(sp[0]) < 2 { - err = ErrMalformedMsg - return - } - sp[0] = sp[0][1:] - - msg.Tags, err = tagsToMap(sp[0]) - if err != nil { - return - } - - if len(sp) < 2 { - err = ErrMalformedMsg - return - } - sp = sp[1:] - } else { - msg.Tags = nil // TODO: Is a nil map the correct thing to use here? - } - - if bytes.HasPrefix(sp[0], []byte{':'}) { // TODO: Check size manually - if len(sp[0]) < 2 { - err = ErrMalformedMsg - return - } - sp[0] = sp[0][1:] - - msg.Source = parseSource(sp[0]) - - if len(sp) < 2 { - err = ErrMalformedMsg - return - } - sp = sp[1:] - } - - msg.Command = misc.BytesToString(sp[0]) - if len(sp) < 2 { - return - } - sp = sp[1:] - - for i := 0; i < len(sp); i++ { - if len(sp[i]) == 0 { - continue - } - if sp[i][0] == ':' { - if len(sp[i]) < 2 { - sp[i] = []byte{} - } else { - sp[i] = sp[i][1:] - } - msg.Args = append(msg.Args, misc.BytesToString(bytes.Join(sp[i:], []byte{' '}))) - // TODO: Avoid Join by not using sp in the first place - break - } - msg.Args = append(msg.Args, misc.BytesToString(sp[i])) - } - - return -} - -var ircv3TagEscapes = map[byte]byte{ //nolint:gochecknoglobals - ':': ';', - 's': ' ', - 'r': '\r', - 'n': '\n', -} - -func tagsToMap(raw []byte) (tags map[string]string, err error) { - tags = make(map[string]string) - for rawTag := range bytes.SplitSeq(raw, []byte{';'}) { - key, value, found := bytes.Cut(rawTag, []byte{'='}) - if !found { - err = ErrInvalidIRCv3Tag - return - } - if len(value) == 0 { - tags[misc.BytesToString(key)] = "" - } else { - if 
!bytes.Contains(value, []byte{'\\'}) { - tags[misc.BytesToString(key)] = misc.BytesToString(value) - } else { - valueUnescaped := bytes.NewBuffer(make([]byte, 0, len(value))) - for i := 0; i < len(value); i++ { - if value[i] == '\\' { - i++ - byteUnescaped, ok := ircv3TagEscapes[value[i]] - if !ok { - byteUnescaped = value[i] - } - valueUnescaped.WriteByte(byteUnescaped) - } else { - valueUnescaped.WriteByte(value[i]) - } - } - tags[misc.BytesToString(key)] = misc.BytesToString(valueUnescaped.Bytes()) - } - } - } - return -} diff --git a/forged/internal/irc/source.go b/forged/internal/irc/source.go deleted file mode 100644 index d955f45..0000000 --- a/forged/internal/irc/source.go +++ /dev/null @@ -1,50 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package irc - -import ( - "bytes" - - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -type Source interface { - AsSourceString() string -} - -func parseSource(s []byte) Source { - nick, userhost, found := bytes.Cut(s, []byte{'!'}) - if !found { - return Server{name: misc.BytesToString(s)} - } - - user, host, found := bytes.Cut(userhost, []byte{'@'}) - if !found { - return Server{name: misc.BytesToString(s)} - } - - return Client{ - Nick: misc.BytesToString(nick), - User: misc.BytesToString(user), - Host: misc.BytesToString(host), - } -} - -type Server struct { - name string -} - -func (s Server) AsSourceString() string { - return s.name -} - -type Client struct { - Nick string - User string - Host string -} - -func (c Client) AsSourceString() string { - return c.Nick + "!" 
+ c.User + "@" + c.Host -} diff --git a/forged/internal/misc/back.go b/forged/internal/misc/back.go deleted file mode 100644 index 5351359..0000000 --- a/forged/internal/misc/back.go +++ /dev/null @@ -1,11 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package misc - -// ErrorBack wraps a value and a channel for communicating an associated error. -// Typically used to get an error response after sending data across a channel. -type ErrorBack[T any] struct { - Content T - ErrorChan chan error -} diff --git a/forged/internal/misc/deploy.go b/forged/internal/misc/deploy.go deleted file mode 100644 index 3ee5f92..0000000 --- a/forged/internal/misc/deploy.go +++ /dev/null @@ -1,22 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package misc - -import ( - "io" - "io/fs" - "os" -) - -// DeployBinary copies the contents of a binary file to the target destination path. -// The destination file is created with executable permissions. -func DeployBinary(src fs.File, dst string) (err error) { - var dstFile *os.File - if dstFile, err = os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755); err != nil { - return err - } - defer dstFile.Close() - _, err = io.Copy(dstFile, src) - return err -} diff --git a/forged/internal/misc/iter.go b/forged/internal/misc/iter.go deleted file mode 100644 index 61a96f4..0000000 --- a/forged/internal/misc/iter.go +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package misc - -import "iter" - -// iterSeqLimit returns an iterator equivalent to the supplied one, but stops -// after n iterations. 
-func IterSeqLimit[T any](s iter.Seq[T], n uint) iter.Seq[T] { - return func(yield func(T) bool) { - var iterations uint - for v := range s { - if iterations > n-1 { - return - } - if !yield(v) { - return - } - iterations++ - } - } -} diff --git a/forged/internal/misc/misc.go b/forged/internal/misc/misc.go deleted file mode 100644 index 398020a..0000000 --- a/forged/internal/misc/misc.go +++ /dev/null @@ -1,18 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -// Package misc provides miscellaneous functions and other definitions. -package misc - -import "strings" - -// sliceContainsNewlines returns true if and only if the given slice contains -// one or more strings that contains newlines. -func SliceContainsNewlines(s []string) bool { - for _, v := range s { - if strings.Contains(v, "\n") { - return true - } - } - return false -} diff --git a/forged/internal/misc/panic.go b/forged/internal/misc/panic.go deleted file mode 100644 index 34c49c5..0000000 --- a/forged/internal/misc/panic.go +++ /dev/null @@ -1,19 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package misc - -// FirstOrPanic returns the value or panics if the error is non-nil. -func FirstOrPanic[T any](v T, err error) T { - if err != nil { - panic(err) - } - return v -} - -// NoneOrPanic panics if the provided error is non-nil. -func NoneOrPanic(err error) { - if err != nil { - panic(err) - } -} diff --git a/forged/internal/misc/trivial.go b/forged/internal/misc/trivial.go deleted file mode 100644 index e59c17e..0000000 --- a/forged/internal/misc/trivial.go +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package misc - -import ( - "net/url" - "strings" -) - -// These are all trivial functions that are intended to be used in HTML -// templates. - -// FirstLine returns the first line of a string. 
-func FirstLine(s string) string { - before, _, _ := strings.Cut(s, "\n") - return before -} - -// PathEscape escapes the input as an URL path segment. -func PathEscape(s string) string { - return url.PathEscape(s) -} - -// QueryEscape escapes the input as an URL query segment. -func QueryEscape(s string) string { - return url.QueryEscape(s) -} - -// Dereference dereferences a pointer. -func Dereference[T any](p *T) T { - return *p -} - -// DereferenceOrZero dereferences a pointer. If the pointer is nil, the zero -// value of its associated type is returned instead. -func DereferenceOrZero[T any](p *T) T { - if p != nil { - return *p - } - var z T - return z -} - -// Minus subtracts two numbers. -func Minus(a, b int) int { - return a - b -} diff --git a/forged/internal/misc/unsafe.go b/forged/internal/misc/unsafe.go deleted file mode 100644 index 6c2192f..0000000 --- a/forged/internal/misc/unsafe.go +++ /dev/null @@ -1,20 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package misc - -import "unsafe" - -// StringToBytes converts a string to a byte slice without copying the string. -// Memory is borrowed from the string. -// The resulting byte slice must not be modified in any form. -func StringToBytes(s string) (bytes []byte) { - return unsafe.Slice(unsafe.StringData(s), len(s)) -} - -// BytesToString converts a byte slice to a string without copying the bytes. -// Memory is borrowed from the byte slice. -// The source byte slice must not be modified. 
-func BytesToString(b []byte) string { - return unsafe.String(unsafe.SliceData(b), len(b)) -} diff --git a/forged/internal/misc/url.go b/forged/internal/misc/url.go deleted file mode 100644 index 346ff76..0000000 --- a/forged/internal/misc/url.go +++ /dev/null @@ -1,118 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package misc - -import ( - "net/http" - "net/url" - "strings" -) - -// ParseReqURI parses an HTTP request URL, and returns a slice of path segments -// and the query parameters. It handles %2F correctly. -func ParseReqURI(requestURI string) (segments []string, params url.Values, err error) { - path, paramsStr, _ := strings.Cut(requestURI, "?") - - segments, err = PathToSegments(path) - if err != nil { - return - } - - params, err = url.ParseQuery(paramsStr) - return -} - -// PathToSegments splits a path into unescaped segments. It handles %2F correctly. -func PathToSegments(path string) (segments []string, err error) { - segments = strings.Split(strings.TrimPrefix(path, "/"), "/") - - for i, segment := range segments { - segments[i], err = url.PathUnescape(segment) - if err != nil { - return - } - } - - return -} - -// RedirectDir returns true and redirects the user to a version of the URL with -// a trailing slash, if and only if the request URL does not already have a -// trailing slash. -func RedirectDir(writer http.ResponseWriter, request *http.Request) bool { - requestURI := request.RequestURI - - pathEnd := strings.IndexAny(requestURI, "?#") - var path, rest string - if pathEnd == -1 { - path = requestURI - } else { - path = requestURI[:pathEnd] - rest = requestURI[pathEnd:] - } - - if !strings.HasSuffix(path, "/") { - http.Redirect(writer, request, path+"/"+rest, http.StatusSeeOther) - return true - } - return false -} - -// RedirectNoDir returns true and redirects the user to a version of the URL -// without a trailing slash, if and only if the request URL has a trailing -// slash. 
-func RedirectNoDir(writer http.ResponseWriter, request *http.Request) bool { - requestURI := request.RequestURI - - pathEnd := strings.IndexAny(requestURI, "?#") - var path, rest string - if pathEnd == -1 { - path = requestURI - } else { - path = requestURI[:pathEnd] - rest = requestURI[pathEnd:] - } - - if strings.HasSuffix(path, "/") { - http.Redirect(writer, request, strings.TrimSuffix(path, "/")+rest, http.StatusSeeOther) - return true - } - return false -} - -// RedirectUnconditionally unconditionally redirects the user back to the -// current page while preserving query parameters. -func RedirectUnconditionally(writer http.ResponseWriter, request *http.Request) { - requestURI := request.RequestURI - - pathEnd := strings.IndexAny(requestURI, "?#") - var path, rest string - if pathEnd == -1 { - path = requestURI - } else { - path = requestURI[:pathEnd] - rest = requestURI[pathEnd:] - } - - http.Redirect(writer, request, path+rest, http.StatusSeeOther) -} - -// SegmentsToURL joins URL segments to the path component of a URL. -// Each segment is escaped properly first. -func SegmentsToURL(segments []string) string { - for i, segment := range segments { - segments[i] = url.PathEscape(segment) - } - return strings.Join(segments, "/") -} - -// AnyContain returns true if and only if ss contains a string that contains c. 
-func AnyContain(ss []string, c string) bool { - for _, s := range ss { - if strings.Contains(s, c) { - return true - } - } - return false -} diff --git a/forged/internal/oldgit/fmtpatch.go b/forged/internal/oldgit/fmtpatch.go deleted file mode 100644 index 79be5d8..0000000 --- a/forged/internal/oldgit/fmtpatch.go +++ /dev/null @@ -1,56 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package oldgit - -import ( - "bytes" - "fmt" - "strings" - "time" - - "github.com/go-git/go-git/v5/plumbing/object" -) - -// FmtCommitPatch formats a commit object as if it was returned by -// git-format-patch. -func FmtCommitPatch(commit *object.Commit) (final string, err error) { - var patch *object.Patch - var buf bytes.Buffer - var author object.Signature - var date string - var commitTitle, commitDetails string - - if _, patch, err = CommitToPatch(commit); err != nil { - return "", err - } - - author = commit.Author - date = author.When.Format(time.RFC1123Z) - - commitTitle, commitDetails, _ = strings.Cut(commit.Message, "\n") - - // This date is hardcoded in Git. 
- fmt.Fprintf(&buf, "From %s Mon Sep 17 00:00:00 2001\n", commit.Hash) - fmt.Fprintf(&buf, "From: %s <%s>\n", author.Name, author.Email) - fmt.Fprintf(&buf, "Date: %s\n", date) - fmt.Fprintf(&buf, "Subject: [PATCH] %s\n\n", commitTitle) - - if commitDetails != "" { - commitDetails1, commitDetails2, _ := strings.Cut(commitDetails, "\n") - if strings.TrimSpace(commitDetails1) == "" { - commitDetails = commitDetails2 - } - buf.WriteString(commitDetails) - buf.WriteString("\n") - } - buf.WriteString("---\n") - fmt.Fprint(&buf, patch.Stats().String()) - fmt.Fprintln(&buf) - - buf.WriteString(patch.String()) - - fmt.Fprintf(&buf, "\n-- \n2.48.1\n") - - return buf.String(), nil -} diff --git a/forged/internal/oldgit/oldgit.go b/forged/internal/oldgit/oldgit.go deleted file mode 100644 index 4c99d6a..0000000 --- a/forged/internal/oldgit/oldgit.go +++ /dev/null @@ -1,5 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -// Package oldgit provides deprecated functions that depend on go-git. -package oldgit diff --git a/forged/internal/oldgit/patch.go b/forged/internal/oldgit/patch.go deleted file mode 100644 index fc8ef98..0000000 --- a/forged/internal/oldgit/patch.go +++ /dev/null @@ -1,43 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package oldgit - -import ( - "errors" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" -) - -// CommitToPatch creates an [object.Patch] from the first parent of a given -// [object.Commit]. -// -// TODO: This function should be deprecated as it only diffs with the first -// parent and does not correctly handle merge commits. 
-func CommitToPatch(commit *object.Commit) (parentCommitHash plumbing.Hash, patch *object.Patch, err error) { - var parentCommit *object.Commit - var commitTree *object.Tree - - parentCommit, err = commit.Parent(0) - switch { - case errors.Is(err, object.ErrParentNotFound): - if commitTree, err = commit.Tree(); err != nil { - return - } - if patch, err = NullTree.Patch(commitTree); err != nil { - return - } - case err != nil: - return - default: - parentCommitHash = parentCommit.Hash - if patch, err = parentCommit.Patch(commit); err != nil { - return - } - } - return -} - -// NullTree is a tree object that is empty and has no hash. -var NullTree object.Tree //nolint:gochecknoglobals diff --git a/forged/internal/render/chroma.go b/forged/internal/render/chroma.go deleted file mode 100644 index 64bfde0..0000000 --- a/forged/internal/render/chroma.go +++ /dev/null @@ -1,41 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package render - -import ( - "bytes" - "html/template" - - chromaHTML "github.com/alecthomas/chroma/v2/formatters/html" - chromaLexers "github.com/alecthomas/chroma/v2/lexers" - chromaStyles "github.com/alecthomas/chroma/v2/styles" -) - -// Highlight returns HTML with syntax highlighting for the given file content, -// using Chroma. The lexer is selected based on the filename. -// If tokenization or formatting fails, a fallback
 block is returned with the error.
-func Highlight(filename, content string) template.HTML {
-	lexer := chromaLexers.Match(filename)
-	if lexer == nil {
-		lexer = chromaLexers.Fallback
-	}
-
-	iterator, err := lexer.Tokenise(nil, content)
-	if err != nil {
-		return template.HTML("
Error tokenizing file: " + err.Error() + "
") //#nosec G203` - } - - var buf bytes.Buffer - style := chromaStyles.Get("autumn") - formatter := chromaHTML.New( - chromaHTML.WithClasses(true), - chromaHTML.TabWidth(8), - ) - - if err := formatter.Format(&buf, style, iterator); err != nil { - return template.HTML("
Error formatting file: " + err.Error() + "
") //#nosec G203 - } - - return template.HTML(buf.Bytes()) //#nosec G203 -} diff --git a/forged/internal/render/escape.go b/forged/internal/render/escape.go deleted file mode 100644 index 031e333..0000000 --- a/forged/internal/render/escape.go +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package render - -import ( - "html" - "html/template" -) - -// EscapeHTML just escapes a string and wraps it in [template.HTML]. -func EscapeHTML(s string) template.HTML { - return template.HTML(html.EscapeString(s)) //#nosec G203 -} diff --git a/forged/internal/render/readme.go b/forged/internal/render/readme.go deleted file mode 100644 index fa1be7e..0000000 --- a/forged/internal/render/readme.go +++ /dev/null @@ -1,34 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package render - -import ( - "bytes" - "html" - "html/template" - "strings" - - "github.com/microcosm-cc/bluemonday" - "github.com/yuin/goldmark" - "github.com/yuin/goldmark/extension" - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -var markdownConverter = goldmark.New(goldmark.WithExtensions(extension.GFM)) //nolint:gochecknoglobals - -// renderReadme renders and sanitizes README content from a byte slice and filename. -func Readme(data []byte, filename string) (string, template.HTML) { - switch strings.ToLower(filename) { - case "readme": - return "README", template.HTML("
" + html.EscapeString(misc.BytesToString(data)) + "
") //#nosec G203 - case "readme.md": - var buf bytes.Buffer - if err := markdownConverter.Convert(data, &buf); err != nil { - return "Error fetching README", EscapeHTML("Unable to render README: " + err.Error()) - } - return "README.md", template.HTML(bluemonday.UGCPolicy().SanitizeBytes(buf.Bytes())) //#nosec G203 - default: - return filename, template.HTML("
" + html.EscapeString(misc.BytesToString(data)) + "
") //#nosec G203 - } -} diff --git a/forged/internal/render/render.go b/forged/internal/render/render.go deleted file mode 100644 index 465e410..0000000 --- a/forged/internal/render/render.go +++ /dev/null @@ -1,5 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -// Package render provides functions to render code and READMEs. -package render diff --git a/forged/internal/scfg/.golangci.yaml b/forged/internal/scfg/.golangci.yaml deleted file mode 100644 index 59f1970..0000000 --- a/forged/internal/scfg/.golangci.yaml +++ /dev/null @@ -1,26 +0,0 @@ -linters: - enable-all: true - disable: - - perfsprint - - wsl - - varnamelen - - nlreturn - - exhaustruct - - wrapcheck - - lll - - exhaustive - - intrange - - godox - - nestif - - err113 - - staticcheck - - errorlint - - cyclop - - nonamedreturns - - funlen - - gochecknoglobals - - tenv - -issues: - max-issues-per-linter: 0 - max-same-issues: 0 diff --git a/forged/internal/scfg/reader.go b/forged/internal/scfg/reader.go deleted file mode 100644 index 6a2bedc..0000000 --- a/forged/internal/scfg/reader.go +++ /dev/null @@ -1,157 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser - -package scfg - -import ( - "bufio" - "fmt" - "io" - "os" - "strings" -) - -// This limits the max block nesting depth to prevent stack overflows. -const maxNestingDepth = 1000 - -// Load loads a configuration file. -func Load(path string) (Block, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - return Read(f) -} - -// Read parses a configuration file from an io.Reader. 
-func Read(r io.Reader) (Block, error) { - scanner := bufio.NewScanner(r) - - dec := decoder{scanner: scanner} - block, closingBrace, err := dec.readBlock() - if err != nil { - return nil, err - } else if closingBrace { - return nil, fmt.Errorf("line %v: unexpected '}'", dec.lineno) - } - - return block, scanner.Err() -} - -type decoder struct { - scanner *bufio.Scanner - lineno int - blockDepth int -} - -// readBlock reads a block. closingBrace is true if parsing stopped on '}' -// (otherwise, it stopped on Scanner.Scan). -func (dec *decoder) readBlock() (block Block, closingBrace bool, err error) { - dec.blockDepth++ - defer func() { - dec.blockDepth-- - }() - - if dec.blockDepth >= maxNestingDepth { - return nil, false, fmt.Errorf("exceeded max block depth") - } - - for dec.scanner.Scan() { - dec.lineno++ - - l := dec.scanner.Text() - words, err := splitWords(l) - if err != nil { - return nil, false, fmt.Errorf("line %v: %v", dec.lineno, err) - } else if len(words) == 0 { - continue - } - - if len(words) == 1 && l[len(l)-1] == '}' { - closingBrace = true - break - } - - var d *Directive - if words[len(words)-1] == "{" && l[len(l)-1] == '{' { - words = words[:len(words)-1] - - var name string - params := words - if len(words) > 0 { - name, params = words[0], words[1:] - } - - startLineno := dec.lineno - childBlock, childClosingBrace, err := dec.readBlock() - if err != nil { - return nil, false, err - } else if !childClosingBrace { - return nil, false, fmt.Errorf("line %v: unterminated block", startLineno) - } - - // Allows callers to tell apart "no block" and "empty block" - if childBlock == nil { - childBlock = Block{} - } - - d = &Directive{Name: name, Params: params, Children: childBlock, lineno: dec.lineno} - } else { - d = &Directive{Name: words[0], Params: words[1:], lineno: dec.lineno} - } - block = append(block, d) - } - - return block, closingBrace, nil -} - -func splitWords(l string) ([]string, error) { - var ( - words []string - sb strings.Builder - 
escape bool - quote rune - wantWSP bool - ) - for _, ch := range l { - switch { - case escape: - sb.WriteRune(ch) - escape = false - case wantWSP && (ch != ' ' && ch != '\t'): - return words, fmt.Errorf("atom not allowed after quoted string") - case ch == '\\': - escape = true - case quote != 0 && ch == quote: - quote = 0 - wantWSP = true - if sb.Len() == 0 { - words = append(words, "") - } - case quote == 0 && len(words) == 0 && sb.Len() == 0 && ch == '#': - return nil, nil - case quote == 0 && (ch == '\'' || ch == '"'): - if sb.Len() > 0 { - return words, fmt.Errorf("quoted string not allowed after atom") - } - quote = ch - case quote == 0 && (ch == ' ' || ch == '\t'): - if sb.Len() > 0 { - words = append(words, sb.String()) - } - sb.Reset() - wantWSP = false - default: - sb.WriteRune(ch) - } - } - if quote != 0 { - return words, fmt.Errorf("unterminated quoted string") - } - if sb.Len() > 0 { - words = append(words, sb.String()) - } - return words, nil -} diff --git a/forged/internal/scfg/scfg.go b/forged/internal/scfg/scfg.go deleted file mode 100644 index 4533e63..0000000 --- a/forged/internal/scfg/scfg.go +++ /dev/null @@ -1,59 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser - -// Package scfg parses and formats configuration files. -// Note that this fork of scfg behaves differently from upstream scfg. -package scfg - -import ( - "fmt" -) - -// Block is a list of directives. -type Block []*Directive - -// GetAll returns a list of directives with the provided name. -func (blk Block) GetAll(name string) []*Directive { - l := make([]*Directive, 0, len(blk)) - for _, child := range blk { - if child.Name == name { - l = append(l, child) - } - } - return l -} - -// Get returns the first directive with the provided name. -func (blk Block) Get(name string) *Directive { - for _, child := range blk { - if child.Name == name { - return child - } - } - return nil -} - -// Directive is a configuration directive. 
-type Directive struct { - Name string - Params []string - - Children Block - - lineno int -} - -// ParseParams extracts parameters from the directive. It errors out if the -// user hasn't provided enough parameters. -func (d *Directive) ParseParams(params ...*string) error { - if len(d.Params) < len(params) { - return fmt.Errorf("directive %q: want %v params, got %v", d.Name, len(params), len(d.Params)) - } - for i, ptr := range params { - if ptr == nil { - continue - } - *ptr = d.Params[i] - } - return nil -} diff --git a/forged/internal/scfg/struct.go b/forged/internal/scfg/struct.go deleted file mode 100644 index 98ec943..0000000 --- a/forged/internal/scfg/struct.go +++ /dev/null @@ -1,82 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser - -package scfg - -import ( - "fmt" - "reflect" - "strings" - "sync" -) - -// structInfo contains scfg metadata for structs. -type structInfo struct { - param int // index of field storing parameters - children map[string]int // indices of fields storing child directives -} - -var ( - structCacheMutex sync.Mutex - structCache = make(map[reflect.Type]*structInfo) -) - -func getStructInfo(t reflect.Type) (*structInfo, error) { - structCacheMutex.Lock() - defer structCacheMutex.Unlock() - - if info := structCache[t]; info != nil { - return info, nil - } - - info := &structInfo{ - param: -1, - children: make(map[string]int), - } - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Anonymous { - return nil, fmt.Errorf("scfg: anonymous struct fields are not supported") - } else if !f.IsExported() { - continue - } - - tag := f.Tag.Get("scfg") - parts := strings.Split(tag, ",") - k, options := parts[0], parts[1:] - if k == "-" { - continue - } else if k == "" { - k = f.Name - } - - isParam := false - for _, opt := range options { - switch opt { - case "param": - isParam = true - default: - return nil, fmt.Errorf("scfg: invalid option %q in struct tag", opt) - } - } - - if 
isParam { - if info.param >= 0 { - return nil, fmt.Errorf("scfg: param option specified multiple times in struct tag in %v", t) - } - if parts[0] != "" { - return nil, fmt.Errorf("scfg: name must be empty when param option is specified in struct tag in %v", t) - } - info.param = i - } else { - if _, ok := info.children[k]; ok { - return nil, fmt.Errorf("scfg: key %q specified multiple times in struct tag in %v", k, t) - } - info.children[k] = i - } - } - - structCache[t] = info - return info, nil -} diff --git a/forged/internal/scfg/unmarshal.go b/forged/internal/scfg/unmarshal.go deleted file mode 100644 index 8befc10..0000000 --- a/forged/internal/scfg/unmarshal.go +++ /dev/null @@ -1,375 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package scfg - -import ( - "encoding" - "fmt" - "io" - "reflect" - "strconv" -) - -// Decoder reads and decodes an scfg document from an input stream. -type Decoder struct { - r io.Reader - unknownDirectives []*Directive -} - -// NewDecoder returns a new decoder which reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{r: r} -} - -// UnknownDirectives returns a slice of all unknown directives encountered -// during Decode. -func (dec *Decoder) UnknownDirectives() []*Directive { - return dec.unknownDirectives -} - -// Decode reads scfg document from the input and stores it in the value pointed -// to by v. -// -// If v is nil or not a pointer, Decode returns an error. -// -// Blocks can be unmarshaled to: -// -// - Maps. Each directive is unmarshaled into a map entry. The map key must -// be a string. -// - Structs. Each directive is unmarshaled into a struct field. -// -// Duplicate directives are not allowed, unless the struct field or map value -// is a slice of values representing a directive: structs or maps. -// -// Directives can be unmarshaled to: -// -// - Maps. 
The children block is unmarshaled into the map. Parameters are not -// allowed. -// - Structs. The children block is unmarshaled into the struct. Parameters -// are allowed if one of the struct fields contains the "param" option in -// its tag. -// - Slices. Parameters are unmarshaled into the slice. Children blocks are -// not allowed. -// - Arrays. Parameters are unmarshaled into the array. The number of -// parameters must match exactly the length of the array. Children blocks -// are not allowed. -// - Strings, booleans, integers, floating-point values, values implementing -// encoding.TextUnmarshaler. Only a single parameter is allowed and is -// unmarshaled into the value. Children blocks are not allowed. -// -// The decoding of each struct field can be customized by the format string -// stored under the "scfg" key in the struct field's tag. The tag contains the -// name of the field possibly followed by a comma-separated list of options. -// The name may be empty in order to specify options without overriding the -// default field name. As a special case, if the field name is "-", the field -// is ignored. The "param" option specifies that directive parameters are -// stored in this field (the name must be empty). 
-func (dec *Decoder) Decode(v interface{}) error { - block, err := Read(dec.r) - if err != nil { - return err - } - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return fmt.Errorf("scfg: invalid value for unmarshaling") - } - - return dec.unmarshalBlock(block, rv) -} - -func (dec *Decoder) unmarshalBlock(block Block, v reflect.Value) error { - v = unwrapPointers(v) - t := v.Type() - - dirsByName := make(map[string][]*Directive, len(block)) - for _, dir := range block { - dirsByName[dir.Name] = append(dirsByName[dir.Name], dir) - } - - switch v.Kind() { - case reflect.Map: - if t.Key().Kind() != reflect.String { - return fmt.Errorf("scfg: map key type must be string") - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } else if v.Len() > 0 { - clearMap(v) - } - - for name, dirs := range dirsByName { - mv := reflect.New(t.Elem()).Elem() - if err := dec.unmarshalDirectiveList(dirs, mv); err != nil { - return err - } - v.SetMapIndex(reflect.ValueOf(name), mv) - } - - case reflect.Struct: - si, err := getStructInfo(t) - if err != nil { - return err - } - - seen := make(map[int]bool) - - for name, dirs := range dirsByName { - fieldIndex, ok := si.children[name] - if !ok { - dec.unknownDirectives = append(dec.unknownDirectives, dirs...) 
- continue - } - fv := v.Field(fieldIndex) - if err := dec.unmarshalDirectiveList(dirs, fv); err != nil { - return err - } - seen[fieldIndex] = true - } - - for name, fieldIndex := range si.children { - if fieldIndex == si.param { - continue - } - if _, ok := seen[fieldIndex]; !ok { - return fmt.Errorf("scfg: missing required directive %q", name) - } - } - - default: - return fmt.Errorf("scfg: unsupported type for unmarshaling blocks: %v", t) - } - - return nil -} - -func (dec *Decoder) unmarshalDirectiveList(dirs []*Directive, v reflect.Value) error { - v = unwrapPointers(v) - t := v.Type() - - if v.Kind() != reflect.Slice || !isDirectiveType(t.Elem()) { - if len(dirs) > 1 { - return newUnmarshalDirectiveError(dirs[1], "directive must not be specified more than once") - } - return dec.unmarshalDirective(dirs[0], v) - } - - sv := reflect.MakeSlice(t, len(dirs), len(dirs)) - for i, dir := range dirs { - if err := dec.unmarshalDirective(dir, sv.Index(i)); err != nil { - return err - } - } - v.Set(sv) - return nil -} - -// isDirectiveType checks whether a type can only be unmarshaled as a -// directive, not as a parameter. 
Accepting too many types here would result in -// ambiguities, see: -// https://lists.sr.ht/~emersion/public-inbox/%3C20230629132458.152205-1-contact%40emersion.fr%3E#%3Ch4Y2peS_YBqY3ar4XlmPDPiNBFpYGns3EBYUx3_6zWEhV2o8_-fBQveRujGADWYhVVCucHBEryFGoPtpC3d3mQ-x10pWnFogfprbQTSvtxc=@emersion.fr%3E -func isDirectiveType(t reflect.Type) bool { - for t.Kind() == reflect.Ptr { - t = t.Elem() - } - - textUnmarshalerType := reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() - if reflect.PointerTo(t).Implements(textUnmarshalerType) { - return false - } - - switch t.Kind() { - case reflect.Struct, reflect.Map: - return true - default: - return false - } -} - -func (dec *Decoder) unmarshalDirective(dir *Directive, v reflect.Value) error { - v = unwrapPointers(v) - t := v.Type() - - if v.CanAddr() { - if _, ok := v.Addr().Interface().(encoding.TextUnmarshaler); ok { - if len(dir.Children) != 0 { - return newUnmarshalDirectiveError(dir, "directive requires zero children") - } - return unmarshalParamList(dir, v) - } - } - - switch v.Kind() { - case reflect.Map: - if len(dir.Params) > 0 { - return newUnmarshalDirectiveError(dir, "directive requires zero parameters") - } - if err := dec.unmarshalBlock(dir.Children, v); err != nil { - return err - } - case reflect.Struct: - si, err := getStructInfo(t) - if err != nil { - return err - } - - if si.param >= 0 { - if err := unmarshalParamList(dir, v.Field(si.param)); err != nil { - return err - } - } else { - if len(dir.Params) > 0 { - return newUnmarshalDirectiveError(dir, "directive requires zero parameters") - } - } - - if err := dec.unmarshalBlock(dir.Children, v); err != nil { - return err - } - default: - if len(dir.Children) != 0 { - return newUnmarshalDirectiveError(dir, "directive requires zero children") - } - if err := unmarshalParamList(dir, v); err != nil { - return err - } - } - return nil -} - -func unmarshalParamList(dir *Directive, v reflect.Value) error { - switch v.Kind() { - case reflect.Slice: - t := v.Type() - 
sv := reflect.MakeSlice(t, len(dir.Params), len(dir.Params)) - for i, param := range dir.Params { - if err := unmarshalParam(param, sv.Index(i)); err != nil { - return newUnmarshalParamError(dir, i, err) - } - } - v.Set(sv) - case reflect.Array: - if len(dir.Params) != v.Len() { - return newUnmarshalDirectiveError(dir, fmt.Sprintf("directive requires exactly %v parameters", v.Len())) - } - for i, param := range dir.Params { - if err := unmarshalParam(param, v.Index(i)); err != nil { - return newUnmarshalParamError(dir, i, err) - } - } - default: - if len(dir.Params) != 1 { - return newUnmarshalDirectiveError(dir, "directive requires exactly one parameter") - } - if err := unmarshalParam(dir.Params[0], v); err != nil { - return newUnmarshalParamError(dir, 0, err) - } - } - - return nil -} - -func unmarshalParam(param string, v reflect.Value) error { - v = unwrapPointers(v) - t := v.Type() - - // TODO: improve our logic following: - // https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/encoding/json/decode.go;drc=b9b8cecbfc72168ca03ad586cc2ed52b0e8db409;l=421 - if v.CanAddr() { - if v, ok := v.Addr().Interface().(encoding.TextUnmarshaler); ok { - return v.UnmarshalText([]byte(param)) - } - } - - switch v.Kind() { - case reflect.String: - v.Set(reflect.ValueOf(param)) - case reflect.Bool: - switch param { - case "true": - v.Set(reflect.ValueOf(true)) - case "false": - v.Set(reflect.ValueOf(false)) - default: - return fmt.Errorf("invalid bool parameter %q", param) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - i, err := strconv.ParseInt(param, 10, t.Bits()) - if err != nil { - return fmt.Errorf("invalid %v parameter: %v", t, err) - } - v.Set(reflect.ValueOf(i).Convert(t)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - u, err := strconv.ParseUint(param, 10, t.Bits()) - if err != nil { - return fmt.Errorf("invalid %v parameter: %v", t, err) - } - v.Set(reflect.ValueOf(u).Convert(t)) - 
case reflect.Float32, reflect.Float64: - f, err := strconv.ParseFloat(param, t.Bits()) - if err != nil { - return fmt.Errorf("invalid %v parameter: %v", t, err) - } - v.Set(reflect.ValueOf(f).Convert(t)) - default: - return fmt.Errorf("unsupported type for unmarshaling parameter: %v", t) - } - - return nil -} - -func unwrapPointers(v reflect.Value) reflect.Value { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - return v -} - -func clearMap(v reflect.Value) { - for _, k := range v.MapKeys() { - v.SetMapIndex(k, reflect.Value{}) - } -} - -type unmarshalDirectiveError struct { - lineno int - name string - msg string -} - -func newUnmarshalDirectiveError(dir *Directive, msg string) *unmarshalDirectiveError { - return &unmarshalDirectiveError{ - name: dir.Name, - lineno: dir.lineno, - msg: msg, - } -} - -func (err *unmarshalDirectiveError) Error() string { - return fmt.Sprintf("line %v, directive %q: %v", err.lineno, err.name, err.msg) -} - -type unmarshalParamError struct { - lineno int - directive string - paramIndex int - err error -} - -func newUnmarshalParamError(dir *Directive, paramIndex int, err error) *unmarshalParamError { - return &unmarshalParamError{ - directive: dir.Name, - lineno: dir.lineno, - paramIndex: paramIndex, - err: err, - } -} - -func (err *unmarshalParamError) Error() string { - return fmt.Sprintf("line %v, directive %q, parameter %v: %v", err.lineno, err.directive, err.paramIndex+1, err.err) -} diff --git a/forged/internal/scfg/writer.go b/forged/internal/scfg/writer.go deleted file mode 100644 index 02a07fe..0000000 --- a/forged/internal/scfg/writer.go +++ /dev/null @@ -1,112 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser - -package scfg - -import ( - "errors" - "io" - "strings" -) - -var errDirEmptyName = errors.New("scfg: directive with empty name") - -// Write writes a parsed configuration to the provided io.Writer. 
-func Write(w io.Writer, blk Block) error { - enc := newEncoder(w) - err := enc.encodeBlock(blk) - return err -} - -// encoder write SCFG directives to an output stream. -type encoder struct { - w io.Writer - lvl int - err error -} - -// newEncoder returns a new encoder that writes to w. -func newEncoder(w io.Writer) *encoder { - return &encoder{w: w} -} - -func (enc *encoder) push() { - enc.lvl++ -} - -func (enc *encoder) pop() { - enc.lvl-- -} - -func (enc *encoder) writeIndent() { - for i := 0; i < enc.lvl; i++ { - enc.write([]byte("\t")) - } -} - -func (enc *encoder) write(p []byte) { - if enc.err != nil { - return - } - _, enc.err = enc.w.Write(p) -} - -func (enc *encoder) encodeBlock(blk Block) error { - for _, dir := range blk { - if err := enc.encodeDir(*dir); err != nil { - return err - } - } - return enc.err -} - -func (enc *encoder) encodeDir(dir Directive) error { - if enc.err != nil { - return enc.err - } - - if dir.Name == "" { - enc.err = errDirEmptyName - return enc.err - } - - enc.writeIndent() - enc.write([]byte(maybeQuote(dir.Name))) - for _, p := range dir.Params { - enc.write([]byte(" ")) - enc.write([]byte(maybeQuote(p))) - } - - if len(dir.Children) > 0 { - enc.write([]byte(" {\n")) - enc.push() - if err := enc.encodeBlock(dir.Children); err != nil { - return err - } - enc.pop() - - enc.writeIndent() - enc.write([]byte("}")) - } - enc.write([]byte("\n")) - - return enc.err -} - -const specialChars = "\"\\\r\n'{} \t" - -func maybeQuote(s string) string { - if s == "" || strings.ContainsAny(s, specialChars) { - var sb strings.Builder - sb.WriteByte('"') - for _, ch := range s { - if strings.ContainsRune(`"\`, ch) { - sb.WriteByte('\\') - } - sb.WriteRune(ch) - } - sb.WriteByte('"') - return sb.String() - } - return s -} diff --git a/forged/internal/server/server.go b/forged/internal/server/server.go new file mode 100644 index 0000000..62a9442 --- /dev/null +++ b/forged/internal/server/server.go @@ -0,0 +1,82 @@ +package server + +import ( + 
"context" + "fmt" + + "go.lindenii.runxiyu.org/forge/forged/internal/config" + "go.lindenii.runxiyu.org/forge/forged/internal/database" + "go.lindenii.runxiyu.org/forge/forged/internal/database/queries" + "go.lindenii.runxiyu.org/forge/forged/internal/global" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/hooks" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/lmtp" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/ssh" + "go.lindenii.runxiyu.org/forge/forged/internal/incoming/web" + "golang.org/x/sync/errgroup" +) + +type Server struct { + config config.Config + + database database.Database + hookServer *hooks.Server + lmtpServer *lmtp.Server + webServer *web.Server + sshServer *ssh.Server + + global global.Global +} + +func New(configPath string) (server *Server, err error) { + server = &Server{} //exhaustruct:ignore + + server.config, err = config.Open(configPath) + if err != nil { + return server, fmt.Errorf("open config: %w", err) + } + + queries := queries.New(&server.database) + + server.global.ForgeVersion = "unknown" // TODO + server.global.ForgeTitle = server.config.General.Title + + server.hookServer = hooks.New(server.config.Hooks, &server.global) + server.lmtpServer = lmtp.New(server.config.LMTP, &server.global) + server.webServer = web.New(server.config.Web, &server.global, queries) + server.sshServer, err = ssh.New(server.config.SSH, &server.global) + if err != nil { + return server, fmt.Errorf("create SSH server: %w", err) + } + + return server, nil +} + +func (server *Server) Run(ctx context.Context) (err error) { + // TODO: Not running git2d because it should be run separately. + // This needs to be documented somewhere, hence a TODO here for now. 
+ + g, gctx := errgroup.WithContext(ctx) + + server.database, err = database.Open(gctx, server.config.DB) + if err != nil { + return fmt.Errorf("open database: %w", err) + } + defer server.database.Close() + + g.Go(func() error { return server.hookServer.Run(gctx) }) + g.Go(func() error { return server.lmtpServer.Run(gctx) }) + g.Go(func() error { return server.webServer.Run(gctx) }) + g.Go(func() error { return server.sshServer.Run(gctx) }) + + err = g.Wait() + if err != nil { + return fmt.Errorf("server error: %w", err) + } + + err = ctx.Err() + if err != nil { + return fmt.Errorf("context exceeded: %w", err) + } + + return nil +} diff --git a/forged/internal/unsorted/acl.go b/forged/internal/unsorted/acl.go deleted file mode 100644 index c2e887d..0000000 --- a/forged/internal/unsorted/acl.go +++ /dev/null @@ -1,59 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "context" - - "github.com/jackc/pgx/v5/pgtype" -) - -// getRepoInfo returns the filesystem path and direct access permission for a -// given repo and a provided ssh public key. -// -// TODO: Revamp. 
-func (s *Server) getRepoInfo(ctx context.Context, groupPath []string, repoName, sshPubkey string) (repoID int, fsPath string, access bool, contribReq, userType string, userID int, err error) { - err = s.database.QueryRow(ctx, ` -WITH RECURSIVE group_path_cte AS ( - -- Start: match the first name in the path where parent_group IS NULL - SELECT - id, - parent_group, - name, - 1 AS depth - FROM groups - WHERE name = ($1::text[])[1] - AND parent_group IS NULL - - UNION ALL - - -- Recurse: join next segment of the path - SELECT - g.id, - g.parent_group, - g.name, - group_path_cte.depth + 1 - FROM groups g - JOIN group_path_cte ON g.parent_group = group_path_cte.id - WHERE g.name = ($1::text[])[group_path_cte.depth + 1] - AND group_path_cte.depth + 1 <= cardinality($1::text[]) -) -SELECT - r.id, - r.filesystem_path, - CASE WHEN ugr.user_id IS NOT NULL THEN TRUE ELSE FALSE END AS has_role_in_group, - r.contrib_requirements, - COALESCE(u.type, ''), - COALESCE(u.id, 0) -FROM group_path_cte g -JOIN repos r ON r.group_id = g.id -LEFT JOIN ssh_public_keys s ON s.key_string = $3 -LEFT JOIN users u ON u.id = s.user_id -LEFT JOIN user_group_roles ugr ON ugr.group_id = g.id AND ugr.user_id = u.id -WHERE g.depth = cardinality($1::text[]) - AND r.name = $2 -`, pgtype.FlatArray[string](groupPath), repoName, sshPubkey, - ).Scan(&repoID, &fsPath, &access, &contribReq, &userType, &userID) - return -} diff --git a/forged/internal/unsorted/config.go b/forged/internal/unsorted/config.go deleted file mode 100644 index 9f07480..0000000 --- a/forged/internal/unsorted/config.go +++ /dev/null @@ -1,94 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "bufio" - "errors" - "log/slog" - "os" - - "go.lindenii.runxiyu.org/forge/forged/internal/database" - "go.lindenii.runxiyu.org/forge/forged/internal/irc" - "go.lindenii.runxiyu.org/forge/forged/internal/scfg" -) - -type Config struct { - HTTP struct { - Net 
string `scfg:"net"` - Addr string `scfg:"addr"` - CookieExpiry int `scfg:"cookie_expiry"` - Root string `scfg:"root"` - ReadTimeout uint32 `scfg:"read_timeout"` - WriteTimeout uint32 `scfg:"write_timeout"` - IdleTimeout uint32 `scfg:"idle_timeout"` - ReverseProxy bool `scfg:"reverse_proxy"` - } `scfg:"http"` - Hooks struct { - Socket string `scfg:"socket"` - Execs string `scfg:"execs"` - } `scfg:"hooks"` - LMTP struct { - Socket string `scfg:"socket"` - Domain string `scfg:"domain"` - MaxSize int64 `scfg:"max_size"` - WriteTimeout uint32 `scfg:"write_timeout"` - ReadTimeout uint32 `scfg:"read_timeout"` - } `scfg:"lmtp"` - Git struct { - RepoDir string `scfg:"repo_dir"` - Socket string `scfg:"socket"` - DaemonPath string `scfg:"daemon_path"` - } `scfg:"git"` - SSH struct { - Net string `scfg:"net"` - Addr string `scfg:"addr"` - Key string `scfg:"key"` - Root string `scfg:"root"` - } `scfg:"ssh"` - IRC irc.Config `scfg:"irc"` - General struct { - Title string `scfg:"title"` - } `scfg:"general"` - DB struct { - Type string `scfg:"type"` - Conn string `scfg:"conn"` - } `scfg:"db"` - Pprof struct { - Net string `scfg:"net"` - Addr string `scfg:"addr"` - } `scfg:"pprof"` -} - -// LoadConfig loads a configuration file from the specified path and unmarshals -// it to the global [config] struct. This may race with concurrent reads from -// [config]; additional synchronization is necessary if the configuration is to -// be made reloadable. 
-func (s *Server) loadConfig(path string) (err error) { - var configFile *os.File - if configFile, err = os.Open(path); err != nil { - return err - } - defer configFile.Close() - - decoder := scfg.NewDecoder(bufio.NewReader(configFile)) - if err = decoder.Decode(&s.config); err != nil { - return err - } - for _, u := range decoder.UnknownDirectives() { - slog.Warn("unknown configuration directive", "directive", u) - } - - if s.config.DB.Type != "postgres" { - return errors.New("unsupported database type") - } - - if s.database, err = database.Open(s.config.DB.Conn); err != nil { - return err - } - - s.globalData["forge_title"] = s.config.General.Title - - return nil -} diff --git a/forged/internal/unsorted/database.go b/forged/internal/unsorted/database.go deleted file mode 100644 index 222b0c4..0000000 --- a/forged/internal/unsorted/database.go +++ /dev/null @@ -1,43 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "context" - - "github.com/jackc/pgx/v5" -) - -// TODO: All database handling logic in all request handlers must be revamped. -// We must ensure that each request has all logic in one transaction (subject -// to exceptions if appropriate) so they get a consistent view of the database -// at a single point. A failure to do so may cause things as serious as -// privilege escalation. - -// queryNameDesc is a helper function that executes a query and returns a -// list of nameDesc results. The query must return two string arguments, i.e. a -// name and a description. 
-func (s *Server) queryNameDesc(ctx context.Context, query string, args ...any) (result []nameDesc, err error) { - var rows pgx.Rows - - if rows, err = s.database.Query(ctx, query, args...); err != nil { - return nil, err - } - defer rows.Close() - - for rows.Next() { - var name, description string - if err = rows.Scan(&name, &description); err != nil { - return nil, err - } - result = append(result, nameDesc{name, description}) - } - return result, rows.Err() -} - -// nameDesc holds a name and a description. -type nameDesc struct { - Name string - Description string -} diff --git a/forged/internal/unsorted/fedauth.go b/forged/internal/unsorted/fedauth.go deleted file mode 100644 index f54649b..0000000 --- a/forged/internal/unsorted/fedauth.go +++ /dev/null @@ -1,97 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "bufio" - "context" - "errors" - "io" - "net/http" - "net/url" - "strings" - - "github.com/jackc/pgx/v5" -) - -// fedauth checks whether a user's SSH public key matches the remote username -// they claim to have on the service. If so, the association is recorded. -func (s *Server) fedauth(ctx context.Context, userID int, service, remoteUsername, pubkey string) (bool, error) { - var err error - - matched := false - usernameEscaped := url.PathEscape(remoteUsername) - - var req *http.Request - switch service { - // TODO: Services should be configurable by the instance administrator - // and should not be hardcoded in the source code. 
- case "sr.ht": - req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://meta.sr.ht/~"+usernameEscaped+".keys", nil) - case "github": - req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://github.com/"+usernameEscaped+".keys", nil) - case "codeberg": - req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://codeberg.org/"+usernameEscaped+".keys", nil) - case "tangled": - req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://tangled.sh/keys/"+usernameEscaped, nil) - // TODO: Don't rely on one webview - default: - return false, errors.New("unknown federated service") - } - if err != nil { - return false, err - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return false, err - } - defer func() { - _ = resp.Body.Close() - }() - buf := bufio.NewReader(resp.Body) - - for { - line, err := buf.ReadString('\n') - if errors.Is(err, io.EOF) { - break - } else if err != nil { - return false, err - } - - lineSplit := strings.Split(line, " ") - if len(lineSplit) < 2 { - continue - } - line = strings.Join(lineSplit[:2], " ") - - if line == pubkey { - matched = true - break - } - } - - if !matched { - return false, nil - } - - var txn pgx.Tx - if txn, err = s.database.Begin(ctx); err != nil { - return false, err - } - defer func() { - _ = txn.Rollback(ctx) - }() - if _, err = txn.Exec(ctx, `UPDATE users SET type = 'federated' WHERE id = $1 AND type = 'pubkey_only'`, userID); err != nil { - return false, err - } - if _, err = txn.Exec(ctx, `INSERT INTO federated_identities (user_id, service, remote_username) VALUES ($1, $2, $3)`, userID, service, remoteUsername); err != nil { - return false, err - } - if err = txn.Commit(ctx); err != nil { - return false, err - } - - return true, nil -} diff --git a/forged/internal/unsorted/git_hooks_handle_linux.go b/forged/internal/unsorted/git_hooks_handle_linux.go deleted file mode 100644 index f904550..0000000 --- a/forged/internal/unsorted/git_hooks_handle_linux.go +++ 
/dev/null @@ -1,377 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu -// -//go:build linux - -package unsorted - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "net" - "os" - "path/filepath" - "strconv" - "strings" - "syscall" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/jackc/pgx/v5" - "go.lindenii.runxiyu.org/forge/forged/internal/ansiec" - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -var ( - errGetFD = errors.New("unable to get file descriptor") - errGetUcred = errors.New("failed getsockopt") -) - -// hooksHandler handles a connection from hookc via the -// unix socket. -func (s *Server) hooksHandler(conn net.Conn) { - var ctx context.Context - var cancel context.CancelFunc - var ucred *syscall.Ucred - var err error - var cookie []byte - var packPass packPass - var sshStderr io.Writer - var hookRet byte - - defer conn.Close() - ctx, cancel = context.WithCancel(context.Background()) - defer cancel() - - // There aren't reasonable cases where someone would run this as - // another user. 
- if ucred, err = getUcred(conn); err != nil { - if _, err = conn.Write([]byte{1}); err != nil { - return - } - writeRedError(conn, "\nUnable to get peer credentials: %v", err) - return - } - uint32uid := uint32(os.Getuid()) //#nosec G115 - if ucred.Uid != uint32uid { - if _, err = conn.Write([]byte{1}); err != nil { - return - } - writeRedError(conn, "\nUID mismatch") - return - } - - cookie = make([]byte, 64) - if _, err = conn.Read(cookie); err != nil { - if _, err = conn.Write([]byte{1}); err != nil { - return - } - writeRedError(conn, "\nFailed to read cookie: %v", err) - return - } - - { - var ok bool - packPass, ok = s.packPasses.Load(misc.BytesToString(cookie)) - if !ok { - if _, err = conn.Write([]byte{1}); err != nil { - return - } - writeRedError(conn, "\nInvalid handler cookie") - return - } - } - - sshStderr = packPass.session.Stderr() - - _, _ = sshStderr.Write([]byte{'\n'}) - - hookRet = func() byte { - var argc64 uint64 - if err = binary.Read(conn, binary.NativeEndian, &argc64); err != nil { - writeRedError(sshStderr, "Failed to read argc: %v", err) - return 1 - } - var args []string - for range argc64 { - var arg bytes.Buffer - for { - nextByte := make([]byte, 1) - n, err := conn.Read(nextByte) - if err != nil || n != 1 { - writeRedError(sshStderr, "Failed to read arg: %v", err) - return 1 - } - if nextByte[0] == 0 { - break - } - arg.WriteByte(nextByte[0]) - } - args = append(args, arg.String()) - } - - gitEnv := make(map[string]string) - for { - var envLine bytes.Buffer - for { - nextByte := make([]byte, 1) - n, err := conn.Read(nextByte) - if err != nil || n != 1 { - writeRedError(sshStderr, "Failed to read environment variable: %v", err) - return 1 - } - if nextByte[0] == 0 { - break - } - envLine.WriteByte(nextByte[0]) - } - if envLine.Len() == 0 { - break - } - kv := envLine.String() - parts := strings.SplitN(kv, "=", 2) - if len(parts) < 2 { - writeRedError(sshStderr, "Invalid environment variable line: %v", kv) - return 1 - } - 
gitEnv[parts[0]] = parts[1] - } - - var stdin bytes.Buffer - if _, err = io.Copy(&stdin, conn); err != nil { - writeRedError(conn, "Failed to read to the stdin buffer: %v", err) - } - - switch filepath.Base(args[0]) { - case "pre-receive": - if packPass.directAccess { - return 0 - } - allOK := true - for { - var line, oldOID, rest, newIOID, refName string - var found bool - var oldHash, newHash plumbing.Hash - var oldCommit, newCommit *object.Commit - var pushOptCount int - - pushOptCount, err = strconv.Atoi(gitEnv["GIT_PUSH_OPTION_COUNT"]) - if err != nil { - writeRedError(sshStderr, "Failed to parse GIT_PUSH_OPTION_COUNT: %v", err) - return 1 - } - - // TODO: Allow existing users (even if they are already federated or registered) to add a federated user ID... though perhaps this should be in the normal SSH interface instead of the git push interface? - // Also it'd be nice to be able to combine users or whatever - if packPass.contribReq == "federated" && packPass.userType != "federated" && packPass.userType != "registered" { - if pushOptCount == 0 { - writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. 
For example, git push -o fedid=sr.ht:runxiyu") - return 1 - } - for pushOptIndex := range pushOptCount { - pushOpt, ok := gitEnv[fmt.Sprintf("GIT_PUSH_OPTION_%d", pushOptIndex)] - if !ok { - writeRedError(sshStderr, "Failed to get push option %d", pushOptIndex) - return 1 - } - if strings.HasPrefix(pushOpt, "fedid=") { - fedUserID := strings.TrimPrefix(pushOpt, "fedid=") - service, username, found := strings.Cut(fedUserID, ":") - if !found { - writeRedError(sshStderr, "Invalid federated user identifier %#v does not contain a colon", fedUserID) - return 1 - } - - ok, err := s.fedauth(ctx, packPass.userID, service, username, packPass.pubkey) - if err != nil { - writeRedError(sshStderr, "Failed to verify federated user identifier %#v: %v", fedUserID, err) - return 1 - } - if !ok { - writeRedError(sshStderr, "Failed to verify federated user identifier %#v: you don't seem to be on the list", fedUserID) - return 1 - } - - break - } - if pushOptIndex == pushOptCount-1 { - writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. 
For example, git push -o fedid=sr.ht:runxiyu") - return 1 - } - } - } - - line, err = stdin.ReadString('\n') - if errors.Is(err, io.EOF) { - break - } else if err != nil { - writeRedError(sshStderr, "Failed to read pre-receive line: %v", err) - return 1 - } - line = line[:len(line)-1] - - oldOID, rest, found = strings.Cut(line, " ") - if !found { - writeRedError(sshStderr, "Invalid pre-receive line: %v", line) - return 1 - } - - newIOID, refName, found = strings.Cut(rest, " ") - if !found { - writeRedError(sshStderr, "Invalid pre-receive line: %v", line) - return 1 - } - - if strings.HasPrefix(refName, "refs/heads/contrib/") { - if allZero(oldOID) { // New branch - fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName) - var newMRLocalID int - - if packPass.userID != 0 { - err = s.database.QueryRow(ctx, - "INSERT INTO merge_requests (repo_id, creator, source_ref, status) VALUES ($1, $2, $3, 'open') RETURNING repo_local_id", - packPass.repoID, packPass.userID, strings.TrimPrefix(refName, "refs/heads/"), - ).Scan(&newMRLocalID) - } else { - err = s.database.QueryRow(ctx, - "INSERT INTO merge_requests (repo_id, source_ref, status) VALUES ($1, $2, 'open') RETURNING repo_local_id", - packPass.repoID, strings.TrimPrefix(refName, "refs/heads/"), - ).Scan(&newMRLocalID) - } - if err != nil { - writeRedError(sshStderr, "Error creating merge request: %v", err) - return 1 - } - mergeRequestWebURL := fmt.Sprintf("%s/contrib/%d/", s.genHTTPRemoteURL(packPass.groupPath, packPass.repoName), newMRLocalID) - fmt.Fprintln(sshStderr, ansiec.Blue+"Created merge request at", mergeRequestWebURL+ansiec.Reset) - - s.ircBot.Send("PRIVMSG #chat :New merge request at " + mergeRequestWebURL) - } else { // Existing contrib branch - var existingMRUser int - var isAncestor bool - - err = s.database.QueryRow(ctx, - "SELECT COALESCE(creator, 0) FROM merge_requests WHERE source_ref = $1 AND repo_id = $2", - strings.TrimPrefix(refName, "refs/heads/"), packPass.repoID, - 
).Scan(&existingMRUser) - if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - writeRedError(sshStderr, "No existing merge request for existing contrib branch: %v", err) - } else { - writeRedError(sshStderr, "Error querying for existing merge request: %v", err) - } - return 1 - } - if existingMRUser == 0 { - allOK = false - fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs to unowned MR)") - continue - } - - if existingMRUser != packPass.userID { - allOK = false - fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs another user's MR)") - continue - } - - oldHash = plumbing.NewHash(oldOID) - - if oldCommit, err = packPass.repo.CommitObject(oldHash); err != nil { - writeRedError(sshStderr, "Daemon failed to get old commit: %v", err) - return 1 - } - - // Potential BUG: I'm not sure if new_commit is guaranteed to be - // detectable as they haven't been merged into the main repo's - // objects yet. But it seems to work, and I don't think there's - // any reason for this to only work intermitently. 
- newHash = plumbing.NewHash(newIOID) - if newCommit, err = packPass.repo.CommitObject(newHash); err != nil { - writeRedError(sshStderr, "Daemon failed to get new commit: %v", err) - return 1 - } - - if isAncestor, err = oldCommit.IsAncestor(newCommit); err != nil { - writeRedError(sshStderr, "Daemon failed to check if old commit is ancestor: %v", err) - return 1 - } - - if !isAncestor { - // TODO: Create MR snapshot ref instead - allOK = false - fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(force pushes are not supported yet)") - continue - } - - fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName) - } - } else { // Non-contrib branch - allOK = false - fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(you cannot push to branches outside of contrib/*)") - } - } - - fmt.Fprintln(sshStderr) - if allOK { - fmt.Fprintln(sshStderr, "Overall "+ansiec.Green+"ACK"+ansiec.Reset+" (all checks passed)") - return 0 - } - fmt.Fprintln(sshStderr, "Overall "+ansiec.Red+"NAK"+ansiec.Reset+" (one or more branches failed checks)") - return 1 - default: - fmt.Fprintln(sshStderr, ansiec.Red+"Invalid hook:", args[0]+ansiec.Reset) - return 1 - } - }() - - fmt.Fprintln(sshStderr) - - _, _ = conn.Write([]byte{hookRet}) -} - -// serveGitHooks handles connections on the specified network listener and -// treats incoming connections as those from git hook handlers by spawning -// sessions. The listener must be a SOCK_STREAM UNIX domain socket. The -// function itself blocks. -func (s *Server) serveGitHooks(listener net.Listener) error { - for { - conn, err := listener.Accept() - if err != nil { - return err - } - go s.hooksHandler(conn) - } -} - -// getUcred fetches connection credentials as a [syscall.Ucred] from a given -// [net.Conn]. It panics when conn is not a [net.UnixConn]. 
-func getUcred(conn net.Conn) (ucred *syscall.Ucred, err error) { - unixConn := conn.(*net.UnixConn) - var unixConnFD *os.File - - if unixConnFD, err = unixConn.File(); err != nil { - return nil, errGetFD - } - defer unixConnFD.Close() - - if ucred, err = syscall.GetsockoptUcred(int(unixConnFD.Fd()), syscall.SOL_SOCKET, syscall.SO_PEERCRED); err != nil { - return nil, errGetUcred - } - return ucred, nil -} - -// allZero returns true if all runes in a given string are '0'. The comparison -// is not constant time and must not be used in contexts where time-based side -// channel attacks are a concern. -func allZero(s string) bool { - for _, r := range s { - if r != '0' { - return false - } - } - return true -} diff --git a/forged/internal/unsorted/git_hooks_handle_other.go b/forged/internal/unsorted/git_hooks_handle_other.go deleted file mode 100644 index 70b2072..0000000 --- a/forged/internal/unsorted/git_hooks_handle_other.go +++ /dev/null @@ -1,336 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu -// -//go:build !linux - -package unsorted - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "net" - "path/filepath" - "strconv" - "strings" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/jackc/pgx/v5" - "go.lindenii.runxiyu.org/forge/forged/internal/ansiec" - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -// hooksHandler handles a connection from hookc via the -// unix socket. 
-func (s *Server) hooksHandler(conn net.Conn) { - var ctx context.Context - var cancel context.CancelFunc - var err error - var cookie []byte - var packPass packPass - var sshStderr io.Writer - var hookRet byte - - defer conn.Close() - ctx, cancel = context.WithCancel(context.Background()) - defer cancel() - - // TODO: ucred-like checks - - cookie = make([]byte, 64) - if _, err = conn.Read(cookie); err != nil { - if _, err = conn.Write([]byte{1}); err != nil { - return - } - writeRedError(conn, "\nFailed to read cookie: %v", err) - return - } - - { - var ok bool - packPass, ok = s.packPasses.Load(misc.BytesToString(cookie)) - if !ok { - if _, err = conn.Write([]byte{1}); err != nil { - return - } - writeRedError(conn, "\nInvalid handler cookie") - return - } - } - - sshStderr = packPass.session.Stderr() - - _, _ = sshStderr.Write([]byte{'\n'}) - - hookRet = func() byte { - var argc64 uint64 - if err = binary.Read(conn, binary.NativeEndian, &argc64); err != nil { - writeRedError(sshStderr, "Failed to read argc: %v", err) - return 1 - } - var args []string - for range argc64 { - var arg bytes.Buffer - for { - nextByte := make([]byte, 1) - n, err := conn.Read(nextByte) - if err != nil || n != 1 { - writeRedError(sshStderr, "Failed to read arg: %v", err) - return 1 - } - if nextByte[0] == 0 { - break - } - arg.WriteByte(nextByte[0]) - } - args = append(args, arg.String()) - } - - gitEnv := make(map[string]string) - for { - var envLine bytes.Buffer - for { - nextByte := make([]byte, 1) - n, err := conn.Read(nextByte) - if err != nil || n != 1 { - writeRedError(sshStderr, "Failed to read environment variable: %v", err) - return 1 - } - if nextByte[0] == 0 { - break - } - envLine.WriteByte(nextByte[0]) - } - if envLine.Len() == 0 { - break - } - kv := envLine.String() - parts := strings.SplitN(kv, "=", 2) - if len(parts) < 2 { - writeRedError(sshStderr, "Invalid environment variable line: %v", kv) - return 1 - } - gitEnv[parts[0]] = parts[1] - } - - var stdin bytes.Buffer 
- if _, err = io.Copy(&stdin, conn); err != nil { - writeRedError(conn, "Failed to read to the stdin buffer: %v", err) - } - - switch filepath.Base(args[0]) { - case "pre-receive": - if packPass.directAccess { - return 0 - } - allOK := true - for { - var line, oldOID, rest, newIOID, refName string - var found bool - var oldHash, newHash plumbing.Hash - var oldCommit, newCommit *object.Commit - var pushOptCount int - - pushOptCount, err = strconv.Atoi(gitEnv["GIT_PUSH_OPTION_COUNT"]) - if err != nil { - writeRedError(sshStderr, "Failed to parse GIT_PUSH_OPTION_COUNT: %v", err) - return 1 - } - - // TODO: Allow existing users (even if they are already federated or registered) to add a federated user ID... though perhaps this should be in the normal SSH interface instead of the git push interface? - // Also it'd be nice to be able to combine users or whatever - if packPass.contribReq == "federated" && packPass.userType != "federated" && packPass.userType != "registered" { - if pushOptCount == 0 { - writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. 
For example, git push -o fedid=sr.ht:runxiyu") - return 1 - } - for pushOptIndex := range pushOptCount { - pushOpt, ok := gitEnv[fmt.Sprintf("GIT_PUSH_OPTION_%d", pushOptIndex)] - if !ok { - writeRedError(sshStderr, "Failed to get push option %d", pushOptIndex) - return 1 - } - if strings.HasPrefix(pushOpt, "fedid=") { - fedUserID := strings.TrimPrefix(pushOpt, "fedid=") - service, username, found := strings.Cut(fedUserID, ":") - if !found { - writeRedError(sshStderr, "Invalid federated user identifier %#v does not contain a colon", fedUserID) - return 1 - } - - ok, err := s.fedauth(ctx, packPass.userID, service, username, packPass.pubkey) - if err != nil { - writeRedError(sshStderr, "Failed to verify federated user identifier %#v: %v", fedUserID, err) - return 1 - } - if !ok { - writeRedError(sshStderr, "Failed to verify federated user identifier %#v: you don't seem to be on the list", fedUserID) - return 1 - } - - break - } - if pushOptIndex == pushOptCount-1 { - writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. 
For example, git push -o fedid=sr.ht:runxiyu") - return 1 - } - } - } - - line, err = stdin.ReadString('\n') - if errors.Is(err, io.EOF) { - break - } else if err != nil { - writeRedError(sshStderr, "Failed to read pre-receive line: %v", err) - return 1 - } - line = line[:len(line)-1] - - oldOID, rest, found = strings.Cut(line, " ") - if !found { - writeRedError(sshStderr, "Invalid pre-receive line: %v", line) - return 1 - } - - newIOID, refName, found = strings.Cut(rest, " ") - if !found { - writeRedError(sshStderr, "Invalid pre-receive line: %v", line) - return 1 - } - - if strings.HasPrefix(refName, "refs/heads/contrib/") { - if allZero(oldOID) { // New branch - fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName) - var newMRLocalID int - - if packPass.userID != 0 { - err = s.database.QueryRow(ctx, - "INSERT INTO merge_requests (repo_id, creator, source_ref, status) VALUES ($1, $2, $3, 'open') RETURNING repo_local_id", - packPass.repoID, packPass.userID, strings.TrimPrefix(refName, "refs/heads/"), - ).Scan(&newMRLocalID) - } else { - err = s.database.QueryRow(ctx, - "INSERT INTO merge_requests (repo_id, source_ref, status) VALUES ($1, $2, 'open') RETURNING repo_local_id", - packPass.repoID, strings.TrimPrefix(refName, "refs/heads/"), - ).Scan(&newMRLocalID) - } - if err != nil { - writeRedError(sshStderr, "Error creating merge request: %v", err) - return 1 - } - mergeRequestWebURL := fmt.Sprintf("%s/contrib/%d/", s.genHTTPRemoteURL(packPass.groupPath, packPass.repoName), newMRLocalID) - fmt.Fprintln(sshStderr, ansiec.Blue+"Created merge request at", mergeRequestWebURL+ansiec.Reset) - - s.ircBot.Send("PRIVMSG #chat :New merge request at " + mergeRequestWebURL) - } else { // Existing contrib branch - var existingMRUser int - var isAncestor bool - - err = s.database.QueryRow(ctx, - "SELECT COALESCE(creator, 0) FROM merge_requests WHERE source_ref = $1 AND repo_id = $2", - strings.TrimPrefix(refName, "refs/heads/"), packPass.repoID, - 
).Scan(&existingMRUser) - if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - writeRedError(sshStderr, "No existing merge request for existing contrib branch: %v", err) - } else { - writeRedError(sshStderr, "Error querying for existing merge request: %v", err) - } - return 1 - } - if existingMRUser == 0 { - allOK = false - fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs to unowned MR)") - continue - } - - if existingMRUser != packPass.userID { - allOK = false - fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs another user's MR)") - continue - } - - oldHash = plumbing.NewHash(oldOID) - - if oldCommit, err = packPass.repo.CommitObject(oldHash); err != nil { - writeRedError(sshStderr, "Daemon failed to get old commit: %v", err) - return 1 - } - - // Potential BUG: I'm not sure if new_commit is guaranteed to be - // detectable as they haven't been merged into the main repo's - // objects yet. But it seems to work, and I don't think there's - // any reason for this to only work intermitently. 
- newHash = plumbing.NewHash(newIOID) - if newCommit, err = packPass.repo.CommitObject(newHash); err != nil { - writeRedError(sshStderr, "Daemon failed to get new commit: %v", err) - return 1 - } - - if isAncestor, err = oldCommit.IsAncestor(newCommit); err != nil { - writeRedError(sshStderr, "Daemon failed to check if old commit is ancestor: %v", err) - return 1 - } - - if !isAncestor { - // TODO: Create MR snapshot ref instead - allOK = false - fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(force pushes are not supported yet)") - continue - } - - fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName) - } - } else { // Non-contrib branch - allOK = false - fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(you cannot push to branches outside of contrib/*)") - } - } - - fmt.Fprintln(sshStderr) - if allOK { - fmt.Fprintln(sshStderr, "Overall "+ansiec.Green+"ACK"+ansiec.Reset+" (all checks passed)") - return 0 - } - fmt.Fprintln(sshStderr, "Overall "+ansiec.Red+"NAK"+ansiec.Reset+" (one or more branches failed checks)") - return 1 - default: - fmt.Fprintln(sshStderr, ansiec.Red+"Invalid hook:", args[0]+ansiec.Reset) - return 1 - } - }() - - fmt.Fprintln(sshStderr) - - _, _ = conn.Write([]byte{hookRet}) -} - -// serveGitHooks handles connections on the specified network listener and -// treats incoming connections as those from git hook handlers by spawning -// sessions. The listener must be a SOCK_STREAM UNIX domain socket. The -// function itself blocks. -func (s *Server) serveGitHooks(listener net.Listener) error { - for { - conn, err := listener.Accept() - if err != nil { - return err - } - go s.hooksHandler(conn) - } -} - -// allZero returns true if all runes in a given string are '0'. The comparison -// is not constant time and must not be used in contexts where time-based side -// channel attacks are a concern. 
-func allZero(s string) bool { - for _, r := range s { - if r != '0' { - return false - } - } - return true -} diff --git a/forged/internal/unsorted/git_init.go b/forged/internal/unsorted/git_init.go deleted file mode 100644 index a9bba78..0000000 --- a/forged/internal/unsorted/git_init.go +++ /dev/null @@ -1,34 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "github.com/go-git/go-git/v5" - gitConfig "github.com/go-git/go-git/v5/config" - gitFmtConfig "github.com/go-git/go-git/v5/plumbing/format/config" -) - -// gitInit initializes a bare git repository with the forge-deployed hooks -// directory as the hooksPath. -func (s *Server) gitInit(repoPath string) (err error) { - var repo *git.Repository - var gitConf *gitConfig.Config - - if repo, err = git.PlainInit(repoPath, true); err != nil { - return err - } - - if gitConf, err = repo.Config(); err != nil { - return err - } - - gitConf.Raw.SetOption("core", gitFmtConfig.NoSubsection, "hooksPath", s.config.Hooks.Execs) - gitConf.Raw.SetOption("receive", gitFmtConfig.NoSubsection, "advertisePushOptions", "true") - - if err = repo.SetConfig(gitConf); err != nil { - return err - } - - return nil -} diff --git a/forged/internal/unsorted/git_misc.go b/forged/internal/unsorted/git_misc.go deleted file mode 100644 index dd93726..0000000 --- a/forged/internal/unsorted/git_misc.go +++ /dev/null @@ -1,95 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "context" - "errors" - "io" - "iter" - - "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/jackc/pgx/v5/pgtype" -) - -// openRepo opens a git repository by group and repo name. -// -// TODO: This should be deprecated in favor of doing it in the relevant -// request/router context in the future, as it cannot cover the nuance of -// fields needed. 
-func (s *Server) openRepo(ctx context.Context, groupPath []string, repoName string) (repo *git.Repository, description string, repoID int, fsPath string, err error) { - err = s.database.QueryRow(ctx, ` -WITH RECURSIVE group_path_cte AS ( - -- Start: match the first name in the path where parent_group IS NULL - SELECT - id, - parent_group, - name, - 1 AS depth - FROM groups - WHERE name = ($1::text[])[1] - AND parent_group IS NULL - - UNION ALL - - -- Recurse: join next segment of the path - SELECT - g.id, - g.parent_group, - g.name, - group_path_cte.depth + 1 - FROM groups g - JOIN group_path_cte ON g.parent_group = group_path_cte.id - WHERE g.name = ($1::text[])[group_path_cte.depth + 1] - AND group_path_cte.depth + 1 <= cardinality($1::text[]) -) -SELECT - r.filesystem_path, - COALESCE(r.description, ''), - r.id -FROM group_path_cte g -JOIN repos r ON r.group_id = g.id -WHERE g.depth = cardinality($1::text[]) - AND r.name = $2 - `, pgtype.FlatArray[string](groupPath), repoName).Scan(&fsPath, &description, &repoID) - if err != nil { - return - } - - repo, err = git.PlainOpen(fsPath) - return -} - -// commitIterSeqErr creates an [iter.Seq[*object.Commit]] from an -// [object.CommitIter], and additionally returns a pointer to error. -// The pointer to error is guaranteed to be populated with either nil or the -// error returned by the commit iterator after the returned iterator is -// finished. 
-func commitIterSeqErr(ctx context.Context, commitIter object.CommitIter) (iter.Seq[*object.Commit], *error) { - var err error - return func(yield func(*object.Commit) bool) { - for { - commit, err2 := commitIter.Next() - if err2 != nil { - if errors.Is(err2, io.EOF) { - return - } - err = err2 - return - } - - select { - case <-ctx.Done(): - err = ctx.Err() - return - default: - } - - if !yield(commit) { - return - } - } - }, &err -} diff --git a/forged/internal/unsorted/git_plumbing.go b/forged/internal/unsorted/git_plumbing.go deleted file mode 100644 index e7ebe8f..0000000 --- a/forged/internal/unsorted/git_plumbing.go +++ /dev/null @@ -1,188 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "bytes" - "context" - "encoding/hex" - "errors" - "os" - "os/exec" - "path" - "sort" - "strings" - - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -func writeTree(ctx context.Context, repoPath string, entries []treeEntry) (string, error) { - var buf bytes.Buffer - - sort.Slice(entries, func(i, j int) bool { - nameI, nameJ := entries[i].name, entries[j].name - - if nameI == nameJ { // meh - return !(entries[i].mode == "40000") && (entries[j].mode == "40000") - } - - if strings.HasPrefix(nameJ, nameI) && len(nameI) < len(nameJ) { - return !(entries[i].mode == "40000") - } - - if strings.HasPrefix(nameI, nameJ) && len(nameJ) < len(nameI) { - return entries[j].mode == "40000" - } - - return nameI < nameJ - }) - - for _, e := range entries { - buf.WriteString(e.mode) - buf.WriteByte(' ') - buf.WriteString(e.name) - buf.WriteByte(0) - buf.Write(e.sha) - } - - cmd := exec.CommandContext(ctx, "git", "hash-object", "-w", "-t", "tree", "--stdin") - cmd.Env = append(os.Environ(), "GIT_DIR="+repoPath) - cmd.Stdin = &buf - - var out bytes.Buffer - cmd.Stdout = &out - if err := cmd.Run(); err != nil { - return "", err - } - return strings.TrimSpace(out.String()), nil -} - -func 
buildTreeRecursive(ctx context.Context, repoPath, baseTree string, updates map[string][]byte) (string, error) { - treeCache := make(map[string][]treeEntry) - - var walk func(string, string) error - walk = func(prefix, sha string) error { - cmd := exec.CommandContext(ctx, "git", "cat-file", "tree", sha) - cmd.Env = append(os.Environ(), "GIT_DIR="+repoPath) - var out bytes.Buffer - cmd.Stdout = &out - if err := cmd.Run(); err != nil { - return err - } - data := out.Bytes() - i := 0 - var entries []treeEntry - for i < len(data) { - modeEnd := bytes.IndexByte(data[i:], ' ') - if modeEnd < 0 { - return errors.New("invalid tree format") - } - mode := misc.BytesToString(data[i : i+modeEnd]) - i += modeEnd + 1 - - nameEnd := bytes.IndexByte(data[i:], 0) - if nameEnd < 0 { - return errors.New("missing null after filename") - } - name := misc.BytesToString(data[i : i+nameEnd]) - i += nameEnd + 1 - - if i+20 > len(data) { - return errors.New("unexpected EOF in SHA") - } - shaBytes := data[i : i+20] - i += 20 - - entries = append(entries, treeEntry{ - mode: mode, - name: name, - sha: shaBytes, - }) - - if mode == "40000" { - subPrefix := path.Join(prefix, name) - if err := walk(subPrefix, hex.EncodeToString(shaBytes)); err != nil { - return err - } - } - } - treeCache[prefix] = entries - return nil - } - - if err := walk("", baseTree); err != nil { - return "", err - } - - for filePath, blobSha := range updates { - parts := strings.Split(filePath, "/") - dir := strings.Join(parts[:len(parts)-1], "/") - name := parts[len(parts)-1] - - entries := treeCache[dir] - found := false - for i, e := range entries { - if e.name == name { - if blobSha == nil { - // Remove TODO - entries = append(entries[:i], entries[i+1:]...) 
- } else { - entries[i].sha = blobSha - } - found = true - break - } - } - if !found && blobSha != nil { - entries = append(entries, treeEntry{ - mode: "100644", - name: name, - sha: blobSha, - }) - } - treeCache[dir] = entries - } - - built := make(map[string][]byte) - var build func(string) ([]byte, error) - build = func(prefix string) ([]byte, error) { - entries := treeCache[prefix] - for i, e := range entries { - if e.mode == "40000" { - subPrefix := path.Join(prefix, e.name) - if sha, ok := built[subPrefix]; ok { - entries[i].sha = sha - continue - } - newShaStr, err := build(subPrefix) - if err != nil { - return nil, err - } - entries[i].sha = newShaStr - } - } - shaStr, err := writeTree(ctx, repoPath, entries) - if err != nil { - return nil, err - } - shaBytes, err := hex.DecodeString(shaStr) - if err != nil { - return nil, err - } - built[prefix] = shaBytes - return shaBytes, nil - } - - rootShaBytes, err := build("") - if err != nil { - return "", err - } - return hex.EncodeToString(rootShaBytes), nil -} - -type treeEntry struct { - mode string // like "100644" - name string // individual name - sha []byte -} diff --git a/forged/internal/unsorted/git_ref.go b/forged/internal/unsorted/git_ref.go deleted file mode 100644 index d9735ba..0000000 --- a/forged/internal/unsorted/git_ref.go +++ /dev/null @@ -1,37 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/plumbing" -) - -// getRefHash returns the hash of a reference given its -// type and name as supplied in URL queries. 
-func getRefHash(repo *git.Repository, refType, refName string) (refHash plumbing.Hash, err error) { - var ref *plumbing.Reference - switch refType { - case "": - if ref, err = repo.Head(); err != nil { - return - } - refHash = ref.Hash() - case "commit": - refHash = plumbing.NewHash(refName) - case "branch": - if ref, err = repo.Reference(plumbing.NewBranchReferenceName(refName), true); err != nil { - return - } - refHash = ref.Hash() - case "tag": - if ref, err = repo.Reference(plumbing.NewTagReferenceName(refName), true); err != nil { - return - } - refHash = ref.Hash() - default: - panic("Invalid ref type " + refType) - } - return -} diff --git a/forged/internal/unsorted/http_auth.go b/forged/internal/unsorted/http_auth.go deleted file mode 100644 index b0afa05..0000000 --- a/forged/internal/unsorted/http_auth.go +++ /dev/null @@ -1,26 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "net/http" -) - -// getUserFromRequest returns the user ID and username associated with the -// session cookie in a given [http.Request]. 
-func (s *Server) getUserFromRequest(request *http.Request) (id int, username string, err error) { - var sessionCookie *http.Cookie - - if sessionCookie, err = request.Cookie("session"); err != nil { - return - } - - err = s.database.QueryRow( - request.Context(), - "SELECT user_id, COALESCE(username, '') FROM users u JOIN sessions s ON u.id = s.user_id WHERE s.session_id = $1;", - sessionCookie.Value, - ).Scan(&id, &username) - - return -} diff --git a/forged/internal/unsorted/http_handle_branches.go b/forged/internal/unsorted/http_handle_branches.go deleted file mode 100644 index 704e1d8..0000000 --- a/forged/internal/unsorted/http_handle_branches.go +++ /dev/null @@ -1,46 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "net/http" - "strings" - - "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -// httpHandleRepoBranches provides the branches page in repos. 
-func (s *Server) httpHandleRepoBranches(writer http.ResponseWriter, _ *http.Request, params map[string]any) { - var repo *git.Repository - var repoName string - var groupPath []string - var err error - var notes []string - var branches []string - var branchesIter storer.ReferenceIter - - repo, repoName, groupPath = params["repo"].(*git.Repository), params["repo_name"].(string), params["group_path"].([]string) - - if strings.Contains(repoName, "\n") || misc.SliceContainsNewlines(groupPath) { - notes = append(notes, "Path contains newlines; HTTP Git access impossible") - } - - branchesIter, err = repo.Branches() - if err == nil { - _ = branchesIter.ForEach(func(branch *plumbing.Reference) error { - branches = append(branches, branch.Name().Short()) - return nil - }) - } - params["branches"] = branches - - params["http_clone_url"] = s.genHTTPRemoteURL(groupPath, repoName) - params["ssh_clone_url"] = s.genSSHRemoteURL(groupPath, repoName) - params["notes"] = notes - - s.renderTemplate(writer, "repo_branches", params) -} diff --git a/forged/internal/unsorted/http_handle_group_index.go b/forged/internal/unsorted/http_handle_group_index.go deleted file mode 100644 index ce28a1c..0000000 --- a/forged/internal/unsorted/http_handle_group_index.go +++ /dev/null @@ -1,196 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "errors" - "net/http" - "path/filepath" - "strconv" - - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgtype" - "go.lindenii.runxiyu.org/forge/forged/internal/misc" - "go.lindenii.runxiyu.org/forge/forged/internal/web" -) - -// httpHandleGroupIndex provides index pages for groups, which includes a list -// of its subgroups and repos, as well as a form for group maintainers to -// create repos. 
-func (s *Server) httpHandleGroupIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) { - var groupPath []string - var repos []nameDesc - var subgroups []nameDesc - var err error - var groupID int - var groupDesc string - - groupPath = params["group_path"].([]string) - - // The group itself - err = s.database.QueryRow(request.Context(), ` - WITH RECURSIVE group_path_cte AS ( - SELECT - id, - parent_group, - name, - 1 AS depth - FROM groups - WHERE name = ($1::text[])[1] - AND parent_group IS NULL - - UNION ALL - - SELECT - g.id, - g.parent_group, - g.name, - group_path_cte.depth + 1 - FROM groups g - JOIN group_path_cte ON g.parent_group = group_path_cte.id - WHERE g.name = ($1::text[])[group_path_cte.depth + 1] - AND group_path_cte.depth + 1 <= cardinality($1::text[]) - ) - SELECT c.id, COALESCE(g.description, '') - FROM group_path_cte c - JOIN groups g ON g.id = c.id - WHERE c.depth = cardinality($1::text[]) - `, - pgtype.FlatArray[string](groupPath), - ).Scan(&groupID, &groupDesc) - - if errors.Is(err, pgx.ErrNoRows) { - web.ErrorPage404(s.templates, writer, params) - return - } else if err != nil { - web.ErrorPage500(s.templates, writer, params, "Error getting group: "+err.Error()) - return - } - - // ACL - var count int - err = s.database.QueryRow(request.Context(), ` - SELECT COUNT(*) - FROM user_group_roles - WHERE user_id = $1 - AND group_id = $2 - `, params["user_id"].(int), groupID).Scan(&count) - if err != nil { - web.ErrorPage500(s.templates, writer, params, "Error checking access: "+err.Error()) - return - } - directAccess := (count > 0) - - if request.Method == http.MethodPost { - if !directAccess { - web.ErrorPage403(s.templates, writer, params, "You do not have direct access to this group") - return - } - - repoName := request.FormValue("repo_name") - repoDesc := request.FormValue("repo_desc") - contribReq := request.FormValue("repo_contrib") - if repoName == "" { - web.ErrorPage400(s.templates, writer, params, "Repo name 
is required") - return - } - - var newRepoID int - err := s.database.QueryRow( - request.Context(), - `INSERT INTO repos (name, description, group_id, contrib_requirements) - VALUES ($1, $2, $3, $4) - RETURNING id`, - repoName, - repoDesc, - groupID, - contribReq, - ).Scan(&newRepoID) - if err != nil { - web.ErrorPage500(s.templates, writer, params, "Error creating repo: "+err.Error()) - return - } - - filePath := filepath.Join(s.config.Git.RepoDir, strconv.Itoa(newRepoID)+".git") - - _, err = s.database.Exec( - request.Context(), - `UPDATE repos - SET filesystem_path = $1 - WHERE id = $2`, - filePath, - newRepoID, - ) - if err != nil { - web.ErrorPage500(s.templates, writer, params, "Error updating repo path: "+err.Error()) - return - } - - if err = s.gitInit(filePath); err != nil { - web.ErrorPage500(s.templates, writer, params, "Error initializing repo: "+err.Error()) - return - } - - misc.RedirectUnconditionally(writer, request) - return - } - - // Repos - var rows pgx.Rows - rows, err = s.database.Query(request.Context(), ` - SELECT name, COALESCE(description, '') - FROM repos - WHERE group_id = $1 - `, groupID) - if err != nil { - web.ErrorPage500(s.templates, writer, params, "Error getting repos: "+err.Error()) - return - } - defer rows.Close() - - for rows.Next() { - var name, description string - if err = rows.Scan(&name, &description); err != nil { - web.ErrorPage500(s.templates, writer, params, "Error getting repos: "+err.Error()) - return - } - repos = append(repos, nameDesc{name, description}) - } - if err = rows.Err(); err != nil { - web.ErrorPage500(s.templates, writer, params, "Error getting repos: "+err.Error()) - return - } - - // Subgroups - rows, err = s.database.Query(request.Context(), ` - SELECT name, COALESCE(description, '') - FROM groups - WHERE parent_group = $1 - `, groupID) - if err != nil { - web.ErrorPage500(s.templates, writer, params, "Error getting subgroups: "+err.Error()) - return - } - defer rows.Close() - - for rows.Next() { - 
var name, description string - if err = rows.Scan(&name, &description); err != nil { - web.ErrorPage500(s.templates, writer, params, "Error getting subgroups: "+err.Error()) - return - } - subgroups = append(subgroups, nameDesc{name, description}) - } - if err = rows.Err(); err != nil { - web.ErrorPage500(s.templates, writer, params, "Error getting subgroups: "+err.Error()) - return - } - - params["repos"] = repos - params["subgroups"] = subgroups - params["description"] = groupDesc - params["direct_access"] = directAccess - - s.renderTemplate(writer, "group", params) -} diff --git a/forged/internal/unsorted/http_handle_index.go b/forged/internal/unsorted/http_handle_index.go deleted file mode 100644 index a3141f4..0000000 --- a/forged/internal/unsorted/http_handle_index.go +++ /dev/null @@ -1,26 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "net/http" - - "go.lindenii.runxiyu.org/forge/forged/internal/web" -) - -// httpHandleIndex provides the main index page which includes a list of groups -// and some global information such as SSH keys. 
-func (s *Server) httpHandleIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) { - var err error - var groups []nameDesc - - groups, err = s.queryNameDesc(request.Context(), "SELECT name, COALESCE(description, '') FROM groups WHERE parent_group IS NULL") - if err != nil { - web.ErrorPage500(s.templates, writer, params, "Error querying groups: "+err.Error()) - return - } - params["groups"] = groups - - s.renderTemplate(writer, "index", params) -} diff --git a/forged/internal/unsorted/http_handle_login.go b/forged/internal/unsorted/http_handle_login.go deleted file mode 100644 index 8adbe17..0000000 --- a/forged/internal/unsorted/http_handle_login.go +++ /dev/null @@ -1,108 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "crypto/rand" - "encoding/base64" - "errors" - "fmt" - "net/http" - "time" - - "github.com/jackc/pgx/v5" - "go.lindenii.runxiyu.org/forge/forged/internal/argon2id" - "go.lindenii.runxiyu.org/forge/forged/internal/web" -) - -// httpHandleLogin provides the login page for local users. 
-func (s *Server) httpHandleLogin(writer http.ResponseWriter, request *http.Request, params map[string]any) { - var username, password string - var userID int - var passwordHash string - var err error - var passwordMatches bool - var cookieValue string - var now time.Time - var expiry time.Time - var cookie http.Cookie - - if request.Method != http.MethodPost { - s.renderTemplate(writer, "login", params) - return - } - - username = request.PostFormValue("username") - password = request.PostFormValue("password") - - err = s.database.QueryRow(request.Context(), - "SELECT id, COALESCE(password, '') FROM users WHERE username = $1", - username, - ).Scan(&userID, &passwordHash) - if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - params["login_error"] = "Unknown username" - s.renderTemplate(writer, "login", params) - return - } - web.ErrorPage500(s.templates, writer, params, "Error querying user information: "+err.Error()) - return - } - if passwordHash == "" { - params["login_error"] = "User has no password" - s.renderTemplate(writer, "login", params) - return - } - - if passwordMatches, err = argon2id.ComparePasswordAndHash(password, passwordHash); err != nil { - web.ErrorPage500(s.templates, writer, params, "Error comparing password and hash: "+err.Error()) - return - } - - if !passwordMatches { - params["login_error"] = "Invalid password" - s.renderTemplate(writer, "login", params) - return - } - - if cookieValue, err = randomUrlsafeStr(16); err != nil { - web.ErrorPage500(s.templates, writer, params, "Error getting random string: "+err.Error()) - return - } - - now = time.Now() - expiry = now.Add(time.Duration(s.config.HTTP.CookieExpiry) * time.Second) - - cookie = http.Cookie{ - Name: "session", - Value: cookieValue, - SameSite: http.SameSiteLaxMode, - HttpOnly: true, - Secure: false, // TODO - Expires: expiry, - Path: "/", - } //exhaustruct:ignore - - http.SetCookie(writer, &cookie) - - _, err = s.database.Exec(request.Context(), "INSERT INTO sessions 
(user_id, session_id) VALUES ($1, $2)", userID, cookieValue) - if err != nil { - web.ErrorPage500(s.templates, writer, params, "Error inserting session: "+err.Error()) - return - } - - http.Redirect(writer, request, "/", http.StatusSeeOther) -} - -// randomUrlsafeStr generates a random string of the given entropic size -// using the URL-safe base64 encoding. The actual size of the string returned -// will be 4*sz. -func randomUrlsafeStr(sz int) (string, error) { - r := make([]byte, 3*sz) - _, err := rand.Read(r) - if err != nil { - return "", fmt.Errorf("error generating random string: %w", err) - } - return base64.RawURLEncoding.EncodeToString(r), nil -} diff --git a/forged/internal/unsorted/http_handle_repo_commit.go b/forged/internal/unsorted/http_handle_repo_commit.go deleted file mode 100644 index 2afdf3a..0000000 --- a/forged/internal/unsorted/http_handle_repo_commit.go +++ /dev/null @@ -1,146 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "fmt" - "net/http" - "strings" - - "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/plumbing/format/diff" - "github.com/go-git/go-git/v5/plumbing/object" - "go.lindenii.runxiyu.org/forge/forged/internal/misc" - "go.lindenii.runxiyu.org/forge/forged/internal/oldgit" - "go.lindenii.runxiyu.org/forge/forged/internal/web" -) - -// usableFilePatch is a [diff.FilePatch] that is structured in a way more -// friendly for use in HTML templates. -type usableFilePatch struct { - From diff.File - To diff.File - Chunks []usableChunk -} - -// usableChunk is a [diff.Chunk] that is structured in a way more friendly for -// use in HTML templates. 
-type usableChunk struct { - Operation diff.Operation - Content string -} - -func (s *Server) httpHandleRepoCommit(writer http.ResponseWriter, request *http.Request, params map[string]any) { - var repo *git.Repository - var commitIDStrSpec, commitIDStrSpecNoSuffix string - var commitID plumbing.Hash - var parentCommitHash plumbing.Hash - var commitObj *object.Commit - var commitIDStr string - var err error - var patch *object.Patch - - repo, commitIDStrSpec = params["repo"].(*git.Repository), params["commit_id"].(string) - - commitIDStrSpecNoSuffix = strings.TrimSuffix(commitIDStrSpec, ".patch") - commitID = plumbing.NewHash(commitIDStrSpecNoSuffix) - if commitObj, err = repo.CommitObject(commitID); err != nil { - web.ErrorPage500(s.templates, writer, params, "Error getting commit object: "+err.Error()) - return - } - if commitIDStrSpecNoSuffix != commitIDStrSpec { - var patchStr string - if patchStr, err = oldgit.FmtCommitPatch(commitObj); err != nil { - web.ErrorPage500(s.templates, writer, params, "Error formatting patch: "+err.Error()) - return - } - fmt.Fprintln(writer, patchStr) - return - } - commitIDStr = commitObj.Hash.String() - - if commitIDStr != commitIDStrSpec { - http.Redirect(writer, request, commitIDStr, http.StatusSeeOther) - return - } - - params["commit_object"] = commitObj - params["commit_id"] = commitIDStr - - parentCommitHash, patch, err = oldgit.CommitToPatch(commitObj) - if err != nil { - web.ErrorPage500(s.templates, writer, params, "Error getting patch from commit: "+err.Error()) - return - } - params["parent_commit_hash"] = parentCommitHash.String() - params["patch"] = patch - - params["file_patches"] = makeUsableFilePatches(patch) - - s.renderTemplate(writer, "repo_commit", params) -} - -type fakeDiffFile struct { - hash plumbing.Hash - mode filemode.FileMode - path string -} - -func (f fakeDiffFile) Hash() plumbing.Hash { - return f.hash -} - -func (f fakeDiffFile) Mode() filemode.FileMode { - return f.mode -} - -func (f fakeDiffFile) 
Path() string { - return f.path -} - -var nullFakeDiffFile = fakeDiffFile{ //nolint:gochecknoglobals - hash: plumbing.NewHash("0000000000000000000000000000000000000000"), - mode: misc.FirstOrPanic(filemode.New("100644")), - path: "", -} - -func makeUsableFilePatches(patch diff.Patch) (usableFilePatches []usableFilePatch) { - // TODO: Remove unnecessary context - // TODO: Prepend "+"/"-"/" " instead of solely distinguishing based on color - - for _, filePatch := range patch.FilePatches() { - var fromFile, toFile diff.File - var ufp usableFilePatch - chunks := []usableChunk{} - - fromFile, toFile = filePatch.Files() - if fromFile == nil { - fromFile = nullFakeDiffFile - } - if toFile == nil { - toFile = nullFakeDiffFile - } - for _, chunk := range filePatch.Chunks() { - var content string - - content = chunk.Content() - if len(content) > 0 && content[0] == '\n' { - content = "\n" + content - } // Horrible hack to fix how browsers newlines that immediately proceed
-			chunks = append(chunks, usableChunk{
-				Operation: chunk.Type(),
-				Content:   content,
-			})
-		}
-		ufp = usableFilePatch{
-			Chunks: chunks,
-			From:   fromFile,
-			To:     toFile,
-		}
-		usableFilePatches = append(usableFilePatches, ufp)
-	}
-	return
-}
diff --git a/forged/internal/unsorted/http_handle_repo_contrib_index.go b/forged/internal/unsorted/http_handle_repo_contrib_index.go
deleted file mode 100644
index 5c68c08..0000000
--- a/forged/internal/unsorted/http_handle_repo_contrib_index.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// SPDX-License-Identifier: AGPL-3.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu 
-
-package unsorted
-
-import (
-	"net/http"
-
-	"github.com/jackc/pgx/v5"
-	"go.lindenii.runxiyu.org/forge/forged/internal/web"
-)
-
-// idTitleStatus describes properties of a merge request that needs to be
-// present in MR listings.
-type idTitleStatus struct {
-	ID     int
-	Title  string
-	Status string
-}
-
-// httpHandleRepoContribIndex provides an index to merge requests of a repo.
-func (s *Server) httpHandleRepoContribIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) {
-	var rows pgx.Rows
-	var result []idTitleStatus
-	var err error
-
-	if rows, err = s.database.Query(request.Context(),
-		"SELECT repo_local_id, COALESCE(title, 'Untitled'), status FROM merge_requests WHERE repo_id = $1",
-		params["repo_id"],
-	); err != nil {
-		web.ErrorPage500(s.templates, writer, params, "Error querying merge requests: "+err.Error())
-		return
-	}
-	defer rows.Close()
-
-	for rows.Next() {
-		var mrID int
-		var mrTitle, mrStatus string
-		if err = rows.Scan(&mrID, &mrTitle, &mrStatus); err != nil {
-			web.ErrorPage500(s.templates, writer, params, "Error scanning merge request: "+err.Error())
-			return
-		}
-		result = append(result, idTitleStatus{mrID, mrTitle, mrStatus})
-	}
-	if err = rows.Err(); err != nil {
-		web.ErrorPage500(s.templates, writer, params, "Error ranging over merge requests: "+err.Error())
-		return
-	}
-	params["merge_requests"] = result
-
-	s.renderTemplate(writer, "repo_contrib_index", params)
-}
diff --git a/forged/internal/unsorted/http_handle_repo_contrib_one.go b/forged/internal/unsorted/http_handle_repo_contrib_one.go
deleted file mode 100644
index 1d733b0..0000000
--- a/forged/internal/unsorted/http_handle_repo_contrib_one.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: AGPL-3.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu 
-
-package unsorted
-
-import (
-	"net/http"
-	"strconv"
-
-	"github.com/go-git/go-git/v5"
-	"github.com/go-git/go-git/v5/plumbing"
-	"github.com/go-git/go-git/v5/plumbing/object"
-	"go.lindenii.runxiyu.org/forge/forged/internal/web"
-)
-
-// httpHandleRepoContribOne provides an interface to each merge request of a
-// repo.
-func (s *Server) httpHandleRepoContribOne(writer http.ResponseWriter, request *http.Request, params map[string]any) {
-	var mrIDStr string
-	var mrIDInt int
-	var err error
-	var title, status, srcRefStr, dstBranchStr string
-	var repo *git.Repository
-	var srcRefHash plumbing.Hash
-	var dstBranchHash plumbing.Hash
-	var srcCommit, dstCommit, mergeBaseCommit *object.Commit
-	var mergeBases []*object.Commit
-
-	mrIDStr = params["mr_id"].(string)
-	mrIDInt64, err := strconv.ParseInt(mrIDStr, 10, strconv.IntSize)
-	if err != nil {
-		web.ErrorPage400(s.templates, writer, params, "Merge request ID not an integer")
-		return
-	}
-	mrIDInt = int(mrIDInt64)
-
-	if err = s.database.QueryRow(request.Context(),
-		"SELECT COALESCE(title, ''), status, source_ref, COALESCE(destination_branch, '') FROM merge_requests WHERE repo_id = $1 AND repo_local_id = $2",
-		params["repo_id"], mrIDInt,
-	).Scan(&title, &status, &srcRefStr, &dstBranchStr); err != nil {
-		web.ErrorPage500(s.templates, writer, params, "Error querying merge request: "+err.Error())
-		return
-	}
-
-	repo = params["repo"].(*git.Repository)
-
-	if srcRefHash, err = getRefHash(repo, "branch", srcRefStr); err != nil {
-		web.ErrorPage500(s.templates, writer, params, "Error getting source ref hash: "+err.Error())
-		return
-	}
-	if srcCommit, err = repo.CommitObject(srcRefHash); err != nil {
-		web.ErrorPage500(s.templates, writer, params, "Error getting source commit: "+err.Error())
-		return
-	}
-	params["source_commit"] = srcCommit
-
-	if dstBranchStr == "" {
-		dstBranchStr = "HEAD"
-		dstBranchHash, err = getRefHash(repo, "", "")
-	} else {
-		dstBranchHash, err = getRefHash(repo, "branch", dstBranchStr)
-	}
-	if err != nil {
-		web.ErrorPage500(s.templates, writer, params, "Error getting destination branch hash: "+err.Error())
-		return
-	}
-
-	if dstCommit, err = repo.CommitObject(dstBranchHash); err != nil {
-		web.ErrorPage500(s.templates, writer, params, "Error getting destination commit: "+err.Error())
-		return
-	}
-	params["destination_commit"] = dstCommit
-
-	if mergeBases, err = srcCommit.MergeBase(dstCommit); err != nil {
-		web.ErrorPage500(s.templates, writer, params, "Error getting merge base: "+err.Error())
-		return
-	}
-
-	if len(mergeBases) < 1 {
-		web.ErrorPage500(s.templates, writer, params, "No merge base found for this merge request; these two branches do not share any common history")
-		// TODO
-		return
-	}
-
-	mergeBaseCommit = mergeBases[0]
-	params["merge_base"] = mergeBaseCommit
-
-	patch, err := mergeBaseCommit.Patch(srcCommit)
-	if err != nil {
-		web.ErrorPage500(s.templates, writer, params, "Error getting patch: "+err.Error())
-		return
-	}
-	params["file_patches"] = makeUsableFilePatches(patch)
-
-	params["mr_title"], params["mr_status"], params["mr_source_ref"], params["mr_destination_branch"] = title, status, srcRefStr, dstBranchStr
-
-	s.renderTemplate(writer, "repo_contrib_one", params)
-}
diff --git a/forged/internal/unsorted/http_handle_repo_index.go b/forged/internal/unsorted/http_handle_repo_index.go
deleted file mode 100644
index dd46dfe..0000000
--- a/forged/internal/unsorted/http_handle_repo_index.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// SPDX-License-Identifier: AGPL-3.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu 
-
-package unsorted
-
-import (
-	"net/http"
-
-	"go.lindenii.runxiyu.org/forge/forged/internal/git2c"
-	"go.lindenii.runxiyu.org/forge/forged/internal/render"
-	"go.lindenii.runxiyu.org/forge/forged/internal/web"
-)
-
-// httpHandleRepoIndex provides the front page of a repo using git2d.
-func (s *Server) httpHandleRepoIndex(w http.ResponseWriter, req *http.Request, params map[string]any) {
-	repoName := params["repo_name"].(string)
-	groupPath := params["group_path"].([]string)
-
-	_, repoPath, _, _, _, _, _ := s.getRepoInfo(req.Context(), groupPath, repoName, "") // TODO: Don't use getRepoInfo
-
-	client, err := git2c.NewClient(s.config.Git.Socket)
-	if err != nil {
-		web.ErrorPage500(s.templates, w, params, err.Error())
-		return
-	}
-	defer client.Close()
-
-	commits, readme, err := client.CmdIndex(repoPath)
-	if err != nil {
-		web.ErrorPage500(s.templates, w, params, err.Error())
-		return
-	}
-
-	params["commits"] = commits
-	params["readme_filename"] = readme.Filename
-	_, params["readme"] = render.Readme(readme.Content, readme.Filename)
-
-	s.renderTemplate(w, "repo_index", params)
-
-	// TODO: Caching
-}
diff --git a/forged/internal/unsorted/http_handle_repo_info.go b/forged/internal/unsorted/http_handle_repo_info.go
deleted file mode 100644
index e23b1d2..0000000
--- a/forged/internal/unsorted/http_handle_repo_info.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// SPDX-License-Identifier: AGPL-3.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu 
-
-package unsorted
-
-import (
-	"fmt"
-	"io"
-	"net/http"
-	"os/exec"
-
-	"github.com/jackc/pgx/v5/pgtype"
-)
-
-// httpHandleRepoInfo provides advertised refs of a repo for use in Git's Smart
-// HTTP protocol.
-//
-// TODO: Reject access from web browsers.
-func (s *Server) httpHandleRepoInfo(writer http.ResponseWriter, request *http.Request, params map[string]any) (err error) {
-	groupPath := params["group_path"].([]string)
-	repoName := params["repo_name"].(string)
-	var repoPath string
-
-	if err := s.database.QueryRow(request.Context(), `
-	WITH RECURSIVE group_path_cte AS (
-		-- Start: match the first name in the path where parent_group IS NULL
-		SELECT
-			id,
-			parent_group,
-			name,
-			1 AS depth
-		FROM groups
-		WHERE name = ($1::text[])[1]
-			AND parent_group IS NULL
-	
-		UNION ALL
-	
-		-- Recurse: jion next segment of the path
-		SELECT
-			g.id,
-			g.parent_group,
-			g.name,
-			group_path_cte.depth + 1
-		FROM groups g
-		JOIN group_path_cte ON g.parent_group = group_path_cte.id
-		WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
-			AND group_path_cte.depth + 1 <= cardinality($1::text[])
-	)
-	SELECT r.filesystem_path
-	FROM group_path_cte c
-	JOIN repos r ON r.group_id = c.id
-	WHERE c.depth = cardinality($1::text[])
-		AND r.name = $2
-	`,
-		pgtype.FlatArray[string](groupPath),
-		repoName,
-	).Scan(&repoPath); err != nil {
-		return err
-	}
-
-	writer.Header().Set("Content-Type", "application/x-git-upload-pack-advertisement")
-	writer.WriteHeader(http.StatusOK)
-
-	cmd := exec.Command("git", "upload-pack", "--stateless-rpc", "--advertise-refs", repoPath)
-	stdout, err := cmd.StdoutPipe()
-	if err != nil {
-		return err
-	}
-	defer func() {
-		_ = stdout.Close()
-	}()
-	cmd.Stderr = cmd.Stdout
-
-	if err = cmd.Start(); err != nil {
-		return err
-	}
-
-	if err = packLine(writer, "# service=git-upload-pack\n"); err != nil {
-		return err
-	}
-
-	if err = packFlush(writer); err != nil {
-		return
-	}
-
-	if _, err = io.Copy(writer, stdout); err != nil {
-		return err
-	}
-
-	if err = cmd.Wait(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Taken from https://github.com/icyphox/legit, MIT license.
-func packLine(w io.Writer, s string) error {
-	_, err := fmt.Fprintf(w, "%04x%s", len(s)+4, s)
-	return err
-}
-
-// Taken from https://github.com/icyphox/legit, MIT license.
-func packFlush(w io.Writer) error {
-	_, err := fmt.Fprint(w, "0000")
-	return err
-}
diff --git a/forged/internal/unsorted/http_handle_repo_log.go b/forged/internal/unsorted/http_handle_repo_log.go
deleted file mode 100644
index 5d90871..0000000
--- a/forged/internal/unsorted/http_handle_repo_log.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// SPDX-License-Identifier: AGPL-3.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu 
-
-package unsorted
-
-import (
-	"net/http"
-
-	"github.com/go-git/go-git/v5"
-	"github.com/go-git/go-git/v5/plumbing"
-	"go.lindenii.runxiyu.org/forge/forged/internal/web"
-)
-
-// httpHandleRepoLog provides a page with a complete Git log.
-//
-// TODO: This currently provides all commits in the branch. It should be
-// paginated and cached instead.
-func (s *Server) httpHandleRepoLog(writer http.ResponseWriter, req *http.Request, params map[string]any) {
-	var repo *git.Repository
-	var refHash plumbing.Hash
-	var err error
-
-	repo = params["repo"].(*git.Repository)
-
-	if refHash, err = getRefHash(repo, params["ref_type"].(string), params["ref_name"].(string)); err != nil {
-		web.ErrorPage500(s.templates, writer, params, "Error getting ref hash: "+err.Error())
-		return
-	}
-
-	logOptions := git.LogOptions{From: refHash} //exhaustruct:ignore
-	commitIter, err := repo.Log(&logOptions)
-	if err != nil {
-		web.ErrorPage500(s.templates, writer, params, "Error getting recent commits: "+err.Error())
-		return
-	}
-	params["commits"], params["commits_err"] = commitIterSeqErr(req.Context(), commitIter)
-
-	s.renderTemplate(writer, "repo_log", params)
-}
diff --git a/forged/internal/unsorted/http_handle_repo_raw.go b/forged/internal/unsorted/http_handle_repo_raw.go
deleted file mode 100644
index 1127284..0000000
--- a/forged/internal/unsorted/http_handle_repo_raw.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// SPDX-License-Identifier: AGPL-3.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu 
-
-package unsorted
-
-import (
-	"fmt"
-	"html/template"
-	"net/http"
-	"strings"
-
-	"go.lindenii.runxiyu.org/forge/forged/internal/git2c"
-	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
-	"go.lindenii.runxiyu.org/forge/forged/internal/web"
-)
-
-// httpHandleRepoRaw serves raw files, or directory listings that point to raw
-// files.
-func (s *Server) httpHandleRepoRaw(writer http.ResponseWriter, request *http.Request, params map[string]any) {
-	repoName := params["repo_name"].(string)
-	groupPath := params["group_path"].([]string)
-	rawPathSpec := params["rest"].(string)
-	pathSpec := strings.TrimSuffix(rawPathSpec, "/")
-	params["path_spec"] = pathSpec
-
-	_, repoPath, _, _, _, _, _ := s.getRepoInfo(request.Context(), groupPath, repoName, "")
-
-	client, err := git2c.NewClient(s.config.Git.Socket)
-	if err != nil {
-		web.ErrorPage500(s.templates, writer, params, err.Error())
-		return
-	}
-	defer client.Close()
-
-	files, content, err := client.CmdTreeRaw(repoPath, pathSpec)
-	if err != nil {
-		web.ErrorPage500(s.templates, writer, params, err.Error())
-		return
-	}
-
-	switch {
-	case files != nil:
-		params["files"] = files
-		params["readme_filename"] = "README.md"
-		params["readme"] = template.HTML("

README rendering here is WIP again

") // TODO - s.renderTemplate(writer, "repo_raw_dir", params) - case content != "": - if misc.RedirectNoDir(writer, request) { - return - } - writer.Header().Set("Content-Type", "application/octet-stream") - fmt.Fprint(writer, content) - default: - web.ErrorPage500(s.templates, writer, params, "Unknown error fetching repo raw data") - } -} diff --git a/forged/internal/unsorted/http_handle_repo_tree.go b/forged/internal/unsorted/http_handle_repo_tree.go deleted file mode 100644 index 4799ccb..0000000 --- a/forged/internal/unsorted/http_handle_repo_tree.go +++ /dev/null @@ -1,55 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "html/template" - "net/http" - "strings" - - "go.lindenii.runxiyu.org/forge/forged/internal/git2c" - "go.lindenii.runxiyu.org/forge/forged/internal/render" - "go.lindenii.runxiyu.org/forge/forged/internal/web" -) - -// httpHandleRepoTree provides a friendly, syntax-highlighted view of -// individual files, and provides directory views that link to these files. -// -// TODO: Do not highlight files that are too large. 
-func (s *Server) httpHandleRepoTree(writer http.ResponseWriter, request *http.Request, params map[string]any) { - repoName := params["repo_name"].(string) - groupPath := params["group_path"].([]string) - rawPathSpec := params["rest"].(string) - pathSpec := strings.TrimSuffix(rawPathSpec, "/") - params["path_spec"] = pathSpec - - _, repoPath, _, _, _, _, _ := s.getRepoInfo(request.Context(), groupPath, repoName, "") - - client, err := git2c.NewClient(s.config.Git.Socket) - if err != nil { - web.ErrorPage500(s.templates, writer, params, err.Error()) - return - } - defer client.Close() - - files, content, err := client.CmdTreeRaw(repoPath, pathSpec) - if err != nil { - web.ErrorPage500(s.templates, writer, params, err.Error()) - return - } - - switch { - case files != nil: - params["files"] = files - params["readme_filename"] = "README.md" - params["readme"] = template.HTML("

README rendering here is WIP again

") // TODO - s.renderTemplate(writer, "repo_tree_dir", params) - case content != "": - rendered := render.Highlight(pathSpec, content) - params["file_contents"] = rendered - s.renderTemplate(writer, "repo_tree_file", params) - default: - web.ErrorPage500(s.templates, writer, params, "Unknown object type, something is seriously wrong") - } -} diff --git a/forged/internal/unsorted/http_handle_repo_upload_pack.go b/forged/internal/unsorted/http_handle_repo_upload_pack.go deleted file mode 100644 index 914c9cc..0000000 --- a/forged/internal/unsorted/http_handle_repo_upload_pack.go +++ /dev/null @@ -1,120 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "bytes" - "compress/gzip" - "compress/zlib" - "fmt" - "io" - "log" - "net/http" - "os" - "os/exec" - "strings" - - "github.com/jackc/pgx/v5/pgtype" -) - -// httpHandleUploadPack handles incoming Git fetch/pull/clone's over the Smart -// HTTP protocol. -func (s *Server) httpHandleUploadPack(writer http.ResponseWriter, request *http.Request, params map[string]any) (err error) { - if ct := request.Header.Get("Content-Type"); !strings.HasPrefix(ct, "application/x-git-upload-pack-request") { - http.Error(writer, "bad content-type", http.StatusUnsupportedMediaType) - return nil - } - - decoded, err := decodeBody(request) - if err != nil { - http.Error(writer, "cannot decode request body", http.StatusBadRequest) - return err - } - defer decoded.Close() - - var groupPath []string - var repoName string - var repoPath string - var cmd *exec.Cmd - - groupPath, repoName = params["group_path"].([]string), params["repo_name"].(string) - - if err := s.database.QueryRow(request.Context(), ` - WITH RECURSIVE group_path_cte AS ( - -- Start: match the first name in the path where parent_group IS NULL - SELECT - id, - parent_group, - name, - 1 AS depth - FROM groups - WHERE name = ($1::text[])[1] - AND parent_group IS NULL - - UNION ALL - - -- 
Recurse: jion next segment of the path - SELECT - g.id, - g.parent_group, - g.name, - group_path_cte.depth + 1 - FROM groups g - JOIN group_path_cte ON g.parent_group = group_path_cte.id - WHERE g.name = ($1::text[])[group_path_cte.depth + 1] - AND group_path_cte.depth + 1 <= cardinality($1::text[]) - ) - SELECT r.filesystem_path - FROM group_path_cte c - JOIN repos r ON r.group_id = c.id - WHERE c.depth = cardinality($1::text[]) - AND r.name = $2 - `, - pgtype.FlatArray[string](groupPath), - repoName, - ).Scan(&repoPath); err != nil { - return err - } - - writer.Header().Set("Content-Type", "application/x-git-upload-pack-result") - // writer.Header().Set("Connection", "Keep-Alive") - // writer.Header().Set("Transfer-Encoding", "chunked") - - cmd = exec.CommandContext(request.Context(), "git", "upload-pack", "--stateless-rpc", repoPath) - cmd.Env = append(os.Environ(), "LINDENII_FORGE_HOOKS_SOCKET_PATH="+s.config.Hooks.Socket) - - var stderrBuf bytes.Buffer - cmd.Stderr = &stderrBuf - - cmd.Stdout = writer - cmd.Stdin = decoded - - if gp := request.Header.Get("Git-Protocol"); gp != "" { - cmd.Env = append(cmd.Env, "GIT_PROTOCOL="+gp) - } - - if err = cmd.Run(); err != nil { - log.Println(stderrBuf.String()) - return err - } - - return nil -} - -func decodeBody(r *http.Request) (io.ReadCloser, error) { - switch ce := strings.ToLower(strings.TrimSpace(r.Header.Get("Content-Encoding"))); ce { - case "", "identity": - return r.Body, nil - case "gzip": - zr, err := gzip.NewReader(r.Body) - if err != nil { return nil, err } - return zr, nil - case "deflate": - zr, err := zlib.NewReader(r.Body) - if err != nil { return nil, err } - return zr, nil - default: - return nil, fmt.Errorf("unsupported Content-Encoding: %q", ce) - } -} diff --git a/forged/internal/unsorted/http_handle_users.go b/forged/internal/unsorted/http_handle_users.go deleted file mode 100644 index b41ee44..0000000 --- a/forged/internal/unsorted/http_handle_users.go +++ /dev/null @@ -1,15 +0,0 @@ -// 
SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "net/http" - - "go.lindenii.runxiyu.org/forge/forged/internal/web" -) - -// httpHandleUsers is a useless stub. -func (s *Server) httpHandleUsers(writer http.ResponseWriter, _ *http.Request, params map[string]any) { - web.ErrorPage501(s.templates, writer, params) -} diff --git a/forged/internal/unsorted/http_server.go b/forged/internal/unsorted/http_server.go deleted file mode 100644 index f6a1794..0000000 --- a/forged/internal/unsorted/http_server.go +++ /dev/null @@ -1,276 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "errors" - "log/slog" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/jackc/pgx/v5" - "go.lindenii.runxiyu.org/forge/forged/internal/misc" - "go.lindenii.runxiyu.org/forge/forged/internal/web" -) - -// ServeHTTP handles all incoming HTTP requests and routes them to the correct -// location. -// -// TODO: This function is way too large. 
-func (s *Server) ServeHTTP(writer http.ResponseWriter, request *http.Request) { - var remoteAddr string - if s.config.HTTP.ReverseProxy { - remoteAddrs, ok := request.Header["X-Forwarded-For"] - if ok && len(remoteAddrs) == 1 { - remoteAddr = remoteAddrs[0] - } else { - remoteAddr = request.RemoteAddr - } - } else { - remoteAddr = request.RemoteAddr - } - slog.Info("incoming http", "addr", remoteAddr, "method", request.Method, "uri", request.RequestURI) - - var segments []string - var err error - var sepIndex int - params := make(map[string]any) - - if segments, _, err = misc.ParseReqURI(request.RequestURI); err != nil { - web.ErrorPage400(s.templates, writer, params, "Error parsing request URI: "+err.Error()) - return - } - dirMode := false - if segments[len(segments)-1] == "" { - dirMode = true - segments = segments[:len(segments)-1] - } - - params["url_segments"] = segments - params["dir_mode"] = dirMode - params["global"] = s.globalData - var userID int // 0 for none - userID, params["username"], err = s.getUserFromRequest(request) - params["user_id"] = userID - if err != nil && !errors.Is(err, http.ErrNoCookie) && !errors.Is(err, pgx.ErrNoRows) { - web.ErrorPage500(s.templates, writer, params, "Error getting user info from request: "+err.Error()) - return - } - - if userID == 0 { - params["user_id_string"] = "" - } else { - params["user_id_string"] = strconv.Itoa(userID) - } - - for _, v := range segments { - if strings.Contains(v, ":") { - web.ErrorPage400Colon(s.templates, writer, params) - return - } - } - - if len(segments) == 0 { - s.httpHandleIndex(writer, request, params) - return - } - - if segments[0] == "-" { - if len(segments) < 2 { - web.ErrorPage404(s.templates, writer, params) - return - } else if len(segments) == 2 && misc.RedirectDir(writer, request) { - return - } - - switch segments[1] { - case "static": - s.staticHandler.ServeHTTP(writer, request) - return - case "source": - s.sourceHandler.ServeHTTP(writer, request) - return - } - } - - if 
segments[0] == "-" { - switch segments[1] { - case "login": - s.httpHandleLogin(writer, request, params) - return - case "users": - s.httpHandleUsers(writer, request, params) - return - default: - web.ErrorPage404(s.templates, writer, params) - return - } - } - - sepIndex = -1 - for i, part := range segments { - if part == "-" { - sepIndex = i - break - } - } - - params["separator_index"] = sepIndex - - var groupPath []string - var moduleType string - var moduleName string - - if sepIndex > 0 { - groupPath = segments[:sepIndex] - } else { - groupPath = segments - } - params["group_path"] = groupPath - - switch { - case sepIndex == -1: - if misc.RedirectDir(writer, request) { - return - } - s.httpHandleGroupIndex(writer, request, params) - case len(segments) == sepIndex+1: - web.ErrorPage404(s.templates, writer, params) - return - case len(segments) == sepIndex+2: - web.ErrorPage404(s.templates, writer, params) - return - default: - moduleType = segments[sepIndex+1] - moduleName = segments[sepIndex+2] - switch moduleType { - case "repos": - params["repo_name"] = moduleName - - if len(segments) > sepIndex+3 { - switch segments[sepIndex+3] { - case "info": - if err = s.httpHandleRepoInfo(writer, request, params); err != nil { - web.ErrorPage500(s.templates, writer, params, err.Error()) - } - return - case "git-upload-pack": - if err = s.httpHandleUploadPack(writer, request, params); err != nil { - web.ErrorPage500(s.templates, writer, params, err.Error()) - } - return - } - } - - if params["ref_type"], params["ref_name"], err = misc.GetParamRefTypeName(request); err != nil { - if errors.Is(err, misc.ErrNoRefSpec) { - params["ref_type"] = "" - } else { - web.ErrorPage400(s.templates, writer, params, "Error querying ref type: "+err.Error()) - return - } - } - - if params["repo"], params["repo_description"], params["repo_id"], _, err = s.openRepo(request.Context(), groupPath, moduleName); err != nil { - web.ErrorPage500(s.templates, writer, params, "Error opening repo: 
"+err.Error()) - return - } - - repoURLRoot := "/" - for _, part := range segments[:sepIndex+3] { - repoURLRoot = repoURLRoot + url.PathEscape(part) + "/" - } - params["repo_url_root"] = repoURLRoot - params["repo_patch_mailing_list"] = repoURLRoot[1:len(repoURLRoot)-1] + "@" + s.config.LMTP.Domain - params["http_clone_url"] = s.genHTTPRemoteURL(groupPath, moduleName) - params["ssh_clone_url"] = s.genSSHRemoteURL(groupPath, moduleName) - - if len(segments) == sepIndex+3 { - if misc.RedirectDir(writer, request) { - return - } - s.httpHandleRepoIndex(writer, request, params) - return - } - - repoFeature := segments[sepIndex+3] - switch repoFeature { - case "tree": - if misc.AnyContain(segments[sepIndex+4:], "/") { - web.ErrorPage400(s.templates, writer, params, "Repo tree paths may not contain slashes in any segments") - return - } - if dirMode { - params["rest"] = strings.Join(segments[sepIndex+4:], "/") + "/" - } else { - params["rest"] = strings.Join(segments[sepIndex+4:], "/") - } - if len(segments) < sepIndex+5 && misc.RedirectDir(writer, request) { - return - } - s.httpHandleRepoTree(writer, request, params) - case "branches": - if misc.RedirectDir(writer, request) { - return - } - s.httpHandleRepoBranches(writer, request, params) - return - case "raw": - if misc.AnyContain(segments[sepIndex+4:], "/") { - web.ErrorPage400(s.templates, writer, params, "Repo tree paths may not contain slashes in any segments") - return - } - if dirMode { - params["rest"] = strings.Join(segments[sepIndex+4:], "/") + "/" - } else { - params["rest"] = strings.Join(segments[sepIndex+4:], "/") - } - if len(segments) < sepIndex+5 && misc.RedirectDir(writer, request) { - return - } - s.httpHandleRepoRaw(writer, request, params) - case "log": - if len(segments) > sepIndex+4 { - web.ErrorPage400(s.templates, writer, params, "Too many parameters") - return - } - if misc.RedirectDir(writer, request) { - return - } - s.httpHandleRepoLog(writer, request, params) - case "commit": - if 
len(segments) != sepIndex+5 { - web.ErrorPage400(s.templates, writer, params, "Incorrect number of parameters") - return - } - if misc.RedirectNoDir(writer, request) { - return - } - params["commit_id"] = segments[sepIndex+4] - s.httpHandleRepoCommit(writer, request, params) - case "contrib": - if misc.RedirectDir(writer, request) { - return - } - switch len(segments) { - case sepIndex + 4: - s.httpHandleRepoContribIndex(writer, request, params) - case sepIndex + 5: - params["mr_id"] = segments[sepIndex+4] - s.httpHandleRepoContribOne(writer, request, params) - default: - web.ErrorPage400(s.templates, writer, params, "Too many parameters") - } - default: - web.ErrorPage404(s.templates, writer, params) - return - } - default: - web.ErrorPage404(s.templates, writer, params) - return - } - } -} diff --git a/forged/internal/unsorted/http_template.go b/forged/internal/unsorted/http_template.go deleted file mode 100644 index db44e4c..0000000 --- a/forged/internal/unsorted/http_template.go +++ /dev/null @@ -1,18 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "log/slog" - "net/http" -) - -// renderTemplate abstracts out the annoyances of reporting template rendering -// errors. 
-func (s *Server) renderTemplate(w http.ResponseWriter, templateName string, params map[string]any) { - if err := s.templates.ExecuteTemplate(w, templateName, params); err != nil { - http.Error(w, "error rendering template: "+err.Error(), http.StatusInternalServerError) - slog.Error("error rendering template", "error", err.Error()) - } -} diff --git a/forged/internal/unsorted/lmtp_handle_patch.go b/forged/internal/unsorted/lmtp_handle_patch.go deleted file mode 100644 index b258bfc..0000000 --- a/forged/internal/unsorted/lmtp_handle_patch.go +++ /dev/null @@ -1,133 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "fmt" - "io" - "os" - "os/exec" - "strings" - "time" - - "github.com/bluekeyes/go-gitdiff/gitdiff" - "github.com/go-git/go-git/v5" - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -func (s *Server) lmtpHandlePatch(session *lmtpSession, groupPath []string, repoName string, mbox io.Reader) (err error) { - var diffFiles []*gitdiff.File - var preamble string - if diffFiles, preamble, err = gitdiff.Parse(mbox); err != nil { - return fmt.Errorf("failed to parse patch: %w", err) - } - - var header *gitdiff.PatchHeader - if header, err = gitdiff.ParsePatchHeader(preamble); err != nil { - return fmt.Errorf("failed to parse patch headers: %w", err) - } - - var repo *git.Repository - var fsPath string - repo, _, _, fsPath, err = s.openRepo(session.ctx, groupPath, repoName) - if err != nil { - return fmt.Errorf("failed to open repo: %w", err) - } - - headRef, err := repo.Head() - if err != nil { - return fmt.Errorf("failed to get repo head hash: %w", err) - } - headCommit, err := repo.CommitObject(headRef.Hash()) - if err != nil { - return fmt.Errorf("failed to get repo head commit: %w", err) - } - headTree, err := headCommit.Tree() - if err != nil { - return fmt.Errorf("failed to get repo head tree: %w", err) - } - - 
headTreeHash := headTree.Hash.String() - - blobUpdates := make(map[string][]byte) - for _, diffFile := range diffFiles { - sourceFile, err := headTree.File(diffFile.OldName) - if err != nil { - return fmt.Errorf("failed to get file at old name %#v: %w", diffFile.OldName, err) - } - sourceString, err := sourceFile.Contents() - if err != nil { - return fmt.Errorf("failed to get contents: %w", err) - } - - sourceBuf := bytes.NewReader(misc.StringToBytes(sourceString)) - var patchedBuf bytes.Buffer - if err := gitdiff.Apply(&patchedBuf, sourceBuf, diffFile); err != nil { - return fmt.Errorf("failed to apply patch: %w", err) - } - - var hashBuf bytes.Buffer - - // It's really difficult to do this via go-git so we're just - // going to use upstream git for now. - // TODO - cmd := exec.CommandContext(session.ctx, "git", "hash-object", "-w", "-t", "blob", "--stdin") - cmd.Env = append(os.Environ(), "GIT_DIR="+fsPath) - cmd.Stdout = &hashBuf - cmd.Stdin = &patchedBuf - if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to run git hash-object: %w", err) - } - - newHashStr := strings.TrimSpace(hashBuf.String()) - newHash, err := hex.DecodeString(newHashStr) - if err != nil { - return fmt.Errorf("failed to decode hex string from git: %w", err) - } - - blobUpdates[diffFile.NewName] = newHash - if diffFile.NewName != diffFile.OldName { - blobUpdates[diffFile.OldName] = nil // Mark for deletion. 
- } - } - - newTreeSha, err := buildTreeRecursive(session.ctx, fsPath, headTreeHash, blobUpdates) - if err != nil { - return fmt.Errorf("failed to recursively build a tree: %w", err) - } - - commitMsg := header.Title - if header.Body != "" { - commitMsg += "\n\n" + header.Body - } - - env := append(os.Environ(), - "GIT_DIR="+fsPath, - "GIT_AUTHOR_NAME="+header.Author.Name, - "GIT_AUTHOR_EMAIL="+header.Author.Email, - "GIT_AUTHOR_DATE="+header.AuthorDate.Format(time.RFC3339), - ) - commitCmd := exec.CommandContext(session.ctx, "git", "commit-tree", newTreeSha, "-p", headCommit.Hash.String(), "-m", commitMsg) - commitCmd.Env = env - - var commitOut bytes.Buffer - commitCmd.Stdout = &commitOut - if err := commitCmd.Run(); err != nil { - return fmt.Errorf("failed to commit tree: %w", err) - } - newCommitSha := strings.TrimSpace(commitOut.String()) - - newBranchName := rand.Text() - - refCmd := exec.CommandContext(session.ctx, "git", "update-ref", "refs/heads/contrib/"+newBranchName, newCommitSha) //#nosec G204 - refCmd.Env = append(os.Environ(), "GIT_DIR="+fsPath) - if err := refCmd.Run(); err != nil { - return fmt.Errorf("failed to update ref: %w", err) - } - - return nil -} diff --git a/forged/internal/unsorted/lmtp_server.go b/forged/internal/unsorted/lmtp_server.go deleted file mode 100644 index 1e94894..0000000 --- a/forged/internal/unsorted/lmtp_server.go +++ /dev/null @@ -1,204 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu -// SPDX-FileCopyrightText: Copyright (c) 2024 Robin Jarry - -package unsorted - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "log/slog" - "net" - "strings" - "time" - - "github.com/emersion/go-message" - "github.com/emersion/go-smtp" - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -type lmtpHandler struct{} - -type lmtpSession struct { - from string - to []string - ctx context.Context - cancel context.CancelFunc - s Server -} - -func (session *lmtpSession) 
Reset() { - session.from = "" - session.to = nil -} - -func (session *lmtpSession) Logout() error { - session.cancel() - return nil -} - -func (session *lmtpSession) AuthPlain(_, _ string) error { - return nil -} - -func (session *lmtpSession) Mail(from string, _ *smtp.MailOptions) error { - session.from = from - return nil -} - -func (session *lmtpSession) Rcpt(to string, _ *smtp.RcptOptions) error { - session.to = append(session.to, to) - return nil -} - -func (*lmtpHandler) NewSession(_ *smtp.Conn) (smtp.Session, error) { - ctx, cancel := context.WithCancel(context.Background()) - session := &lmtpSession{ - ctx: ctx, - cancel: cancel, - } - return session, nil -} - -func (s *Server) serveLMTP(listener net.Listener) error { - smtpServer := smtp.NewServer(&lmtpHandler{}) - smtpServer.LMTP = true - smtpServer.Domain = s.config.LMTP.Domain - smtpServer.Addr = s.config.LMTP.Socket - smtpServer.WriteTimeout = time.Duration(s.config.LMTP.WriteTimeout) * time.Second - smtpServer.ReadTimeout = time.Duration(s.config.LMTP.ReadTimeout) * time.Second - smtpServer.EnableSMTPUTF8 = true - return smtpServer.Serve(listener) -} - -func (session *lmtpSession) Data(r io.Reader) error { - var ( - email *message.Entity - from string - to []string - err error - buf bytes.Buffer - data []byte - n int64 - ) - - n, err = io.CopyN(&buf, r, session.s.config.LMTP.MaxSize) - switch { - case n == session.s.config.LMTP.MaxSize: - err = errors.New("Message too big.") - // drain whatever is left in the pipe - _, _ = io.Copy(io.Discard, r) - goto end - case errors.Is(err, io.EOF): - // message was smaller than max size - break - case err != nil: - goto end - } - - data = buf.Bytes() - - email, err = message.Read(bytes.NewReader(data)) - if err != nil && message.IsUnknownCharset(err) { - goto end - } - - switch strings.ToLower(email.Header.Get("Auto-Submitted")) { - case "auto-generated", "auto-replied": - // Disregard automatic emails like OOO replies - slog.Info("ignoring automatic message", - 
"from", session.from, - "to", strings.Join(session.to, ","), - "message-id", email.Header.Get("Message-Id"), - "subject", email.Header.Get("Subject"), - ) - goto end - } - - slog.Info("message received", - "from", session.from, - "to", strings.Join(session.to, ","), - "message-id", email.Header.Get("Message-Id"), - "subject", email.Header.Get("Subject"), - ) - - // Make local copies of the values before to ensure the references will - // still be valid when the task is run. - from = session.from - to = session.to - - _ = from - - for _, to := range to { - if !strings.HasSuffix(to, "@"+session.s.config.LMTP.Domain) { - continue - } - localPart := to[:len(to)-len("@"+session.s.config.LMTP.Domain)] - var segments []string - segments, err = misc.PathToSegments(localPart) - if err != nil { - // TODO: Should the entire email fail or should we just - // notify them out of band? - err = fmt.Errorf("cannot parse path: %w", err) - goto end - } - sepIndex := -1 - for i, part := range segments { - if part == "-" { - sepIndex = i - break - } - } - if segments[len(segments)-1] == "" { - segments = segments[:len(segments)-1] // We don't care about dir or not. - } - if sepIndex == -1 || len(segments) <= sepIndex+2 { - err = errors.New("illegal path") - goto end - } - - mbox := bytes.Buffer{} - if _, err = fmt.Fprint(&mbox, "From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001\r\n"); err != nil { - slog.Error("error handling patch... malloc???", "error", err) - goto end - } - data = bytes.ReplaceAll(data, []byte("\r\n"), []byte("\n")) - if _, err = mbox.Write(data); err != nil { - slog.Error("error handling patch... malloc???", "error", err) - goto end - } - // TODO: Is mbox's From escaping necessary here? 
- - groupPath := segments[:sepIndex] - moduleType := segments[sepIndex+1] - moduleName := segments[sepIndex+2] - switch moduleType { - case "repos": - err = session.s.lmtpHandlePatch(session, groupPath, moduleName, &mbox) - if err != nil { - slog.Error("error handling patch", "error", err) - goto end - } - default: - err = errors.New("Emailing any endpoint other than repositories, is not supported yet.") // TODO - goto end - } - } - -end: - session.to = nil - session.from = "" - switch err { - case nil: - return nil - default: - return &smtp.SMTPError{ - Code: 550, - Message: "Permanent failure: " + err.Error(), - EnhancedCode: [3]int{5, 7, 1}, - } - } -} diff --git a/forged/internal/unsorted/remote_url.go b/forged/internal/unsorted/remote_url.go deleted file mode 100644 index f4d4c58..0000000 --- a/forged/internal/unsorted/remote_url.go +++ /dev/null @@ -1,25 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "net/url" - "strings" - - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -// We don't use path.Join because it collapses multiple slashes into one. - -// genSSHRemoteURL generates SSH remote URLs from a given group path and repo -// name. -func (s *Server) genSSHRemoteURL(groupPath []string, repoName string) string { - return strings.TrimSuffix(s.config.SSH.Root, "/") + "/" + misc.SegmentsToURL(groupPath) + "/-/repos/" + url.PathEscape(repoName) -} - -// genHTTPRemoteURL generates HTTP remote URLs from a given group path and repo -// name. 
-func (s *Server) genHTTPRemoteURL(groupPath []string, repoName string) string { - return strings.TrimSuffix(s.config.HTTP.Root, "/") + "/" + misc.SegmentsToURL(groupPath) + "/-/repos/" + url.PathEscape(repoName) -} diff --git a/forged/internal/unsorted/resources.go b/forged/internal/unsorted/resources.go deleted file mode 100644 index 692b454..0000000 --- a/forged/internal/unsorted/resources.go +++ /dev/null @@ -1,56 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "html/template" - "io/fs" - - "github.com/tdewolff/minify/v2" - "github.com/tdewolff/minify/v2/html" - "go.lindenii.runxiyu.org/forge/forged/internal/embed" - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -// loadTemplates minifies and loads HTML templates. -func (s *Server) loadTemplates() (err error) { - minifier := minify.New() - minifierOptions := html.Minifier{ - TemplateDelims: [2]string{"{{", "}}"}, - KeepDefaultAttrVals: true, - } //exhaustruct:ignore - minifier.Add("text/html", &minifierOptions) - - s.templates = template.New("templates").Funcs(template.FuncMap{ - "first_line": misc.FirstLine, - "path_escape": misc.PathEscape, - "query_escape": misc.QueryEscape, - "dereference_error": misc.DereferenceOrZero[error], - "minus": misc.Minus, - }) - - err = fs.WalkDir(embed.Resources, "forged/templates", func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err - } - if !d.IsDir() { - content, err := fs.ReadFile(embed.Resources, path) - if err != nil { - return err - } - - minified, err := minifier.Bytes("text/html", content) - if err != nil { - return err - } - - _, err = s.templates.Parse(misc.BytesToString(minified)) - if err != nil { - return err - } - } - return nil - }) - return err -} diff --git a/forged/internal/unsorted/server.go b/forged/internal/unsorted/server.go deleted file mode 100644 index 84379b0..0000000 --- a/forged/internal/unsorted/server.go +++ 
/dev/null @@ -1,236 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "errors" - "html/template" - "io/fs" - "log" - "log/slog" - "net" - "net/http" - _ "net/http/pprof" - "os" - "os/exec" - "path/filepath" - "syscall" - "time" - - "go.lindenii.runxiyu.org/forge/forged/internal/cmap" - "go.lindenii.runxiyu.org/forge/forged/internal/database" - "go.lindenii.runxiyu.org/forge/forged/internal/embed" - "go.lindenii.runxiyu.org/forge/forged/internal/irc" - "go.lindenii.runxiyu.org/forge/forged/internal/misc" - goSSH "golang.org/x/crypto/ssh" -) - -type Server struct { - config Config - - database database.Database - - sourceHandler http.Handler - staticHandler http.Handler - - // globalData is passed as "global" when rendering HTML templates. - globalData map[string]any - - serverPubkeyString string - serverPubkeyFP string - serverPubkey goSSH.PublicKey - - // packPasses contains hook cookies mapped to their packPass. 
- packPasses cmap.Map[string, packPass] - - templates *template.Template - - ircBot *irc.Bot - - ready bool -} - -func NewServer(configPath string) (*Server, error) { - s := &Server{ - globalData: make(map[string]any), - } //exhaustruct:ignore - - s.sourceHandler = http.StripPrefix( - "/-/source/", - http.FileServer(http.FS(embed.Source)), - ) - staticFS, err := fs.Sub(embed.Resources, "forged/static") - if err != nil { - return s, err - } - s.staticHandler = http.StripPrefix("/-/static/", http.FileServer(http.FS(staticFS))) - s.globalData = map[string]any{ - "server_public_key_string": &s.serverPubkeyString, - "server_public_key_fingerprint": &s.serverPubkeyFP, - "forge_version": version, - // Some other ones are populated after config parsing - } - - if err := s.loadConfig(configPath); err != nil { - return s, err - } - - misc.NoneOrPanic(s.loadTemplates()) - misc.NoneOrPanic(misc.DeployBinary(misc.FirstOrPanic(embed.Resources.Open("git2d/git2d")), s.config.Git.DaemonPath)) - misc.NoneOrPanic(misc.DeployBinary(misc.FirstOrPanic(embed.Resources.Open("hookc/hookc")), filepath.Join(s.config.Hooks.Execs, "pre-receive"))) - misc.NoneOrPanic(os.Chmod(filepath.Join(s.config.Hooks.Execs, "pre-receive"), 0o755)) - - s.ready = true - - return s, nil -} - -func (s *Server) Run() error { - if !s.ready { - return errors.New("not ready") - } - - // Launch Git2D - go func() { - cmd := exec.Command(s.config.Git.DaemonPath, s.config.Git.Socket) //#nosec G204 - cmd.Stderr = log.Writer() - cmd.Stdout = log.Writer() - if err := cmd.Run(); err != nil { - panic(err) - } - }() - - // UNIX socket listener for hooks - { - hooksListener, err := net.Listen("unix", s.config.Hooks.Socket) - if errors.Is(err, syscall.EADDRINUSE) { - slog.Warn("removing existing socket", "path", s.config.Hooks.Socket) - if err = syscall.Unlink(s.config.Hooks.Socket); err != nil { - slog.Error("removing existing socket", "path", s.config.Hooks.Socket, "error", err) - os.Exit(1) - } - if hooksListener, err = 
net.Listen("unix", s.config.Hooks.Socket); err != nil { - slog.Error("listening hooks", "error", err) - os.Exit(1) - } - } else if err != nil { - slog.Error("listening hooks", "error", err) - os.Exit(1) - } - slog.Info("listening hooks on unix", "path", s.config.Hooks.Socket) - go func() { - if err = s.serveGitHooks(hooksListener); err != nil { - slog.Error("serving hooks", "error", err) - os.Exit(1) - } - }() - } - - // UNIX socket listener for LMTP - { - lmtpListener, err := net.Listen("unix", s.config.LMTP.Socket) - if errors.Is(err, syscall.EADDRINUSE) { - slog.Warn("removing existing socket", "path", s.config.LMTP.Socket) - if err = syscall.Unlink(s.config.LMTP.Socket); err != nil { - slog.Error("removing existing socket", "path", s.config.LMTP.Socket, "error", err) - os.Exit(1) - } - if lmtpListener, err = net.Listen("unix", s.config.LMTP.Socket); err != nil { - slog.Error("listening LMTP", "error", err) - os.Exit(1) - } - } else if err != nil { - slog.Error("listening LMTP", "error", err) - os.Exit(1) - } - slog.Info("listening LMTP on unix", "path", s.config.LMTP.Socket) - go func() { - if err = s.serveLMTP(lmtpListener); err != nil { - slog.Error("serving LMTP", "error", err) - os.Exit(1) - } - }() - } - - // SSH listener - { - sshListener, err := net.Listen(s.config.SSH.Net, s.config.SSH.Addr) - if errors.Is(err, syscall.EADDRINUSE) && s.config.SSH.Net == "unix" { - slog.Warn("removing existing socket", "path", s.config.SSH.Addr) - if err = syscall.Unlink(s.config.SSH.Addr); err != nil { - slog.Error("removing existing socket", "path", s.config.SSH.Addr, "error", err) - os.Exit(1) - } - if sshListener, err = net.Listen(s.config.SSH.Net, s.config.SSH.Addr); err != nil { - slog.Error("listening SSH", "error", err) - os.Exit(1) - } - } else if err != nil { - slog.Error("listening SSH", "error", err) - os.Exit(1) - } - slog.Info("listening SSH on", "net", s.config.SSH.Net, "addr", s.config.SSH.Addr) - go func() { - if err = s.serveSSH(sshListener); err != nil 
{ - slog.Error("serving SSH", "error", err) - os.Exit(1) - } - }() - } - - // HTTP listener - { - httpListener, err := net.Listen(s.config.HTTP.Net, s.config.HTTP.Addr) - if errors.Is(err, syscall.EADDRINUSE) && s.config.HTTP.Net == "unix" { - slog.Warn("removing existing socket", "path", s.config.HTTP.Addr) - if err = syscall.Unlink(s.config.HTTP.Addr); err != nil { - slog.Error("removing existing socket", "path", s.config.HTTP.Addr, "error", err) - os.Exit(1) - } - if httpListener, err = net.Listen(s.config.HTTP.Net, s.config.HTTP.Addr); err != nil { - slog.Error("listening HTTP", "error", err) - os.Exit(1) - } - } else if err != nil { - slog.Error("listening HTTP", "error", err) - os.Exit(1) - } - server := http.Server{ - Handler: s, - ReadTimeout: time.Duration(s.config.HTTP.ReadTimeout) * time.Second, - WriteTimeout: time.Duration(s.config.HTTP.ReadTimeout) * time.Second, - IdleTimeout: time.Duration(s.config.HTTP.ReadTimeout) * time.Second, - } //exhaustruct:ignore - slog.Info("listening HTTP on", "net", s.config.HTTP.Net, "addr", s.config.HTTP.Addr) - go func() { - if err = server.Serve(httpListener); err != nil && !errors.Is(err, http.ErrServerClosed) { - slog.Error("serving HTTP", "error", err) - os.Exit(1) - } - }() - } - - // Pprof listener - { - pprofListener, err := net.Listen(s.config.Pprof.Net, s.config.Pprof.Addr) - if err != nil { - slog.Error("listening pprof", "error", err) - os.Exit(1) - } - - slog.Info("listening pprof on", "net", s.config.Pprof.Net, "addr", s.config.Pprof.Addr) - go func() { - if err := http.Serve(pprofListener, nil); err != nil { - slog.Error("serving pprof", "error", err) - os.Exit(1) - } - }() - } - - s.ircBot = irc.NewBot(&s.config.IRC) - // IRC bot - go s.ircBot.ConnectLoop() - - select {} -} diff --git a/forged/internal/unsorted/ssh_handle_receive_pack.go b/forged/internal/unsorted/ssh_handle_receive_pack.go deleted file mode 100644 index a354273..0000000 --- a/forged/internal/unsorted/ssh_handle_receive_pack.go +++ 
/dev/null @@ -1,131 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "errors" - "fmt" - "os" - "os/exec" - - gliderSSH "github.com/gliderlabs/ssh" - "github.com/go-git/go-git/v5" -) - -// packPass contains information known when handling incoming SSH connections -// that then needs to be used in hook socket connection handlers. See hookc(1). -type packPass struct { - session gliderSSH.Session - repo *git.Repository - pubkey string - directAccess bool - repoPath string - userID int - userType string - repoID int - groupPath []string - repoName string - contribReq string -} - -// sshHandleRecvPack handles attempts to push to repos. -func (s *Server) sshHandleRecvPack(session gliderSSH.Session, pubkey, repoIdentifier string) (err error) { - groupPath, repoName, repoID, repoPath, directAccess, contribReq, userType, userID, err := s.getRepoInfo2(session.Context(), repoIdentifier, pubkey) - if err != nil { - return err - } - repo, err := git.PlainOpen(repoPath) - if err != nil { - return err - } - - repoConf, err := repo.Config() - if err != nil { - return err - } - - repoConfCore := repoConf.Raw.Section("core") - if repoConfCore == nil { - return errors.New("repository has no core section in config") - } - - hooksPath := repoConfCore.OptionAll("hooksPath") - if len(hooksPath) != 1 || hooksPath[0] != s.config.Hooks.Execs { - return errors.New("repository has hooksPath set to an unexpected value") - } - - if !directAccess { - switch contribReq { - case "closed": - if !directAccess { - return errors.New("you need direct access to push to this repo") - } - case "registered_user": - if userType != "registered" { - return errors.New("you need to be a registered user to push to this repo") - } - case "ssh_pubkey": - fallthrough - case "federated": - if pubkey == "" { - return errors.New("you need to have an SSH public key to push to this repo") - } - if userType == "" { - userID, err = 
s.addUserSSH(session.Context(), pubkey) - if err != nil { - return err - } - fmt.Fprintln(session.Stderr(), "you are now registered as user ID", userID) - userType = "pubkey_only" - } - - case "public": - default: - panic("unknown contrib_requirements value " + contribReq) - } - } - - cookie, err := randomUrlsafeStr(16) - if err != nil { - fmt.Fprintln(session.Stderr(), "Error while generating cookie:", err) - } - - s.packPasses.Store(cookie, packPass{ - session: session, - pubkey: pubkey, - directAccess: directAccess, - repoPath: repoPath, - userID: userID, - repoID: repoID, - groupPath: groupPath, - repoName: repoName, - repo: repo, - contribReq: contribReq, - userType: userType, - }) - defer s.packPasses.Delete(cookie) - // The Delete won't execute until proc.Wait returns unless something - // horribly wrong such as a panic occurs. - - proc := exec.CommandContext(session.Context(), "git-receive-pack", repoPath) - proc.Env = append(os.Environ(), - "LINDENII_FORGE_HOOKS_SOCKET_PATH="+s.config.Hooks.Socket, - "LINDENII_FORGE_HOOKS_COOKIE="+cookie, - ) - proc.Stdin = session - proc.Stdout = session - proc.Stderr = session.Stderr() - - if err = proc.Start(); err != nil { - fmt.Fprintln(session.Stderr(), "Error while starting process:", err) - return err - } - - err = proc.Wait() - if err != nil { - fmt.Fprintln(session.Stderr(), "Error while waiting for process:", err) - } - - return err -} diff --git a/forged/internal/unsorted/ssh_handle_upload_pack.go b/forged/internal/unsorted/ssh_handle_upload_pack.go deleted file mode 100644 index 735a053..0000000 --- a/forged/internal/unsorted/ssh_handle_upload_pack.go +++ /dev/null @@ -1,39 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "fmt" - "os" - "os/exec" - - glider_ssh "github.com/gliderlabs/ssh" -) - -// sshHandleUploadPack handles clones/fetches. It just uses git-upload-pack -// and has no ACL checks. 
-func (s *Server) sshHandleUploadPack(session glider_ssh.Session, pubkey, repoIdentifier string) (err error) { - var repoPath string - if _, _, _, repoPath, _, _, _, _, err = s.getRepoInfo2(session.Context(), repoIdentifier, pubkey); err != nil { - return err - } - - proc := exec.CommandContext(session.Context(), "git-upload-pack", repoPath) - proc.Env = append(os.Environ(), "LINDENII_FORGE_HOOKS_SOCKET_PATH="+s.config.Hooks.Socket) - proc.Stdin = session - proc.Stdout = session - proc.Stderr = session.Stderr() - - if err = proc.Start(); err != nil { - fmt.Fprintln(session.Stderr(), "Error while starting process:", err) - return err - } - - err = proc.Wait() - if err != nil { - fmt.Fprintln(session.Stderr(), "Error while waiting for process:", err) - } - - return err -} diff --git a/forged/internal/unsorted/ssh_server.go b/forged/internal/unsorted/ssh_server.go deleted file mode 100644 index 43cc0c4..0000000 --- a/forged/internal/unsorted/ssh_server.go +++ /dev/null @@ -1,96 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "fmt" - "log/slog" - "net" - "os" - "strings" - - gliderSSH "github.com/gliderlabs/ssh" - "go.lindenii.runxiyu.org/forge/forged/internal/ansiec" - "go.lindenii.runxiyu.org/forge/forged/internal/misc" - goSSH "golang.org/x/crypto/ssh" -) - -// serveSSH serves SSH on a [net.Listener]. The listener should generally be a -// TCP listener, although AF_UNIX SOCK_STREAM listeners may be appropriate in -// rare cases. 
-func (s *Server) serveSSH(listener net.Listener) error { - var hostKeyBytes []byte - var hostKey goSSH.Signer - var err error - var server *gliderSSH.Server - - if hostKeyBytes, err = os.ReadFile(s.config.SSH.Key); err != nil { - return err - } - - if hostKey, err = goSSH.ParsePrivateKey(hostKeyBytes); err != nil { - return err - } - - s.serverPubkey = hostKey.PublicKey() - s.serverPubkeyString = misc.BytesToString(goSSH.MarshalAuthorizedKey(s.serverPubkey)) - s.serverPubkeyFP = goSSH.FingerprintSHA256(s.serverPubkey) - - server = &gliderSSH.Server{ - Handler: func(session gliderSSH.Session) { - clientPubkey := session.PublicKey() - var clientPubkeyStr string - if clientPubkey != nil { - clientPubkeyStr = strings.TrimSuffix(misc.BytesToString(goSSH.MarshalAuthorizedKey(clientPubkey)), "\n") - } - - slog.Info("incoming ssh", "addr", session.RemoteAddr().String(), "key", clientPubkeyStr, "command", session.RawCommand()) - fmt.Fprintln(session.Stderr(), ansiec.Blue+"Lindenii Forge "+version+", source at "+strings.TrimSuffix(s.config.HTTP.Root, "/")+"/-/source/"+ansiec.Reset+"\r") - - cmd := session.Command() - - if len(cmd) < 2 { - fmt.Fprintln(session.Stderr(), "Insufficient arguments\r") - return - } - - switch cmd[0] { - case "git-upload-pack": - if len(cmd) > 2 { - fmt.Fprintln(session.Stderr(), "Too many arguments\r") - return - } - err = s.sshHandleUploadPack(session, clientPubkeyStr, cmd[1]) - case "git-receive-pack": - if len(cmd) > 2 { - fmt.Fprintln(session.Stderr(), "Too many arguments\r") - return - } - err = s.sshHandleRecvPack(session, clientPubkeyStr, cmd[1]) - default: - fmt.Fprintln(session.Stderr(), "Unsupported command: "+cmd[0]+"\r") - return - } - if err != nil { - fmt.Fprintln(session.Stderr(), err.Error()) - return - } - }, - PublicKeyHandler: func(_ gliderSSH.Context, _ gliderSSH.PublicKey) bool { return true }, - KeyboardInteractiveHandler: func(_ gliderSSH.Context, _ goSSH.KeyboardInteractiveChallenge) bool { return true }, - // It is 
intentional that we do not check any credentials and accept all connections. - // This allows all users to connect and clone repositories. However, the public key - // is passed to handlers, so e.g. the push handler could check the key and reject the - // push if it needs to. - } //exhaustruct:ignore - - server.AddHostKey(hostKey) - - if err = server.Serve(listener); err != nil { - slog.Error("error serving SSH", "error", err.Error()) - os.Exit(1) - } - - return nil -} diff --git a/forged/internal/unsorted/ssh_utils.go b/forged/internal/unsorted/ssh_utils.go deleted file mode 100644 index 6f50a87..0000000 --- a/forged/internal/unsorted/ssh_utils.go +++ /dev/null @@ -1,79 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "context" - "errors" - "fmt" - "io" - "net/url" - - "go.lindenii.runxiyu.org/forge/forged/internal/ansiec" - "go.lindenii.runxiyu.org/forge/forged/internal/misc" -) - -var errIllegalSSHRepoPath = errors.New("illegal SSH repo path") - -// getRepoInfo2 also fetches repo information... it should be deprecated and -// implemented in individual handlers. 
-func (s *Server) getRepoInfo2(ctx context.Context, sshPath, sshPubkey string) (groupPath []string, repoName string, repoID int, repoPath string, directAccess bool, contribReq, userType string, userID int, err error) { - var segments []string - var sepIndex int - var moduleType, moduleName string - - segments, err = misc.PathToSegments(sshPath) - if err != nil { - return - } - - for i, segment := range segments { - var err error - segments[i], err = url.PathUnescape(segment) - if err != nil { - return []string{}, "", 0, "", false, "", "", 0, err - } - } - - if segments[0] == "-" { - return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath - } - - sepIndex = -1 - for i, part := range segments { - if part == "-" { - sepIndex = i - break - } - } - if segments[len(segments)-1] == "" { - segments = segments[:len(segments)-1] - } - - switch { - case sepIndex == -1: - return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath - case len(segments) <= sepIndex+2: - return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath - } - - groupPath = segments[:sepIndex] - moduleType = segments[sepIndex+1] - moduleName = segments[sepIndex+2] - repoName = moduleName - switch moduleType { - case "repos": - _1, _2, _3, _4, _5, _6, _7 := s.getRepoInfo(ctx, groupPath, moduleName, sshPubkey) - return groupPath, repoName, _1, _2, _3, _4, _5, _6, _7 - default: - return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath - } -} - -// writeRedError is a helper function that basically does a Fprintf but makes -// the entire thing red, in terms of ANSI escape sequences. It's useful when -// producing error messages on SSH connections. 
-func writeRedError(w io.Writer, format string, args ...any) { - fmt.Fprintln(w, ansiec.Red+fmt.Sprintf(format, args...)+ansiec.Reset) -} diff --git a/forged/internal/unsorted/unsorted.go b/forged/internal/unsorted/unsorted.go deleted file mode 100644 index f26b0e4..0000000 --- a/forged/internal/unsorted/unsorted.go +++ /dev/null @@ -1,5 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -// Package unsorted is where unsorted Go files from the old structure are kept. -package unsorted diff --git a/forged/internal/unsorted/users.go b/forged/internal/unsorted/users.go deleted file mode 100644 index 0f72eed..0000000 --- a/forged/internal/unsorted/users.go +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -import ( - "context" - - "github.com/jackc/pgx/v5" -) - -// addUserSSH adds a new user solely based on their SSH public key. -// -// TODO: Audit all users of this function. 
-func (s *Server) addUserSSH(ctx context.Context, pubkey string) (userID int, err error) { - var txn pgx.Tx - - if txn, err = s.database.Begin(ctx); err != nil { - return - } - defer func() { - _ = txn.Rollback(ctx) - }() - - if err = txn.QueryRow(ctx, `INSERT INTO users (type) VALUES ('pubkey_only') RETURNING id`).Scan(&userID); err != nil { - return - } - - if _, err = txn.Exec(ctx, `INSERT INTO ssh_public_keys (key_string, user_id) VALUES ($1, $2)`, pubkey, userID); err != nil { - return - } - - err = txn.Commit(ctx) - return -} diff --git a/forged/internal/unsorted/version.go b/forged/internal/unsorted/version.go deleted file mode 100644 index 52c0f32..0000000 --- a/forged/internal/unsorted/version.go +++ /dev/null @@ -1,6 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package unsorted - -var version = "unknown" diff --git a/forged/internal/web/error_pages.go b/forged/internal/web/error_pages.go deleted file mode 100644 index 2ba9a1a..0000000 --- a/forged/internal/web/error_pages.go +++ /dev/null @@ -1,60 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -package web - -import ( - "html/template" - "net/http" -) - -// ErrorPage404 renders a 404 Not Found error page using the "404" template. -func ErrorPage404(templates *template.Template, w http.ResponseWriter, params map[string]any) { - w.WriteHeader(http.StatusNotFound) - _ = templates.ExecuteTemplate(w, "404", params) -} - -// ErrorPage400 renders a 400 Bad Request error page using the "400" template. -// The error message is passed via the "complete_error_msg" template param. 
-func ErrorPage400(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) { - w.WriteHeader(http.StatusBadRequest) - params["complete_error_msg"] = msg - _ = templates.ExecuteTemplate(w, "400", params) -} - -// ErrorPage400Colon renders a 400 Bad Request error page telling the user -// that we migrated from : to -. -func ErrorPage400Colon(templates *template.Template, w http.ResponseWriter, params map[string]any) { - w.WriteHeader(http.StatusBadRequest) - _ = templates.ExecuteTemplate(w, "400_colon", params) -} - -// ErrorPage403 renders a 403 Forbidden error page using the "403" template. -// The error message is passed via the "complete_error_msg" template param. -func ErrorPage403(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) { - w.WriteHeader(http.StatusForbidden) - params["complete_error_msg"] = msg - _ = templates.ExecuteTemplate(w, "403", params) -} - -// ErrorPage451 renders a 451 Unavailable For Legal Reasons error page using the "451" template. -// The error message is passed via the "complete_error_msg" template param. -func ErrorPage451(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) { - w.WriteHeader(http.StatusUnavailableForLegalReasons) - params["complete_error_msg"] = msg - _ = templates.ExecuteTemplate(w, "451", params) -} - -// ErrorPage500 renders a 500 Internal Server Error page using the "500" template. -// The error message is passed via the "complete_error_msg" template param. -func ErrorPage500(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) { - w.WriteHeader(http.StatusInternalServerError) - params["complete_error_msg"] = msg - _ = templates.ExecuteTemplate(w, "500", params) -} - -// ErrorPage501 renders a 501 Not Implemented error page using the "501" template. 
-func ErrorPage501(templates *template.Template, w http.ResponseWriter, params map[string]any) { - w.WriteHeader(http.StatusNotImplemented) - _ = templates.ExecuteTemplate(w, "501", params) -} diff --git a/forged/internal/web/web.go b/forged/internal/web/web.go deleted file mode 100644 index f4d15f8..0000000 --- a/forged/internal/web/web.go +++ /dev/null @@ -1,5 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu - -// Package web provides web-facing components of the forge. -package web diff --git a/forged/main.go b/forged/main.go index fde15d1..38e22ff 100644 --- a/forged/main.go +++ b/forged/main.go @@ -5,9 +5,10 @@ package main import ( + "context" "flag" - "go.lindenii.runxiyu.org/forge/forged/internal/unsorted" + "go.lindenii.runxiyu.org/forge/forged/internal/server" ) func main() { @@ -18,10 +19,10 @@ func main() { ) flag.Parse() - s, err := unsorted.NewServer(*configPath) + s, err := server.New(*configPath) if err != nil { panic(err) } - panic(s.Run()) + panic(s.Run(context.Background())) } diff --git a/forged/sql/queries/groups.sql b/forged/sql/queries/groups.sql new file mode 100644 index 0000000..f067aeb --- /dev/null +++ b/forged/sql/queries/groups.sql @@ -0,0 +1,47 @@ +-- name: GetRootGroups :many +SELECT name, COALESCE(description, '') FROM groups WHERE parent_group IS NULL; + +-- name: GetGroupByPath :one +WITH RECURSIVE group_path_cte AS ( + SELECT + id, + parent_group, + name, + 1 AS depth + FROM groups + WHERE name = ($1::text[])[1] + AND parent_group IS NULL + + UNION ALL + + SELECT + g.id, + g.parent_group, + g.name, + group_path_cte.depth + 1 + FROM groups g + JOIN group_path_cte ON g.parent_group = group_path_cte.id + WHERE g.name = ($1::text[])[group_path_cte.depth + 1] + AND group_path_cte.depth + 1 <= cardinality($1::text[]) +) +SELECT + g.id, + g.name, + g.parent_group, + COALESCE(g.description, '') AS description, + EXISTS ( + SELECT 1 + FROM user_group_roles ugr + WHERE ugr.user_id = 
$2 + AND ugr.group_id = g.id + ) AS has_role +FROM group_path_cte c +JOIN groups g ON g.id = c.id +WHERE c.depth = cardinality($1::text[]); + + +-- name: GetReposInGroup :many +SELECT name, COALESCE(description, '') FROM repos WHERE group_id = $1; + +-- name: GetSubgroups :many +SELECT name, COALESCE(description, '') FROM groups WHERE parent_group = $1; diff --git a/forged/sql/queries/login.sql b/forged/sql/queries/login.sql new file mode 100644 index 0000000..ffc4026 --- /dev/null +++ b/forged/sql/queries/login.sql @@ -0,0 +1,8 @@ +-- name: GetUserCreds :one +SELECT id, COALESCE(password_hash, '') FROM users WHERE username = $1; + +-- name: InsertSession :exec +INSERT INTO sessions (user_id, token_hash, expires_at) VALUES ($1, $2, $3); + +-- name: GetUserFromSession :one +SELECT user_id, COALESCE(username, '') FROM users u JOIN sessions s ON u.id = s.user_id WHERE s.token_hash = $1; diff --git a/forged/sql/schema.sql b/forged/sql/schema.sql new file mode 100644 index 0000000..2f5ef9a --- /dev/null +++ b/forged/sql/schema.sql @@ -0,0 +1,226 @@ +-- SPDX-License-Identifier: AGPL-3.0-only +-- SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu + +-- Currently, slugs accept arbitrary unicode text. We should +-- look into normalization options later. +-- May consider using citext and limiting it to safe characters. 
+ +CREATE TABLE groups ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + name TEXT NOT NULL, + parent_group BIGINT REFERENCES groups(id) ON DELETE RESTRICT, + description TEXT, + UNIQUE NULLS NOT DISTINCT (parent_group, name) +); +CREATE INDEX ggroups_parent_idx ON groups(parent_group); + +DO $$ BEGIN + CREATE TYPE contrib_requirement AS ENUM ('closed','registered_user','federated','ssh_pubkey','open'); + -- closed means only those with direct access; each layer adds that level of user +EXCEPTION WHEN duplicate_object THEN END $$; +CREATE TABLE repos ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE RESTRICT, -- I mean, should be CASCADE but deleting Git repos on disk also needs to be considered + name TEXT NOT NULL, + description TEXT, + contrib_requirements contrib_requirement NOT NULL, + filesystem_path TEXT NOT NULL, -- does not have to be unique, double-mounting is allowed + UNIQUE(group_id, name) +); +CREATE INDEX grepos_group_idx ON repos(group_id); + +CREATE TABLE mailing_lists ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE RESTRICT, + name TEXT NOT NULL, + description TEXT, + UNIQUE(group_id, name) +); +CREATE INDEX gmailing_lists_group_idx ON mailing_lists(group_id); + +CREATE TABLE mailing_list_emails ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + list_id BIGINT NOT NULL REFERENCES mailing_lists(id) ON DELETE CASCADE, + title TEXT NOT NULL, + sender TEXT NOT NULL, + date TIMESTAMPTZ NOT NULL, -- everything must be in UTC + message_id TEXT, -- no uniqueness guarantee as it's arbitrarily set by senders + content BYTEA NOT NULL +); + +DO $$ BEGIN + CREATE TYPE user_type AS ENUM ('pubkey_only','federated','registered','admin'); +EXCEPTION WHEN duplicate_object THEN END $$; +CREATE TABLE users ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + CONSTRAINT id_positive CHECK (id > 0), + username TEXT 
UNIQUE, -- NULL when, for example, pubkey_only + type user_type NOT NULL, + password_hash TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE TABLE ssh_public_keys ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + key_string TEXT NOT NULL, + CONSTRAINT unique_key_string EXCLUDE USING HASH (key_string WITH =) -- because apparently some haxxor like using rsa16384 keys which are too long for a simple UNIQUE constraint :D +); +CREATE INDEX gssh_keys_user_idx ON ssh_public_keys(user_id); + +CREATE TABLE sessions ( + session_id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + token_hash BYTEA UNIQUE NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + expires_at TIMESTAMPTZ NOT NULL +); +CREATE INDEX gsessions_user_idx ON sessions(user_id); + +DO $$ BEGIN + CREATE TYPE group_role AS ENUM ('owner'); -- just owner for now, might need to rethink ACL altogether later; might consider using a join table if we need it to be dynamic, but enum suffices for now +EXCEPTION WHEN duplicate_object THEN END $$; +CREATE TABLE user_group_roles ( + group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE CASCADE, + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + role group_role NOT NULL, + PRIMARY KEY(user_id, group_id) +); +CREATE INDEX gugr_group_idx ON user_group_roles(group_id); + +CREATE TABLE federated_identities ( + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE RESTRICT, + service TEXT NOT NULL, -- might need to constrain + remote_username TEXT NOT NULL, + PRIMARY KEY(user_id, service), + UNIQUE(service, remote_username) +); + +CREATE TABLE ticket_trackers ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE RESTRICT, + name TEXT NOT NULL, + description TEXT, + UNIQUE(group_id, name) +); + +CREATE TABLE tickets ( + id BIGINT 
GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + tracker_id BIGINT NOT NULL REFERENCES ticket_trackers(id) ON DELETE CASCADE, + tracker_local_id BIGINT NOT NULL, + title TEXT NOT NULL, + description TEXT, + UNIQUE(tracker_id, tracker_local_id) +); + +CREATE OR REPLACE FUNCTION create_tracker_ticket_sequence() +RETURNS TRIGGER AS $$ +DECLARE + seq_name TEXT := format('tracker_ticket_seq_%s', NEW.id); +BEGIN + EXECUTE format('CREATE SEQUENCE g%I', seq_name); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION drop_tracker_ticket_sequence() +RETURNS TRIGGER AS $$ +DECLARE + seq_name TEXT := format('tracker_ticket_seq_%s', OLD.id); +BEGIN + EXECUTE format('DROP SEQUENCE IF EXISTS %I', seq_name); + RETURN OLD; +END; +$$ LANGUAGE plpgsql; +DROP TRIGGER IF EXISTS after_insert_ticket_tracker ON ticket_trackers; +CREATE TRIGGER after_insert_ticket_tracker +AFTER INSERT ON ticket_trackers +FOR EACH ROW +EXECUTE FUNCTION create_tracker_ticket_sequence(); +DROP TRIGGER IF EXISTS before_delete_ticket_tracker ON ticket_trackers; +CREATE TRIGGER before_delete_ticket_tracker +BEFORE DELETE ON ticket_trackers +FOR EACH ROW +EXECUTE FUNCTION drop_tracker_ticket_sequence(); +CREATE OR REPLACE FUNCTION assign_tracker_local_id() +RETURNS TRIGGER AS $$ +DECLARE + seq_name TEXT := format('tracker_ticket_seq_%s', NEW.tracker_id); +BEGIN + IF NEW.tracker_local_id IS NULL THEN + EXECUTE format('SELECT nextval(%L)', seq_name) INTO NEW.tracker_local_id; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +DROP TRIGGER IF EXISTS before_insert_ticket ON tickets; +CREATE TRIGGER before_insert_ticket +BEFORE INSERT ON tickets +FOR EACH ROW +EXECUTE FUNCTION assign_tracker_local_id(); +CREATE INDEX gtickets_tracker_idx ON tickets(tracker_id); + +DO $$ BEGIN + CREATE TYPE mr_status AS ENUM ('open','merged','closed'); +EXCEPTION WHEN duplicate_object THEN END $$; + +CREATE TABLE merge_requests ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + repo_id BIGINT NOT NULL REFERENCES 
repos(id) ON DELETE CASCADE, + repo_local_id BIGINT NOT NULL, + title TEXT NOT NULL, + creator BIGINT REFERENCES users(id) ON DELETE SET NULL, + source_repo BIGINT NOT NULL REFERENCES repos(id) ON DELETE RESTRICT, + source_ref TEXT NOT NULL, + destination_branch TEXT, + status mr_status NOT NULL, + UNIQUE (repo_id, repo_local_id) +); +CREATE UNIQUE INDEX gmr_open_src_dst_uniq + ON merge_requests (repo_id, source_repo, source_ref, coalesce(destination_branch, '')) + WHERE status = 'open'; +CREATE INDEX gmr_repo_idx ON merge_requests(repo_id); +CREATE INDEX gmr_creator_idx ON merge_requests(creator); +CREATE OR REPLACE FUNCTION create_repo_mr_sequence() +RETURNS TRIGGER AS $$ +DECLARE + seq_name TEXT := format('repo_mr_seq_%s', NEW.id); +BEGIN + EXECUTE format('CREATE SEQUENCE g%I', seq_name); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION drop_repo_mr_sequence() +RETURNS TRIGGER AS $$ +DECLARE + seq_name TEXT := format('repo_mr_seq_%s', OLD.id); +BEGIN + EXECUTE format('DROP SEQUENCE IF EXISTS %I', seq_name); + RETURN OLD; +END; +$$ LANGUAGE plpgsql; +DROP TRIGGER IF EXISTS after_insert_repo ON repos; +CREATE TRIGGER after_insert_repo +AFTER INSERT ON repos +FOR EACH ROW +EXECUTE FUNCTION create_repo_mr_sequence(); +DROP TRIGGER IF EXISTS before_delete_repo ON repos; +CREATE TRIGGER before_delete_repo +BEFORE DELETE ON repos +FOR EACH ROW +EXECUTE FUNCTION drop_repo_mr_sequence(); +CREATE OR REPLACE FUNCTION assign_repo_local_id() +RETURNS TRIGGER AS $$ +DECLARE + seq_name TEXT := format('repo_mr_seq_%s', NEW.repo_id); +BEGIN + IF NEW.repo_local_id IS NULL THEN + EXECUTE format('SELECT nextval(%L)', seq_name) INTO NEW.repo_local_id; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +DROP TRIGGER IF EXISTS before_insert_merge_request ON merge_requests; +CREATE TRIGGER before_insert_merge_request +BEFORE INSERT ON merge_requests +FOR EACH ROW +EXECUTE FUNCTION assign_repo_local_id(); diff --git a/forged/sqlc.yaml b/forged/sqlc.yaml new file 
mode 100644 index 0000000..2b6e035 --- /dev/null +++ b/forged/sqlc.yaml @@ -0,0 +1,15 @@ +version: "2" +sql: + - engine: "postgresql" + schema: "sql/schema.sql" + queries: "sql/queries" + gen: + go: + package: "queries" + out: "internal/database/queries" + sql_package: "pgx/v5" + emit_json_tags: true + emit_db_tags: true + emit_prepared_queries: true + emit_pointers_for_null_types: true + emit_enum_valid_method: true diff --git a/forged/static/style.css b/forged/static/style.css index 4923771..f70fe69 100644 --- a/forged/static/style.css +++ b/forged/static/style.css @@ -18,33 +18,33 @@ html { background-color: var(--background-color); color: var(--text-color); font-size: 1rem; - --background-color: hsl(0, 0%, 100%); - --text-color: hsl(0, 0%, 0%); - --link-color: hsl(320, 50%, 36%); - --light-text-color: hsl(0, 0%, 45%); - --darker-border-color: hsl(0, 0%, 72%); - --lighter-border-color: hsl(0, 0%, 85%); - --text-decoration-color: hsl(0, 0%, 72%); - --darker-box-background-color: hsl(0, 0%, 92%); - --lighter-box-background-color: hsl(0, 0%, 95%); - --primary-color: hsl(320, 50%, 36%); - --primary-color-contrast: hsl(320, 0%, 100%); - --danger-color: #ff0000; - --danger-color-contrast: #ffffff; + --background-color: oklch(1.000 0.000 0.0); + --text-color: oklch(0.000 0.000 0.0); + --link-color: oklch(0.457 0.143 343.4); + --light-text-color: oklch(0.555 0.000 0.0); + --darker-border-color: oklch(0.781 0.000 0.0); + --lighter-border-color: oklch(0.885 0.000 0.0); + --text-decoration-color: oklch(0.781 0.000 0.0); + --darker-box-background-color: oklch(0.939 0.000 0.0); + --lighter-box-background-color: oklch(0.962 0.000 0.0); + --primary-color: oklch(0.457 0.143 343.4); + --primary-color-contrast: oklch(1.000 0.000 0.0); + --danger-color: oklch(0.628 0.258 29.2); + --danger-color-contrast: oklch(1.000 0.000 0.0); } /* Dark mode overrides */ @media (prefers-color-scheme: dark) { html { - --background-color: hsl(0, 0%, 0%); - --text-color: hsl(0, 0%, 100%); - 
--link-color: hsl(320, 50%, 76%); - --light-text-color: hsl(0, 0%, 78%); - --darker-border-color: hsl(0, 0%, 35%); - --lighter-border-color: hsl(0, 0%, 25%); - --text-decoration-color: hsl(0, 0%, 50%); - --darker-box-background-color: hsl(0, 0%, 20%); - --lighter-box-background-color: hsl(0, 0%, 15%); + --background-color: oklch(0.000 0.000 0.0); + --text-color: oklch(1.000 0.000 0.0); + --link-color: oklch(0.786 0.089 339.4); + --light-text-color: oklch(0.829 0.000 0.0); + --darker-border-color: oklch(0.465 0.000 0.0); + --lighter-border-color: oklch(0.371 0.000 0.0); + --text-decoration-color: oklch(0.598 0.000 0.0); + --darker-box-background-color: oklch(0.321 0.000 0.0); + --lighter-box-background-color: oklch(0.270 0.000 0.0); } } diff --git a/forged/templates/_footer.tmpl b/forged/templates/_footer.tmpl index 22a3958..11e2365 100644 --- a/forged/templates/_footer.tmpl +++ b/forged/templates/_footer.tmpl @@ -4,7 +4,7 @@ */}} {{- define "footer" -}} Lindenii Forge -{{ .global.forge_version }} +{{ .BaseData.Global.ForgeVersion }} (upstream, license, support) diff --git a/forged/templates/_group_view.tmpl b/forged/templates/_group_view.tmpl index 92b6639..de5d45d 100644 --- a/forged/templates/_group_view.tmpl +++ b/forged/templates/_group_view.tmpl @@ -3,7 +3,7 @@ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu */}} {{- define "group_view" -}} -{{- if .subgroups -}} +{{- if .Subgroups -}} @@ -15,7 +15,7 @@ - {{- range .subgroups -}} + {{- range .Subgroups -}}
{{- .Name -}} @@ -28,7 +28,7 @@
{{- end -}} -{{- if .repos -}} +{{- if .Repos -}} @@ -40,7 +40,7 @@ - {{- range .repos -}} + {{- range .Repos -}}
{{- .Name -}} diff --git a/forged/templates/_header.tmpl b/forged/templates/_header.tmpl index 340a2ac..39d3491 100644 --- a/forged/templates/_header.tmpl +++ b/forged/templates/_header.tmpl @@ -5,15 +5,15 @@ {{- define "header" -}}
- {{- if ne .user_id_string "" -}} - {{- .username -}} + {{- if ne .BaseData.UserID "" -}} + {{- .BaseData.Username -}} {{- else -}} Login {{- end -}} diff --git a/forged/templates/group.tmpl b/forged/templates/group.tmpl index 3338f9b..31b7169 100644 --- a/forged/templates/group.tmpl +++ b/forged/templates/group.tmpl @@ -3,23 +3,23 @@ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu */}} {{- define "group" -}} -{{- $group_path := .group_path -}} +{{- $group_path := .BaseData.GroupPath -}} {{- template "head_common" . -}} - {{- range $i, $s := .group_path -}}{{- $s -}}{{- if ne $i (len $group_path) -}}/{{- end -}}{{- end }} – {{ .global.forge_title -}} + {{- range $i, $s := $group_path -}}{{- $s -}}{{- if ne $i (len $group_path) -}}/{{- end -}}{{- end }} – {{ .BaseData.Global.ForgeTitle -}} {{- template "header" . -}}
- {{- if .description -}} -

{{- .description -}}

+ {{- if .Description -}} +

{{- .Description -}}

{{- end -}} {{- template "group_view" . -}}
- {{- if .direct_access -}} + {{- if .DirectAccess -}}
diff --git a/forged/templates/index.tmpl b/forged/templates/index.tmpl index 66bd177..fa9b6a0 100644 --- a/forged/templates/index.tmpl +++ b/forged/templates/index.tmpl @@ -7,7 +7,7 @@ {{- template "head_common" . -}} - Index – {{ .global.forge_title -}} + Index – {{ .BaseData.Global.ForgeTitle -}} {{- template "header" . -}} @@ -24,7 +24,7 @@ - {{- range .groups -}} + {{- range .Groups -}} - + - +
{{- .Name -}} @@ -47,11 +47,11 @@
SSH public key{{- .global.server_public_key_string -}}{{- .BaseData.Global.SSHPubkey -}}
SSH fingerprint{{- .global.server_public_key_fingerprint -}}{{- .BaseData.Global.SSHFingerprint -}}
diff --git a/forged/templates/login.tmpl b/forged/templates/login.tmpl index 980b863..09cbb61 100644 --- a/forged/templates/login.tmpl +++ b/forged/templates/login.tmpl @@ -7,11 +7,11 @@ {{- template "head_common" . -}} - Login – {{ .global.forge_title -}} + Login – {{ .BaseData.Global.ForgeTitle -}}
- {{- .login_error -}} + {{- .LoginError -}}
-- cgit v1.2.3