aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--.editorconfig19
-rw-r--r--.gitattributes6
-rw-r--r--.gitignore5
-rw-r--r--LICENSE.APACHE203
-rw-r--r--LICENSE.BSD22
-rw-r--r--LICENSE.GPL675
-rw-r--r--LICENSE.MIT18
-rw-r--r--Makefile45
-rw-r--r--NOTES.md315
-rw-r--r--README.md60
-rw-r--r--forge.scfg108
-rw-r--r--forged/.golangci.yaml42
-rw-r--r--forged/internal/ansiec/ansiec.go5
-rw-r--r--forged/internal/ansiec/colors.go26
-rw-r--r--forged/internal/ansiec/reset.go6
-rw-r--r--forged/internal/ansiec/style.go11
-rw-r--r--forged/internal/argon2id/argon2id.go185
-rw-r--r--forged/internal/bare/errors.go20
-rw-r--r--forged/internal/bare/limit.go58
-rw-r--r--forged/internal/bare/marshal.go311
-rw-r--r--forged/internal/bare/package.go8
-rw-r--r--forged/internal/bare/reader.go190
-rw-r--r--forged/internal/bare/unions.go79
-rw-r--r--forged/internal/bare/unmarshal.go362
-rw-r--r--forged/internal/bare/varint.go30
-rw-r--r--forged/internal/bare/writer.go121
-rw-r--r--forged/internal/cmap/comparable_map.go539
-rw-r--r--forged/internal/cmap/map.go446
-rw-r--r--forged/internal/database/database.go25
-rw-r--r--forged/internal/embed/.gitignore7
-rw-r--r--forged/internal/embed/embed.go20
-rw-r--r--forged/internal/git2c/client.go46
-rw-r--r--forged/internal/git2c/cmd_index.go65
-rw-r--r--forged/internal/git2c/cmd_treeraw.go94
-rw-r--r--forged/internal/git2c/git_types.go28
-rw-r--r--forged/internal/git2c/perror.go48
-rw-r--r--forged/internal/humanize/bytes.go35
-rw-r--r--forged/internal/irc/bot.go176
-rw-r--r--forged/internal/irc/conn.go49
-rw-r--r--forged/internal/irc/errors.go8
-rw-r--r--forged/internal/irc/message.go126
-rw-r--r--forged/internal/irc/source.go50
-rw-r--r--forged/internal/misc/back.go11
-rw-r--r--forged/internal/misc/deploy.go22
-rw-r--r--forged/internal/misc/iter.go23
-rw-r--r--forged/internal/misc/misc.go18
-rw-r--r--forged/internal/misc/panic.go19
-rw-r--r--forged/internal/misc/trivial.go48
-rw-r--r--forged/internal/misc/unsafe.go20
-rw-r--r--forged/internal/misc/url.go155
-rw-r--r--forged/internal/oldgit/fmtpatch.go56
-rw-r--r--forged/internal/oldgit/oldgit.go5
-rw-r--r--forged/internal/oldgit/patch.go43
-rw-r--r--forged/internal/render/chroma.go41
-rw-r--r--forged/internal/render/escape.go14
-rw-r--r--forged/internal/render/readme.go34
-rw-r--r--forged/internal/render/render.go5
-rw-r--r--forged/internal/scfg/.golangci.yaml26
-rw-r--r--forged/internal/scfg/reader.go157
-rw-r--r--forged/internal/scfg/scfg.go59
-rw-r--r--forged/internal/scfg/struct.go82
-rw-r--r--forged/internal/scfg/unmarshal.go375
-rw-r--r--forged/internal/scfg/writer.go112
-rw-r--r--forged/internal/unsorted/acl.go59
-rw-r--r--forged/internal/unsorted/config.go94
-rw-r--r--forged/internal/unsorted/database.go43
-rw-r--r--forged/internal/unsorted/fedauth.go97
-rw-r--r--forged/internal/unsorted/git_hooks_handle_linux.go377
-rw-r--r--forged/internal/unsorted/git_hooks_handle_other.go336
-rw-r--r--forged/internal/unsorted/git_init.go34
-rw-r--r--forged/internal/unsorted/git_misc.go95
-rw-r--r--forged/internal/unsorted/git_plumbing.go188
-rw-r--r--forged/internal/unsorted/git_ref.go37
-rw-r--r--forged/internal/unsorted/http_auth.go26
-rw-r--r--forged/internal/unsorted/http_handle_branches.go46
-rw-r--r--forged/internal/unsorted/http_handle_group_index.go196
-rw-r--r--forged/internal/unsorted/http_handle_index.go26
-rw-r--r--forged/internal/unsorted/http_handle_login.go108
-rw-r--r--forged/internal/unsorted/http_handle_repo_commit.go146
-rw-r--r--forged/internal/unsorted/http_handle_repo_contrib_index.go52
-rw-r--r--forged/internal/unsorted/http_handle_repo_contrib_one.go98
-rw-r--r--forged/internal/unsorted/http_handle_repo_index.go41
-rw-r--r--forged/internal/unsorted/http_handle_repo_info.go107
-rw-r--r--forged/internal/unsorted/http_handle_repo_log.go39
-rw-r--r--forged/internal/unsorted/http_handle_repo_raw.go56
-rw-r--r--forged/internal/unsorted/http_handle_repo_tree.go55
-rw-r--r--forged/internal/unsorted/http_handle_repo_upload_pack.go107
-rw-r--r--forged/internal/unsorted/http_handle_users.go15
-rw-r--r--forged/internal/unsorted/http_server.go276
-rw-r--r--forged/internal/unsorted/http_template.go18
-rw-r--r--forged/internal/unsorted/lmtp_handle_patch.go133
-rw-r--r--forged/internal/unsorted/lmtp_server.go204
-rw-r--r--forged/internal/unsorted/remote_url.go25
-rw-r--r--forged/internal/unsorted/resources.go56
-rw-r--r--forged/internal/unsorted/server.go236
-rw-r--r--forged/internal/unsorted/ssh_handle_receive_pack.go131
-rw-r--r--forged/internal/unsorted/ssh_handle_upload_pack.go39
-rw-r--r--forged/internal/unsorted/ssh_server.go96
-rw-r--r--forged/internal/unsorted/ssh_utils.go79
-rw-r--r--forged/internal/unsorted/unsorted.go5
-rw-r--r--forged/internal/unsorted/users.go35
-rw-r--r--forged/internal/unsorted/version.go6
-rw-r--r--forged/internal/web/error_pages.go60
-rw-r--r--forged/internal/web/web.go5
-rw-r--r--forged/main.go27
-rw-r--r--forged/static/.gitignore2
-rw-r--r--forged/static/chroma.css152
-rw-r--r--forged/static/style.css (renamed from static/style.css)317
-rw-r--r--forged/templates/400.tmpl25
-rw-r--r--forged/templates/400_colon.tmpl26
-rw-r--r--forged/templates/403.tmpl25
-rw-r--r--forged/templates/404.tmpl24
-rw-r--r--forged/templates/451.tmpl25
-rw-r--r--forged/templates/500.tmpl25
-rw-r--r--forged/templates/501.tmpl24
-rw-r--r--forged/templates/_footer.tmpl12
-rw-r--r--forged/templates/_group_path.tmpl8
-rw-r--r--forged/templates/_group_view.tmpl56
-rw-r--r--forged/templates/_head.tmpl9
-rw-r--r--forged/templates/_header.tmpl35
-rw-r--r--forged/templates/_ref_query.tmpl3
-rw-r--r--forged/templates/group.tmpl80
-rw-r--r--forged/templates/index.tmpl63
-rw-r--r--forged/templates/login.tmpl59
-rw-r--r--forged/templates/repo_branches.tmpl71
-rw-r--r--forged/templates/repo_commit.tmpl117
-rw-r--r--forged/templates/repo_contrib_index.tmpl82
-rw-r--r--forged/templates/repo_contrib_one.tmpl123
-rw-r--r--forged/templates/repo_index.tmpl94
-rw-r--r--forged/templates/repo_log.tmpl90
-rw-r--r--forged/templates/repo_raw_dir.tmpl88
-rw-r--r--forged/templates/repo_tree_dir.tmpl93
-rw-r--r--forged/templates/repo_tree_file.tmpl65
-rw-r--r--git2d/.gitignore1
-rw-r--r--git2d/bare.c309
-rw-r--r--git2d/bare.h72
-rw-r--r--git2d/cmd1.c129
-rw-r--r--git2d/cmd2.c126
-rw-r--r--git2d/main.c82
-rw-r--r--git2d/rw.c34
-rw-r--r--git2d/session.c78
-rw-r--r--git2d/x.h38
-rw-r--r--global.ha11
-rw-r--r--go.mod50
-rw-r--r--go.sum186
-rw-r--r--hookc/.gitignore1
-rw-r--r--hookc/hookc.c310
-rw-r--r--main.ha65
-rw-r--r--req.ha104
-rwxr-xr-xscripts/update_deps9
-rw-r--r--sql/schema.sql195
-rw-r--r--templates/_footer.htmpl9
-rw-r--r--templates/_head_common.htmpl5
-rw-r--r--templates/_header.htmpl14
-rw-r--r--templates/index.htmpl50
-rw-r--r--url.ha20
-rw-r--r--utils/.gitignore1
-rw-r--r--utils/colb.c28
158 files changed, 12981 insertions, 665 deletions
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..5e44971
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: AGPL-3.0-only
+# SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+root = true
+
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = tab
+indent_size = 8
+tab_size = 8
+
+[*.py]
+indent_style = space
+indent_size = 4
+
+[*.yaml]
+indent_style = space
+indent_size = 2
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..5418c8b
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,6 @@
+*.tmpl linguist-language=HTML
+* linguist-detectable
+go.mod -linguist-detectable
+go.sum -linguist-detectable
+.golangci.yaml -linguist-detectable
+.build.yml -linguist-detectable
diff --git a/.gitignore b/.gitignore
index c9ce4a7..95c0847 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
/forge
-/.templates.ha
-/.version.ha
+/source.tar.gz
+*.c.BAK
+*.o
diff --git a/LICENSE.APACHE b/LICENSE.APACHE
new file mode 100644
index 0000000..6b0b127
--- /dev/null
+++ b/LICENSE.APACHE
@@ -0,0 +1,203 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/LICENSE.BSD b/LICENSE.BSD
new file mode 100644
index 0000000..d5dfee8
--- /dev/null
+++ b/LICENSE.BSD
@@ -0,0 +1,22 @@
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/LICENSE.GPL b/LICENSE.GPL
new file mode 100644
index 0000000..53d1f3d
--- /dev/null
+++ b/LICENSE.GPL
@@ -0,0 +1,675 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
+
diff --git a/LICENSE.MIT b/LICENSE.MIT
new file mode 100644
index 0000000..3649823
--- /dev/null
+++ b/LICENSE.MIT
@@ -0,0 +1,18 @@
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Makefile b/Makefile
index a36e9de..65d6b07 100644
--- a/Makefile
+++ b/Makefile
@@ -1,10 +1,41 @@
-forge: .version.ha .templates.ha *.ha
- hare build $(HAREFLAGS) -o $@ .
+# SPDX-License-Identifier: AGPL-3.0-only
+# SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+#
+# TODO: This Makefile utilizes a lot of GNU extensions. Some of them are
+# unfortunately difficult to avoid as POSIX Make's pattern rules are not
+# sufficiently expressive. This needs to be fixed sometime (or we might move to
+# some other build system).
+#
-.templates.ha: templates/*.htmpl
- htmplgen -o $@ $^
+.PHONY: clean
-.version.ha:
- printf 'def VERSION="%s";\n' $(shell git describe --tags --always --dirty) > $@
+CFLAGS = -Wall -Wextra -pedantic -std=c99 -D_GNU_SOURCE
-.PHONY: version.ha
+VERSION = $(shell git describe --tags --always --dirty)
+SOURCE_FILES = $(shell git ls-files)
+EMBED = git2d/git2d hookc/hookc source.tar.gz $(wildcard LICENSE*) $(wildcard forged/static/*) $(wildcard forged/templates/*)
+EMBED_ = $(EMBED:%=forged/internal/embed/%)
+
+forge: $(EMBED_) $(SOURCE_FILES)
+ CGO_ENABLED=0 go build -o forge -ldflags '-extldflags "-f no-PIC -static" -X "go.lindenii.runxiyu.org/forge/forged/internal/unsorted.version=$(VERSION)"' -tags 'osusergo netgo static_build' ./forged
+
+utils/colb:
+
+hookc/hookc:
+
+git2d/git2d: $(wildcard git2d/*.c)
+ $(CC) $(CFLAGS) -o git2d/git2d $^ $(shell pkg-config --cflags --libs libgit2) -lpthread
+
+clean:
+ rm -rf forge utils/colb hookc/hookc git2d/git2d source.tar.gz */*.o
+
+source.tar.gz: $(SOURCE_FILES)
+ rm -f source.tar.gz
+ git ls-files -z | xargs -0 tar -czf source.tar.gz
+
+forged/internal/embed/%: %
+ @mkdir -p $(shell dirname $@)
+ @cp $^ $@
+
+forged/internal/embed/.gitignore:
+ @touch $@
diff --git a/NOTES.md b/NOTES.md
deleted file mode 100644
index 98536f6..0000000
--- a/NOTES.md
+++ /dev/null
@@ -1,315 +0,0 @@
-# Lindenii Forge Development Notes
-
-You will need the following dependencies:
-
-- [hare](https://git.sr.ht/~sircmpwn/hare)
-- [hare-http](https://git.sr.ht/~sircmpwn/hare-http) with
- [various patches](https://lists.sr.ht/~sircmpwn/hare-dev/patches?search=from%3Arunxiyu+prefix%3Ahare-http)
-- [hare-htmpl](https://forge.runxiyu.org/hare/:/repos/hare-htmpl/)
- ([backup](https://git.sr.ht/~runxiyu/hare-htmpl))
-
-
-Also, you'll need various horrible patches for `net::uri` before that gets fixed:
-
-```
-diff --git a/net/uri/+test.ha b/net/uri/+test.ha
-index 345f41ee..63272d52 100644
---- a/net/uri/+test.ha
-+++ b/net/uri/+test.ha
-@@ -10,7 +10,7 @@ use net::ip;
- uri {
- scheme = "file",
- host = "",
-- path = "/my/path/to/file",
-+ raw_path = "/my/path/to/file",
- ...
- },
- )!;
-@@ -19,7 +19,7 @@ use net::ip;
- uri {
- scheme = "http",
- host = "harelang.org",
-- path = "/",
-+ raw_path = "/",
- ...
- },
- )!;
-@@ -38,7 +38,7 @@ use net::ip;
- scheme = "ldap",
- host = [13, 37, 73, 31]: ip::addr4,
- port = 1234,
-- path = "/",
-+ raw_path = "/",
- ...
- },
- )!;
-@@ -47,7 +47,7 @@ use net::ip;
- uri {
- scheme = "http",
- host = ip::parse("::1")!,
-- path = "/test",
-+ raw_path = "/test",
- ...
- },
- )!;
-@@ -58,7 +58,7 @@ use net::ip;
- uri {
- scheme = "urn",
- host = "",
-- path = "example:animal:ferret:nose",
-+ raw_path = "example:animal:ferret:nose",
- ...
- },
- )!;
-@@ -67,7 +67,7 @@ use net::ip;
- uri {
- scheme = "mailto",
- host = "",
-- path = "~sircmpwn/hare-dev@lists.sr.ht",
-+ raw_path = "~sircmpwn/hare-dev@lists.sr.ht",
- ...
- },
- )!;
-@@ -76,7 +76,7 @@ use net::ip;
- uri {
- scheme = "http",
- host = "",
-- path = "/foo/bar",
-+ raw_path = "/foo/bar",
- ...
- },
- )!;
-@@ -85,7 +85,7 @@ use net::ip;
- uri {
- scheme = "http",
- host = "",
-- path = "/",
-+ raw_path = "/",
- ...
- },
- )!;
-@@ -94,7 +94,7 @@ use net::ip;
- uri {
- scheme = "https",
- host = "sr.ht",
-- path = "/projects",
-+ raw_path = "/projects",
- query = "search=%23risc-v&sort=longest-active",
- fragment = "foo",
- ...
-@@ -105,7 +105,7 @@ use net::ip;
- uri {
- scheme = "https",
- host = "en.wiktionary.org",
-- path = "/wiki/おはよう",
-+ raw_path = "/wiki/%E3%81%8A%E3%81%AF%E3%82%88%E3%81%86",
- fragment = "Japanese",
- ...
- }
-@@ -135,11 +135,11 @@ use net::ip;
-
- @test fn percent_encoding() void = {
- test_uri(
-- "https://git%2esr.ht/~sircmpw%6e/hare#Build%20status",
-+ "https://git.sr.ht/~sircmpwn/hare#Build%20status",
- uri {
- scheme = "https",
- host = "git.sr.ht",
-- path = "/~sircmpwn/hare",
-+ raw_path = "/~sircmpwn/hare",
- fragment = "Build status",
- ...
- },
-@@ -152,7 +152,7 @@ use net::ip;
- uri {
- scheme = "ldap",
- host = ip::parse("2001:db8::7")!,
-- path = "/c=GB",
-+ raw_path = "/c=GB",
- query = "objectClass?one",
- ...
- },
-@@ -161,11 +161,11 @@ use net::ip;
-
- // https://bugs.chromium.org/p/chromium/issues/detail?id=841105
- test_uri(
-- "https://web-safety.net/..;@www.google.com:%3443",
-+ "https://web-safety.net/..;@www.google.com:443",
- uri {
- scheme = "https",
- host = "web-safety.net",
-- path = "/..;@www.google.com:443",
-+ raw_path = "/..;@www.google.com:443",
- ...
- },
- "https://web-safety.net/..;@www.google.com:443",
-@@ -180,6 +180,7 @@ fn test_uri(in: str, expected_uri: uri, expected_str: str) (void | invalid) = {
- const u = parse(in)?;
- defer finish(&u);
-
-+
- assert_str(u.scheme, expected_uri.scheme);
- match (u.host) {
- case let s: str =>
-@@ -189,7 +190,7 @@ fn test_uri(in: str, expected_uri: uri, expected_str: str) (void | invalid) = {
- };
- assert(u.port == expected_uri.port);
- assert_str(u.userinfo, expected_uri.userinfo);
-- assert_str(u.path, expected_uri.path);
-+ assert_str(u.raw_path, expected_uri.raw_path);
- assert_str(u.query, expected_uri.query);
- assert_str(u.fragment, expected_uri.fragment);
-
-diff --git a/net/uri/fmt.ha b/net/uri/fmt.ha
-index 48a43f24..07cb3f7b 100644
---- a/net/uri/fmt.ha
-+++ b/net/uri/fmt.ha
-@@ -20,9 +20,9 @@ use strings;
- // query = *( pchar / "/" / "?" )
- // fragment = *( pchar / "/" / "?" )
-
--def unres_host: str = "-._~!$&'()*+,;=";
--def unres_query_frag: str = "-._~!$&'()*+,;=:@/?";
--def unres_path: str = "-._~!$&'()*+,;=:@/";
-+export def unres_host: str = "-._~!$&'()*+,;=";
-+export def unres_query_frag: str = "-._~!$&'()*+,;=:@/?";
-+export def unres_path: str = "-._~!$&'()*+,;=:@/";
-
- // Writes a formatted [[uri]] to an [[io::handle]]. Returns the number of bytes
- // written.
-@@ -63,10 +63,10 @@ export fn fmt(out: io::handle, u: *const uri) (size | io::error) = {
- if (u.port != 0) {
- n += fmt::fprintf(out, ":{}", u.port)?;
- };
-- if (has_host && len(u.path) > 0 && !strings::hasprefix(u.path, '/')) {
-+ if (has_host && len(u.raw_path) > 0 && !strings::hasprefix(u.raw_path, '/')) {
- n += fmt::fprint(out, "/")?;
- };
-- n += percent_encode(out, u.path, unres_path)?;
-+ n += memio::concat(out, u.raw_path)?;
- if (len(u.query) > 0) {
- // Always percent-encoded, see parse and encodequery/decodequery
- n += fmt::fprintf(out, "?{}", u.query)?;
-@@ -92,7 +92,7 @@ fn fmtaddr(out: io::handle, addr: ip::addr) (size | io::error) = {
- return n;
- };
-
--fn percent_encode(out: io::handle, src: str, allowed: str) (size | io::error) = {
-+export fn percent_encode(out: io::handle, src: str, allowed: str) (size | io::error) = {
- let iter = strings::iter(src);
- let n = 0z;
- for (let r => strings::next(&iter)) {
-diff --git a/net/uri/parse.ha b/net/uri/parse.ha
-index f2522c01..e108bd75 100644
---- a/net/uri/parse.ha
-+++ b/net/uri/parse.ha
-@@ -22,10 +22,10 @@ export fn parse(in: str) (uri | invalid) = {
- defer if (!success) free(scheme);
-
- // Determine hier-part variant
-- let path = "";
-+ let raw_path = "";
- let authority: ((str | ip::addr6), u16, str) = ("", 0u16, "");
- defer if (!success) {
-- free(path);
-+ free(raw_path);
- free_host(authority.0);
- free(authority.2);
- };
-@@ -50,7 +50,7 @@ export fn parse(in: str) (uri | invalid) = {
- case '/' =>
- // path-absolute
- strings::prev(&in);
-- path = parse_path(&in,
-+ raw_path = parse_path(&in,
- path_mode::ABSOLUTE)?;
- case =>
- return invalid;
-@@ -61,17 +61,17 @@ export fn parse(in: str) (uri | invalid) = {
- // path-absolute
- strings::prev(&in); // return current token
- strings::prev(&in); // return leading slash
-- path = parse_path(&in, path_mode::ABSOLUTE)?;
-+ raw_path = parse_path(&in, path_mode::ABSOLUTE)?;
- };
- case =>
- // path-absolute (just '/')
- strings::prev(&in); // return leading slash
-- path = parse_path(&in, path_mode::ABSOLUTE)?;
-+ raw_path = parse_path(&in, path_mode::ABSOLUTE)?;
- };
- case =>
- // path-rootless
- strings::prev(&in);
-- path = parse_path(&in, path_mode::ROOTLESS)?;
-+ raw_path = parse_path(&in, path_mode::ROOTLESS)?;
- };
- case => void; // path-empty
- };
-@@ -118,7 +118,7 @@ export fn parse(in: str) (uri | invalid) = {
- port = authority.1,
- userinfo = authority.2,
-
-- path = path,
-+ raw_path = raw_path,
- query = query,
- fragment = fragment,
- };
-@@ -274,7 +274,7 @@ fn parse_path(in: *strings::iterator, mode: path_mode) (str | invalid) = {
- };
- };
-
-- return percent_decode(strings::slice(&copy, in));
-+ return strings::dup(strings::slice(&copy, in))!;
- };
-
- fn parse_query(in: *strings::iterator) (str | invalid) = {
-@@ -323,13 +323,14 @@ fn parse_port(in: *strings::iterator) (u16 | invalid) = {
- };
- };
-
--fn percent_decode(s: str) (str | invalid) = {
-+// must be freed by caller
-+export fn percent_decode(s: str) (str | invalid) = {
- let buf = memio::dynamic();
- percent_decode_static(&buf, s)?;
- return memio::string(&buf)!;
- };
-
--fn percent_decode_static(out: io::handle, s: str) (void | invalid) = {
-+export fn percent_decode_static(out: io::handle, s: str) (void | invalid) = {
- let iter = strings::iter(s);
- let tmp = memio::dynamic();
- defer io::close(&tmp)!;
-diff --git a/net/uri/uri.ha b/net/uri/uri.ha
-index 623ffafb..3b7b7c4c 100644
---- a/net/uri/uri.ha
-+++ b/net/uri/uri.ha
-@@ -12,7 +12,7 @@ export type uri = struct {
- port: u16,
- userinfo: str,
-
-- path: str,
-+ raw_path: str,
- query: str,
- fragment: str,
- };
-@@ -31,7 +31,7 @@ export fn dup(u: *uri) uri = {
- port = u.port,
- userinfo = strings::dup(u.userinfo)!,
-
-- path = strings::dup(u.path)!,
-+ raw_path = strings::dup(u.raw_path)!,
- query = strings::dup(u.query)!,
- fragment = strings::dup(u.fragment)!,
- };
-@@ -46,7 +46,7 @@ export fn finish(u: *uri) void = {
- case => void;
- };
- free(u.userinfo);
-- free(u.path);
-+ free(u.raw_path);
- free(u.query);
- free(u.fragment);
- };
-```
diff --git a/README.md b/README.md
index e5493e2..94442dd 100644
--- a/README.md
+++ b/README.md
@@ -2,13 +2,57 @@
**Work in progress.**
-This is the new implementation in the [Hare](https://harelang.org) programming
-language.
+Lindenii Forge aims to be an uncomplicated yet featured software forge,
+primarily designed for self-hosting by small organizations and individuals.
-## Architecture
+* [Upstream source repository](https://forge.lindenii.runxiyu.org/forge/-/repos/server/)
+ ([backup](https://git.lindenii.runxiyu.org/forge.git/))
+* [Website and documentation](https://lindenii.runxiyu.org/forge/)
+* [Temporary issue tracker](https://todo.sr.ht/~runxiyu/forge)
+* IRC [`#lindenii`](https://webirc.runxiyu.org/kiwiirc/#lindenii)
+ on [irc.runxiyu.org](https://irc.runxiyu.org)\
+ and [`#lindenii`](https://web.libera.chat/#lindenii)
+ on [Libera.Chat](https://libera.chat)
-* Most components are one single daemon written in Hare.
-* Because libssh is difficult to use and there aren't many other SSH server
- libraries for C or Hare, we will temporarily use
- [the gliberlabs SSH library for Go](https://github.com/gliderlabs/ssh)
- in a separate process, and communicate via UNIX domain sockets.
+## Implemented features
+
+* Unambiguously parsable URL
+* Groups and subgroups
+* Repo hosting
+* Push to `contrib/` branches to automatically create merge requests
+* Basic federated authentication
+* Converting mailed patches to branches
+
+## Planned features
+
+* Further integration with mailing list workflows
+* Ticket trackers and discussions
+ * Web interface
+ * Email integration with IMAP archives
+* SSH API
+* Email access
+
+## License
+
+We are currently using the
+[GNU Affero General Public License version 3](https://www.gnu.org/licenses/agpl-3.0.html).
+
+The forge software serves its own source at `/-/source/`.
+
+## Contribute
+
+Please submit patches by pushing to `contrib/...` in the official repo.
+
+Alternatively, send email to
+[`forge/-/repos/server@forge.lindenii.runxiyu.org`](mailto:forge%2F-%2Frepos%2Fserver@forge.lindenii.runxiyu.org).
+Note that emailing patches is still experimental.
+
+## Mirrors
+
+We have several repo mirrors:
+
+* [Official repo on our own instance of Lindenii Forge](https://forge.lindenii.runxiyu.org/forge/-/repos/server/)
+* [The Lindenii Project's backup cgit](https://git.lindenii.runxiyu.org/forge.git/)
+* [SourceHut](https://git.sr.ht/~runxiyu/forge/)
+* [Codeberg](https://codeberg.org/lindenii/forge/)
+* [GitHub](https://github.com/runxiyu/forge/)
diff --git a/forge.scfg b/forge.scfg
new file mode 100644
index 0000000..1c8eeb9
--- /dev/null
+++ b/forge.scfg
@@ -0,0 +1,108 @@
+http {
+ # What network transport should we listen on?
+ # Examples: tcp tcp4 tcp6 unix
+ net tcp
+
+ # What address to listen on?
+ # Examples for net tcp*: 127.0.0.1:8080 :80
+ # Example for unix: /var/run/lindenii/forge/http.sock
+ addr :8080
+
+ # How many seconds should cookies be remembered before they are purged?
+ cookie_expiry 604800
+
+ # What is the canonical URL of the web root?
+ root https://forge.example.org
+
+ # General HTTP server context timeout settings. It's recommended to
+ # set them slightly higher than usual as Git operations over large
+ # repos may take a long time.
+ read_timeout 120
+ write_timeout 1800
+ idle_timeout 120
+
+ # Are we running behind a reverse proxy? If so, we will trust
+ # X-Forwarded-For headers.
+ reverse_proxy true
+}
+
+irc {
+ tls true
+ net tcp
+ addr irc.runxiyu.org:6697
+ sendq 6000
+ nick forge-test
+ user forge
+ gecos "Lindenii Forge Test"
+}
+
+git {
+ # Where should newly-created Git repositories be stored?
+ repo_dir /var/lib/lindenii/forge/repos
+
+ # Where should git2d listen on?
+ socket /var/run/lindenii/forge/git2d.sock
+
+ # Where should we put git2d?
+ daemon_path /usr/libexec/lindenii/forge/git2d
+}
+
+ssh {
+ # What network transport should we listen on?
+ # This should be "tcp" in almost all cases.
+ net tcp
+
+ # What address to listen on?
+ addr :22
+
+ # What is the path to the SSH host key? Generate it with ssh-keygen.
+ # The key must have an empty password.
+ key /etc/lindenii/ssh_host_ed25519_key
+
+ # What is the canonical SSH URL?
+ root ssh://forge.example.org
+}
+
+general {
+ title "Test Forge"
+}
+
+db {
+ # What type of database are we connecting to?
+ # Currently only "postgres" is supported.
+ type postgres
+
+ # What is the connection string?
+ conn postgresql:///lindenii-forge?host=/var/run/postgresql
+}
+
+hooks {
+ # On which UNIX domain socket should we listen for hook callbacks on?
+ socket /var/run/lindenii/forge/hooks.sock
+
+ # Where should hook executables be put?
+ execs /usr/libexec/lindenii/forge/hooks
+}
+
+lmtp {
+ # On which UNIX domain socket should we listen for LMTP on?
+ socket /var/run/lindenii/forge/lmtp.sock
+
+ # What's the maximum acceptable message size?
+ max_size 1000000
+
+ # What is our domainpart?
+ domain forge.example.org
+
+ # General timeouts
+ read_timeout 300
+ write_timeout 300
+}
+
+pprof {
+ # What network to listen on for pprof?
+ net tcp
+
+ # What address to listen on?
+ addr localhost:28471
+}
diff --git a/forged/.golangci.yaml b/forged/.golangci.yaml
new file mode 100644
index 0000000..1c8c972
--- /dev/null
+++ b/forged/.golangci.yaml
@@ -0,0 +1,42 @@
+linters:
+ enable-all: true
+ disable:
+ - tenv
+ - depguard
+ - err113 # dynamically defined errors are fine for our purposes
+ - forcetypeassert # type assertion failures are usually programming errors
+ - gochecknoinits # we use inits sparingly for good reasons
+ - godox # they're just used as markers for where needs improvements
+ - ireturn # doesn't work well with how we use generics
+ - lll # long lines are acceptable
+ - mnd # it's a bit ridiculous to replace all of them
+ - nakedret # patterns should be consistent
+ - nonamedreturns # i like named returns
+ - wrapcheck # wrapping all errors is just not necessary
+ - varnamelen # "from" and "to" are very valid
+ - stylecheck
+ - containedctx
+ - godot
+ - dogsled
+ - maintidx # e
+ - nestif # e
+ - gocognit # e
+ - gocyclo # e
+ - dupl # e
+ - cyclop # e
+ - goconst # e
+ - funlen # e
+ - wsl # e
+ - nlreturn # e
+ - unused # e
+ - exhaustruct # e
+
+linters-settings:
+ revive:
+ rules:
+ - name: error-strings
+ disabled: true
+
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 0
diff --git a/forged/internal/ansiec/ansiec.go b/forged/internal/ansiec/ansiec.go
new file mode 100644
index 0000000..542c564
--- /dev/null
+++ b/forged/internal/ansiec/ansiec.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// Package ansiec provides definitions for ANSI escape sequences.
+package ansiec
diff --git a/forged/internal/ansiec/colors.go b/forged/internal/ansiec/colors.go
new file mode 100644
index 0000000..8e5f54b
--- /dev/null
+++ b/forged/internal/ansiec/colors.go
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package ansiec
+
+const (
+ Black = "\x1b[30m"
+ Red = "\x1b[31m"
+ Green = "\x1b[32m"
+ Yellow = "\x1b[33m"
+ Blue = "\x1b[34m"
+ Magenta = "\x1b[35m"
+ Cyan = "\x1b[36m"
+ White = "\x1b[37m"
+)
+
+const (
+ BrightBlack = "\x1b[30;1m"
+ BrightRed = "\x1b[31;1m"
+ BrightGreen = "\x1b[32;1m"
+ BrightYellow = "\x1b[33;1m"
+ BrightBlue = "\x1b[34;1m"
+ BrightMagenta = "\x1b[35;1m"
+ BrightCyan = "\x1b[36;1m"
+ BrightWhite = "\x1b[37;1m"
+)
diff --git a/forged/internal/ansiec/reset.go b/forged/internal/ansiec/reset.go
new file mode 100644
index 0000000..c5b6ba6
--- /dev/null
+++ b/forged/internal/ansiec/reset.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package ansiec
+
+const Reset = "\x1b[0m"
diff --git a/forged/internal/ansiec/style.go b/forged/internal/ansiec/style.go
new file mode 100644
index 0000000..dd37344
--- /dev/null
+++ b/forged/internal/ansiec/style.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package ansiec
+
+const (
+ Bold = "\x1b[1m"
+ Underline = "\x1b[4m"
+ Reversed = "\x1b[7m"
+ Italic = "\x1b[3m"
+)
diff --git a/forged/internal/argon2id/argon2id.go b/forged/internal/argon2id/argon2id.go
new file mode 100644
index 0000000..88df8f6
--- /dev/null
+++ b/forged/internal/argon2id/argon2id.go
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2018 Alex Edwards
+
+// Package argon2id provides a wrapper around Go's golang.org/x/crypto/argon2.
+package argon2id
+
+import (
+ "crypto/rand"
+ "crypto/subtle"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "runtime"
+ "strings"
+
+ "golang.org/x/crypto/argon2"
+)
+
+var (
+	// ErrInvalidHash is returned by ComparePasswordAndHash if the provided
+ // hash isn't in the expected format.
+ ErrInvalidHash = errors.New("argon2id: hash is not in the correct format")
+
+ // ErrIncompatibleVariant is returned by ComparePasswordAndHash if the
+	// provided hash was created using an unsupported variant of Argon2.
+ // Currently only argon2id is supported by this package.
+ ErrIncompatibleVariant = errors.New("argon2id: incompatible variant of argon2")
+
+ // ErrIncompatibleVersion is returned by ComparePasswordAndHash if the
+ // provided hash was created using a different version of Argon2.
+ ErrIncompatibleVersion = errors.New("argon2id: incompatible version of argon2")
+)
+
+// DefaultParams provides some sane default parameters for hashing passwords.
+//
+// Follows recommendations given by the Argon2 RFC:
+// "The Argon2id variant with t=1 and maximum available memory is RECOMMENDED as a
+// default setting for all environments. This setting is secure against side-channel
+// attacks and maximizes adversarial costs on dedicated bruteforce hardware."
+//
+// The default parameters should generally be used for development/testing purposes
+// only. Custom parameters should be set for production applications depending on
+// available memory/CPU resources and business requirements.
+var DefaultParams = &Params{
+ Memory: 64 * 1024,
+ Iterations: 1,
+ Parallelism: uint8(runtime.NumCPU()),
+ SaltLength: 16,
+ KeyLength: 32,
+}
+
+// Params describes the input parameters used by the Argon2id algorithm. The
+// Memory and Iterations parameters control the computational cost of hashing
+// the password. The higher these figures are, the greater the cost of generating
+// the hash and the longer the runtime. It also follows that the greater the cost
+// will be for any attacker trying to guess the password. If the code is running
+// on a machine with multiple cores, then you can decrease the runtime without
+// reducing the cost by increasing the Parallelism parameter. This controls the
+// number of threads that the work is spread across. Important note: Changing the
+// value of the Parallelism parameter changes the hash output.
+//
+// For guidance and an outline process for choosing appropriate parameters see
+// https://tools.ietf.org/html/draft-irtf-cfrg-argon2-04#section-4
+type Params struct {
+ // The amount of memory used by the algorithm (in kibibytes).
+ Memory uint32
+
+ // The number of iterations over the memory.
+ Iterations uint32
+
+ // The number of threads (or lanes) used by the algorithm.
+ // Recommended value is between 1 and runtime.NumCPU().
+ Parallelism uint8
+
+ // Length of the random salt. 16 bytes is recommended for password hashing.
+ SaltLength uint32
+
+ // Length of the generated key. 16 bytes or more is recommended.
+ KeyLength uint32
+}
+
+// CreateHash returns an Argon2id hash of a plain-text password using the
+// provided algorithm parameters. The returned hash follows the format used by
+// the Argon2 reference C implementation and contains the base64-encoded Argon2id
+// derived key prefixed by the salt and parameters. It looks like this:
+//
+// $argon2id$v=19$m=65536,t=3,p=2$c29tZXNhbHQ$RdescudvJCsgt3ub+b+dWRWJTmaaJObG
+func CreateHash(password string, params *Params) (hash string, err error) {
+ salt, err := generateRandomBytes(params.SaltLength)
+ if err != nil {
+ return "", err
+ }
+
+ key := argon2.IDKey([]byte(password), salt, params.Iterations, params.Memory, params.Parallelism, params.KeyLength)
+
+ b64Salt := base64.RawStdEncoding.EncodeToString(salt)
+ b64Key := base64.RawStdEncoding.EncodeToString(key)
+
+ hash = fmt.Sprintf("$argon2id$v=%d$m=%d,t=%d,p=%d$%s$%s", argon2.Version, params.Memory, params.Iterations, params.Parallelism, b64Salt, b64Key)
+ return hash, nil
+}
+
+// ComparePasswordAndHash performs a constant-time comparison between a
+// plain-text password and Argon2id hash, using the parameters and salt
+// contained in the hash. It returns true if they match, otherwise it returns
+// false.
+func ComparePasswordAndHash(password, hash string) (match bool, err error) {
+ match, _, err = CheckHash(password, hash)
+ return match, err
+}
+
+// CheckHash is like ComparePasswordAndHash, except it also returns the params that the hash was
+// created with. This can be useful if you want to update your hash params over time (which you
+// should).
+func CheckHash(password, hash string) (match bool, params *Params, err error) {
+ params, salt, key, err := DecodeHash(hash)
+ if err != nil {
+ return false, nil, err
+ }
+
+ otherKey := argon2.IDKey([]byte(password), salt, params.Iterations, params.Memory, params.Parallelism, params.KeyLength)
+
+ keyLen := int32(len(key))
+ otherKeyLen := int32(len(otherKey))
+
+ if subtle.ConstantTimeEq(keyLen, otherKeyLen) == 0 {
+ return false, params, nil
+ }
+ if subtle.ConstantTimeCompare(key, otherKey) == 1 {
+ return true, params, nil
+ }
+ return false, params, nil
+}
+
+func generateRandomBytes(n uint32) ([]byte, error) {
+ b := make([]byte, n)
+ _, err := rand.Read(b)
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+// DecodeHash expects a hash created from this package, and parses it to return the params used to
+// create it, as well as the salt and key (password hash).
+func DecodeHash(hash string) (params *Params, salt, key []byte, err error) {
+ vals := strings.Split(hash, "$")
+ if len(vals) != 6 {
+ return nil, nil, nil, ErrInvalidHash
+ }
+
+ if vals[1] != "argon2id" {
+ return nil, nil, nil, ErrIncompatibleVariant
+ }
+
+ var version int
+ _, err = fmt.Sscanf(vals[2], "v=%d", &version)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ if version != argon2.Version {
+ return nil, nil, nil, ErrIncompatibleVersion
+ }
+
+ params = &Params{}
+ _, err = fmt.Sscanf(vals[3], "m=%d,t=%d,p=%d", &params.Memory, &params.Iterations, &params.Parallelism)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ salt, err = base64.RawStdEncoding.Strict().DecodeString(vals[4])
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ params.SaltLength = uint32(len(salt))
+
+ key, err = base64.RawStdEncoding.Strict().DecodeString(vals[5])
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ params.KeyLength = uint32(len(key))
+
+ return params, salt, key, nil
+}
diff --git a/forged/internal/bare/errors.go b/forged/internal/bare/errors.go
new file mode 100644
index 0000000..39c951a
--- /dev/null
+++ b/forged/internal/bare/errors.go
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
+
+package bare
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+var ErrInvalidStr = errors.New("String contains invalid UTF-8 sequences")
+
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return fmt.Sprintf("Unsupported type for marshaling: %s\n", e.Type.String())
+}
diff --git a/forged/internal/bare/limit.go b/forged/internal/bare/limit.go
new file mode 100644
index 0000000..212bc05
--- /dev/null
+++ b/forged/internal/bare/limit.go
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
+
+package bare
+
+import (
+ "errors"
+ "io"
+)
+
+var (
+ maxUnmarshalBytes uint64 = 1024 * 1024 * 32 /* 32 MiB */
+ maxArrayLength uint64 = 1024 * 4 /* 4096 elements */
+ maxMapSize uint64 = 1024
+)
+
+// MaxUnmarshalBytes sets the maximum size of a message decoded by unmarshal.
+// By default, this is set to 32 MiB.
+func MaxUnmarshalBytes(bytes uint64) {
+ maxUnmarshalBytes = bytes
+}
+
+// MaxArrayLength sets maximum number of elements in array. Defaults to 4096 elements
+func MaxArrayLength(length uint64) {
+ maxArrayLength = length
+}
+
+// MaxMapSize sets maximum size of map. Defaults to 1024 key/value pairs
+func MaxMapSize(size uint64) {
+ maxMapSize = size
+}
+
+// Use MaxUnmarshalBytes to prevent this error from occurring on messages which
+// are large by design.
+var ErrLimitExceeded = errors.New("Maximum message size exceeded")
+
+// Identical to io.LimitedReader, except it returns our custom error instead of
+// EOF if the limit is reached.
+type limitedReader struct {
+ R io.Reader
+ N uint64
+}
+
+func (l *limitedReader) Read(p []byte) (n int, err error) {
+ if l.N <= 0 {
+ return 0, ErrLimitExceeded
+ }
+ if uint64(len(p)) > l.N {
+ p = p[0:l.N]
+ }
+ n, err = l.R.Read(p)
+ l.N -= uint64(n)
+ return
+}
+
+func newLimitedReader(r io.Reader) *limitedReader {
+ return &limitedReader{r, maxUnmarshalBytes}
+}
diff --git a/forged/internal/bare/marshal.go b/forged/internal/bare/marshal.go
new file mode 100644
index 0000000..1ce942d
--- /dev/null
+++ b/forged/internal/bare/marshal.go
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
+
+package bare
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+// A type which implements this interface will be responsible for marshaling
+// itself when encountered.
+type Marshalable interface {
+ Marshal(w *Writer) error
+}
+
+var encoderBufferPool = sync.Pool{
+ New: func() interface{} {
+ buf := &bytes.Buffer{}
+ buf.Grow(32)
+ return buf
+ },
+}
+
+// Marshals a value (val, which must be a pointer) into a BARE message.
+//
+// The encoding of each struct field can be customized by the format string
+// stored under the "bare" key in the struct field's tag.
+//
+// As a special case, if the field tag is "-", the field is always omitted.
+func Marshal(val interface{}) ([]byte, error) {
+ // reuse buffers from previous serializations
+ b := encoderBufferPool.Get().(*bytes.Buffer)
+ defer func() {
+ b.Reset()
+ encoderBufferPool.Put(b)
+ }()
+
+ w := NewWriter(b)
+ err := MarshalWriter(w, val)
+
+ msg := make([]byte, b.Len())
+ copy(msg, b.Bytes())
+
+ return msg, err
+}
+
+// Marshals a value (val, which must be a pointer) into a BARE message and
+// writes it to a Writer. See Marshal for details.
+func MarshalWriter(w *Writer, val interface{}) error {
+ t := reflect.TypeOf(val)
+ v := reflect.ValueOf(val)
+ if t.Kind() != reflect.Ptr {
+ return errors.New("Expected val to be pointer type")
+ }
+
+ return getEncoder(t.Elem())(w, v.Elem())
+}
+
+type encodeFunc func(w *Writer, v reflect.Value) error
+
+var encodeFuncCache sync.Map // map[reflect.Type]encodeFunc
+
+// get decoder from cache
+func getEncoder(t reflect.Type) encodeFunc {
+ if f, ok := encodeFuncCache.Load(t); ok {
+ return f.(encodeFunc)
+ }
+
+ f := encoderFunc(t)
+ encodeFuncCache.Store(t, f)
+ return f
+}
+
+var marshalableInterface = reflect.TypeOf((*Unmarshalable)(nil)).Elem()
+
+func encoderFunc(t reflect.Type) encodeFunc {
+ if reflect.PointerTo(t).Implements(marshalableInterface) {
+ return func(w *Writer, v reflect.Value) error {
+ uv := v.Addr().Interface().(Marshalable)
+ return uv.Marshal(w)
+ }
+ }
+
+ if t.Kind() == reflect.Interface && t.Implements(unionInterface) {
+ return encodeUnion(t)
+ }
+
+ switch t.Kind() {
+ case reflect.Ptr:
+ return encodeOptional(t.Elem())
+ case reflect.Struct:
+ return encodeStruct(t)
+ case reflect.Array:
+ return encodeArray(t)
+ case reflect.Slice:
+ return encodeSlice(t)
+ case reflect.Map:
+ return encodeMap(t)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return encodeUint
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return encodeInt
+ case reflect.Float32, reflect.Float64:
+ return encodeFloat
+ case reflect.Bool:
+ return encodeBool
+ case reflect.String:
+ return encodeString
+ }
+
+ return func(w *Writer, v reflect.Value) error {
+ return &UnsupportedTypeError{v.Type()}
+ }
+}
+
+func encodeOptional(t reflect.Type) encodeFunc {
+ return func(w *Writer, v reflect.Value) error {
+ if v.IsNil() {
+ return w.WriteBool(false)
+ }
+
+ if err := w.WriteBool(true); err != nil {
+ return err
+ }
+
+ return getEncoder(t)(w, v.Elem())
+ }
+}
+
+func encodeStruct(t reflect.Type) encodeFunc {
+ n := t.NumField()
+ encoders := make([]encodeFunc, n)
+ for i := 0; i < n; i++ {
+ field := t.Field(i)
+ if field.Tag.Get("bare") == "-" {
+ continue
+ }
+ encoders[i] = getEncoder(field.Type)
+ }
+
+ return func(w *Writer, v reflect.Value) error {
+ for i := 0; i < n; i++ {
+ if encoders[i] == nil {
+ continue
+ }
+ err := encoders[i](w, v.Field(i))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+}
+
+func encodeArray(t reflect.Type) encodeFunc {
+ f := getEncoder(t.Elem())
+ len := t.Len()
+
+ return func(w *Writer, v reflect.Value) error {
+ for i := 0; i < len; i++ {
+ if err := f(w, v.Index(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+}
+
+func encodeSlice(t reflect.Type) encodeFunc {
+ elem := t.Elem()
+ f := getEncoder(elem)
+
+ return func(w *Writer, v reflect.Value) error {
+ if err := w.WriteUint(uint64(v.Len())); err != nil {
+ return err
+ }
+
+ for i := 0; i < v.Len(); i++ {
+ if err := f(w, v.Index(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+}
+
+func encodeMap(t reflect.Type) encodeFunc {
+ keyType := t.Key()
+ keyf := getEncoder(keyType)
+
+ valueType := t.Elem()
+ valf := getEncoder(valueType)
+
+ return func(w *Writer, v reflect.Value) error {
+ if err := w.WriteUint(uint64(v.Len())); err != nil {
+ return err
+ }
+
+ iter := v.MapRange()
+ for iter.Next() {
+ if err := keyf(w, iter.Key()); err != nil {
+ return err
+ }
+ if err := valf(w, iter.Value()); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+}
+
+func encodeUnion(t reflect.Type) encodeFunc {
+ ut, ok := unionRegistry[t]
+ if !ok {
+ return func(w *Writer, v reflect.Value) error {
+ return fmt.Errorf("Union type %s is not registered", t.Name())
+ }
+ }
+
+ encoders := make(map[uint64]encodeFunc)
+ for tag, t := range ut.types {
+ encoders[tag] = getEncoder(t)
+ }
+
+ return func(w *Writer, v reflect.Value) error {
+ t := v.Elem().Type()
+ if t.Kind() == reflect.Ptr {
+ // If T is a valid union value type, *T is valid too.
+ t = t.Elem()
+ v = v.Elem()
+ }
+ tag, ok := ut.tags[t]
+ if !ok {
+ return fmt.Errorf("Invalid union value: %s", v.Elem().String())
+ }
+
+ if err := w.WriteUint(tag); err != nil {
+ return err
+ }
+
+ return encoders[tag](w, v.Elem())
+ }
+}
+
+func encodeUint(w *Writer, v reflect.Value) error {
+ switch getIntKind(v.Type()) {
+ case reflect.Uint:
+ return w.WriteUint(v.Uint())
+
+ case reflect.Uint8:
+ return w.WriteU8(uint8(v.Uint()))
+
+ case reflect.Uint16:
+ return w.WriteU16(uint16(v.Uint()))
+
+ case reflect.Uint32:
+ return w.WriteU32(uint32(v.Uint()))
+
+ case reflect.Uint64:
+ return w.WriteU64(uint64(v.Uint()))
+ }
+
+ panic("not uint")
+}
+
+func encodeInt(w *Writer, v reflect.Value) error {
+ switch getIntKind(v.Type()) {
+ case reflect.Int:
+ return w.WriteInt(v.Int())
+
+ case reflect.Int8:
+ return w.WriteI8(int8(v.Int()))
+
+ case reflect.Int16:
+ return w.WriteI16(int16(v.Int()))
+
+ case reflect.Int32:
+ return w.WriteI32(int32(v.Int()))
+
+ case reflect.Int64:
+ return w.WriteI64(int64(v.Int()))
+ }
+
+ panic("not int")
+}
+
+func encodeFloat(w *Writer, v reflect.Value) error {
+ switch v.Type().Kind() {
+ case reflect.Float32:
+ return w.WriteF32(float32(v.Float()))
+ case reflect.Float64:
+ return w.WriteF64(v.Float())
+ }
+
+ panic("not float")
+}
+
+func encodeBool(w *Writer, v reflect.Value) error {
+ return w.WriteBool(v.Bool())
+}
+
+func encodeString(w *Writer, v reflect.Value) error {
+ if v.Kind() != reflect.String {
+ panic("not string")
+ }
+ return w.WriteString(v.String())
+}
diff --git a/forged/internal/bare/package.go b/forged/internal/bare/package.go
new file mode 100644
index 0000000..2f12f55
--- /dev/null
+++ b/forged/internal/bare/package.go
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// Package bare provides primitives to encode and decode BARE messages.
+//
+// There is no guarantee that this is compatible with the upstream
+// implementation at https://git.sr.ht/~sircmpwn/go-bare.
+package bare
diff --git a/forged/internal/bare/reader.go b/forged/internal/bare/reader.go
new file mode 100644
index 0000000..58325e3
--- /dev/null
+++ b/forged/internal/bare/reader.go
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
+
+package bare
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+ "unicode/utf8"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
+type byteReader interface {
+ io.Reader
+ io.ByteReader
+}
+
+// A Reader for BARE primitive types.
+type Reader struct {
+ base byteReader
+ scratch [8]byte
+}
+
+type simpleByteReader struct {
+ io.Reader
+ scratch [1]byte
+}
+
+func (r simpleByteReader) ReadByte() (byte, error) {
+ // using reference type here saves us allocations
+ _, err := r.Read(r.scratch[:])
+ return r.scratch[0], err
+}
+
+// Returns a new BARE primitive reader wrapping the given io.Reader.
+func NewReader(base io.Reader) *Reader {
+ br, ok := base.(byteReader)
+ if !ok {
+ br = simpleByteReader{Reader: base}
+ }
+ return &Reader{base: br}
+}
+
+func (r *Reader) ReadUint() (uint64, error) {
+ x, err := binary.ReadUvarint(r.base)
+ if err != nil {
+ return x, err
+ }
+ return x, nil
+}
+
+func (r *Reader) ReadU8() (uint8, error) {
+ return r.base.ReadByte()
+}
+
+func (r *Reader) ReadU16() (uint16, error) {
+ var i uint16
+ if _, err := io.ReadAtLeast(r.base, r.scratch[:2], 2); err != nil {
+ return i, err
+ }
+ return binary.LittleEndian.Uint16(r.scratch[:]), nil
+}
+
+func (r *Reader) ReadU32() (uint32, error) {
+ var i uint32
+ if _, err := io.ReadAtLeast(r.base, r.scratch[:4], 4); err != nil {
+ return i, err
+ }
+ return binary.LittleEndian.Uint32(r.scratch[:]), nil
+}
+
+func (r *Reader) ReadU64() (uint64, error) {
+ var i uint64
+ if _, err := io.ReadAtLeast(r.base, r.scratch[:8], 8); err != nil {
+ return i, err
+ }
+ return binary.LittleEndian.Uint64(r.scratch[:]), nil
+}
+
+func (r *Reader) ReadInt() (int64, error) {
+ return binary.ReadVarint(r.base)
+}
+
+func (r *Reader) ReadI8() (int8, error) {
+ b, err := r.base.ReadByte()
+ return int8(b), err
+}
+
+func (r *Reader) ReadI16() (int16, error) {
+ var i int16
+ if _, err := io.ReadAtLeast(r.base, r.scratch[:2], 2); err != nil {
+ return i, err
+ }
+ return int16(binary.LittleEndian.Uint16(r.scratch[:])), nil
+}
+
+func (r *Reader) ReadI32() (int32, error) {
+ var i int32
+ if _, err := io.ReadAtLeast(r.base, r.scratch[:4], 4); err != nil {
+ return i, err
+ }
+ return int32(binary.LittleEndian.Uint32(r.scratch[:])), nil
+}
+
+func (r *Reader) ReadI64() (int64, error) {
+ var i int64
+ if _, err := io.ReadAtLeast(r.base, r.scratch[:], 8); err != nil {
+ return i, err
+ }
+ return int64(binary.LittleEndian.Uint64(r.scratch[:])), nil
+}
+
+func (r *Reader) ReadF32() (float32, error) {
+ u, err := r.ReadU32()
+ f := math.Float32frombits(u)
+ if math.IsNaN(float64(f)) {
+ return 0.0, fmt.Errorf("NaN is not permitted in BARE floats")
+ }
+ return f, err
+}
+
+func (r *Reader) ReadF64() (float64, error) {
+ u, err := r.ReadU64()
+ f := math.Float64frombits(u)
+ if math.IsNaN(f) {
+ return 0.0, fmt.Errorf("NaN is not permitted in BARE floats")
+ }
+ return f, err
+}
+
+func (r *Reader) ReadBool() (bool, error) {
+ b, err := r.ReadU8()
+ if err != nil {
+ return false, err
+ }
+
+ if b > 1 {
+ return false, fmt.Errorf("Invalid bool value: %#x", b)
+ }
+
+ return b == 1, nil
+}
+
+func (r *Reader) ReadString() (string, error) {
+ buf, err := r.ReadData()
+ if err != nil {
+ return "", err
+ }
+ if !utf8.Valid(buf) {
+ return "", ErrInvalidStr
+ }
+ return misc.BytesToString(buf), nil
+}
+
+// Reads a fixed amount of arbitrary data, defined by the length of the slice.
+func (r *Reader) ReadDataFixed(dest []byte) error {
+ var amt int = 0
+ for amt < len(dest) {
+ n, err := r.base.Read(dest[amt:])
+ if err != nil {
+ return err
+ }
+ amt += n
+ }
+ return nil
+}
+
+// Reads arbitrary data whose length is read from the message.
+func (r *Reader) ReadData() ([]byte, error) {
+ l, err := r.ReadUint()
+ if err != nil {
+ return nil, err
+ }
+ if l >= maxUnmarshalBytes {
+ return nil, ErrLimitExceeded
+ }
+ buf := make([]byte, l)
+ var amt uint64 = 0
+ for amt < l {
+ n, err := r.base.Read(buf[amt:])
+ if err != nil {
+ return nil, err
+ }
+ amt += uint64(n)
+ }
+ return buf, nil
+}
diff --git a/forged/internal/bare/unions.go b/forged/internal/bare/unions.go
new file mode 100644
index 0000000..0270a5f
--- /dev/null
+++ b/forged/internal/bare/unions.go
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
+
+package bare
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Any type which is a union member must implement this interface. You must
+// also call RegisterUnion for go-bare to marshal or unmarshal messages which
+// utilize your union type.
+type Union interface {
+ IsUnion()
+}
+
+type UnionTags struct {
+ iface reflect.Type
+ tags map[reflect.Type]uint64
+ types map[uint64]reflect.Type
+}
+
+var unionInterface = reflect.TypeOf((*Union)(nil)).Elem()
+var unionRegistry map[reflect.Type]*UnionTags
+
+func init() {
+ unionRegistry = make(map[reflect.Type]*UnionTags)
+}
+
+// Registers a union type in this context. Pass a pointer to the union
+// interface, then register each member type with the returned UnionTags.Member.
+func RegisterUnion(iface interface{}) *UnionTags {
+ ity := reflect.TypeOf(iface).Elem()
+ if _, ok := unionRegistry[ity]; ok {
+ panic(fmt.Errorf("Type %s has already been registered", ity.Name()))
+ }
+
+ if !ity.Implements(reflect.TypeOf((*Union)(nil)).Elem()) {
+ panic(fmt.Errorf("Type %s does not implement bare.Union", ity.Name()))
+ }
+
+ utypes := &UnionTags{
+ iface: ity,
+ tags: make(map[reflect.Type]uint64),
+ types: make(map[uint64]reflect.Type),
+ }
+ unionRegistry[ity] = utypes
+ return utypes
+}
+
+func (ut *UnionTags) Member(t interface{}, tag uint64) *UnionTags {
+ ty := reflect.TypeOf(t)
+ if !ty.AssignableTo(ut.iface) {
+ panic(fmt.Errorf("Type %s does not implement interface %s",
+ ty.Name(), ut.iface.Name()))
+ }
+ if _, ok := ut.tags[ty]; ok {
+ panic(fmt.Errorf("Type %s is already registered for union %s",
+ ty.Name(), ut.iface.Name()))
+ }
+ if _, ok := ut.types[tag]; ok {
+ panic(fmt.Errorf("Tag %d is already registered for union %s",
+ tag, ut.iface.Name()))
+ }
+ ut.tags[ty] = tag
+ ut.types[tag] = ty
+ return ut
+}
+
+func (ut *UnionTags) TagFor(v interface{}) (uint64, bool) {
+ tag, ok := ut.tags[reflect.TypeOf(v)]
+ return tag, ok
+}
+
+func (ut *UnionTags) TypeFor(tag uint64) (reflect.Type, bool) {
+ t, ok := ut.types[tag]
+ return t, ok
+}
diff --git a/forged/internal/bare/unmarshal.go b/forged/internal/bare/unmarshal.go
new file mode 100644
index 0000000..d55f32c
--- /dev/null
+++ b/forged/internal/bare/unmarshal.go
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
+
+package bare
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sync"
+)
+
// A type which implements this interface will be responsible for unmarshaling
// itself when encountered.
type Unmarshalable interface {
	// Unmarshal decodes the receiver from r, consuming exactly the bytes
	// that make up its BARE representation.
	Unmarshal(r *Reader) error
}
+
// Unmarshal decodes a BARE message from data into val, which must be a
// pointer to a value of the message type.
func Unmarshal(data []byte, val interface{}) error {
	return UnmarshalBareReader(NewReader(bytes.NewReader(data)), val)
}

// UnmarshalReader decodes a BARE message into val (a pointer) from an
// io.Reader, wrapping it with the package's configured read limit.
// See Unmarshal for details.
func UnmarshalReader(r io.Reader, val interface{}) error {
	limited := newLimitedReader(r)
	return UnmarshalBareReader(NewReader(limited), val)
}
+
// decodeFunc decodes a single value of one fixed type from r into v.
type decodeFunc func(r *Reader, v reflect.Value) error

// decodeFuncCache memoizes decoders so reflection-based construction
// happens once per type.
var decodeFuncCache sync.Map // map[reflect.Type]decodeFunc
+
// UnmarshalBareReader decodes a BARE message from r into val, which must be
// a non-nil pointer to a value of the message type.
func UnmarshalBareReader(r *Reader, val interface{}) error {
	t := reflect.TypeOf(val)
	v := reflect.ValueOf(val)
	// reflect.TypeOf(nil) returns nil; check before calling Kind so a nil
	// interface yields an error rather than a panic.
	if t == nil || t.Kind() != reflect.Ptr {
		return errors.New("Expected val to be pointer type")
	}
	if v.IsNil() {
		// Decoding through a nil pointer would panic in v.Elem(); report
		// it as an error instead.
		return errors.New("Expected val to be a non-nil pointer")
	}

	return getDecoder(t.Elem())(r, v.Elem())
}
+
// getDecoder returns the decoder for type t, building one and caching it on
// first use. Concurrent builders may race, but storing the same decoder
// twice is harmless.
func getDecoder(t reflect.Type) decodeFunc {
	if cached, ok := decodeFuncCache.Load(t); ok {
		return cached.(decodeFunc)
	}
	dec := decoderFunc(t)
	decodeFuncCache.Store(t, dec)
	return dec
}

var unmarshalableInterface = reflect.TypeOf((*Unmarshalable)(nil)).Elem()
+
// decoderFunc builds (uncached) a decoder for t, dispatching on how the
// type is represented in BARE.
func decoderFunc(t reflect.Type) decodeFunc {
	// Types whose pointer implements Unmarshalable decode themselves.
	if reflect.PointerTo(t).Implements(unmarshalableInterface) {
		return func(r *Reader, v reflect.Value) error {
			uv := v.Addr().Interface().(Unmarshalable)
			return uv.Unmarshal(r)
		}
	}

	// Registered union interfaces are decoded via their tag table.
	if t.Kind() == reflect.Interface && t.Implements(unionInterface) {
		return decodeUnion(t)
	}

	switch t.Kind() {
	case reflect.Ptr:
		// A pointer encodes a BARE optional<T>.
		return decodeOptional(t.Elem())
	case reflect.Struct:
		return decodeStruct(t)
	case reflect.Array:
		return decodeArray(t)
	case reflect.Slice:
		return decodeSlice(t)
	case reflect.Map:
		return decodeMap(t)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return decodeUint
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return decodeInt
	case reflect.Float32, reflect.Float64:
		return decodeFloat
	case reflect.Bool:
		return decodeBool
	case reflect.String:
		return decodeString
	}

	// Anything else (chan, func, complex, ...) has no BARE representation.
	return func(r *Reader, v reflect.Value) error {
		return &UnsupportedTypeError{v.Type()}
	}
}
+
// decodeOptional returns a decoder for optional<t>: a one-byte presence
// flag (0 or 1), followed by the value itself when present.
func decodeOptional(t reflect.Type) decodeFunc {
	return func(r *Reader, v reflect.Value) error {
		flag, err := r.ReadU8()
		if err != nil {
			return err
		}

		switch flag {
		case 0:
			// Absent: leave the pointer nil.
			return nil
		case 1:
			v.Set(reflect.New(t))
			return getDecoder(t)(r, v.Elem())
		default:
			return fmt.Errorf("Invalid optional value: %#x", flag)
		}
	}
}
+
// decodeStruct returns a decoder that reads each field of t in declaration
// order, skipping fields tagged `bare:"-"`.
func decodeStruct(t reflect.Type) decodeFunc {
	fieldCount := t.NumField()
	decoders := make([]decodeFunc, fieldCount)
	for i := range decoders {
		f := t.Field(i)
		if f.Tag.Get("bare") != "-" {
			decoders[i] = getDecoder(f.Type)
		}
	}

	return func(r *Reader, v reflect.Value) error {
		for i, dec := range decoders {
			if dec == nil {
				// Field excluded via `bare:"-"`.
				continue
			}
			if err := dec(r, v.Field(i)); err != nil {
				return err
			}
		}
		return nil
	}
}

// decodeArray returns a decoder for a fixed-length array: exactly t.Len()
// elements with no length prefix on the wire.
func decodeArray(t reflect.Type) decodeFunc {
	elemDecoder := getDecoder(t.Elem())
	size := t.Len()

	return func(r *Reader, v reflect.Value) error {
		for i := 0; i < size; i++ {
			if err := elemDecoder(r, v.Index(i)); err != nil {
				return err
			}
		}
		return nil
	}
}
+
// decodeSlice returns a decoder for a variable-length list: an unsigned
// varint count (validated against maxArrayLength before allocating)
// followed by that many elements.
func decodeSlice(t reflect.Type) decodeFunc {
	elemDecoder := getDecoder(t.Elem())

	return func(r *Reader, v reflect.Value) error {
		count, err := r.ReadUint()
		if err != nil {
			return err
		}

		if count > maxArrayLength {
			return fmt.Errorf("Array length %d exceeds configured limit of %d", count, maxArrayLength)
		}

		n := int(count)
		v.Set(reflect.MakeSlice(t, n, n))

		for i := 0; i < n; i++ {
			if err := elemDecoder(r, v.Index(i)); err != nil {
				return err
			}
		}
		return nil
	}
}
+
// decodeMap returns a decoder for map[K]V: an unsigned varint entry count
// (validated against maxMapSize) followed by alternating key/value pairs.
// Duplicate keys are rejected as malformed input.
func decodeMap(t reflect.Type) decodeFunc {
	keyType := t.Key()
	keyf := getDecoder(keyType)

	valueType := t.Elem()
	valf := getDecoder(valueType)

	return func(r *Reader, v reflect.Value) error {
		size, err := r.ReadUint()
		if err != nil {
			return err
		}

		if size > maxMapSize {
			return fmt.Errorf("Map size %d exceeds configured limit of %d", size, maxMapSize)
		}

		v.Set(reflect.MakeMapWithSize(t, int(size)))

		// Scratch values reused across iterations; SetMapIndex copies them
		// into the map, so reuse is safe.
		key := reflect.New(keyType).Elem()
		value := reflect.New(valueType).Elem()

		for i := uint64(0); i < size; i++ {
			if err := keyf(r, key); err != nil {
				return err
			}

			// A valid (Kind > Invalid) existing entry means the key repeats.
			if v.MapIndex(key).Kind() > reflect.Invalid {
				return fmt.Errorf("Encountered duplicate map key: %v", key.Interface())
			}

			if err := valf(r, value); err != nil {
				return err
			}

			v.SetMapIndex(key, value)
		}
		return nil
	}
}
+
// decodeUnion returns a decoder for a registered union interface type: an
// unsigned varint tag followed by the payload of the member type registered
// under that tag. Unregistered unions or unknown tags yield an error.
func decodeUnion(t reflect.Type) decodeFunc {
	ut, ok := unionRegistry[t]
	if !ok {
		return func(r *Reader, v reflect.Value) error {
			return fmt.Errorf("Union type %s is not registered", t.Name())
		}
	}

	// Pre-build one decoder per tag so the hot path is a map lookup.
	decoders := make(map[uint64]decodeFunc)
	for tag, t := range ut.types {
		t := t // capture a per-iteration copy for the closure below
		f := getDecoder(t)

		decoders[tag] = func(r *Reader, v reflect.Value) error {
			// Allocate a fresh member value, decode into it, then store the
			// pointer in the interface value.
			nv := reflect.New(t)
			if err := f(r, nv.Elem()); err != nil {
				return err
			}

			v.Set(nv)
			return nil
		}
	}

	return func(r *Reader, v reflect.Value) error {
		tag, err := r.ReadUint()
		if err != nil {
			return err
		}

		if f, ok := decoders[tag]; ok {
			return f(r, v)
		}

		return fmt.Errorf("Invalid union tag %d for type %s", tag, t.Name())
	}
}
+
+func decodeUint(r *Reader, v reflect.Value) error {
+ var err error
+ switch getIntKind(v.Type()) {
+ case reflect.Uint:
+ var u uint64
+ u, err = r.ReadUint()
+ v.SetUint(u)
+
+ case reflect.Uint8:
+ var u uint8
+ u, err = r.ReadU8()
+ v.SetUint(uint64(u))
+
+ case reflect.Uint16:
+ var u uint16
+ u, err = r.ReadU16()
+ v.SetUint(uint64(u))
+ case reflect.Uint32:
+ var u uint32
+ u, err = r.ReadU32()
+ v.SetUint(uint64(u))
+
+ case reflect.Uint64:
+ var u uint64
+ u, err = r.ReadU64()
+ v.SetUint(uint64(u))
+
+ default:
+ panic("not an uint")
+ }
+
+ return err
+}
+
+func decodeInt(r *Reader, v reflect.Value) error {
+ var err error
+ switch getIntKind(v.Type()) {
+ case reflect.Int:
+ var i int64
+ i, err = r.ReadInt()
+ v.SetInt(i)
+
+ case reflect.Int8:
+ var i int8
+ i, err = r.ReadI8()
+ v.SetInt(int64(i))
+
+ case reflect.Int16:
+ var i int16
+ i, err = r.ReadI16()
+ v.SetInt(int64(i))
+ case reflect.Int32:
+ var i int32
+ i, err = r.ReadI32()
+ v.SetInt(int64(i))
+
+ case reflect.Int64:
+ var i int64
+ i, err = r.ReadI64()
+ v.SetInt(int64(i))
+
+ default:
+ panic("not an int")
+ }
+
+ return err
+}
+
+func decodeFloat(r *Reader, v reflect.Value) error {
+ var err error
+ switch v.Type().Kind() {
+ case reflect.Float32:
+ var f float32
+ f, err = r.ReadF32()
+ v.SetFloat(float64(f))
+ case reflect.Float64:
+ var f float64
+ f, err = r.ReadF64()
+ v.SetFloat(f)
+ default:
+ panic("not a float")
+ }
+ return err
+}
+
+func decodeBool(r *Reader, v reflect.Value) error {
+ b, err := r.ReadBool()
+ v.SetBool(b)
+ return err
+}
+
+func decodeString(r *Reader, v reflect.Value) error {
+ s, err := r.ReadString()
+ v.SetString(s)
+ return err
+}
diff --git a/forged/internal/bare/varint.go b/forged/internal/bare/varint.go
new file mode 100644
index 0000000..a185ac8
--- /dev/null
+++ b/forged/internal/bare/varint.go
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
+
+package bare
+
+import (
+ "reflect"
+)
+
// Int is a variable-length encoded signed integer.
type Int int64

// Uint is a variable-length encoded unsigned integer.
type Uint uint64

// Cached reflect.Types for Int and Uint, compared against in getIntKind to
// distinguish the varint wrapper types from plain fixed-width integers.
var (
	intType  = reflect.TypeOf(Int(0))
	uintType = reflect.TypeOf(Uint(0))
)
+
// getIntKind maps t to the kind used to pick an integer codec: the varint
// wrapper types Int and Uint report reflect.Int and reflect.Uint; every
// other type reports its own kind (fixed-width encoding).
func getIntKind(t reflect.Type) reflect.Kind {
	if t == intType {
		return reflect.Int
	}
	if t == uintType {
		return reflect.Uint
	}
	return t.Kind()
}
diff --git a/forged/internal/bare/writer.go b/forged/internal/bare/writer.go
new file mode 100644
index 0000000..bada045
--- /dev/null
+++ b/forged/internal/bare/writer.go
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
+
+package bare
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
// A Writer for BARE primitive types.
type Writer struct {
	base io.Writer
	// scratch is a reusable varint encoding buffer, avoiding a fresh
	// buffer per variable-length write.
	scratch [binary.MaxVarintLen64]byte
}

// Returns a new BARE primitive writer wrapping the given io.Writer.
func NewWriter(base io.Writer) *Writer {
	return &Writer{base: base}
}
+
// WriteUint writes an unsigned integer in variable-length (uvarint)
// encoding, reusing the writer's scratch buffer.
func (w *Writer) WriteUint(i uint64) error {
	n := binary.PutUvarint(w.scratch[:], i)
	_, err := w.base.Write(w.scratch[:n])
	return err
}

// WriteU8 writes a fixed-width u8.
func (w *Writer) WriteU8(i uint8) error {
	return binary.Write(w.base, binary.LittleEndian, i)
}

// WriteU16 writes a fixed-width little-endian u16.
func (w *Writer) WriteU16(i uint16) error {
	return binary.Write(w.base, binary.LittleEndian, i)
}

// WriteU32 writes a fixed-width little-endian u32.
func (w *Writer) WriteU32(i uint32) error {
	return binary.Write(w.base, binary.LittleEndian, i)
}

// WriteU64 writes a fixed-width little-endian u64.
func (w *Writer) WriteU64(i uint64) error {
	return binary.Write(w.base, binary.LittleEndian, i)
}
+
+func (w *Writer) WriteInt(i int64) error {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutVarint(buf[:], i)
+ _, err := w.base.Write(buf[:n])
+ return err
+}
+
// WriteI8 writes a fixed-width i8.
func (w *Writer) WriteI8(i int8) error {
	return binary.Write(w.base, binary.LittleEndian, i)
}

// WriteI16 writes a fixed-width little-endian i16.
func (w *Writer) WriteI16(i int16) error {
	return binary.Write(w.base, binary.LittleEndian, i)
}

// WriteI32 writes a fixed-width little-endian i32.
func (w *Writer) WriteI32(i int32) error {
	return binary.Write(w.base, binary.LittleEndian, i)
}

// WriteI64 writes a fixed-width little-endian i64.
func (w *Writer) WriteI64(i int64) error {
	return binary.Write(w.base, binary.LittleEndian, i)
}

// WriteF32 writes a little-endian 32-bit float. NaN is rejected, as the
// error message states, because BARE does not permit it.
func (w *Writer) WriteF32(f float32) error {
	if math.IsNaN(float64(f)) {
		return fmt.Errorf("NaN is not permitted in BARE floats")
	}
	return binary.Write(w.base, binary.LittleEndian, f)
}

// WriteF64 writes a little-endian 64-bit float, likewise rejecting NaN.
func (w *Writer) WriteF64(f float64) error {
	if math.IsNaN(f) {
		return fmt.Errorf("NaN is not permitted in BARE floats")
	}
	return binary.Write(w.base, binary.LittleEndian, f)
}

// WriteBool writes a boolean as a single byte.
func (w *Writer) WriteBool(b bool) error {
	return binary.Write(w.base, binary.LittleEndian, b)
}
+
// WriteString writes a string as length-prefixed data.
// NOTE(review): misc.StringToBytes presumably does a zero-copy string->[]byte
// conversion — safe here because WriteData only reads the slice; confirm.
func (w *Writer) WriteString(str string) error {
	return w.WriteData(misc.StringToBytes(str))
}
+
+// Writes a fixed amount of arbitrary data, defined by the length of the slice.
+func (w *Writer) WriteDataFixed(data []byte) error {
+ var amt int = 0
+ for amt < len(data) {
+ n, err := w.base.Write(data[amt:])
+ if err != nil {
+ return err
+ }
+ amt += n
+ }
+ return nil
+}
+
+// Writes arbitrary data whose length is encoded into the message.
+func (w *Writer) WriteData(data []byte) error {
+ err := w.WriteUint(uint64(len(data)))
+ if err != nil {
+ return err
+ }
+ var amt int = 0
+ for amt < len(data) {
+ n, err := w.base.Write(data[amt:])
+ if err != nil {
+ return err
+ }
+ amt += n
+ }
+ return nil
+}
diff --git a/forged/internal/cmap/comparable_map.go b/forged/internal/cmap/comparable_map.go
new file mode 100644
index 0000000..cd9d4ce
--- /dev/null
+++ b/forged/internal/cmap/comparable_map.go
@@ -0,0 +1,539 @@
+// Inspired by github.com/SaveTheRbtz/generic-sync-map-go but technically
+// written from scratch with Go 1.23's sync.Map.
+// Copyright 2024 Runxi Yu (porting it to generics)
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.BSD file.
+
+package cmap
+
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+)
+
// ComparableMap[K comparable, V comparable] is like a Go map[K]V but is safe for concurrent use
// by multiple goroutines without additional locking or coordination. Loads,
// stores, and deletes run in amortized constant time.
//
// The ComparableMap type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a ComparableMap may significantly reduce lock
// contention compared to a Go map paired with a separate [Mutex] or [RWMutex].
//
// Unlike Map, the value type must also be comparable, which enables the
// CompareAndSwap/CompareAndDelete operations to compare values directly.
//
// The zero ComparableMap is empty and ready for use. A ComparableMap must not be copied after first use.
//
// In the terminology of [the Go memory model], ComparableMap arranges that a write operation
// “synchronizes before” any read operation that observes the effect of the write, where
// read and write operations are defined as follows.
// [ComparableMap.Load], [ComparableMap.LoadAndDelete], [ComparableMap.LoadOrStore], [ComparableMap.Swap], [ComparableMap.CompareAndSwap],
// and [ComparableMap.CompareAndDelete] are read operations;
// [ComparableMap.Delete], [ComparableMap.LoadAndDelete], [ComparableMap.Store], and [ComparableMap.Swap] are write operations;
// [ComparableMap.LoadOrStore] is a write operation when it returns loaded set to false;
// [ComparableMap.CompareAndSwap] is a write operation when it returns swapped set to true;
// and [ComparableMap.CompareAndDelete] is a write operation when it returns deleted set to true.
//
// [the Go memory model]: https://go.dev/ref/mem
type ComparableMap[K comparable, V comparable] struct {
	mu sync.Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Pointer[comparableReadOnly[K, V]]

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[K]*comparableEntry[V]

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}
+
// comparableReadOnly is an immutable struct stored atomically in the ComparableMap.read field.
type comparableReadOnly[K comparable, V comparable] struct {
	m       map[K]*comparableEntry[V]
	amended bool // true if the dirty map contains some key not in m.
}

// comparableExpunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var comparableExpunged = unsafe.Pointer(new(any))

// A comparableEntry is a slot in the map corresponding to a particular key.
type comparableEntry[V comparable] struct {
	// p points to the value stored for the comparableEntry.
	//
	// If p == nil, the comparableEntry has been deleted, and either m.dirty == nil or
	// m.dirty[key] is e.
	//
	// If p == comparableExpunged, the comparableEntry has been deleted, m.dirty != nil, and the comparableEntry
	// is missing from m.dirty.
	//
	// Otherwise, the comparableEntry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// A comparableEntry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with comparableExpunged and leave
	// m.dirty[key] unset.
	//
	// A comparableEntry's associated value can be updated by atomic replacement, provided
	// p != comparableExpunged. If p == comparableExpunged, a comparableEntry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the comparableEntry.
	p unsafe.Pointer
}

// newComparableEntry allocates an entry whose pointer holds a copy of i.
func newComparableEntry[V comparable](i V) *comparableEntry[V] {
	return &comparableEntry[V]{p: unsafe.Pointer(&i)}
}
+
// loadReadOnly returns the current read-only snapshot, or a zero snapshot
// if none has been stored yet.
func (m *ComparableMap[K, V]) loadReadOnly() comparableReadOnly[K, V] {
	if p := m.read.Load(); p != nil {
		return *p
	}
	return comparableReadOnly[K, V]{}
}

// Load returns the value stored in the map for a key, or the zero value of
// V if no value is present.
// The ok result indicates whether value was found in the map.
func (m *ComparableMap[K, V]) Load(key K) (value V, ok bool) {
	read := m.loadReadOnly()
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read = m.loadReadOnly()
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return *new(V), false
	}
	return e.load()
}

// load atomically reads the entry's value; ok is false if the entry has
// been deleted (nil) or expunged.
func (e *comparableEntry[V]) load() (value V, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == comparableExpunged {
		return value, false
	}
	return *(*V)(p), true
}

// Store sets the value for a key.
func (m *ComparableMap[K, V]) Store(key K, value V) {
	_, _ = m.Swap(key, value)
}
+
// Clear deletes all the entries, resulting in an empty ComparableMap.
func (m *ComparableMap[K, V]) Clear() {
	read := m.loadReadOnly()
	if len(read.m) == 0 && !read.amended {
		// Avoid allocating a new comparableReadOnly when the map is already clear.
		return
	}

	m.mu.Lock()
	defer m.mu.Unlock()

	read = m.loadReadOnly()
	if len(read.m) > 0 || read.amended {
		m.read.Store(&comparableReadOnly[K, V]{})
	}

	clear(m.dirty)
	// Don't immediately promote the newly-cleared dirty map on the next operation.
	m.misses = 0
}

// tryCompareAndSwap compares the entry with the given old value and swaps
// it with a new value if the entry is equal to the old value, and the entry
// has not been expunged.
//
// If the entry is expunged, tryCompareAndSwap returns false and leaves
// the entry unchanged.
func (e *comparableEntry[V]) tryCompareAndSwap(old V, new V) bool {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == comparableExpunged || *(*V)(p) != old { // XXX: value compare relies on V being comparable
		return false
	}

	// Copy the pointer after the first load to make this method more amenable
	// to escape analysis: if the comparison fails from the start, we shouldn't
	// bother heap-allocating a pointer to store.
	nc := new
	for {
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(&nc)) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
		if p == nil || p == comparableExpunged || *(*V)(p) != old {
			return false
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *comparableEntry[V]) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, comparableExpunged, nil)
}

// swapLocked unconditionally swaps a value into the entry.
//
// The entry must be known not to be expunged.
func (e *comparableEntry[V]) swapLocked(i *V) *V {
	return (*V)(atomic.SwapPointer(&e.p, unsafe.Pointer(i)))
}
+
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *ComparableMap[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
	// Avoid locking if it's a clean hit.
	read := m.loadReadOnly()
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read = m.loadReadOnly()
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// Previously expunged: re-link the entry into the dirty map.
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(&comparableReadOnly[K, V]{m: read.m, amended: true})
		}
		m.dirty[key] = newComparableEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *comparableEntry[V]) tryLoadOrStore(i V) (actual V, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == comparableExpunged {
		return actual, false, false
	}
	if p != nil {
		return *(*V)(p), true, true
	}

	// Copy the pointer after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == comparableExpunged {
			return actual, false, false
		}
		if p != nil {
			return *(*V)(p), true, true
		}
	}
}
+
// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
func (m *ComparableMap[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
	read := m.loadReadOnly()
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read = m.loadReadOnly()
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			delete(m.dirty, key)
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if ok {
		return e.delete()
	}
	return value, false
}

// Delete deletes the value for a key.
func (m *ComparableMap[K, V]) Delete(key K) {
	m.LoadAndDelete(key)
}

// delete atomically clears the entry's pointer, returning the value it held
// (ok is false when the entry was already deleted or expunged).
func (e *comparableEntry[V]) delete() (value V, ok bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == comparableExpunged {
			return value, false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return *(*V)(p), true
		}
	}
}

// trySwap swaps a value if the entry has not been expunged.
//
// If the entry is expunged, trySwap returns false and leaves the entry
// unchanged.
func (e *comparableEntry[V]) trySwap(i *V) (*V, bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == comparableExpunged {
			return nil, false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return (*V)(p), true
		}
	}
}
+
// Swap swaps the value for a key and returns the previous value if any.
// The loaded result reports whether the key was present.
func (m *ComparableMap[K, V]) Swap(key K, value V) (previous V, loaded bool) {
	read := m.loadReadOnly()
	if e, ok := read.m[key]; ok {
		if v, ok := e.trySwap(&value); ok {
			if v == nil {
				// The entry existed but held no value (deleted).
				return previous, false
			}
			return *v, true
		}
	}

	m.mu.Lock()
	read = m.loadReadOnly()
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		if v := e.swapLocked(&value); v != nil {
			loaded = true
			previous = *v
		}
	} else if e, ok := m.dirty[key]; ok {
		if v := e.swapLocked(&value); v != nil {
			loaded = true
			previous = *v
		}
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(&comparableReadOnly[K, V]{m: read.m, amended: true})
		}
		m.dirty[key] = newComparableEntry(value)
	}
	m.mu.Unlock()
	return previous, loaded
}

// CompareAndSwap swaps the old and new values for key
// if the value stored in the map is equal to old.
// The old value must be of a comparable type.
func (m *ComparableMap[K, V]) CompareAndSwap(key K, old, new V) (swapped bool) {
	read := m.loadReadOnly()
	if e, ok := read.m[key]; ok {
		return e.tryCompareAndSwap(old, new)
	} else if !read.amended {
		return false // No existing value for key.
	}

	m.mu.Lock()
	defer m.mu.Unlock()
	read = m.loadReadOnly()
	swapped = false
	if e, ok := read.m[key]; ok {
		swapped = e.tryCompareAndSwap(old, new)
	} else if e, ok := m.dirty[key]; ok {
		swapped = e.tryCompareAndSwap(old, new)
		// We needed to lock mu in order to load the entry for key,
		// and the operation didn't change the set of keys in the map
		// (so it would be made more efficient by promoting the dirty
		// map to read-only).
		// Count it as a miss so that we will eventually switch to the
		// more efficient steady state.
		m.missLocked()
	}
	return swapped
}
+
// CompareAndDelete deletes the entry for key if its value is equal to old.
// The old value must be of a comparable type.
//
// If there is no current value for key in the map, CompareAndDelete
// returns false (even if the old value is a nil pointer).
func (m *ComparableMap[K, V]) CompareAndDelete(key K, old V) (deleted bool) {
	read := m.loadReadOnly()
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read = m.loadReadOnly()
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Don't delete key from m.dirty: we still need to do the “compare” part
			// of the operation. The entry will eventually be expunged when the
			// dirty map is promoted to the read map.
			//
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	// CAS loop: retry while the value still equals old but another goroutine
	// races the pointer swap.
	for ok {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == comparableExpunged || *(*V)(p) != old {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return true
		}
	}
	return false
}
+
// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the ComparableMap's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently (including by f), Range may reflect any
// mapping for that key from any point during the Range call. Range does not
// block other methods on the receiver; even f itself may call any method on m.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *ComparableMap[K, V]) Range(f func(key K, value V) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read := m.loadReadOnly()
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read = m.loadReadOnly()
		if read.amended {
			read = comparableReadOnly[K, V]{m: m.dirty}
			copyRead := read
			m.read.Store(&copyRead)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			// Deleted or expunged entry; skip.
			continue
		}
		if !f(k, v) {
			break
		}
	}
}
+
// missLocked records a slow-path lookup and, once misses reach the size of
// the dirty map, promotes the dirty map to the read map. Caller holds m.mu.
func (m *ComparableMap[K, V]) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(&comparableReadOnly[K, V]{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

// dirtyLocked lazily (re)builds the dirty map as a shallow copy of the read
// map, expunging deleted entries along the way. Caller holds m.mu.
func (m *ComparableMap[K, V]) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read := m.loadReadOnly()
	m.dirty = make(map[K]*comparableEntry[V], len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

// tryExpungeLocked attempts to mark a deleted (nil) entry as expunged so it
// can be omitted from the new dirty map. Caller holds m.mu.
func (e *comparableEntry[V]) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, comparableExpunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == comparableExpunged
}
diff --git a/forged/internal/cmap/map.go b/forged/internal/cmap/map.go
new file mode 100644
index 0000000..4f43627
--- /dev/null
+++ b/forged/internal/cmap/map.go
@@ -0,0 +1,446 @@
+// Inspired by github.com/SaveTheRbtz/generic-sync-map-go but technically
+// written from scratch with Go 1.23's sync.Map.
+// Copyright 2024 Runxi Yu (porting it to generics)
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.BSD file.
+
+// Package cmap provides a generic Map safe for concurrent use.
+package cmap
+
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+)
+
+// Map[K comparable, V any] is like a Go map[K]V but is safe for concurrent use
+// by multiple goroutines without additional locking or coordination. Loads,
+// stores, and deletes run in amortized constant time.
+//
+// The Map type is optimized for two common use cases: (1) when the entry for a given
+// key is only ever written once but read many times, as in caches that only grow,
+// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
+// sets of keys. In these two cases, use of a Map may significantly reduce lock
+// contention compared to a Go map paired with a separate [Mutex] or [RWMutex].
+//
+// The zero Map is empty and ready for use. A Map must not be copied after first use.
+//
+// In the terminology of [the Go memory model], Map arranges that a write operation
+// “synchronizes before” any read operation that observes the effect of the write, where
+// read and write operations are defined as follows.
+// [Map.Load], [Map.LoadAndDelete], [Map.LoadOrStore], [Map.Swap], [Map.CompareAndSwap],
+// and [Map.CompareAndDelete] are read operations;
+// [Map.Delete], [Map.LoadAndDelete], [Map.Store], and [Map.Swap] are write operations;
+// [Map.LoadOrStore] is a write operation when it returns loaded set to false;
+// [Map.CompareAndSwap] is a write operation when it returns swapped set to true;
+// and [Map.CompareAndDelete] is a write operation when it returns deleted set to true.
+//
+// [the Go memory model]: https://go.dev/ref/mem
+type Map[K comparable, V any] struct {
+ mu sync.Mutex
+
+ // read contains the portion of the map's contents that are safe for
+ // concurrent access (with or without mu held).
+ //
+ // The read field itself is always safe to load, but must only be stored with
+ // mu held.
+ //
+ // Entries stored in read may be updated concurrently without mu, but updating
+ // a previously-expunged entry requires that the entry be copied to the dirty
+ // map and unexpunged with mu held.
+ read atomic.Pointer[readOnly[K, V]]
+
+ // dirty contains the portion of the map's contents that require mu to be
+ // held. To ensure that the dirty map can be promoted to the read map quickly,
+ // it also includes all of the non-expunged entries in the read map.
+ //
+ // Expunged entries are not stored in the dirty map. An expunged entry in the
+ // clean map must be unexpunged and added to the dirty map before a new value
+ // can be stored to it.
+ //
+ // If the dirty map is nil, the next write to the map will initialize it by
+ // making a shallow copy of the clean map, omitting stale entries.
+ dirty map[K]*entry[V]
+
+ // misses counts the number of loads since the read map was last updated that
+ // needed to lock mu to determine whether the key was present.
+ //
+ // Once enough misses have occurred to cover the cost of copying the dirty
+ // map, the dirty map will be promoted to the read map (in the unamended
+ // state) and the next store to the map will make a new dirty copy.
+ misses int
+}
+
// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly[K comparable, V any] struct {
	m       map[K]*entry[V]
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(any))

// An entry is a slot in the map corresponding to a particular key.
type entry[V any] struct {
	// p points to the value stored for the entry.
	//
	// If p == nil, the entry has been deleted, and either m.dirty == nil or
	// m.dirty[key] is e.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer
}

// newEntry allocates an entry holding the value i.
func newEntry[V any](i V) *entry[V] {
	return &entry[V]{p: unsafe.Pointer(&i)}
}

// loadReadOnly returns the current read-only snapshot, or a zero snapshot if
// none has been stored yet.
func (m *Map[K, V]) loadReadOnly() readOnly[K, V] {
	if p := m.read.Load(); p != nil {
		return *p
	}
	return readOnly[K, V]{}
}
+
// Load returns the value stored in the map for a key, or the zero value of V
// if no value is present.
// The ok result indicates whether value was found in the map.
func (m *Map[K, V]) Load(key K) (value V, ok bool) {
	read := m.loadReadOnly()
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read = m.loadReadOnly()
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return *new(V), false
	}
	return e.load()
}

// load returns the entry's value, or ok == false if the entry has been
// deleted (nil) or expunged.
func (e *entry[V]) load() (value V, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return value, false
	}
	return *(*V)(p), true
}
+
// Store sets the value for a key.
func (m *Map[K, V]) Store(key K, value V) {
	_, _ = m.Swap(key, value)
}

// Clear deletes all the entries, resulting in an empty Map.
func (m *Map[K, V]) Clear() {
	read := m.loadReadOnly()
	if len(read.m) == 0 && !read.amended {
		// Avoid allocating a new readOnly when the map is already clear.
		return
	}

	m.mu.Lock()
	defer m.mu.Unlock()

	read = m.loadReadOnly()
	if len(read.m) > 0 || read.amended {
		m.read.Store(&readOnly[K, V]{})
	}

	// Empty the dirty map in place, keeping its capacity for future writes.
	clear(m.dirty)
	// Don't immediately promote the newly-cleared dirty map on the next operation.
	m.misses = 0
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry[V]) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// swapLocked unconditionally swaps a value into the entry.
//
// The entry must be known not to be expunged.
func (e *entry[V]) swapLocked(i *V) *V {
	return (*V)(atomic.SwapPointer(&e.p, unsafe.Pointer(i)))
}
+
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
	// Avoid locking if it's a clean hit.
	read := m.loadReadOnly()
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read = m.loadReadOnly()
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was expunged; restore it in the dirty map before use.
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(&readOnly[K, V]{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry[V]) tryLoadOrStore(i V) (actual V, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return actual, false, false
	}
	if p != nil {
		return *(*V)(p), true, true
	}

	// Copy the pointer after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return actual, false, false
		}
		if p != nil {
			return *(*V)(p), true, true
		}
	}
}
+
// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
	read := m.loadReadOnly()
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read = m.loadReadOnly()
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			delete(m.dirty, key)
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if ok {
		return e.delete()
	}
	return value, false
}

// Delete deletes the value for a key.
func (m *Map[K, V]) Delete(key K) {
	m.LoadAndDelete(key)
}

// delete atomically clears the entry, returning the previous value if one was
// held. The entry object itself stays in place so concurrent readers observe
// the deletion.
func (e *entry[V]) delete() (value V, ok bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return value, false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return *(*V)(p), true
		}
	}
}

// trySwap swaps a value if the entry has not been expunged.
//
// If the entry is expunged, trySwap returns false and leaves the entry
// unchanged.
func (e *entry[V]) trySwap(i *V) (*V, bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return (*V)(p), true
		}
	}
}
+
// Swap swaps the value for a key and returns the previous value if any.
// The loaded result reports whether the key was present.
func (m *Map[K, V]) Swap(key K, value V) (previous V, loaded bool) {
	// Fast path: the key is already in the read map and not expunged.
	read := m.loadReadOnly()
	if e, ok := read.m[key]; ok {
		if v, ok := e.trySwap(&value); ok {
			if v == nil {
				return previous, false
			}
			return *v, true
		}
	}

	m.mu.Lock()
	read = m.loadReadOnly()
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		if v := e.swapLocked(&value); v != nil {
			loaded = true
			previous = *v
		}
	} else if e, ok := m.dirty[key]; ok {
		if v := e.swapLocked(&value); v != nil {
			loaded = true
			previous = *v
		}
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(&readOnly[K, V]{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
	return previous, loaded
}
+
// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently (including by f), Range may reflect any
// mapping for that key from any point during the Range call. Range does not
// block other methods on the receiver; even f itself may call any method on m.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map[K, V]) Range(f func(key K, value V) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read := m.loadReadOnly()
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read = m.loadReadOnly()
		if read.amended {
			read = readOnly[K, V]{m: m.dirty}
			copyRead := read
			m.read.Store(&copyRead)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	// Iterate the snapshot; entries deleted concurrently load as !ok and are
	// skipped rather than reported with stale values.
	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}

// missLocked records a miss against the read map; once misses reach the size
// of the dirty map, the dirty map is promoted to the read map (unamended).
// m.mu must be held.
func (m *Map[K, V]) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(&readOnly[K, V]{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

// dirtyLocked lazily initializes m.dirty as a shallow copy of the read map,
// expunging (and omitting) entries that are currently deleted. m.mu must be
// held.
func (m *Map[K, V]) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read := m.loadReadOnly()
	m.dirty = make(map[K]*entry[V], len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

// tryExpungeLocked atomically upgrades a nil (deleted) entry to the expunged
// state so that a freshly built dirty map may omit it, and reports whether
// the entry ends up expunged. m.mu must be held.
func (e *entry[V]) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}
diff --git a/forged/internal/database/database.go b/forged/internal/database/database.go
new file mode 100644
index 0000000..b995adc
--- /dev/null
+++ b/forged/internal/database/database.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// Package database provides stubs and wrappers for databases.
+package database
+
+import (
+ "context"
+
+ "github.com/jackc/pgx/v5/pgxpool"
+)
+
// Database is a wrapper around pgxpool.Pool to provide a common interface for
// other packages in the forge.
type Database struct {
	*pgxpool.Pool
}

// Open creates a new database connection pool using the provided connection
// string. It returns a Database instance and an error if any occurs.
// NOTE(review): pgxpool appears to establish connections lazily in the
// background, so a nil error here does not guarantee the database is
// reachable — confirm against the pgxpool docs if callers rely on that.
func Open(connString string) (Database, error) {
	db, err := pgxpool.New(context.Background(), connString)
	return Database{db}, err
}
diff --git a/forged/internal/embed/.gitignore b/forged/internal/embed/.gitignore
new file mode 100644
index 0000000..e8708b1
--- /dev/null
+++ b/forged/internal/embed/.gitignore
@@ -0,0 +1,7 @@
+/source.tar.gz
+/hookc/hookc
+/git2d/git2d
+/static
+/templates
+/LICENSE*
+/forged
diff --git a/forged/internal/embed/embed.go b/forged/internal/embed/embed.go
new file mode 100644
index 0000000..1f0dcdf
--- /dev/null
+++ b/forged/internal/embed/embed.go
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// Package embed provides embedded filesystems created in build-time.
+package embed
+
+import "embed"
+
+// Source contains the licenses and source tarballs collected at build time.
+// It is intended to be served to the user.
+//
+//go:embed LICENSE* source.tar.gz
+var Source embed.FS
+
+// Resources contains the templates and static files used by the web interface,
+// as well as the git backend daemon and the hookc helper.
+//
+//go:embed forged/templates/* forged/static/*
+//go:embed hookc/hookc git2d/git2d
+var Resources embed.FS
diff --git a/forged/internal/git2c/client.go b/forged/internal/git2c/client.go
new file mode 100644
index 0000000..ed9390c
--- /dev/null
+++ b/forged/internal/git2c/client.go
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// Package git2c provides routines to interact with the git2d backend daemon.
+package git2c
+
+import (
+ "fmt"
+ "net"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/bare"
+)
+
// Client represents a connection to the git2d backend daemon.
type Client struct {
	socketPath string       // UNIX socket path the connection was dialed on
	conn       net.Conn     // underlying socket
	writer     *bare.Writer // BARE-encoded writes to git2d
	reader     *bare.Reader // BARE-encoded reads from git2d
}

// NewClient establishes a connection to a git2d socket and returns a new Client.
func NewClient(socketPath string) (*Client, error) {
	conn, err := net.Dial("unix", socketPath)
	if err != nil {
		return nil, fmt.Errorf("git2d connection failed: %w", err)
	}

	writer := bare.NewWriter(conn)
	reader := bare.NewReader(conn)

	return &Client{
		socketPath: socketPath,
		conn:       conn,
		writer:     writer,
		reader:     reader,
	}, nil
}

// Close terminates the underlying socket connection. It is safe to call on a
// Client whose connection was never established (conn == nil).
func (c *Client) Close() error {
	if c.conn != nil {
		return c.conn.Close()
	}
	return nil
}
diff --git a/forged/internal/git2c/cmd_index.go b/forged/internal/git2c/cmd_index.go
new file mode 100644
index 0000000..8862b2c
--- /dev/null
+++ b/forged/internal/git2c/cmd_index.go
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package git2c
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+)
+
// CmdIndex requests a repository index from git2d and returns the list of commits
// and the contents of a README file if available.
//
// Wire protocol (as exercised here): send the repo path and command 1, then
// read a status uint, a raw README blob, and commit records until EOF.
func (c *Client) CmdIndex(repoPath string) ([]Commit, *FilenameContents, error) {
	if err := c.writer.WriteData([]byte(repoPath)); err != nil {
		return nil, nil, fmt.Errorf("sending repo path failed: %w", err)
	}
	if err := c.writer.WriteUint(1); err != nil {
		return nil, nil, fmt.Errorf("sending command failed: %w", err)
	}

	status, err := c.reader.ReadUint()
	if err != nil {
		return nil, nil, fmt.Errorf("reading status failed: %w", err)
	}
	if status != 0 {
		return nil, nil, fmt.Errorf("git2d error: %d", status)
	}

	// README
	readmeRaw, err := c.reader.ReadData()
	if err != nil {
		// Best-effort: a missing/unreadable README becomes empty content
		// rather than failing the whole index request.
		readmeRaw = nil
	}

	readmeFilename := "README.md" // TODO
	readme := &FilenameContents{Filename: readmeFilename, Content: readmeRaw}

	// Commits
	var commits []Commit
	for {
		id, err := c.reader.ReadData()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return nil, nil, fmt.Errorf("reading commit ID failed: %w", err)
		}
		// NOTE(review): read errors on the remaining fields are silently
		// ignored; a stream truncated mid-record yields a commit with empty
		// fields — confirm this is intended or propagate the errors.
		title, _ := c.reader.ReadData()
		authorName, _ := c.reader.ReadData()
		authorEmail, _ := c.reader.ReadData()
		authorDate, _ := c.reader.ReadData()

		commits = append(commits, Commit{
			Hash:    hex.EncodeToString(id),
			Author:  string(authorName),
			Email:   string(authorEmail),
			Date:    string(authorDate),
			Message: string(title),
		})
	}

	return commits, readme, nil
}
diff --git a/forged/internal/git2c/cmd_treeraw.go b/forged/internal/git2c/cmd_treeraw.go
new file mode 100644
index 0000000..492cb84
--- /dev/null
+++ b/forged/internal/git2c/cmd_treeraw.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package git2c
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
// CmdTreeRaw queries git2d for a tree or blob object at the given path within the repository.
// It returns either a directory listing or the contents of a file.
//
// Wire protocol (as exercised here): send repo path, command 2, and the path
// spec; read back a status uint, then on success an object kind (1 = tree,
// 2 = blob) followed by its payload.
func (c *Client) CmdTreeRaw(repoPath, pathSpec string) ([]TreeEntry, string, error) {
	if err := c.writer.WriteData([]byte(repoPath)); err != nil {
		return nil, "", fmt.Errorf("sending repo path failed: %w", err)
	}
	if err := c.writer.WriteUint(2); err != nil {
		return nil, "", fmt.Errorf("sending command failed: %w", err)
	}
	if err := c.writer.WriteData([]byte(pathSpec)); err != nil {
		return nil, "", fmt.Errorf("sending path failed: %w", err)
	}

	status, err := c.reader.ReadUint()
	if err != nil {
		return nil, "", fmt.Errorf("reading status failed: %w", err)
	}

	switch status {
	case 0:
		kind, err := c.reader.ReadUint()
		if err != nil {
			return nil, "", fmt.Errorf("reading object kind failed: %w", err)
		}

		switch kind {
		case 1:
			// Tree: a count followed by (type, mode, size, name) per entry.
			count, err := c.reader.ReadUint()
			if err != nil {
				return nil, "", fmt.Errorf("reading entry count failed: %w", err)
			}

			var files []TreeEntry
			for range count {
				typeCode, err := c.reader.ReadUint()
				if err != nil {
					return nil, "", fmt.Errorf("error reading entry type: %w", err)
				}
				mode, err := c.reader.ReadUint()
				if err != nil {
					return nil, "", fmt.Errorf("error reading entry mode: %w", err)
				}
				size, err := c.reader.ReadUint()
				if err != nil {
					return nil, "", fmt.Errorf("error reading entry size: %w", err)
				}
				name, err := c.reader.ReadData()
				if err != nil {
					return nil, "", fmt.Errorf("error reading entry name: %w", err)
				}

				// typeCode 1 = subtree, 2 = regular file (mirrored in the
				// IsSubtree/IsFile flags).
				files = append(files, TreeEntry{
					Name:      string(name),
					Mode:      fmt.Sprintf("%06o", mode),
					Size:      size,
					IsFile:    typeCode == 2,
					IsSubtree: typeCode == 1,
				})
			}

			return files, "", nil

		case 2:
			// Blob: a single data field; EOF here is tolerated as empty content.
			content, err := c.reader.ReadData()
			if err != nil && !errors.Is(err, io.EOF) {
				return nil, "", fmt.Errorf("error reading file content: %w", err)
			}

			return nil, string(content), nil

		default:
			return nil, "", fmt.Errorf("unknown kind: %d", kind)
		}

	case 3:
		return nil, "", fmt.Errorf("path not found: %s", pathSpec)

	default:
		return nil, "", fmt.Errorf("unknown status code: %d", status)
	}
}
diff --git a/forged/internal/git2c/git_types.go b/forged/internal/git2c/git_types.go
new file mode 100644
index 0000000..bf13f05
--- /dev/null
+++ b/forged/internal/git2c/git_types.go
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package git2c
+
// Commit represents a single commit object retrieved from the git2d daemon.
type Commit struct {
	Hash    string // full object ID, hex-encoded
	Author  string // author name as received from git2d
	Email   string // author email as received from git2d
	Date    string // author date, raw string as received from git2d
	Message string // commit title
}

// FilenameContents holds the filename and byte contents of a file, such as a README.
type FilenameContents struct {
	Filename string
	Content  []byte
}

// TreeEntry represents a file or directory entry within a Git tree object.
type TreeEntry struct {
	Name      string
	Mode      string // octal mode string, e.g. "100644"
	Size      uint64
	IsFile    bool // true for blob entries
	IsSubtree bool // true for tree entries
}
diff --git a/forged/internal/git2c/perror.go b/forged/internal/git2c/perror.go
new file mode 100644
index 0000000..96bffd5
--- /dev/null
+++ b/forged/internal/git2c/perror.go
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// TODO: Make the C part report detailed error messages too
+
+package git2c
+
+import "errors"
+
var (
	Success error
	ErrUnknown = errors.New("git2c: unknown error")
	ErrPath = errors.New("git2c: get tree entry by path failed")
	ErrRevparse = errors.New("git2c: revparse failed")
	ErrReadme = errors.New("git2c: no readme")
	ErrBlobExpected = errors.New("git2c: blob expected")
	ErrEntryToObject = errors.New("git2c: tree entry to object conversion failed")
	ErrBlobRawContent = errors.New("git2c: get blob raw content failed")
	ErrRevwalk = errors.New("git2c: revwalk failed")
	ErrRevwalkPushHead = errors.New("git2c: revwalk push head failed")
	ErrBareProto = errors.New("git2c: bare protocol error")
)

// errnoToError is the translation table from git2d errno values to their
// sentinel errors. Errno 0 maps to Success (nil).
var errnoToError = map[uint]error{
	0:  Success,
	3:  ErrPath,
	4:  ErrRevparse,
	5:  ErrReadme,
	6:  ErrBlobExpected,
	7:  ErrEntryToObject,
	8:  ErrBlobRawContent,
	9:  ErrRevwalk,
	10: ErrRevwalkPushHead,
	11: ErrBareProto,
}

// Perror translates a git2d errno into its sentinel error. Unrecognized
// values (including 1 and 2, which have no mapping) yield ErrUnknown.
func Perror(errno uint) error {
	if err, ok := errnoToError[errno]; ok {
		return err
	}
	return ErrUnknown
}
diff --git a/forged/internal/humanize/bytes.go b/forged/internal/humanize/bytes.go
new file mode 100644
index 0000000..bea504c
--- /dev/null
+++ b/forged/internal/humanize/bytes.go
@@ -0,0 +1,35 @@
+// SPDX-FileCopyrightText: Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// Package humanize provides functions to convert numbers into human-readable formats.
+package humanize
+
+import (
+ "fmt"
+ "math"
+)
+
// IBytes produces a human readable representation of an IEC size.
func IBytes(s uint64) string {
	return humanateBytes(s, 1024, []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"})
}

// humanateBytes scales s down by powers of base and renders it with the
// matching unit suffix, rounded to at most one decimal place.
func humanateBytes(s uint64, base float64, sizes []string) string {
	// Values below 10 are printed verbatim in bytes.
	if s < 10 {
		return fmt.Sprintf("%d B", s)
	}
	exp := math.Floor(logn(float64(s), base))
	unit := sizes[int(exp)]
	// Round half-up to one decimal place.
	scaled := math.Floor(float64(s)/math.Pow(base, exp)*10+0.5) / 10
	format := "%.1f %s"
	if scaled >= 10 {
		// Three or more significant digits: drop the decimal place.
		format = "%.0f %s"
	}
	return fmt.Sprintf(format, scaled, unit)
}

// logn returns the logarithm of n in base b.
func logn(n, b float64) float64 {
	return math.Log(n) / math.Log(b)
}
diff --git a/forged/internal/irc/bot.go b/forged/internal/irc/bot.go
new file mode 100644
index 0000000..1c6d32f
--- /dev/null
+++ b/forged/internal/irc/bot.go
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// Package irc provides basic IRC bot functionality.
+package irc
+
+import (
+ "crypto/tls"
+ "log/slog"
+ "net"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
// Config contains IRC connection and identity settings for the bot.
// This should usually be a part of the primary config struct.
type Config struct {
	Net   string `scfg:"net"`   // network for net.Dial / tls.Dial, e.g. "tcp"
	Addr  string `scfg:"addr"`  // server address (host:port)
	TLS   bool   `scfg:"tls"`   // dial with TLS when true
	SendQ uint   `scfg:"sendq"` // capacity of the buffered send queue
	Nick  string `scfg:"nick"`
	User  string `scfg:"user"`
	Gecos string `scfg:"gecos"` // realname
}

// Bot represents an IRC bot client that handles events and allows for sending messages.
type Bot struct {
	config *Config
	// NOTE(review): both channels are created by ConnectLoop; until it runs
	// they are nil, so Send/SendDirect would block forever — confirm callers
	// always start ConnectLoop first.
	ircSendBuffered   chan string
	ircSendDirectChan chan misc.ErrorBack[string]
}
+
+// NewBot creates a new Bot instance using the provided configuration.
+func NewBot(c *Config) (b *Bot) {
+ b = &Bot{
+ config: c,
+ }
+ return
+}
+
// Connect establishes a new IRC session and starts handling incoming and outgoing messages.
// This method blocks until an error occurs or the connection is closed.
func (b *Bot) Connect() error {
	var err error
	var underlyingConn net.Conn
	if b.config.TLS {
		underlyingConn, err = tls.Dial(b.config.Net, b.config.Addr, nil)
	} else {
		underlyingConn, err = net.Dial(b.config.Net, b.config.Addr)
	}
	if err != nil {
		return err
	}
	defer underlyingConn.Close()

	conn := NewConn(underlyingConn)

	// All outgoing lines go through this helper so they are logged uniformly
	// and terminated with CRLF.
	logAndWriteLn := func(s string) (n int, err error) {
		slog.Debug("irc tx", "line", s)
		return conn.WriteString(s + "\r\n")
	}

	// Registration: NICK then USER.
	_, err = logAndWriteLn("NICK " + b.config.Nick)
	if err != nil {
		return err
	}
	_, err = logAndWriteLn("USER " + b.config.User + " 0 * :" + b.config.Gecos)
	if err != nil {
		return err
	}

	// Reader goroutine: parses incoming lines and handles protocol events;
	// reports its first error on readLoopError and exits.
	readLoopError := make(chan error)
	writeLoopAbort := make(chan struct{})
	go func() {
		for {
			// The abort channel is only polled between reads; see the
			// NOTE(review) below about the corresponding send.
			select {
			case <-writeLoopAbort:
				return
			default:
			}

			msg, line, err := conn.ReadMessage()
			if err != nil {
				readLoopError <- err
				return
			}

			slog.Debug("irc rx", "line", line)

			switch msg.Command {
			case "001":
				// RPL_WELCOME: registration finished; join the channel.
				_, err = logAndWriteLn("JOIN #chat")
				if err != nil {
					readLoopError <- err
					return
				}
			case "PING":
				_, err = logAndWriteLn("PONG :" + msg.Args[0])
				if err != nil {
					readLoopError <- err
					return
				}
			case "JOIN":
				c, ok := msg.Source.(Client)
				if !ok {
					// NOTE(review): on a failed assertion c is the zero
					// Client, so the Nick comparison below still runs with an
					// empty nick — confirm this fall-through is intended.
					slog.Error("unable to convert source of JOIN to client")
				}
				if c.Nick != b.config.Nick {
					continue
				}
			default:
			}
		}
	}()

	// Writer loop: multiplexes buffered sends, direct sends, and reader
	// failure.
	for {
		select {
		case err = <-readLoopError:
			return err
		case line := <-b.ircSendBuffered:
			_, err = logAndWriteLn(line)
			if err != nil {
				// Try to put the line back for the next session; drop it if
				// the queue is full.
				select {
				case b.ircSendBuffered <- line:
				default:
					slog.Error("unable to requeue message", "line", line)
				}
				// NOTE(review): this send is unbuffered and the reader only
				// polls writeLoopAbort between reads; if the reader is blocked
				// inside ReadMessage this can block until the read fails —
				// verify the connection teardown always unblocks it.
				writeLoopAbort <- struct{}{}
				return err
			}
		case lineErrorBack := <-b.ircSendDirectChan:
			_, err = logAndWriteLn(lineErrorBack.Content)
			lineErrorBack.ErrorChan <- err
			if err != nil {
				// NOTE(review): same unbuffered-send caveat as above.
				writeLoopAbort <- struct{}{}
				return err
			}
		}
	}
}
+
// SendDirect sends an IRC message directly to the connection and bypasses
// the buffering system. It blocks until the write has been attempted and
// returns the write error, if any.
//
// NOTE(review): ircSendDirectChan is nil until ConnectLoop runs, and nothing
// receives from it while the connection is down — callers may block
// indefinitely in those states.
func (b *Bot) SendDirect(line string) error {
	ech := make(chan error, 1)

	b.ircSendDirectChan <- misc.ErrorBack[string]{
		Content:   line,
		ErrorChan: ech,
	}

	return <-ech
}

// Send queues a message to be sent asynchronously via the buffered send queue.
// If the queue is full, the message is dropped and an error is logged.
func (b *Bot) Send(line string) {
	select {
	case b.ircSendBuffered <- line:
	default:
		slog.Error("irc sendq full", "line", line)
	}
}

// ConnectLoop continuously attempts to maintain an IRC session.
// If the connection drops, it automatically retries with no delay.
//
// NOTE(review): there is no backoff between attempts, so a persistently
// unreachable server causes a tight reconnect loop — confirm this is
// acceptable or add a delay.
func (b *Bot) ConnectLoop() {
	b.ircSendBuffered = make(chan string, b.config.SendQ)
	b.ircSendDirectChan = make(chan misc.ErrorBack[string])

	for {
		err := b.Connect()
		slog.Error("irc session error", "error", err)
	}
}
diff --git a/forged/internal/irc/conn.go b/forged/internal/irc/conn.go
new file mode 100644
index 0000000..b975b72
--- /dev/null
+++ b/forged/internal/irc/conn.go
@@ -0,0 +1,49 @@
+package irc
+
+import (
+ "bufio"
+ "net"
+ "slices"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
// Conn wraps a net.Conn with a buffered reader for line-oriented IRC I/O.
type Conn struct {
	netConn   net.Conn
	bufReader *bufio.Reader
}

// NewConn wraps netConn for reading and writing IRC messages. Reads go
// through a buffered reader; writes go directly to the connection.
func NewConn(netConn net.Conn) Conn {
	return Conn{
		netConn:   netConn,
		bufReader: bufio.NewReader(netConn),
	}
}
+
+func (c *Conn) ReadMessage() (msg Message, line string, err error) {
+ raw, err := c.bufReader.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+
+ if raw[len(raw)-1] == '\n' {
+ raw = raw[:len(raw)-1]
+ }
+ if raw[len(raw)-1] == '\r' {
+ raw = raw[:len(raw)-1]
+ }
+
+ lineBytes := slices.Clone(raw)
+ line = misc.BytesToString(lineBytes)
+ msg, err = Parse(lineBytes)
+
+ return
+}
+
// Write writes raw bytes directly to the underlying connection (the read
// buffer is not involved).
func (c *Conn) Write(p []byte) (n int, err error) {
	return c.netConn.Write(p)
}

// WriteString writes s directly to the underlying connection.
// NOTE(review): misc.StringToBytes presumably performs a zero-copy
// conversion, so the resulting slice must not be mutated — verify against
// that helper's contract.
func (c *Conn) WriteString(s string) (n int, err error) {
	return c.netConn.Write(misc.StringToBytes(s))
}
diff --git a/forged/internal/irc/errors.go b/forged/internal/irc/errors.go
new file mode 100644
index 0000000..3506c70
--- /dev/null
+++ b/forged/internal/irc/errors.go
@@ -0,0 +1,8 @@
+package irc
+
+import "errors"
+
var (
	// ErrInvalidIRCv3Tag reports a malformed IRCv3 message tag.
	ErrInvalidIRCv3Tag = errors.New("invalid ircv3 tag")
	// ErrMalformedMsg reports a raw IRC line that does not parse as a message.
	ErrMalformedMsg = errors.New("malformed irc message")
)
diff --git a/forged/internal/irc/message.go b/forged/internal/irc/message.go
new file mode 100644
index 0000000..84b6867
--- /dev/null
+++ b/forged/internal/irc/message.go
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2018-2024 luk3yx <https://luk3yx.github.io>
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package irc
+
+import (
+ "bytes"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
+type Message struct {
+ Command string
+ Source Source
+ Tags map[string]string
+ Args []string
+}
+
+// Parse parses a raw IRC message. All returned strings are borrowed from the input byte slice.
+func Parse(raw []byte) (msg Message, err error) {
+ sp := bytes.Split(raw, []byte{' '}) // TODO: Use bytes.Cut instead here
+
+ if bytes.HasPrefix(sp[0], []byte{'@'}) { // TODO: Check size manually
+ if len(sp[0]) < 2 {
+ err = ErrMalformedMsg
+ return
+ }
+ sp[0] = sp[0][1:]
+
+ msg.Tags, err = tagsToMap(sp[0])
+ if err != nil {
+ return
+ }
+
+ if len(sp) < 2 {
+ err = ErrMalformedMsg
+ return
+ }
+ sp = sp[1:]
+ } else {
+ msg.Tags = nil // TODO: Is a nil map the correct thing to use here?
+ }
+
+ if bytes.HasPrefix(sp[0], []byte{':'}) { // TODO: Check size manually
+ if len(sp[0]) < 2 {
+ err = ErrMalformedMsg
+ return
+ }
+ sp[0] = sp[0][1:]
+
+ msg.Source = parseSource(sp[0])
+
+ if len(sp) < 2 {
+ err = ErrMalformedMsg
+ return
+ }
+ sp = sp[1:]
+ }
+
+ msg.Command = misc.BytesToString(sp[0])
+ if len(sp) < 2 {
+ return
+ }
+ sp = sp[1:]
+
+ for i := 0; i < len(sp); i++ {
+ if len(sp[i]) == 0 {
+ continue
+ }
+ if sp[i][0] == ':' {
+ if len(sp[i]) < 2 {
+ sp[i] = []byte{}
+ } else {
+ sp[i] = sp[i][1:]
+ }
+ msg.Args = append(msg.Args, misc.BytesToString(bytes.Join(sp[i:], []byte{' '})))
+ // TODO: Avoid Join by not using sp in the first place
+ break
+ }
+ msg.Args = append(msg.Args, misc.BytesToString(sp[i]))
+ }
+
+ return
+}
+
+var ircv3TagEscapes = map[byte]byte{ //nolint:gochecknoglobals
+ ':': ';',
+ 's': ' ',
+ 'r': '\r',
+ 'n': '\n',
+}
+
+func tagsToMap(raw []byte) (tags map[string]string, err error) {
+ tags = make(map[string]string)
+ for rawTag := range bytes.SplitSeq(raw, []byte{';'}) {
+ key, value, found := bytes.Cut(rawTag, []byte{'='})
+ if !found {
+ err = ErrInvalidIRCv3Tag
+ return
+ }
+ if len(value) == 0 {
+ tags[misc.BytesToString(key)] = ""
+ } else {
+ if !bytes.Contains(value, []byte{'\\'}) {
+ tags[misc.BytesToString(key)] = misc.BytesToString(value)
+ } else {
+ valueUnescaped := bytes.NewBuffer(make([]byte, 0, len(value)))
+ for i := 0; i < len(value); i++ {
+ if value[i] == '\\' {
+ i++
+ byteUnescaped, ok := ircv3TagEscapes[value[i]]
+ if !ok {
+ byteUnescaped = value[i]
+ }
+ valueUnescaped.WriteByte(byteUnescaped)
+ } else {
+ valueUnescaped.WriteByte(value[i])
+ }
+ }
+ tags[misc.BytesToString(key)] = misc.BytesToString(valueUnescaped.Bytes())
+ }
+ }
+ }
+ return
+}
diff --git a/forged/internal/irc/source.go b/forged/internal/irc/source.go
new file mode 100644
index 0000000..d955f45
--- /dev/null
+++ b/forged/internal/irc/source.go
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package irc
+
+import (
+ "bytes"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
+type Source interface {
+ AsSourceString() string
+}
+
+func parseSource(s []byte) Source {
+ nick, userhost, found := bytes.Cut(s, []byte{'!'})
+ if !found {
+ return Server{name: misc.BytesToString(s)}
+ }
+
+ user, host, found := bytes.Cut(userhost, []byte{'@'})
+ if !found {
+ return Server{name: misc.BytesToString(s)}
+ }
+
+ return Client{
+ Nick: misc.BytesToString(nick),
+ User: misc.BytesToString(user),
+ Host: misc.BytesToString(host),
+ }
+}
+
+type Server struct {
+ name string
+}
+
+func (s Server) AsSourceString() string {
+ return s.name
+}
+
+type Client struct {
+ Nick string
+ User string
+ Host string
+}
+
+func (c Client) AsSourceString() string {
+ return c.Nick + "!" + c.User + "@" + c.Host
+}
diff --git a/forged/internal/misc/back.go b/forged/internal/misc/back.go
new file mode 100644
index 0000000..5351359
--- /dev/null
+++ b/forged/internal/misc/back.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package misc
+
+// ErrorBack wraps a value and a channel for communicating an associated error.
+// Typically used to get an error response after sending data across a channel.
+type ErrorBack[T any] struct {
+ Content T
+ ErrorChan chan error
+}
diff --git a/forged/internal/misc/deploy.go b/forged/internal/misc/deploy.go
new file mode 100644
index 0000000..3ee5f92
--- /dev/null
+++ b/forged/internal/misc/deploy.go
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package misc
+
+import (
+ "io"
+ "io/fs"
+ "os"
+)
+
+// DeployBinary copies the contents of a binary file to the target destination path.
+// The destination file is created with executable permissions.
+func DeployBinary(src fs.File, dst string) (err error) {
+ var dstFile *os.File
+ if dstFile, err = os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755); err != nil {
+ return err
+ }
+ defer dstFile.Close()
+ _, err = io.Copy(dstFile, src)
+ return err
+}
diff --git a/forged/internal/misc/iter.go b/forged/internal/misc/iter.go
new file mode 100644
index 0000000..61a96f4
--- /dev/null
+++ b/forged/internal/misc/iter.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package misc
+
+import "iter"
+
+// IterSeqLimit returns an iterator equivalent to the supplied one, but stops
+// after n iterations. NOTE(review): n == 0 wraps n-1 (uint) and yields everything — confirm intent.
+func IterSeqLimit[T any](s iter.Seq[T], n uint) iter.Seq[T] {
+ return func(yield func(T) bool) {
+ var iterations uint
+ for v := range s {
+ if iterations > n-1 {
+ return
+ }
+ if !yield(v) {
+ return
+ }
+ iterations++
+ }
+ }
+}
diff --git a/forged/internal/misc/misc.go b/forged/internal/misc/misc.go
new file mode 100644
index 0000000..398020a
--- /dev/null
+++ b/forged/internal/misc/misc.go
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// Package misc provides miscellaneous functions and other definitions.
+package misc
+
+import "strings"
+
+// SliceContainsNewlines returns true if and only if the given slice contains
+// one or more strings that contain newlines.
+func SliceContainsNewlines(s []string) bool {
+ for _, v := range s {
+ if strings.Contains(v, "\n") {
+ return true
+ }
+ }
+ return false
+}
diff --git a/forged/internal/misc/panic.go b/forged/internal/misc/panic.go
new file mode 100644
index 0000000..34c49c5
--- /dev/null
+++ b/forged/internal/misc/panic.go
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package misc
+
+// FirstOrPanic returns the value or panics if the error is non-nil.
+func FirstOrPanic[T any](v T, err error) T {
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// NoneOrPanic panics if the provided error is non-nil.
+func NoneOrPanic(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/forged/internal/misc/trivial.go b/forged/internal/misc/trivial.go
new file mode 100644
index 0000000..e59c17e
--- /dev/null
+++ b/forged/internal/misc/trivial.go
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package misc
+
+import (
+ "net/url"
+ "strings"
+)
+
+// These are all trivial functions that are intended to be used in HTML
+// templates.
+
+// FirstLine returns the first line of a string.
+func FirstLine(s string) string {
+ before, _, _ := strings.Cut(s, "\n")
+ return before
+}
+
+// PathEscape escapes the input as a URL path segment.
+func PathEscape(s string) string {
+ return url.PathEscape(s)
+}
+
+// QueryEscape escapes the input as a URL query segment.
+func QueryEscape(s string) string {
+ return url.QueryEscape(s)
+}
+
+// Dereference dereferences a pointer.
+func Dereference[T any](p *T) T {
+ return *p
+}
+
+// DereferenceOrZero dereferences a pointer. If the pointer is nil, the zero
+// value of its associated type is returned instead.
+func DereferenceOrZero[T any](p *T) T {
+ if p != nil {
+ return *p
+ }
+ var z T
+ return z
+}
+
+// Minus subtracts two numbers.
+func Minus(a, b int) int {
+ return a - b
+}
diff --git a/forged/internal/misc/unsafe.go b/forged/internal/misc/unsafe.go
new file mode 100644
index 0000000..6c2192f
--- /dev/null
+++ b/forged/internal/misc/unsafe.go
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package misc
+
+import "unsafe"
+
+// StringToBytes converts a string to a byte slice without copying the string.
+// Memory is borrowed from the string.
+// The resulting byte slice must not be modified in any form.
+func StringToBytes(s string) (bytes []byte) {
+ return unsafe.Slice(unsafe.StringData(s), len(s))
+}
+
+// BytesToString converts a byte slice to a string without copying the bytes.
+// Memory is borrowed from the byte slice.
+// The source byte slice must not be modified.
+func BytesToString(b []byte) string {
+ return unsafe.String(unsafe.SliceData(b), len(b))
+}
diff --git a/forged/internal/misc/url.go b/forged/internal/misc/url.go
new file mode 100644
index 0000000..0f9dc04
--- /dev/null
+++ b/forged/internal/misc/url.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package misc
+
+import (
+ "errors"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+var (
+ ErrDupRefSpec = errors.New("duplicate ref spec")
+ ErrNoRefSpec = errors.New("no ref spec")
+)
+
+// GetParamRefTypeName looks at the query parameters in an HTTP request and
+// returns its ref name and type, if any.
+func GetParamRefTypeName(request *http.Request) (retRefType, retRefName string, err error) {
+ rawQuery := request.URL.RawQuery
+ queryValues, err := url.ParseQuery(rawQuery)
+ if err != nil {
+ return
+ }
+ done := false
+ for _, refType := range []string{"commit", "branch", "tag"} {
+ refName, ok := queryValues[refType]
+ if ok {
+ if done {
+ err = ErrDupRefSpec
+ return
+ }
+ done = true
+ if len(refName) != 1 {
+ err = ErrDupRefSpec
+ return
+ }
+ retRefName = refName[0]
+ retRefType = refType
+ }
+ }
+ if !done {
+ err = ErrNoRefSpec
+ }
+ return
+}
+
+// ParseReqURI parses an HTTP request URL, and returns a slice of path segments
+// and the query parameters. It handles %2F correctly.
+func ParseReqURI(requestURI string) (segments []string, params url.Values, err error) {
+ path, paramsStr, _ := strings.Cut(requestURI, "?")
+
+ segments, err = PathToSegments(path)
+ if err != nil {
+ return
+ }
+
+ params, err = url.ParseQuery(paramsStr)
+ return
+}
+
+// PathToSegments splits a path into unescaped segments. It handles %2F correctly.
+func PathToSegments(path string) (segments []string, err error) {
+ segments = strings.Split(strings.TrimPrefix(path, "/"), "/")
+
+ for i, segment := range segments {
+ segments[i], err = url.PathUnescape(segment)
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+// RedirectDir returns true and redirects the user to a version of the URL with
+// a trailing slash, if and only if the request URL does not already have a
+// trailing slash.
+func RedirectDir(writer http.ResponseWriter, request *http.Request) bool {
+ requestURI := request.RequestURI
+
+ pathEnd := strings.IndexAny(requestURI, "?#")
+ var path, rest string
+ if pathEnd == -1 {
+ path = requestURI
+ } else {
+ path = requestURI[:pathEnd]
+ rest = requestURI[pathEnd:]
+ }
+
+ if !strings.HasSuffix(path, "/") {
+ http.Redirect(writer, request, path+"/"+rest, http.StatusSeeOther)
+ return true
+ }
+ return false
+}
+
+// RedirectNoDir returns true and redirects the user to a version of the URL
+// without a trailing slash, if and only if the request URL has a trailing
+// slash.
+func RedirectNoDir(writer http.ResponseWriter, request *http.Request) bool {
+ requestURI := request.RequestURI
+
+ pathEnd := strings.IndexAny(requestURI, "?#")
+ var path, rest string
+ if pathEnd == -1 {
+ path = requestURI
+ } else {
+ path = requestURI[:pathEnd]
+ rest = requestURI[pathEnd:]
+ }
+
+ if strings.HasSuffix(path, "/") {
+ http.Redirect(writer, request, strings.TrimSuffix(path, "/")+rest, http.StatusSeeOther)
+ return true
+ }
+ return false
+}
+
+// RedirectUnconditionally unconditionally redirects the user back to the
+// current page while preserving query parameters.
+func RedirectUnconditionally(writer http.ResponseWriter, request *http.Request) {
+ requestURI := request.RequestURI
+
+ pathEnd := strings.IndexAny(requestURI, "?#")
+ var path, rest string
+ if pathEnd == -1 {
+ path = requestURI
+ } else {
+ path = requestURI[:pathEnd]
+ rest = requestURI[pathEnd:]
+ }
+
+ http.Redirect(writer, request, path+rest, http.StatusSeeOther)
+}
+
+// SegmentsToURL joins URL segments to the path component of a URL.
+// Each segment is escaped properly first.
+func SegmentsToURL(segments []string) string {
+ for i, segment := range segments {
+ segments[i] = url.PathEscape(segment)
+ }
+ return strings.Join(segments, "/")
+}
+
+// AnyContain returns true if and only if ss contains a string that contains c.
+func AnyContain(ss []string, c string) bool {
+ for _, s := range ss {
+ if strings.Contains(s, c) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/forged/internal/oldgit/fmtpatch.go b/forged/internal/oldgit/fmtpatch.go
new file mode 100644
index 0000000..79be5d8
--- /dev/null
+++ b/forged/internal/oldgit/fmtpatch.go
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package oldgit
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/go-git/go-git/v5/plumbing/object"
+)
+
+// FmtCommitPatch formats a commit object as if it was returned by
+// git-format-patch.
+func FmtCommitPatch(commit *object.Commit) (final string, err error) {
+ var patch *object.Patch
+ var buf bytes.Buffer
+ var author object.Signature
+ var date string
+ var commitTitle, commitDetails string
+
+ if _, patch, err = CommitToPatch(commit); err != nil {
+ return "", err
+ }
+
+ author = commit.Author
+ date = author.When.Format(time.RFC1123Z)
+
+ commitTitle, commitDetails, _ = strings.Cut(commit.Message, "\n")
+
+ // This date is hardcoded in Git.
+ fmt.Fprintf(&buf, "From %s Mon Sep 17 00:00:00 2001\n", commit.Hash)
+ fmt.Fprintf(&buf, "From: %s <%s>\n", author.Name, author.Email)
+ fmt.Fprintf(&buf, "Date: %s\n", date)
+ fmt.Fprintf(&buf, "Subject: [PATCH] %s\n\n", commitTitle)
+
+ if commitDetails != "" {
+ commitDetails1, commitDetails2, _ := strings.Cut(commitDetails, "\n")
+ if strings.TrimSpace(commitDetails1) == "" {
+ commitDetails = commitDetails2
+ }
+ buf.WriteString(commitDetails)
+ buf.WriteString("\n")
+ }
+ buf.WriteString("---\n")
+ fmt.Fprint(&buf, patch.Stats().String())
+ fmt.Fprintln(&buf)
+
+ buf.WriteString(patch.String())
+
+ fmt.Fprintf(&buf, "\n-- \n2.48.1\n")
+
+ return buf.String(), nil
+}
diff --git a/forged/internal/oldgit/oldgit.go b/forged/internal/oldgit/oldgit.go
new file mode 100644
index 0000000..4c99d6a
--- /dev/null
+++ b/forged/internal/oldgit/oldgit.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// Package oldgit provides deprecated functions that depend on go-git.
+package oldgit
diff --git a/forged/internal/oldgit/patch.go b/forged/internal/oldgit/patch.go
new file mode 100644
index 0000000..fc8ef98
--- /dev/null
+++ b/forged/internal/oldgit/patch.go
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package oldgit
+
+import (
+ "errors"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/object"
+)
+
+// CommitToPatch creates an [object.Patch] from the first parent of a given
+// [object.Commit].
+//
+// TODO: This function should be deprecated as it only diffs with the first
+// parent and does not correctly handle merge commits.
+func CommitToPatch(commit *object.Commit) (parentCommitHash plumbing.Hash, patch *object.Patch, err error) {
+ var parentCommit *object.Commit
+ var commitTree *object.Tree
+
+ parentCommit, err = commit.Parent(0)
+ switch {
+ case errors.Is(err, object.ErrParentNotFound):
+ if commitTree, err = commit.Tree(); err != nil {
+ return
+ }
+ if patch, err = NullTree.Patch(commitTree); err != nil {
+ return
+ }
+ case err != nil:
+ return
+ default:
+ parentCommitHash = parentCommit.Hash
+ if patch, err = parentCommit.Patch(commit); err != nil {
+ return
+ }
+ }
+ return
+}
+
+// NullTree is a tree object that is empty and has no hash.
+var NullTree object.Tree //nolint:gochecknoglobals
diff --git a/forged/internal/render/chroma.go b/forged/internal/render/chroma.go
new file mode 100644
index 0000000..64bfde0
--- /dev/null
+++ b/forged/internal/render/chroma.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package render
+
+import (
+ "bytes"
+ "html/template"
+
+ chromaHTML "github.com/alecthomas/chroma/v2/formatters/html"
+ chromaLexers "github.com/alecthomas/chroma/v2/lexers"
+ chromaStyles "github.com/alecthomas/chroma/v2/styles"
+)
+
+// Highlight returns HTML with syntax highlighting for the given file content,
+// using Chroma. The lexer is selected based on the filename.
+// If tokenization or formatting fails, a fallback <pre> block is returned with the error.
+func Highlight(filename, content string) template.HTML {
+ lexer := chromaLexers.Match(filename)
+ if lexer == nil {
+ lexer = chromaLexers.Fallback
+ }
+
+ iterator, err := lexer.Tokenise(nil, content)
+ if err != nil {
+ return template.HTML("<pre>Error tokenizing file: " + err.Error() + "</pre>") //#nosec G203
+ }
+
+ var buf bytes.Buffer
+ style := chromaStyles.Get("autumn")
+ formatter := chromaHTML.New(
+ chromaHTML.WithClasses(true),
+ chromaHTML.TabWidth(8),
+ )
+
+ if err := formatter.Format(&buf, style, iterator); err != nil {
+ return template.HTML("<pre>Error formatting file: " + err.Error() + "</pre>") //#nosec G203
+ }
+
+ return template.HTML(buf.Bytes()) //#nosec G203
+}
diff --git a/forged/internal/render/escape.go b/forged/internal/render/escape.go
new file mode 100644
index 0000000..031e333
--- /dev/null
+++ b/forged/internal/render/escape.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package render
+
+import (
+ "html"
+ "html/template"
+)
+
+// EscapeHTML just escapes a string and wraps it in [template.HTML].
+func EscapeHTML(s string) template.HTML {
+ return template.HTML(html.EscapeString(s)) //#nosec G203
+}
diff --git a/forged/internal/render/readme.go b/forged/internal/render/readme.go
new file mode 100644
index 0000000..fa1be7e
--- /dev/null
+++ b/forged/internal/render/readme.go
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package render
+
+import (
+ "bytes"
+ "html"
+ "html/template"
+ "strings"
+
+ "github.com/microcosm-cc/bluemonday"
+ "github.com/yuin/goldmark"
+ "github.com/yuin/goldmark/extension"
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
+var markdownConverter = goldmark.New(goldmark.WithExtensions(extension.GFM)) //nolint:gochecknoglobals
+
+// Readme renders and sanitizes README content from a byte slice and filename.
+func Readme(data []byte, filename string) (string, template.HTML) {
+ switch strings.ToLower(filename) {
+ case "readme":
+ return "README", template.HTML("<pre>" + html.EscapeString(misc.BytesToString(data)) + "</pre>") //#nosec G203
+ case "readme.md":
+ var buf bytes.Buffer
+ if err := markdownConverter.Convert(data, &buf); err != nil {
+ return "Error fetching README", EscapeHTML("Unable to render README: " + err.Error())
+ }
+ return "README.md", template.HTML(bluemonday.UGCPolicy().SanitizeBytes(buf.Bytes())) //#nosec G203
+ default:
+ return filename, template.HTML("<pre>" + html.EscapeString(misc.BytesToString(data)) + "</pre>") //#nosec G203
+ }
+}
diff --git a/forged/internal/render/render.go b/forged/internal/render/render.go
new file mode 100644
index 0000000..465e410
--- /dev/null
+++ b/forged/internal/render/render.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// Package render provides functions to render code and READMEs.
+package render
diff --git a/forged/internal/scfg/.golangci.yaml b/forged/internal/scfg/.golangci.yaml
new file mode 100644
index 0000000..59f1970
--- /dev/null
+++ b/forged/internal/scfg/.golangci.yaml
@@ -0,0 +1,26 @@
+linters:
+ enable-all: true
+ disable:
+ - perfsprint
+ - wsl
+ - varnamelen
+ - nlreturn
+ - exhaustruct
+ - wrapcheck
+ - lll
+ - exhaustive
+ - intrange
+ - godox
+ - nestif
+ - err113
+ - staticcheck
+ - errorlint
+ - cyclop
+ - nonamedreturns
+ - funlen
+ - gochecknoglobals
+ - tenv
+
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 0
diff --git a/forged/internal/scfg/reader.go b/forged/internal/scfg/reader.go
new file mode 100644
index 0000000..6a2bedc
--- /dev/null
+++ b/forged/internal/scfg/reader.go
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>
+
+package scfg
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// This limits the max block nesting depth to prevent stack overflows.
+const maxNestingDepth = 1000
+
+// Load loads a configuration file.
+func Load(path string) (Block, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return Read(f)
+}
+
+// Read parses a configuration file from an io.Reader.
+func Read(r io.Reader) (Block, error) {
+ scanner := bufio.NewScanner(r)
+
+ dec := decoder{scanner: scanner}
+ block, closingBrace, err := dec.readBlock()
+ if err != nil {
+ return nil, err
+ } else if closingBrace {
+ return nil, fmt.Errorf("line %v: unexpected '}'", dec.lineno)
+ }
+
+ return block, scanner.Err()
+}
+
+type decoder struct {
+ scanner *bufio.Scanner
+ lineno int
+ blockDepth int
+}
+
+// readBlock reads a block. closingBrace is true if parsing stopped on '}'
+// (otherwise, it stopped on Scanner.Scan).
+func (dec *decoder) readBlock() (block Block, closingBrace bool, err error) {
+ dec.blockDepth++
+ defer func() {
+ dec.blockDepth--
+ }()
+
+ if dec.blockDepth >= maxNestingDepth {
+ return nil, false, fmt.Errorf("exceeded max block depth")
+ }
+
+ for dec.scanner.Scan() {
+ dec.lineno++
+
+ l := dec.scanner.Text()
+ words, err := splitWords(l)
+ if err != nil {
+ return nil, false, fmt.Errorf("line %v: %v", dec.lineno, err)
+ } else if len(words) == 0 {
+ continue
+ }
+
+ if len(words) == 1 && l[len(l)-1] == '}' {
+ closingBrace = true
+ break
+ }
+
+ var d *Directive
+ if words[len(words)-1] == "{" && l[len(l)-1] == '{' {
+ words = words[:len(words)-1]
+
+ var name string
+ params := words
+ if len(words) > 0 {
+ name, params = words[0], words[1:]
+ }
+
+ startLineno := dec.lineno
+ childBlock, childClosingBrace, err := dec.readBlock()
+ if err != nil {
+ return nil, false, err
+ } else if !childClosingBrace {
+ return nil, false, fmt.Errorf("line %v: unterminated block", startLineno)
+ }
+
+ // Allows callers to tell apart "no block" and "empty block"
+ if childBlock == nil {
+ childBlock = Block{}
+ }
+
+ d = &Directive{Name: name, Params: params, Children: childBlock, lineno: dec.lineno}
+ } else {
+ d = &Directive{Name: words[0], Params: words[1:], lineno: dec.lineno}
+ }
+ block = append(block, d)
+ }
+
+ return block, closingBrace, nil
+}
+
+func splitWords(l string) ([]string, error) {
+ var (
+ words []string
+ sb strings.Builder
+ escape bool
+ quote rune
+ wantWSP bool
+ )
+ for _, ch := range l {
+ switch {
+ case escape:
+ sb.WriteRune(ch)
+ escape = false
+ case wantWSP && (ch != ' ' && ch != '\t'):
+ return words, fmt.Errorf("atom not allowed after quoted string")
+ case ch == '\\':
+ escape = true
+ case quote != 0 && ch == quote:
+ quote = 0
+ wantWSP = true
+ if sb.Len() == 0 {
+ words = append(words, "")
+ }
+ case quote == 0 && len(words) == 0 && sb.Len() == 0 && ch == '#':
+ return nil, nil
+ case quote == 0 && (ch == '\'' || ch == '"'):
+ if sb.Len() > 0 {
+ return words, fmt.Errorf("quoted string not allowed after atom")
+ }
+ quote = ch
+ case quote == 0 && (ch == ' ' || ch == '\t'):
+ if sb.Len() > 0 {
+ words = append(words, sb.String())
+ }
+ sb.Reset()
+ wantWSP = false
+ default:
+ sb.WriteRune(ch)
+ }
+ }
+ if quote != 0 {
+ return words, fmt.Errorf("unterminated quoted string")
+ }
+ if sb.Len() > 0 {
+ words = append(words, sb.String())
+ }
+ return words, nil
+}
diff --git a/forged/internal/scfg/scfg.go b/forged/internal/scfg/scfg.go
new file mode 100644
index 0000000..4533e63
--- /dev/null
+++ b/forged/internal/scfg/scfg.go
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>
+
+// Package scfg parses and formats configuration files.
+// Note that this fork of scfg behaves differently from upstream scfg.
+package scfg
+
+import (
+ "fmt"
+)
+
+// Block is a list of directives.
+type Block []*Directive
+
+// GetAll returns a list of directives with the provided name.
+func (blk Block) GetAll(name string) []*Directive {
+ l := make([]*Directive, 0, len(blk))
+ for _, child := range blk {
+ if child.Name == name {
+ l = append(l, child)
+ }
+ }
+ return l
+}
+
+// Get returns the first directive with the provided name.
+func (blk Block) Get(name string) *Directive {
+ for _, child := range blk {
+ if child.Name == name {
+ return child
+ }
+ }
+ return nil
+}
+
+// Directive is a configuration directive.
+type Directive struct {
+ Name string
+ Params []string
+
+ Children Block
+
+ lineno int
+}
+
+// ParseParams extracts parameters from the directive. It errors out if the
+// user hasn't provided enough parameters.
+func (d *Directive) ParseParams(params ...*string) error {
+ if len(d.Params) < len(params) {
+ return fmt.Errorf("directive %q: want %v params, got %v", d.Name, len(params), len(d.Params))
+ }
+ for i, ptr := range params {
+ if ptr == nil {
+ continue
+ }
+ *ptr = d.Params[i]
+ }
+ return nil
+}
diff --git a/forged/internal/scfg/struct.go b/forged/internal/scfg/struct.go
new file mode 100644
index 0000000..98ec943
--- /dev/null
+++ b/forged/internal/scfg/struct.go
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>
+
+package scfg
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// structInfo contains scfg metadata for structs.
+type structInfo struct {
+ param int // index of field storing parameters
+ children map[string]int // indices of fields storing child directives
+}
+
+var (
+ structCacheMutex sync.Mutex
+ structCache = make(map[reflect.Type]*structInfo)
+)
+
+func getStructInfo(t reflect.Type) (*structInfo, error) {
+ structCacheMutex.Lock()
+ defer structCacheMutex.Unlock()
+
+ if info := structCache[t]; info != nil {
+ return info, nil
+ }
+
+ info := &structInfo{
+ param: -1,
+ children: make(map[string]int),
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Anonymous {
+ return nil, fmt.Errorf("scfg: anonymous struct fields are not supported")
+ } else if !f.IsExported() {
+ continue
+ }
+
+ tag := f.Tag.Get("scfg")
+ parts := strings.Split(tag, ",")
+ k, options := parts[0], parts[1:]
+ if k == "-" {
+ continue
+ } else if k == "" {
+ k = f.Name
+ }
+
+ isParam := false
+ for _, opt := range options {
+ switch opt {
+ case "param":
+ isParam = true
+ default:
+ return nil, fmt.Errorf("scfg: invalid option %q in struct tag", opt)
+ }
+ }
+
+ if isParam {
+ if info.param >= 0 {
+ return nil, fmt.Errorf("scfg: param option specified multiple times in struct tag in %v", t)
+ }
+ if parts[0] != "" {
+ return nil, fmt.Errorf("scfg: name must be empty when param option is specified in struct tag in %v", t)
+ }
+ info.param = i
+ } else {
+ if _, ok := info.children[k]; ok {
+ return nil, fmt.Errorf("scfg: key %q specified multiple times in struct tag in %v", k, t)
+ }
+ info.children[k] = i
+ }
+ }
+
+ structCache[t] = info
+ return info, nil
+}
diff --git a/forged/internal/scfg/unmarshal.go b/forged/internal/scfg/unmarshal.go
new file mode 100644
index 0000000..8befc10
--- /dev/null
+++ b/forged/internal/scfg/unmarshal.go
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package scfg
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+)
+
+// Decoder reads and decodes an scfg document from an input stream.
+type Decoder struct {
+ r io.Reader
+ unknownDirectives []*Directive
+}
+
+// NewDecoder returns a new decoder which reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r}
+}
+
+// UnknownDirectives returns a slice of all unknown directives encountered
+// during Decode.
+func (dec *Decoder) UnknownDirectives() []*Directive {
+ return dec.unknownDirectives
+}
+
+// Decode reads scfg document from the input and stores it in the value pointed
+// to by v.
+//
+// If v is nil or not a pointer, Decode returns an error.
+//
+// Blocks can be unmarshaled to:
+//
+// - Maps. Each directive is unmarshaled into a map entry. The map key must
+// be a string.
+// - Structs. Each directive is unmarshaled into a struct field.
+//
+// Duplicate directives are not allowed, unless the struct field or map value
+// is a slice of values representing a directive: structs or maps.
+//
+// Directives can be unmarshaled to:
+//
+// - Maps. The children block is unmarshaled into the map. Parameters are not
+// allowed.
+// - Structs. The children block is unmarshaled into the struct. Parameters
+// are allowed if one of the struct fields contains the "param" option in
+// its tag.
+// - Slices. Parameters are unmarshaled into the slice. Children blocks are
+// not allowed.
+// - Arrays. Parameters are unmarshaled into the array. The number of
+// parameters must match exactly the length of the array. Children blocks
+// are not allowed.
+// - Strings, booleans, integers, floating-point values, values implementing
+// encoding.TextUnmarshaler. Only a single parameter is allowed and is
+// unmarshaled into the value. Children blocks are not allowed.
+//
+// The decoding of each struct field can be customized by the format string
+// stored under the "scfg" key in the struct field's tag. The tag contains the
+// name of the field possibly followed by a comma-separated list of options.
+// The name may be empty in order to specify options without overriding the
+// default field name. As a special case, if the field name is "-", the field
+// is ignored. The "param" option specifies that directive parameters are
+// stored in this field (the name must be empty).
+func (dec *Decoder) Decode(v interface{}) error {
+ block, err := Read(dec.r)
+ if err != nil {
+ return err
+ }
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return fmt.Errorf("scfg: invalid value for unmarshaling")
+ }
+
+ return dec.unmarshalBlock(block, rv)
+}
+
+func (dec *Decoder) unmarshalBlock(block Block, v reflect.Value) error {
+ v = unwrapPointers(v)
+ t := v.Type()
+
+ dirsByName := make(map[string][]*Directive, len(block))
+ for _, dir := range block {
+ dirsByName[dir.Name] = append(dirsByName[dir.Name], dir)
+ }
+
+ switch v.Kind() {
+ case reflect.Map:
+ if t.Key().Kind() != reflect.String {
+ return fmt.Errorf("scfg: map key type must be string")
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ } else if v.Len() > 0 {
+ clearMap(v)
+ }
+
+ for name, dirs := range dirsByName {
+ mv := reflect.New(t.Elem()).Elem()
+ if err := dec.unmarshalDirectiveList(dirs, mv); err != nil {
+ return err
+ }
+ v.SetMapIndex(reflect.ValueOf(name), mv)
+ }
+
+ case reflect.Struct:
+ si, err := getStructInfo(t)
+ if err != nil {
+ return err
+ }
+
+ seen := make(map[int]bool)
+
+ for name, dirs := range dirsByName {
+ fieldIndex, ok := si.children[name]
+ if !ok {
+ dec.unknownDirectives = append(dec.unknownDirectives, dirs...)
+ continue
+ }
+ fv := v.Field(fieldIndex)
+ if err := dec.unmarshalDirectiveList(dirs, fv); err != nil {
+ return err
+ }
+ seen[fieldIndex] = true
+ }
+
+ for name, fieldIndex := range si.children {
+ if fieldIndex == si.param {
+ continue
+ }
+ if _, ok := seen[fieldIndex]; !ok {
+ return fmt.Errorf("scfg: missing required directive %q", name)
+ }
+ }
+
+ default:
+ return fmt.Errorf("scfg: unsupported type for unmarshaling blocks: %v", t)
+ }
+
+ return nil
+}
+
+func (dec *Decoder) unmarshalDirectiveList(dirs []*Directive, v reflect.Value) error {
+ v = unwrapPointers(v)
+ t := v.Type()
+
+ if v.Kind() != reflect.Slice || !isDirectiveType(t.Elem()) {
+ if len(dirs) > 1 {
+ return newUnmarshalDirectiveError(dirs[1], "directive must not be specified more than once")
+ }
+ return dec.unmarshalDirective(dirs[0], v)
+ }
+
+ sv := reflect.MakeSlice(t, len(dirs), len(dirs))
+ for i, dir := range dirs {
+ if err := dec.unmarshalDirective(dir, sv.Index(i)); err != nil {
+ return err
+ }
+ }
+ v.Set(sv)
+ return nil
+}
+
+// isDirectiveType checks whether a type can only be unmarshaled as a
+// directive, not as a parameter. Accepting too many types here would result in
+// ambiguities, see:
+// https://lists.sr.ht/~emersion/public-inbox/%3C20230629132458.152205-1-contact%40emersion.fr%3E#%3Ch4Y2peS_YBqY3ar4XlmPDPiNBFpYGns3EBYUx3_6zWEhV2o8_-fBQveRujGADWYhVVCucHBEryFGoPtpC3d3mQ-x10pWnFogfprbQTSvtxc=@emersion.fr%3E
+func isDirectiveType(t reflect.Type) bool {
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ textUnmarshalerType := reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+ if reflect.PointerTo(t).Implements(textUnmarshalerType) {
+ return false
+ }
+
+ switch t.Kind() {
+ case reflect.Struct, reflect.Map:
+ return true
+ default:
+ return false
+ }
+}
+
+func (dec *Decoder) unmarshalDirective(dir *Directive, v reflect.Value) error {
+ v = unwrapPointers(v)
+ t := v.Type()
+
+ if v.CanAddr() {
+ if _, ok := v.Addr().Interface().(encoding.TextUnmarshaler); ok {
+ if len(dir.Children) != 0 {
+ return newUnmarshalDirectiveError(dir, "directive requires zero children")
+ }
+ return unmarshalParamList(dir, v)
+ }
+ }
+
+ switch v.Kind() {
+ case reflect.Map:
+ if len(dir.Params) > 0 {
+ return newUnmarshalDirectiveError(dir, "directive requires zero parameters")
+ }
+ if err := dec.unmarshalBlock(dir.Children, v); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ si, err := getStructInfo(t)
+ if err != nil {
+ return err
+ }
+
+ if si.param >= 0 {
+ if err := unmarshalParamList(dir, v.Field(si.param)); err != nil {
+ return err
+ }
+ } else {
+ if len(dir.Params) > 0 {
+ return newUnmarshalDirectiveError(dir, "directive requires zero parameters")
+ }
+ }
+
+ if err := dec.unmarshalBlock(dir.Children, v); err != nil {
+ return err
+ }
+ default:
+ if len(dir.Children) != 0 {
+ return newUnmarshalDirectiveError(dir, "directive requires zero children")
+ }
+ if err := unmarshalParamList(dir, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func unmarshalParamList(dir *Directive, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Slice:
+ t := v.Type()
+ sv := reflect.MakeSlice(t, len(dir.Params), len(dir.Params))
+ for i, param := range dir.Params {
+ if err := unmarshalParam(param, sv.Index(i)); err != nil {
+ return newUnmarshalParamError(dir, i, err)
+ }
+ }
+ v.Set(sv)
+ case reflect.Array:
+ if len(dir.Params) != v.Len() {
+ return newUnmarshalDirectiveError(dir, fmt.Sprintf("directive requires exactly %v parameters", v.Len()))
+ }
+ for i, param := range dir.Params {
+ if err := unmarshalParam(param, v.Index(i)); err != nil {
+ return newUnmarshalParamError(dir, i, err)
+ }
+ }
+ default:
+ if len(dir.Params) != 1 {
+ return newUnmarshalDirectiveError(dir, "directive requires exactly one parameter")
+ }
+ if err := unmarshalParam(dir.Params[0], v); err != nil {
+ return newUnmarshalParamError(dir, 0, err)
+ }
+ }
+
+ return nil
+}
+
+func unmarshalParam(param string, v reflect.Value) error {
+ v = unwrapPointers(v)
+ t := v.Type()
+
+ // TODO: improve our logic following:
+ // https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/encoding/json/decode.go;drc=b9b8cecbfc72168ca03ad586cc2ed52b0e8db409;l=421
+ if v.CanAddr() {
+ if v, ok := v.Addr().Interface().(encoding.TextUnmarshaler); ok {
+ return v.UnmarshalText([]byte(param))
+ }
+ }
+
+ switch v.Kind() {
+ case reflect.String:
+ v.Set(reflect.ValueOf(param))
+ case reflect.Bool:
+ switch param {
+ case "true":
+ v.Set(reflect.ValueOf(true))
+ case "false":
+ v.Set(reflect.ValueOf(false))
+ default:
+ return fmt.Errorf("invalid bool parameter %q", param)
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ i, err := strconv.ParseInt(param, 10, t.Bits())
+ if err != nil {
+ return fmt.Errorf("invalid %v parameter: %v", t, err)
+ }
+ v.Set(reflect.ValueOf(i).Convert(t))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ u, err := strconv.ParseUint(param, 10, t.Bits())
+ if err != nil {
+ return fmt.Errorf("invalid %v parameter: %v", t, err)
+ }
+ v.Set(reflect.ValueOf(u).Convert(t))
+ case reflect.Float32, reflect.Float64:
+ f, err := strconv.ParseFloat(param, t.Bits())
+ if err != nil {
+ return fmt.Errorf("invalid %v parameter: %v", t, err)
+ }
+ v.Set(reflect.ValueOf(f).Convert(t))
+ default:
+ return fmt.Errorf("unsupported type for unmarshaling parameter: %v", t)
+ }
+
+ return nil
+}
+
+func unwrapPointers(v reflect.Value) reflect.Value {
+ for v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ return v
+}
+
+func clearMap(v reflect.Value) {
+ for _, k := range v.MapKeys() {
+ v.SetMapIndex(k, reflect.Value{})
+ }
+}
+
+type unmarshalDirectiveError struct {
+ lineno int
+ name string
+ msg string
+}
+
+func newUnmarshalDirectiveError(dir *Directive, msg string) *unmarshalDirectiveError {
+ return &unmarshalDirectiveError{
+ name: dir.Name,
+ lineno: dir.lineno,
+ msg: msg,
+ }
+}
+
+func (err *unmarshalDirectiveError) Error() string {
+ return fmt.Sprintf("line %v, directive %q: %v", err.lineno, err.name, err.msg)
+}
+
+type unmarshalParamError struct {
+ lineno int
+ directive string
+ paramIndex int
+ err error
+}
+
+func newUnmarshalParamError(dir *Directive, paramIndex int, err error) *unmarshalParamError {
+ return &unmarshalParamError{
+ directive: dir.Name,
+ lineno: dir.lineno,
+ paramIndex: paramIndex,
+ err: err,
+ }
+}
+
+func (err *unmarshalParamError) Error() string {
+ return fmt.Sprintf("line %v, directive %q, parameter %v: %v", err.lineno, err.directive, err.paramIndex+1, err.err)
+}
diff --git a/forged/internal/scfg/writer.go b/forged/internal/scfg/writer.go
new file mode 100644
index 0000000..02a07fe
--- /dev/null
+++ b/forged/internal/scfg/writer.go
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>
+
+package scfg
+
+import (
+ "errors"
+ "io"
+ "strings"
+)
+
+var errDirEmptyName = errors.New("scfg: directive with empty name")
+
+// Write writes a parsed configuration to the provided io.Writer.
+func Write(w io.Writer, blk Block) error {
+ enc := newEncoder(w)
+ err := enc.encodeBlock(blk)
+ return err
+}
+
+// encoder writes SCFG directives to an output stream.
+type encoder struct {
+ w io.Writer
+ lvl int
+ err error
+}
+
+// newEncoder returns a new encoder that writes to w.
+func newEncoder(w io.Writer) *encoder {
+ return &encoder{w: w}
+}
+
+func (enc *encoder) push() {
+ enc.lvl++
+}
+
+func (enc *encoder) pop() {
+ enc.lvl--
+}
+
+func (enc *encoder) writeIndent() {
+ for i := 0; i < enc.lvl; i++ {
+ enc.write([]byte("\t"))
+ }
+}
+
+func (enc *encoder) write(p []byte) {
+ if enc.err != nil {
+ return
+ }
+ _, enc.err = enc.w.Write(p)
+}
+
+func (enc *encoder) encodeBlock(blk Block) error {
+ for _, dir := range blk {
+ if err := enc.encodeDir(*dir); err != nil {
+ return err
+ }
+ }
+ return enc.err
+}
+
+func (enc *encoder) encodeDir(dir Directive) error {
+ if enc.err != nil {
+ return enc.err
+ }
+
+ if dir.Name == "" {
+ enc.err = errDirEmptyName
+ return enc.err
+ }
+
+ enc.writeIndent()
+ enc.write([]byte(maybeQuote(dir.Name)))
+ for _, p := range dir.Params {
+ enc.write([]byte(" "))
+ enc.write([]byte(maybeQuote(p)))
+ }
+
+ if len(dir.Children) > 0 {
+ enc.write([]byte(" {\n"))
+ enc.push()
+ if err := enc.encodeBlock(dir.Children); err != nil {
+ return err
+ }
+ enc.pop()
+
+ enc.writeIndent()
+ enc.write([]byte("}"))
+ }
+ enc.write([]byte("\n"))
+
+ return enc.err
+}
+
+const specialChars = "\"\\\r\n'{} \t"
+
+func maybeQuote(s string) string {
+ if s == "" || strings.ContainsAny(s, specialChars) {
+ var sb strings.Builder
+ sb.WriteByte('"')
+ for _, ch := range s {
+ if strings.ContainsRune(`"\`, ch) {
+ sb.WriteByte('\\')
+ }
+ sb.WriteRune(ch)
+ }
+ sb.WriteByte('"')
+ return sb.String()
+ }
+ return s
+}
diff --git a/forged/internal/unsorted/acl.go b/forged/internal/unsorted/acl.go
new file mode 100644
index 0000000..c2e887d
--- /dev/null
+++ b/forged/internal/unsorted/acl.go
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "context"
+
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// getRepoInfo returns the filesystem path and direct access permission for a
+// given repo and a provided ssh public key.
+//
+// TODO: Revamp.
+func (s *Server) getRepoInfo(ctx context.Context, groupPath []string, repoName, sshPubkey string) (repoID int, fsPath string, access bool, contribReq, userType string, userID int, err error) {
+ err = s.database.QueryRow(ctx, `
+WITH RECURSIVE group_path_cte AS (
+ -- Start: match the first name in the path where parent_group IS NULL
+ SELECT
+ id,
+ parent_group,
+ name,
+ 1 AS depth
+ FROM groups
+ WHERE name = ($1::text[])[1]
+ AND parent_group IS NULL
+
+ UNION ALL
+
+ -- Recurse: join next segment of the path
+ SELECT
+ g.id,
+ g.parent_group,
+ g.name,
+ group_path_cte.depth + 1
+ FROM groups g
+ JOIN group_path_cte ON g.parent_group = group_path_cte.id
+ WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
+ AND group_path_cte.depth + 1 <= cardinality($1::text[])
+)
+SELECT
+ r.id,
+ r.filesystem_path,
+ CASE WHEN ugr.user_id IS NOT NULL THEN TRUE ELSE FALSE END AS has_role_in_group,
+ r.contrib_requirements,
+ COALESCE(u.type, ''),
+ COALESCE(u.id, 0)
+FROM group_path_cte g
+JOIN repos r ON r.group_id = g.id
+LEFT JOIN ssh_public_keys s ON s.key_string = $3
+LEFT JOIN users u ON u.id = s.user_id
+LEFT JOIN user_group_roles ugr ON ugr.group_id = g.id AND ugr.user_id = u.id
+WHERE g.depth = cardinality($1::text[])
+ AND r.name = $2
+`, pgtype.FlatArray[string](groupPath), repoName, sshPubkey,
+ ).Scan(&repoID, &fsPath, &access, &contribReq, &userType, &userID)
+ return
+}
diff --git a/forged/internal/unsorted/config.go b/forged/internal/unsorted/config.go
new file mode 100644
index 0000000..9f07480
--- /dev/null
+++ b/forged/internal/unsorted/config.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "bufio"
+ "errors"
+ "log/slog"
+ "os"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/database"
+ "go.lindenii.runxiyu.org/forge/forged/internal/irc"
+ "go.lindenii.runxiyu.org/forge/forged/internal/scfg"
+)
+
+type Config struct {
+ HTTP struct {
+ Net string `scfg:"net"`
+ Addr string `scfg:"addr"`
+ CookieExpiry int `scfg:"cookie_expiry"`
+ Root string `scfg:"root"`
+ ReadTimeout uint32 `scfg:"read_timeout"`
+ WriteTimeout uint32 `scfg:"write_timeout"`
+ IdleTimeout uint32 `scfg:"idle_timeout"`
+ ReverseProxy bool `scfg:"reverse_proxy"`
+ } `scfg:"http"`
+ Hooks struct {
+ Socket string `scfg:"socket"`
+ Execs string `scfg:"execs"`
+ } `scfg:"hooks"`
+ LMTP struct {
+ Socket string `scfg:"socket"`
+ Domain string `scfg:"domain"`
+ MaxSize int64 `scfg:"max_size"`
+ WriteTimeout uint32 `scfg:"write_timeout"`
+ ReadTimeout uint32 `scfg:"read_timeout"`
+ } `scfg:"lmtp"`
+ Git struct {
+ RepoDir string `scfg:"repo_dir"`
+ Socket string `scfg:"socket"`
+ DaemonPath string `scfg:"daemon_path"`
+ } `scfg:"git"`
+ SSH struct {
+ Net string `scfg:"net"`
+ Addr string `scfg:"addr"`
+ Key string `scfg:"key"`
+ Root string `scfg:"root"`
+ } `scfg:"ssh"`
+ IRC irc.Config `scfg:"irc"`
+ General struct {
+ Title string `scfg:"title"`
+ } `scfg:"general"`
+ DB struct {
+ Type string `scfg:"type"`
+ Conn string `scfg:"conn"`
+ } `scfg:"db"`
+ Pprof struct {
+ Net string `scfg:"net"`
+ Addr string `scfg:"addr"`
+ } `scfg:"pprof"`
+}
+
+// loadConfig loads a configuration file from the specified path and unmarshals
+// it into the server's config struct. This may race with concurrent reads of
+// that struct; additional synchronization is necessary if the configuration is
+// to be made reloadable.
+func (s *Server) loadConfig(path string) (err error) {
+ var configFile *os.File
+ if configFile, err = os.Open(path); err != nil {
+ return err
+ }
+ defer configFile.Close()
+
+ decoder := scfg.NewDecoder(bufio.NewReader(configFile))
+ if err = decoder.Decode(&s.config); err != nil {
+ return err
+ }
+ for _, u := range decoder.UnknownDirectives() {
+ slog.Warn("unknown configuration directive", "directive", u)
+ }
+
+ if s.config.DB.Type != "postgres" {
+ return errors.New("unsupported database type")
+ }
+
+ if s.database, err = database.Open(s.config.DB.Conn); err != nil {
+ return err
+ }
+
+ s.globalData["forge_title"] = s.config.General.Title
+
+ return nil
+}
diff --git a/forged/internal/unsorted/database.go b/forged/internal/unsorted/database.go
new file mode 100644
index 0000000..222b0c4
--- /dev/null
+++ b/forged/internal/unsorted/database.go
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "context"
+
+ "github.com/jackc/pgx/v5"
+)
+
+// TODO: All database handling logic in all request handlers must be revamped.
+// We must ensure that each request has all logic in one transaction (subject
+// to exceptions if appropriate) so they get a consistent view of the database
+// at a single point. A failure to do so may cause things as serious as
+// privilege escalation.
+
+// queryNameDesc is a helper function that executes a query and returns a
+// list of nameDesc results. The query must return exactly two string columns,
+// i.e. a name and a description.
+func (s *Server) queryNameDesc(ctx context.Context, query string, args ...any) (result []nameDesc, err error) {
+ var rows pgx.Rows
+
+ if rows, err = s.database.Query(ctx, query, args...); err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var name, description string
+ if err = rows.Scan(&name, &description); err != nil {
+ return nil, err
+ }
+ result = append(result, nameDesc{name, description})
+ }
+ return result, rows.Err()
+}
+
+// nameDesc holds a name and a description.
+type nameDesc struct {
+ Name string
+ Description string
+}
diff --git a/forged/internal/unsorted/fedauth.go b/forged/internal/unsorted/fedauth.go
new file mode 100644
index 0000000..f54649b
--- /dev/null
+++ b/forged/internal/unsorted/fedauth.go
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/jackc/pgx/v5"
+)
+
+// fedauth checks whether a user's SSH public key matches the remote username
+// they claim to have on the service. If so, the association is recorded.
+func (s *Server) fedauth(ctx context.Context, userID int, service, remoteUsername, pubkey string) (bool, error) {
+ var err error
+
+ matched := false
+ usernameEscaped := url.PathEscape(remoteUsername)
+
+ var req *http.Request
+ switch service {
+ // TODO: Services should be configurable by the instance administrator
+ // and should not be hardcoded in the source code.
+ case "sr.ht":
+ req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://meta.sr.ht/~"+usernameEscaped+".keys", nil)
+ case "github":
+ req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://github.com/"+usernameEscaped+".keys", nil)
+ case "codeberg":
+ req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://codeberg.org/"+usernameEscaped+".keys", nil)
+ case "tangled":
+ req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://tangled.sh/keys/"+usernameEscaped, nil)
+ // TODO: Don't rely on one webview
+ default:
+ return false, errors.New("unknown federated service")
+ }
+ if err != nil {
+ return false, err
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return false, err
+ }
+ defer func() {
+ _ = resp.Body.Close()
+ }()
+ buf := bufio.NewReader(resp.Body)
+
+ for {
+ line, err := buf.ReadString('\n')
+ if errors.Is(err, io.EOF) {
+ break
+ } else if err != nil {
+ return false, err
+ }
+
+ lineSplit := strings.Split(line, " ")
+ if len(lineSplit) < 2 {
+ continue
+ }
+ line = strings.Join(lineSplit[:2], " ")
+
+ if line == pubkey {
+ matched = true
+ break
+ }
+ }
+
+ if !matched {
+ return false, nil
+ }
+
+ var txn pgx.Tx
+ if txn, err = s.database.Begin(ctx); err != nil {
+ return false, err
+ }
+ defer func() {
+ _ = txn.Rollback(ctx)
+ }()
+ if _, err = txn.Exec(ctx, `UPDATE users SET type = 'federated' WHERE id = $1 AND type = 'pubkey_only'`, userID); err != nil {
+ return false, err
+ }
+ if _, err = txn.Exec(ctx, `INSERT INTO federated_identities (user_id, service, remote_username) VALUES ($1, $2, $3)`, userID, service, remoteUsername); err != nil {
+ return false, err
+ }
+ if err = txn.Commit(ctx); err != nil {
+ return false, err
+ }
+
+ return true, nil
+}
diff --git a/forged/internal/unsorted/git_hooks_handle_linux.go b/forged/internal/unsorted/git_hooks_handle_linux.go
new file mode 100644
index 0000000..f904550
--- /dev/null
+++ b/forged/internal/unsorted/git_hooks_handle_linux.go
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+//
+//go:build linux
+
+package unsorted
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "github.com/jackc/pgx/v5"
+ "go.lindenii.runxiyu.org/forge/forged/internal/ansiec"
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
+var (
+ errGetFD = errors.New("unable to get file descriptor")
+ errGetUcred = errors.New("failed getsockopt")
+)
+
+// hooksHandler handles a connection from hookc via the
+// unix socket.
+func (s *Server) hooksHandler(conn net.Conn) {
+ var ctx context.Context
+ var cancel context.CancelFunc
+ var ucred *syscall.Ucred
+ var err error
+ var cookie []byte
+ var packPass packPass
+ var sshStderr io.Writer
+ var hookRet byte
+
+ defer conn.Close()
+ ctx, cancel = context.WithCancel(context.Background())
+ defer cancel()
+
+ // There aren't reasonable cases where someone would run this as
+ // another user.
+ if ucred, err = getUcred(conn); err != nil {
+ if _, err = conn.Write([]byte{1}); err != nil {
+ return
+ }
+ writeRedError(conn, "\nUnable to get peer credentials: %v", err)
+ return
+ }
+ uint32uid := uint32(os.Getuid()) //#nosec G115
+ if ucred.Uid != uint32uid {
+ if _, err = conn.Write([]byte{1}); err != nil {
+ return
+ }
+ writeRedError(conn, "\nUID mismatch")
+ return
+ }
+
+ cookie = make([]byte, 64)
+ if _, err = conn.Read(cookie); err != nil {
+ if _, err = conn.Write([]byte{1}); err != nil {
+ return
+ }
+ writeRedError(conn, "\nFailed to read cookie: %v", err)
+ return
+ }
+
+ {
+ var ok bool
+ packPass, ok = s.packPasses.Load(misc.BytesToString(cookie))
+ if !ok {
+ if _, err = conn.Write([]byte{1}); err != nil {
+ return
+ }
+ writeRedError(conn, "\nInvalid handler cookie")
+ return
+ }
+ }
+
+ sshStderr = packPass.session.Stderr()
+
+ _, _ = sshStderr.Write([]byte{'\n'})
+
+ hookRet = func() byte {
+ var argc64 uint64
+ if err = binary.Read(conn, binary.NativeEndian, &argc64); err != nil {
+ writeRedError(sshStderr, "Failed to read argc: %v", err)
+ return 1
+ }
+ var args []string
+ for range argc64 {
+ var arg bytes.Buffer
+ for {
+ nextByte := make([]byte, 1)
+ n, err := conn.Read(nextByte)
+ if err != nil || n != 1 {
+ writeRedError(sshStderr, "Failed to read arg: %v", err)
+ return 1
+ }
+ if nextByte[0] == 0 {
+ break
+ }
+ arg.WriteByte(nextByte[0])
+ }
+ args = append(args, arg.String())
+ }
+
+ gitEnv := make(map[string]string)
+ for {
+ var envLine bytes.Buffer
+ for {
+ nextByte := make([]byte, 1)
+ n, err := conn.Read(nextByte)
+ if err != nil || n != 1 {
+ writeRedError(sshStderr, "Failed to read environment variable: %v", err)
+ return 1
+ }
+ if nextByte[0] == 0 {
+ break
+ }
+ envLine.WriteByte(nextByte[0])
+ }
+ if envLine.Len() == 0 {
+ break
+ }
+ kv := envLine.String()
+ parts := strings.SplitN(kv, "=", 2)
+ if len(parts) < 2 {
+ writeRedError(sshStderr, "Invalid environment variable line: %v", kv)
+ return 1
+ }
+ gitEnv[parts[0]] = parts[1]
+ }
+
+ var stdin bytes.Buffer
+ if _, err = io.Copy(&stdin, conn); err != nil {
+ writeRedError(conn, "Failed to read to the stdin buffer: %v", err)
+ }
+
+ switch filepath.Base(args[0]) {
+ case "pre-receive":
+ if packPass.directAccess {
+ return 0
+ }
+ allOK := true
+ for {
+ var line, oldOID, rest, newIOID, refName string
+ var found bool
+ var oldHash, newHash plumbing.Hash
+ var oldCommit, newCommit *object.Commit
+ var pushOptCount int
+
+ pushOptCount, err = strconv.Atoi(gitEnv["GIT_PUSH_OPTION_COUNT"])
+ if err != nil {
+ writeRedError(sshStderr, "Failed to parse GIT_PUSH_OPTION_COUNT: %v", err)
+ return 1
+ }
+
+ // TODO: Allow existing users (even if they are already federated or registered) to add a federated user ID... though perhaps this should be in the normal SSH interface instead of the git push interface?
+ // Also it'd be nice to be able to combine users or whatever
+ if packPass.contribReq == "federated" && packPass.userType != "federated" && packPass.userType != "registered" {
+ if pushOptCount == 0 {
+ writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu")
+ return 1
+ }
+ for pushOptIndex := range pushOptCount {
+ pushOpt, ok := gitEnv[fmt.Sprintf("GIT_PUSH_OPTION_%d", pushOptIndex)]
+ if !ok {
+ writeRedError(sshStderr, "Failed to get push option %d", pushOptIndex)
+ return 1
+ }
+ if strings.HasPrefix(pushOpt, "fedid=") {
+ fedUserID := strings.TrimPrefix(pushOpt, "fedid=")
+ service, username, found := strings.Cut(fedUserID, ":")
+ if !found {
+ writeRedError(sshStderr, "Invalid federated user identifier %#v does not contain a colon", fedUserID)
+ return 1
+ }
+
+ ok, err := s.fedauth(ctx, packPass.userID, service, username, packPass.pubkey)
+ if err != nil {
+ writeRedError(sshStderr, "Failed to verify federated user identifier %#v: %v", fedUserID, err)
+ return 1
+ }
+ if !ok {
+ writeRedError(sshStderr, "Failed to verify federated user identifier %#v: you don't seem to be on the list", fedUserID)
+ return 1
+ }
+
+ break
+ }
+ if pushOptIndex == pushOptCount-1 {
+ writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu")
+ return 1
+ }
+ }
+ }
+
+ line, err = stdin.ReadString('\n')
+ if errors.Is(err, io.EOF) {
+ break
+ } else if err != nil {
+ writeRedError(sshStderr, "Failed to read pre-receive line: %v", err)
+ return 1
+ }
+ line = line[:len(line)-1]
+
+ oldOID, rest, found = strings.Cut(line, " ")
+ if !found {
+ writeRedError(sshStderr, "Invalid pre-receive line: %v", line)
+ return 1
+ }
+
+ newIOID, refName, found = strings.Cut(rest, " ")
+ if !found {
+ writeRedError(sshStderr, "Invalid pre-receive line: %v", line)
+ return 1
+ }
+
+ if strings.HasPrefix(refName, "refs/heads/contrib/") {
+ if allZero(oldOID) { // New branch
+ fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName)
+ var newMRLocalID int
+
+ if packPass.userID != 0 {
+ err = s.database.QueryRow(ctx,
+ "INSERT INTO merge_requests (repo_id, creator, source_ref, status) VALUES ($1, $2, $3, 'open') RETURNING repo_local_id",
+ packPass.repoID, packPass.userID, strings.TrimPrefix(refName, "refs/heads/"),
+ ).Scan(&newMRLocalID)
+ } else {
+ err = s.database.QueryRow(ctx,
+ "INSERT INTO merge_requests (repo_id, source_ref, status) VALUES ($1, $2, 'open') RETURNING repo_local_id",
+ packPass.repoID, strings.TrimPrefix(refName, "refs/heads/"),
+ ).Scan(&newMRLocalID)
+ }
+ if err != nil {
+ writeRedError(sshStderr, "Error creating merge request: %v", err)
+ return 1
+ }
+ mergeRequestWebURL := fmt.Sprintf("%s/contrib/%d/", s.genHTTPRemoteURL(packPass.groupPath, packPass.repoName), newMRLocalID)
+ fmt.Fprintln(sshStderr, ansiec.Blue+"Created merge request at", mergeRequestWebURL+ansiec.Reset)
+
+ s.ircBot.Send("PRIVMSG #chat :New merge request at " + mergeRequestWebURL)
+ } else { // Existing contrib branch
+ var existingMRUser int
+ var isAncestor bool
+
+ err = s.database.QueryRow(ctx,
+ "SELECT COALESCE(creator, 0) FROM merge_requests WHERE source_ref = $1 AND repo_id = $2",
+ strings.TrimPrefix(refName, "refs/heads/"), packPass.repoID,
+ ).Scan(&existingMRUser)
+ if err != nil {
+ if errors.Is(err, pgx.ErrNoRows) {
+ writeRedError(sshStderr, "No existing merge request for existing contrib branch: %v", err)
+ } else {
+ writeRedError(sshStderr, "Error querying for existing merge request: %v", err)
+ }
+ return 1
+ }
+ if existingMRUser == 0 {
+ allOK = false
+ fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs to unowned MR)")
+ continue
+ }
+
+ if existingMRUser != packPass.userID {
+ allOK = false
+ fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs another user's MR)")
+ continue
+ }
+
+ oldHash = plumbing.NewHash(oldOID)
+
+ if oldCommit, err = packPass.repo.CommitObject(oldHash); err != nil {
+ writeRedError(sshStderr, "Daemon failed to get old commit: %v", err)
+ return 1
+ }
+
+ // Potential BUG: I'm not sure if new_commit is guaranteed to be
+ // detectable as they haven't been merged into the main repo's
+ // objects yet. But it seems to work, and I don't think there's
+ // any reason for this to only work intermittently.
+ newHash = plumbing.NewHash(newIOID)
+ if newCommit, err = packPass.repo.CommitObject(newHash); err != nil {
+ writeRedError(sshStderr, "Daemon failed to get new commit: %v", err)
+ return 1
+ }
+
+ if isAncestor, err = oldCommit.IsAncestor(newCommit); err != nil {
+ writeRedError(sshStderr, "Daemon failed to check if old commit is ancestor: %v", err)
+ return 1
+ }
+
+ if !isAncestor {
+ // TODO: Create MR snapshot ref instead
+ allOK = false
+ fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(force pushes are not supported yet)")
+ continue
+ }
+
+ fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName)
+ }
+ } else { // Non-contrib branch
+ allOK = false
+ fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(you cannot push to branches outside of contrib/*)")
+ }
+ }
+
+ fmt.Fprintln(sshStderr)
+ if allOK {
+ fmt.Fprintln(sshStderr, "Overall "+ansiec.Green+"ACK"+ansiec.Reset+" (all checks passed)")
+ return 0
+ }
+ fmt.Fprintln(sshStderr, "Overall "+ansiec.Red+"NAK"+ansiec.Reset+" (one or more branches failed checks)")
+ return 1
+ default:
+ fmt.Fprintln(sshStderr, ansiec.Red+"Invalid hook:", args[0]+ansiec.Reset)
+ return 1
+ }
+ }()
+
+ fmt.Fprintln(sshStderr)
+
+ _, _ = conn.Write([]byte{hookRet})
+}
+
+// serveGitHooks handles connections on the specified network listener and
+// treats incoming connections as those from git hook handlers by spawning
+// sessions. The listener must be a SOCK_STREAM UNIX domain socket. The
+// function itself blocks.
+func (s *Server) serveGitHooks(listener net.Listener) error {
+ for {
+ conn, err := listener.Accept()
+ if err != nil {
+ return err
+ }
+ go s.hooksHandler(conn)
+ }
+}
+
+// getUcred fetches connection credentials as a [syscall.Ucred] from a given
+// [net.Conn]. It panics when conn is not a [net.UnixConn].
+func getUcred(conn net.Conn) (ucred *syscall.Ucred, err error) {
+ unixConn := conn.(*net.UnixConn)
+ var unixConnFD *os.File
+
+ if unixConnFD, err = unixConn.File(); err != nil {
+ return nil, errGetFD
+ }
+ defer unixConnFD.Close()
+
+ if ucred, err = syscall.GetsockoptUcred(int(unixConnFD.Fd()), syscall.SOL_SOCKET, syscall.SO_PEERCRED); err != nil {
+ return nil, errGetUcred
+ }
+ return ucred, nil
+}
+
+// allZero returns true if all runes in a given string are '0'. The comparison
+// is not constant time and must not be used in contexts where time-based side
+// channel attacks are a concern.
+func allZero(s string) bool {
+ for _, r := range s {
+ if r != '0' {
+ return false
+ }
+ }
+ return true
+}
diff --git a/forged/internal/unsorted/git_hooks_handle_other.go b/forged/internal/unsorted/git_hooks_handle_other.go
new file mode 100644
index 0000000..70b2072
--- /dev/null
+++ b/forged/internal/unsorted/git_hooks_handle_other.go
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+//
+//go:build !linux
+
+package unsorted
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "github.com/jackc/pgx/v5"
+ "go.lindenii.runxiyu.org/forge/forged/internal/ansiec"
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
+// hooksHandler handles a connection from hookc via the
+// unix socket.
+//
+// Wire protocol (all reads from conn, in order): a 64-byte cookie
+// identifying the registered pack pass; a native-endian uint64 argument
+// count; that many NUL-terminated argument strings; NUL-terminated
+// KEY=VALUE environment lines ended by an empty line; then the hook's
+// stdin until EOF. A single status byte (0 = success, 1 = failure) is
+// written back on conn when the result is known.
+func (s *Server) hooksHandler(conn net.Conn) {
+ var ctx context.Context
+ var cancel context.CancelFunc
+ var err error
+ var cookie []byte
+ var packPass packPass
+ var sshStderr io.Writer
+ var hookRet byte
+
+ defer conn.Close()
+ ctx, cancel = context.WithCancel(context.Background())
+ defer cancel()
+
+ // TODO: ucred-like checks
+
+ cookie = make([]byte, 64)
+ // NOTE(review): conn.Read may return fewer than 64 bytes; io.ReadFull
+ // would guarantee a complete cookie — confirm the client always writes
+ // it in a single frame.
+ if _, err = conn.Read(cookie); err != nil {
+ if _, err = conn.Write([]byte{1}); err != nil {
+ return
+ }
+ writeRedError(conn, "\nFailed to read cookie: %v", err)
+ return
+ }
+
+ {
+ // Resolve the cookie to the pack pass registered by the SSH session.
+ var ok bool
+ packPass, ok = s.packPasses.Load(misc.BytesToString(cookie))
+ if !ok {
+ if _, err = conn.Write([]byte{1}); err != nil {
+ return
+ }
+ writeRedError(conn, "\nInvalid handler cookie")
+ return
+ }
+ }
+
+ sshStderr = packPass.session.Stderr()
+
+ _, _ = sshStderr.Write([]byte{'\n'})
+
+ // The closure yields the status byte to report back to hookc.
+ hookRet = func() byte {
+ var argc64 uint64
+ if err = binary.Read(conn, binary.NativeEndian, &argc64); err != nil {
+ writeRedError(sshStderr, "Failed to read argc: %v", err)
+ return 1
+ }
+ var args []string
+ for range argc64 {
+ var arg bytes.Buffer
+ for {
+ nextByte := make([]byte, 1)
+ n, err := conn.Read(nextByte)
+ if err != nil || n != 1 {
+ writeRedError(sshStderr, "Failed to read arg: %v", err)
+ return 1
+ }
+ if nextByte[0] == 0 {
+ break
+ }
+ arg.WriteByte(nextByte[0])
+ }
+ args = append(args, arg.String())
+ }
+
+ // Environment lines are NUL-terminated KEY=VALUE pairs; an empty line
+ // terminates the list.
+ gitEnv := make(map[string]string)
+ for {
+ var envLine bytes.Buffer
+ for {
+ nextByte := make([]byte, 1)
+ n, err := conn.Read(nextByte)
+ if err != nil || n != 1 {
+ writeRedError(sshStderr, "Failed to read environment variable: %v", err)
+ return 1
+ }
+ if nextByte[0] == 0 {
+ break
+ }
+ envLine.WriteByte(nextByte[0])
+ }
+ if envLine.Len() == 0 {
+ break
+ }
+ kv := envLine.String()
+ parts := strings.SplitN(kv, "=", 2)
+ if len(parts) < 2 {
+ writeRedError(sshStderr, "Invalid environment variable line: %v", kv)
+ return 1
+ }
+ gitEnv[parts[0]] = parts[1]
+ }
+
+ // The rest of the stream is the hook's stdin. A read error is reported
+ // but processing continues with whatever was buffered.
+ var stdin bytes.Buffer
+ if _, err = io.Copy(&stdin, conn); err != nil {
+ writeRedError(conn, "Failed to read to the stdin buffer: %v", err)
+ }
+
+ // NOTE(review): args[0] panics if argc was zero — presumably hookc
+ // always sends at least the hook name; confirm.
+ switch filepath.Base(args[0]) {
+ case "pre-receive":
+ if packPass.directAccess {
+ return 0
+ }
+ allOK := true
+ // One iteration per "old-oid new-oid refname" line on stdin.
+ for {
+ var line, oldOID, rest, newIOID, refName string
+ var found bool
+ var oldHash, newHash plumbing.Hash
+ var oldCommit, newCommit *object.Commit
+ var pushOptCount int
+
+ // NOTE(review): push-option parsing and fedauth verification run
+ // once per pushed ref rather than once per push — harmless but
+ // redundant; consider hoisting above the loop.
+ pushOptCount, err = strconv.Atoi(gitEnv["GIT_PUSH_OPTION_COUNT"])
+ if err != nil {
+ writeRedError(sshStderr, "Failed to parse GIT_PUSH_OPTION_COUNT: %v", err)
+ return 1
+ }
+
+ // TODO: Allow existing users (even if they are already federated or registered) to add a federated user ID... though perhaps this should be in the normal SSH interface instead of the git push interface?
+ // Also it'd be nice to be able to combine users or whatever
+ if packPass.contribReq == "federated" && packPass.userType != "federated" && packPass.userType != "registered" {
+ if pushOptCount == 0 {
+ writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu")
+ return 1
+ }
+ for pushOptIndex := range pushOptCount {
+ pushOpt, ok := gitEnv[fmt.Sprintf("GIT_PUSH_OPTION_%d", pushOptIndex)]
+ if !ok {
+ writeRedError(sshStderr, "Failed to get push option %d", pushOptIndex)
+ return 1
+ }
+ if strings.HasPrefix(pushOpt, "fedid=") {
+ fedUserID := strings.TrimPrefix(pushOpt, "fedid=")
+ service, username, found := strings.Cut(fedUserID, ":")
+ if !found {
+ writeRedError(sshStderr, "Invalid federated user identifier %#v does not contain a colon", fedUserID)
+ return 1
+ }
+
+ ok, err := s.fedauth(ctx, packPass.userID, service, username, packPass.pubkey)
+ if err != nil {
+ writeRedError(sshStderr, "Failed to verify federated user identifier %#v: %v", fedUserID, err)
+ return 1
+ }
+ if !ok {
+ writeRedError(sshStderr, "Failed to verify federated user identifier %#v: you don't seem to be on the list", fedUserID)
+ return 1
+ }
+
+ break
+ }
+ // Reached the last option without finding a fedid= one.
+ if pushOptIndex == pushOptCount-1 {
+ writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu")
+ return 1
+ }
+ }
+ }
+
+ line, err = stdin.ReadString('\n')
+ if errors.Is(err, io.EOF) {
+ break
+ } else if err != nil {
+ writeRedError(sshStderr, "Failed to read pre-receive line: %v", err)
+ return 1
+ }
+ line = line[:len(line)-1] // strip the trailing newline
+
+ oldOID, rest, found = strings.Cut(line, " ")
+ if !found {
+ writeRedError(sshStderr, "Invalid pre-receive line: %v", line)
+ return 1
+ }
+
+ newIOID, refName, found = strings.Cut(rest, " ")
+ if !found {
+ writeRedError(sshStderr, "Invalid pre-receive line: %v", line)
+ return 1
+ }
+
+ if strings.HasPrefix(refName, "refs/heads/contrib/") {
+ if allZero(oldOID) { // New branch
+ fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName)
+ var newMRLocalID int
+
+ // userID 0 means an anonymous pusher: create the MR without
+ // a creator so the NOT NULL/foreign key is not violated.
+ if packPass.userID != 0 {
+ err = s.database.QueryRow(ctx,
+ "INSERT INTO merge_requests (repo_id, creator, source_ref, status) VALUES ($1, $2, $3, 'open') RETURNING repo_local_id",
+ packPass.repoID, packPass.userID, strings.TrimPrefix(refName, "refs/heads/"),
+ ).Scan(&newMRLocalID)
+ } else {
+ err = s.database.QueryRow(ctx,
+ "INSERT INTO merge_requests (repo_id, source_ref, status) VALUES ($1, $2, 'open') RETURNING repo_local_id",
+ packPass.repoID, strings.TrimPrefix(refName, "refs/heads/"),
+ ).Scan(&newMRLocalID)
+ }
+ if err != nil {
+ writeRedError(sshStderr, "Error creating merge request: %v", err)
+ return 1
+ }
+ mergeRequestWebURL := fmt.Sprintf("%s/contrib/%d/", s.genHTTPRemoteURL(packPass.groupPath, packPass.repoName), newMRLocalID)
+ fmt.Fprintln(sshStderr, ansiec.Blue+"Created merge request at", mergeRequestWebURL+ansiec.Reset)
+
+ s.ircBot.Send("PRIVMSG #chat :New merge request at " + mergeRequestWebURL)
+ } else { // Existing contrib branch
+ var existingMRUser int
+ var isAncestor bool
+
+ err = s.database.QueryRow(ctx,
+ "SELECT COALESCE(creator, 0) FROM merge_requests WHERE source_ref = $1 AND repo_id = $2",
+ strings.TrimPrefix(refName, "refs/heads/"), packPass.repoID,
+ ).Scan(&existingMRUser)
+ if err != nil {
+ if errors.Is(err, pgx.ErrNoRows) {
+ writeRedError(sshStderr, "No existing merge request for existing contrib branch: %v", err)
+ } else {
+ writeRedError(sshStderr, "Error querying for existing merge request: %v", err)
+ }
+ return 1
+ }
+ if existingMRUser == 0 {
+ allOK = false
+ fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs to unowned MR)")
+ continue
+ }
+
+ if existingMRUser != packPass.userID {
+ allOK = false
+ fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs another user's MR)")
+ continue
+ }
+
+ oldHash = plumbing.NewHash(oldOID)
+
+ if oldCommit, err = packPass.repo.CommitObject(oldHash); err != nil {
+ writeRedError(sshStderr, "Daemon failed to get old commit: %v", err)
+ return 1
+ }
+
+ // Potential BUG: I'm not sure if new_commit is guaranteed to be
+ // detectable as they haven't been merged into the main repo's
+ // objects yet. But it seems to work, and I don't think there's
+ // any reason for this to only work intermittently.
+ newHash = plumbing.NewHash(newIOID)
+ if newCommit, err = packPass.repo.CommitObject(newHash); err != nil {
+ writeRedError(sshStderr, "Daemon failed to get new commit: %v", err)
+ return 1
+ }
+
+ if isAncestor, err = oldCommit.IsAncestor(newCommit); err != nil {
+ writeRedError(sshStderr, "Daemon failed to check if old commit is ancestor: %v", err)
+ return 1
+ }
+
+ if !isAncestor {
+ // TODO: Create MR snapshot ref instead
+ allOK = false
+ fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(force pushes are not supported yet)")
+ continue
+ }
+
+ fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName)
+ }
+ } else { // Non-contrib branch
+ allOK = false
+ fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(you cannot push to branches outside of contrib/*)")
+ }
+ }
+
+ fmt.Fprintln(sshStderr)
+ if allOK {
+ fmt.Fprintln(sshStderr, "Overall "+ansiec.Green+"ACK"+ansiec.Reset+" (all checks passed)")
+ return 0
+ }
+ fmt.Fprintln(sshStderr, "Overall "+ansiec.Red+"NAK"+ansiec.Reset+" (one or more branches failed checks)")
+ return 1
+ default:
+ fmt.Fprintln(sshStderr, ansiec.Red+"Invalid hook:", args[0]+ansiec.Reset)
+ return 1
+ }
+ }()
+
+ fmt.Fprintln(sshStderr)
+
+ _, _ = conn.Write([]byte{hookRet})
+}
+
+// serveGitHooks handles connections on the specified network listener and
+// treats incoming connections as those from git hook handlers by spawning
+// sessions. The listener must be a SOCK_STREAM UNIX domain socket. The
+// function itself blocks.
+//
+// Each accepted connection is served in its own goroutine; per-connection
+// failures are handled inside hooksHandler and are not reported here. Any
+// Accept error (even a transient one) terminates the loop and is returned.
+func (s *Server) serveGitHooks(listener net.Listener) error {
+ for {
+ conn, err := listener.Accept()
+ if err != nil {
+ return err
+ }
+ go s.hooksHandler(conn)
+ }
+}
+
+// allZero returns true if all runes in a given string are '0'. The comparison
+// is not constant time and must not be used in contexts where time-based side
+// channel attacks are a concern.
+//
+// The empty string vacuously satisfies the condition and yields true.
+func allZero(s string) bool {
+ for _, r := range s {
+ if r != '0' {
+ return false
+ }
+ }
+ return true
+}
diff --git a/forged/internal/unsorted/git_init.go b/forged/internal/unsorted/git_init.go
new file mode 100644
index 0000000..a9bba78
--- /dev/null
+++ b/forged/internal/unsorted/git_init.go
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "github.com/go-git/go-git/v5"
+ gitConfig "github.com/go-git/go-git/v5/config"
+ gitFmtConfig "github.com/go-git/go-git/v5/plumbing/format/config"
+)
+
+// gitInit initializes a bare git repository with the forge-deployed hooks
+// directory as the hooksPath.
+func (s *Server) gitInit(repoPath string) (err error) {
+ var repo *git.Repository
+ var gitConf *gitConfig.Config
+
+ if repo, err = git.PlainInit(repoPath, true); err != nil {
+ return err
+ }
+
+ if gitConf, err = repo.Config(); err != nil {
+ return err
+ }
+
+ // Point hooks at the forge-managed hook executables and advertise push
+ // options to clients (used e.g. for fedid= push options).
+ gitConf.Raw.SetOption("core", gitFmtConfig.NoSubsection, "hooksPath", s.config.Hooks.Execs)
+ gitConf.Raw.SetOption("receive", gitFmtConfig.NoSubsection, "advertisePushOptions", "true")
+
+ if err = repo.SetConfig(gitConf); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/forged/internal/unsorted/git_misc.go b/forged/internal/unsorted/git_misc.go
new file mode 100644
index 0000000..dd93726
--- /dev/null
+++ b/forged/internal/unsorted/git_misc.go
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "context"
+ "errors"
+ "io"
+ "iter"
+
+ "github.com/go-git/go-git/v5"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// openRepo opens a git repository by group and repo name.
+//
+// The recursive CTE walks the group path segment by segment from the root
+// (parent_group IS NULL) and matches the repo only at the final depth.
+// err is returned unwrapped so callers can test for the driver's
+// no-rows error; on error the other named returns are zero values.
+//
+// TODO: This should be deprecated in favor of doing it in the relevant
+// request/router context in the future, as it cannot cover the nuance of
+// fields needed.
+func (s *Server) openRepo(ctx context.Context, groupPath []string, repoName string) (repo *git.Repository, description string, repoID int, fsPath string, err error) {
+ err = s.database.QueryRow(ctx, `
+WITH RECURSIVE group_path_cte AS (
+	-- Start: match the first name in the path where parent_group IS NULL
+	SELECT
+		id,
+		parent_group,
+		name,
+		1 AS depth
+	FROM groups
+	WHERE name = ($1::text[])[1]
+		AND parent_group IS NULL
+
+	UNION ALL
+
+	-- Recurse: join next segment of the path
+	SELECT
+		g.id,
+		g.parent_group,
+		g.name,
+		group_path_cte.depth + 1
+	FROM groups g
+	JOIN group_path_cte ON g.parent_group = group_path_cte.id
+	WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
+		AND group_path_cte.depth + 1 <= cardinality($1::text[])
+)
+SELECT
+	r.filesystem_path,
+	COALESCE(r.description, ''),
+	r.id
+FROM group_path_cte g
+JOIN repos r ON r.group_id = g.id
+WHERE g.depth = cardinality($1::text[])
+	AND r.name = $2
+	`, pgtype.FlatArray[string](groupPath), repoName).Scan(&fsPath, &description, &repoID)
+ if err != nil {
+ return
+ }
+
+ repo, err = git.PlainOpen(fsPath)
+ return
+}
+
+// commitIterSeqErr creates an [iter.Seq[*object.Commit]] from an
+// [object.CommitIter], and additionally returns a pointer to error.
+// The pointer to error is guaranteed to be populated with either nil or the
+// error returned by the commit iterator after the returned iterator is
+// finished.
+//
+// io.EOF from the iterator is swallowed and treated as normal termination.
+// Cancellation of ctx is observed only between commits, after the next
+// commit has already been fetched.
+//
+// NOTE(review): commitIter is never Close()d here — confirm the caller is
+// responsible for closing it.
+func commitIterSeqErr(ctx context.Context, commitIter object.CommitIter) (iter.Seq[*object.Commit], *error) {
+ var err error
+ return func(yield func(*object.Commit) bool) {
+ for {
+ commit, err2 := commitIter.Next()
+ if err2 != nil {
+ if errors.Is(err2, io.EOF) {
+ return
+ }
+ err = err2
+ return
+ }
+
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ return
+ default:
+ }
+
+ if !yield(commit) {
+ return
+ }
+ }
+ }, &err
+}
diff --git a/forged/internal/unsorted/git_plumbing.go b/forged/internal/unsorted/git_plumbing.go
new file mode 100644
index 0000000..e7ebe8f
--- /dev/null
+++ b/forged/internal/unsorted/git_plumbing.go
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "errors"
+ "os"
+ "os/exec"
+ "path"
+ "sort"
+ "strings"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
+// writeTree serializes entries into raw git tree format (mode SP name NUL
+// 20-byte sha, concatenated) and writes the object into the repository at
+// repoPath via `git hash-object -w -t tree`, returning the new tree's hex
+// SHA-1.
+//
+// The comparator approximates git's tree-entry ordering, in which directory
+// entries ("40000") compare as though their name carried a trailing '/'.
+// NOTE(review): verify the prefix cases against git for adversarial names.
+func writeTree(ctx context.Context, repoPath string, entries []treeEntry) (string, error) {
+ var buf bytes.Buffer
+
+ sort.Slice(entries, func(i, j int) bool {
+ nameI, nameJ := entries[i].name, entries[j].name
+
+ if nameI == nameJ { // equal names: the non-directory sorts first
+ return !(entries[i].mode == "40000") && (entries[j].mode == "40000")
+ }
+
+ if strings.HasPrefix(nameJ, nameI) && len(nameI) < len(nameJ) {
+ return !(entries[i].mode == "40000")
+ }
+
+ if strings.HasPrefix(nameI, nameJ) && len(nameJ) < len(nameI) {
+ return entries[j].mode == "40000"
+ }
+
+ return nameI < nameJ
+ })
+
+ for _, e := range entries {
+ buf.WriteString(e.mode)
+ buf.WriteByte(' ')
+ buf.WriteString(e.name)
+ buf.WriteByte(0)
+ buf.Write(e.sha)
+ }
+
+ cmd := exec.CommandContext(ctx, "git", "hash-object", "-w", "-t", "tree", "--stdin")
+ cmd.Env = append(os.Environ(), "GIT_DIR="+repoPath)
+ cmd.Stdin = &buf
+
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ if err := cmd.Run(); err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(out.String()), nil
+}
+
+// buildTreeRecursive returns the hex SHA-1 of a new root tree derived from
+// baseTree with updates applied. updates maps slash-separated file paths to
+// binary blob SHAs; a nil value removes the entry. The whole base tree is
+// loaded into a prefix-keyed cache, mutated in memory, then rewritten
+// bottom-up via writeTree.
+//
+// NOTE(review): updates can only land in directories that already exist in
+// baseTree; an update under a brand-new directory is stored in the cache
+// but never linked into any parent tree — confirm callers only touch
+// existing directories.
+func buildTreeRecursive(ctx context.Context, repoPath, baseTree string, updates map[string][]byte) (string, error) {
+ treeCache := make(map[string][]treeEntry)
+
+ // Phase 1: walk the base tree, caching every subtree's entries keyed by
+ // its slash-separated path prefix ("" for the root).
+ var walk func(string, string) error
+ walk = func(prefix, sha string) error {
+ cmd := exec.CommandContext(ctx, "git", "cat-file", "tree", sha)
+ cmd.Env = append(os.Environ(), "GIT_DIR="+repoPath)
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ if err := cmd.Run(); err != nil {
+ return err
+ }
+ data := out.Bytes()
+ i := 0
+ var entries []treeEntry
+ for i < len(data) {
+ modeEnd := bytes.IndexByte(data[i:], ' ')
+ if modeEnd < 0 {
+ return errors.New("invalid tree format")
+ }
+ mode := misc.BytesToString(data[i : i+modeEnd])
+ i += modeEnd + 1
+
+ nameEnd := bytes.IndexByte(data[i:], 0)
+ if nameEnd < 0 {
+ return errors.New("missing null after filename")
+ }
+ name := misc.BytesToString(data[i : i+nameEnd])
+ i += nameEnd + 1
+
+ // Raw trees store 20-byte binary SHA-1s; SHA-256 repositories
+ // are not supported by this parser.
+ if i+20 > len(data) {
+ return errors.New("unexpected EOF in SHA")
+ }
+ shaBytes := data[i : i+20]
+ i += 20
+
+ entries = append(entries, treeEntry{
+ mode: mode,
+ name: name,
+ sha: shaBytes,
+ })
+
+ if mode == "40000" {
+ subPrefix := path.Join(prefix, name)
+ if err := walk(subPrefix, hex.EncodeToString(shaBytes)); err != nil {
+ return err
+ }
+ }
+ }
+ treeCache[prefix] = entries
+ return nil
+ }
+
+ if err := walk("", baseTree); err != nil {
+ return "", err
+ }
+
+ // Phase 2: apply the requested updates to the cached entries.
+ for filePath, blobSha := range updates {
+ parts := strings.Split(filePath, "/")
+ dir := strings.Join(parts[:len(parts)-1], "/")
+ name := parts[len(parts)-1]
+
+ entries := treeCache[dir]
+ found := false
+ for i, e := range entries {
+ if e.name == name {
+ if blobSha == nil {
+ // nil SHA marks a deletion: drop this entry.
+ entries = append(entries[:i], entries[i+1:]...)
+ } else {
+ entries[i].sha = blobSha
+ }
+ found = true
+ break
+ }
+ }
+ if !found && blobSha != nil {
+ // New files are added as regular non-executable blobs.
+ entries = append(entries, treeEntry{
+ mode: "100644",
+ name: name,
+ sha: blobSha,
+ })
+ }
+ treeCache[dir] = entries
+ }
+
+ // Phase 3: rewrite subtrees depth-first so each parent references the
+ // freshly written child SHA; memoize written subtrees in built.
+ built := make(map[string][]byte)
+ var build func(string) ([]byte, error)
+ build = func(prefix string) ([]byte, error) {
+ entries := treeCache[prefix]
+ for i, e := range entries {
+ if e.mode == "40000" {
+ subPrefix := path.Join(prefix, e.name)
+ if sha, ok := built[subPrefix]; ok {
+ entries[i].sha = sha
+ continue
+ }
+ newShaStr, err := build(subPrefix)
+ if err != nil {
+ return nil, err
+ }
+ entries[i].sha = newShaStr
+ }
+ }
+ shaStr, err := writeTree(ctx, repoPath, entries)
+ if err != nil {
+ return nil, err
+ }
+ shaBytes, err := hex.DecodeString(shaStr)
+ if err != nil {
+ return nil, err
+ }
+ built[prefix] = shaBytes
+ return shaBytes, nil
+ }
+
+ rootShaBytes, err := build("")
+ if err != nil {
+ return "", err
+ }
+ return hex.EncodeToString(rootShaBytes), nil
+}
+
+// treeEntry is a single entry of a raw git tree object.
+type treeEntry struct {
+ mode string // octal mode string as stored in the tree, like "100644"; "40000" marks a subtree
+ name string // individual name component (no slashes)
+ sha []byte // 20-byte binary SHA-1 of the referenced object
+}
diff --git a/forged/internal/unsorted/git_ref.go b/forged/internal/unsorted/git_ref.go
new file mode 100644
index 0000000..d9735ba
--- /dev/null
+++ b/forged/internal/unsorted/git_ref.go
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "github.com/go-git/go-git/v5"
+ "github.com/go-git/go-git/v5/plumbing"
+)
+
+// getRefHash returns the hash of a reference given its
+// type and name as supplied in URL queries. An empty refType resolves HEAD;
+// "commit" parses refName as a raw hash; "branch" and "tag" resolve the
+// corresponding refs (following symbolic references).
+//
+// NOTE(review): the default branch panics on an unrecognized refType even
+// though the value originates from a URL query — confirm the router
+// validates refType before this is called, or convert the panic into an
+// error return.
+func getRefHash(repo *git.Repository, refType, refName string) (refHash plumbing.Hash, err error) {
+ var ref *plumbing.Reference
+ switch refType {
+ case "":
+ if ref, err = repo.Head(); err != nil {
+ return
+ }
+ refHash = ref.Hash()
+ case "commit":
+ refHash = plumbing.NewHash(refName)
+ case "branch":
+ if ref, err = repo.Reference(plumbing.NewBranchReferenceName(refName), true); err != nil {
+ return
+ }
+ refHash = ref.Hash()
+ case "tag":
+ if ref, err = repo.Reference(plumbing.NewTagReferenceName(refName), true); err != nil {
+ return
+ }
+ refHash = ref.Hash()
+ default:
+ panic("Invalid ref type " + refType)
+ }
+ return
+}
diff --git a/forged/internal/unsorted/http_auth.go b/forged/internal/unsorted/http_auth.go
new file mode 100644
index 0000000..b0afa05
--- /dev/null
+++ b/forged/internal/unsorted/http_auth.go
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "net/http"
+)
+
+// getUserFromRequest returns the user ID and username associated with the
+// session cookie in a given [http.Request].
+//
+// If no "session" cookie is present, the error from [http.Request.Cookie]
+// is returned. If the cookie value does not match a session row, the
+// database driver's no-rows error is returned unchanged.
+func (s *Server) getUserFromRequest(request *http.Request) (id int, username string, err error) {
+ var sessionCookie *http.Cookie
+
+ if sessionCookie, err = request.Cookie("session"); err != nil {
+ return
+ }
+
+ err = s.database.QueryRow(
+ request.Context(),
+ "SELECT user_id, COALESCE(username, '') FROM users u JOIN sessions s ON u.id = s.user_id WHERE s.session_id = $1;",
+ sessionCookie.Value,
+ ).Scan(&id, &username)
+
+ return
+}
diff --git a/forged/internal/unsorted/http_handle_branches.go b/forged/internal/unsorted/http_handle_branches.go
new file mode 100644
index 0000000..704e1d8
--- /dev/null
+++ b/forged/internal/unsorted/http_handle_branches.go
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "net/http"
+ "strings"
+
+ "github.com/go-git/go-git/v5"
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/storer"
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
+// httpHandleRepoBranches provides the branches page in repos.
+//
+// Branch listing is best-effort: errors from Branches/ForEach are silently
+// ignored, resulting in an empty or partial list rather than an error page.
+func (s *Server) httpHandleRepoBranches(writer http.ResponseWriter, _ *http.Request, params map[string]any) {
+ var repo *git.Repository
+ var repoName string
+ var groupPath []string
+ var err error
+ var notes []string
+ var branches []string
+ var branchesIter storer.ReferenceIter
+
+ repo, repoName, groupPath = params["repo"].(*git.Repository), params["repo_name"].(string), params["group_path"].([]string)
+
+ // Newlines in path components would corrupt generated URLs/headers, so
+ // only warn that HTTP clone will not work.
+ if strings.Contains(repoName, "\n") || misc.SliceContainsNewlines(groupPath) {
+ notes = append(notes, "Path contains newlines; HTTP Git access impossible")
+ }
+
+ branchesIter, err = repo.Branches()
+ if err == nil {
+ _ = branchesIter.ForEach(func(branch *plumbing.Reference) error {
+ branches = append(branches, branch.Name().Short())
+ return nil
+ })
+ }
+ params["branches"] = branches
+
+ params["http_clone_url"] = s.genHTTPRemoteURL(groupPath, repoName)
+ params["ssh_clone_url"] = s.genSSHRemoteURL(groupPath, repoName)
+ params["notes"] = notes
+
+ s.renderTemplate(writer, "repo_branches", params)
+}
diff --git a/forged/internal/unsorted/http_handle_group_index.go b/forged/internal/unsorted/http_handle_group_index.go
new file mode 100644
index 0000000..ce28a1c
--- /dev/null
+++ b/forged/internal/unsorted/http_handle_group_index.go
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "errors"
+ "net/http"
+ "path/filepath"
+ "strconv"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgtype"
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+ "go.lindenii.runxiyu.org/forge/forged/internal/web"
+)
+
+// httpHandleGroupIndex provides index pages for groups, which includes a list
+// of its subgroups and repos, as well as a form for group maintainers to
+// create repos.
+//
+// GET renders the page; POST (maintainers only) creates a repo in this
+// group and redirects back.
+func (s *Server) httpHandleGroupIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) {
+ var groupPath []string
+ var repos []nameDesc
+ var subgroups []nameDesc
+ var err error
+ var groupID int
+ var groupDesc string
+
+ groupPath = params["group_path"].([]string)
+
+ // The group itself: resolve the path segment by segment via a
+ // recursive CTE, matching only at the final depth.
+ err = s.database.QueryRow(request.Context(), `
+	WITH RECURSIVE group_path_cte AS (
+		SELECT
+			id,
+			parent_group,
+			name,
+			1 AS depth
+		FROM groups
+		WHERE name = ($1::text[])[1]
+			AND parent_group IS NULL
+
+		UNION ALL
+
+		SELECT
+			g.id,
+			g.parent_group,
+			g.name,
+			group_path_cte.depth + 1
+		FROM groups g
+		JOIN group_path_cte ON g.parent_group = group_path_cte.id
+		WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
+			AND group_path_cte.depth + 1 <= cardinality($1::text[])
+	)
+	SELECT c.id, COALESCE(g.description, '')
+	FROM group_path_cte c
+	JOIN groups g ON g.id = c.id
+	WHERE c.depth = cardinality($1::text[])
+	`,
+ pgtype.FlatArray[string](groupPath),
+ ).Scan(&groupID, &groupDesc)
+
+ if errors.Is(err, pgx.ErrNoRows) {
+ web.ErrorPage404(s.templates, writer, params)
+ return
+ } else if err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting group: "+err.Error())
+ return
+ }
+
+ // ACL: any role on the group counts as direct access.
+ // NOTE(review): params["user_id"].(int) panics if the middleware did not
+ // set user_id (e.g. anonymous visitor) — confirm it is always populated.
+ var count int
+ err = s.database.QueryRow(request.Context(), `
+	SELECT COUNT(*)
+	FROM user_group_roles
+	WHERE user_id = $1
+	AND group_id = $2
+	`, params["user_id"].(int), groupID).Scan(&count)
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error checking access: "+err.Error())
+ return
+ }
+ directAccess := (count > 0)
+
+ if request.Method == http.MethodPost {
+ if !directAccess {
+ web.ErrorPage403(s.templates, writer, params, "You do not have direct access to this group")
+ return
+ }
+
+ repoName := request.FormValue("repo_name")
+ repoDesc := request.FormValue("repo_desc")
+ contribReq := request.FormValue("repo_contrib")
+ if repoName == "" {
+ web.ErrorPage400(s.templates, writer, params, "Repo name is required")
+ return
+ }
+
+ // NOTE(review): the INSERT, the path UPDATE, and gitInit below are not
+ // one transaction — a failure partway leaves a repo row without a
+ // filesystem_path or an on-disk repository; confirm this is tolerated.
+ var newRepoID int
+ err := s.database.QueryRow(
+ request.Context(),
+ `INSERT INTO repos (name, description, group_id, contrib_requirements)
+	VALUES ($1, $2, $3, $4)
+	RETURNING id`,
+ repoName,
+ repoDesc,
+ groupID,
+ contribReq,
+ ).Scan(&newRepoID)
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error creating repo: "+err.Error())
+ return
+ }
+
+ // The on-disk path is derived from the new row's ID.
+ filePath := filepath.Join(s.config.Git.RepoDir, strconv.Itoa(newRepoID)+".git")
+
+ _, err = s.database.Exec(
+ request.Context(),
+ `UPDATE repos
+	SET filesystem_path = $1
+	WHERE id = $2`,
+ filePath,
+ newRepoID,
+ )
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error updating repo path: "+err.Error())
+ return
+ }
+
+ if err = s.gitInit(filePath); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error initializing repo: "+err.Error())
+ return
+ }
+
+ misc.RedirectUnconditionally(writer, request)
+ return
+ }
+
+ // Repos
+ var rows pgx.Rows
+ rows, err = s.database.Query(request.Context(), `
+	SELECT name, COALESCE(description, '')
+	FROM repos
+	WHERE group_id = $1
+	`, groupID)
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting repos: "+err.Error())
+ return
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var name, description string
+ if err = rows.Scan(&name, &description); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting repos: "+err.Error())
+ return
+ }
+ repos = append(repos, nameDesc{name, description})
+ }
+ if err = rows.Err(); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting repos: "+err.Error())
+ return
+ }
+
+ // Subgroups
+ rows, err = s.database.Query(request.Context(), `
+	SELECT name, COALESCE(description, '')
+	FROM groups
+	WHERE parent_group = $1
+	`, groupID)
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting subgroups: "+err.Error())
+ return
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var name, description string
+ if err = rows.Scan(&name, &description); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting subgroups: "+err.Error())
+ return
+ }
+ subgroups = append(subgroups, nameDesc{name, description})
+ }
+ if err = rows.Err(); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting subgroups: "+err.Error())
+ return
+ }
+
+ params["repos"] = repos
+ params["subgroups"] = subgroups
+ params["description"] = groupDesc
+ params["direct_access"] = directAccess
+
+ s.renderTemplate(writer, "group", params)
+}
diff --git a/forged/internal/unsorted/http_handle_index.go b/forged/internal/unsorted/http_handle_index.go
new file mode 100644
index 0000000..a3141f4
--- /dev/null
+++ b/forged/internal/unsorted/http_handle_index.go
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "net/http"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/web"
+)
+
+// httpHandleIndex provides the main index page which includes a list of groups
+// and some global information such as SSH keys.
+func (s *Server) httpHandleIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) {
+ var err error
+ var groups []nameDesc
+
+ groups, err = s.queryNameDesc(request.Context(), "SELECT name, COALESCE(description, '') FROM groups WHERE parent_group IS NULL")
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error querying groups: "+err.Error())
+ return
+ }
+ params["groups"] = groups
+
+ s.renderTemplate(writer, "index", params)
+}
diff --git a/forged/internal/unsorted/http_handle_login.go b/forged/internal/unsorted/http_handle_login.go
new file mode 100644
index 0000000..8adbe17
--- /dev/null
+++ b/forged/internal/unsorted/http_handle_login.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "crypto/rand"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/jackc/pgx/v5"
+ "go.lindenii.runxiyu.org/forge/forged/internal/argon2id"
+ "go.lindenii.runxiyu.org/forge/forged/internal/web"
+)
+
+// httpHandleLogin provides the login page for local users.
+//
+// GET renders the form; POST verifies the username/password against the
+// stored argon2id hash, creates a session row, and sets the "session"
+// cookie before redirecting to "/".
+func (s *Server) httpHandleLogin(writer http.ResponseWriter, request *http.Request, params map[string]any) {
+ var username, password string
+ var userID int
+ var passwordHash string
+ var err error
+ var passwordMatches bool
+ var cookieValue string
+ var now time.Time
+ var expiry time.Time
+ var cookie http.Cookie
+
+ if request.Method != http.MethodPost {
+ s.renderTemplate(writer, "login", params)
+ return
+ }
+
+ username = request.PostFormValue("username")
+ password = request.PostFormValue("password")
+
+ err = s.database.QueryRow(request.Context(),
+ "SELECT id, COALESCE(password, '') FROM users WHERE username = $1",
+ username,
+ ).Scan(&userID, &passwordHash)
+ if err != nil {
+ // NOTE(review): the distinct "Unknown username" / "Invalid password"
+ // messages allow account enumeration — confirm this is acceptable.
+ if errors.Is(err, pgx.ErrNoRows) {
+ params["login_error"] = "Unknown username"
+ s.renderTemplate(writer, "login", params)
+ return
+ }
+ web.ErrorPage500(s.templates, writer, params, "Error querying user information: "+err.Error())
+ return
+ }
+ if passwordHash == "" {
+ params["login_error"] = "User has no password"
+ s.renderTemplate(writer, "login", params)
+ return
+ }
+
+ if passwordMatches, err = argon2id.ComparePasswordAndHash(password, passwordHash); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error comparing password and hash: "+err.Error())
+ return
+ }
+
+ if !passwordMatches {
+ params["login_error"] = "Invalid password"
+ s.renderTemplate(writer, "login", params)
+ return
+ }
+
+ if cookieValue, err = randomUrlsafeStr(16); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting random string: "+err.Error())
+ return
+ }
+
+ now = time.Now()
+ expiry = now.Add(time.Duration(s.config.HTTP.CookieExpiry) * time.Second)
+
+ cookie = http.Cookie{
+ Name: "session",
+ Value: cookieValue,
+ SameSite: http.SameSiteLaxMode,
+ HttpOnly: true,
+ Secure: false, // TODO
+ Expires: expiry,
+ Path: "/",
+ } //exhaustruct:ignore
+
+ // The cookie is sent before the session row is stored; if the INSERT
+ // below fails, the client is left holding an orphaned cookie.
+ http.SetCookie(writer, &cookie)
+
+ _, err = s.database.Exec(request.Context(), "INSERT INTO sessions (user_id, session_id) VALUES ($1, $2)", userID, cookieValue)
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error inserting session: "+err.Error())
+ return
+ }
+
+ http.Redirect(writer, request, "/", http.StatusSeeOther)
+}
+
+// randomUrlsafeStr returns a cryptographically random URL-safe base64
+// string carrying 3*sz bytes of entropy; since RawURLEncoding emits no
+// padding, the returned string is exactly 4*sz characters long.
+func randomUrlsafeStr(sz int) (string, error) {
+ buf := make([]byte, 3*sz)
+ if _, err := rand.Read(buf); err != nil {
+ return "", fmt.Errorf("error generating random string: %w", err)
+ }
+ return base64.RawURLEncoding.EncodeToString(buf), nil
+}
diff --git a/forged/internal/unsorted/http_handle_repo_commit.go b/forged/internal/unsorted/http_handle_repo_commit.go
new file mode 100644
index 0000000..2afdf3a
--- /dev/null
+++ b/forged/internal/unsorted/http_handle_repo_commit.go
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/go-git/go-git/v5"
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/filemode"
+ "github.com/go-git/go-git/v5/plumbing/format/diff"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+ "go.lindenii.runxiyu.org/forge/forged/internal/oldgit"
+ "go.lindenii.runxiyu.org/forge/forged/internal/web"
+)
+
+// usableFilePatch is a [diff.FilePatch] that is structured in a way more
+// friendly for use in HTML templates.
+type usableFilePatch struct {
+ From diff.File
+ To diff.File
+ Chunks []usableChunk
+}
+
+// usableChunk is a [diff.Chunk] that is structured in a way more friendly for
+// use in HTML templates.
+type usableChunk struct {
+ Operation diff.Operation
+ Content string
+}
+
+func (s *Server) httpHandleRepoCommit(writer http.ResponseWriter, request *http.Request, params map[string]any) {
+ var repo *git.Repository
+ var commitIDStrSpec, commitIDStrSpecNoSuffix string
+ var commitID plumbing.Hash
+ var parentCommitHash plumbing.Hash
+ var commitObj *object.Commit
+ var commitIDStr string
+ var err error
+ var patch *object.Patch
+
+ repo, commitIDStrSpec = params["repo"].(*git.Repository), params["commit_id"].(string)
+
+ commitIDStrSpecNoSuffix = strings.TrimSuffix(commitIDStrSpec, ".patch")
+ commitID = plumbing.NewHash(commitIDStrSpecNoSuffix)
+ if commitObj, err = repo.CommitObject(commitID); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting commit object: "+err.Error())
+ return
+ }
+ if commitIDStrSpecNoSuffix != commitIDStrSpec {
+ var patchStr string
+ if patchStr, err = oldgit.FmtCommitPatch(commitObj); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error formatting patch: "+err.Error())
+ return
+ }
+ fmt.Fprintln(writer, patchStr)
+ return
+ }
+ commitIDStr = commitObj.Hash.String()
+
+ if commitIDStr != commitIDStrSpec {
+ http.Redirect(writer, request, commitIDStr, http.StatusSeeOther)
+ return
+ }
+
+ params["commit_object"] = commitObj
+ params["commit_id"] = commitIDStr
+
+ parentCommitHash, patch, err = oldgit.CommitToPatch(commitObj)
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting patch from commit: "+err.Error())
+ return
+ }
+ params["parent_commit_hash"] = parentCommitHash.String()
+ params["patch"] = patch
+
+ params["file_patches"] = makeUsableFilePatches(patch)
+
+ s.renderTemplate(writer, "repo_commit", params)
+}
+
+type fakeDiffFile struct {
+ hash plumbing.Hash
+ mode filemode.FileMode
+ path string
+}
+
+func (f fakeDiffFile) Hash() plumbing.Hash {
+ return f.hash
+}
+
+func (f fakeDiffFile) Mode() filemode.FileMode {
+ return f.mode
+}
+
+func (f fakeDiffFile) Path() string {
+ return f.path
+}
+
+var nullFakeDiffFile = fakeDiffFile{ //nolint:gochecknoglobals
+ hash: plumbing.NewHash("0000000000000000000000000000000000000000"),
+ mode: misc.FirstOrPanic(filemode.New("100644")),
+ path: "",
+}
+
+func makeUsableFilePatches(patch diff.Patch) (usableFilePatches []usableFilePatch) {
+ // TODO: Remove unnecessary context
+ // TODO: Prepend "+"/"-"/" " instead of solely distinguishing based on color
+
+ for _, filePatch := range patch.FilePatches() {
+ var fromFile, toFile diff.File
+ var ufp usableFilePatch
+ chunks := []usableChunk{}
+
+ fromFile, toFile = filePatch.Files()
+ if fromFile == nil {
+ fromFile = nullFakeDiffFile
+ }
+ if toFile == nil {
+ toFile = nullFakeDiffFile
+ }
+ for _, chunk := range filePatch.Chunks() {
+ var content string
+
+ content = chunk.Content()
+ if len(content) > 0 && content[0] == '\n' {
+ content = "\n" + content
+ } // Horrible hack to fix how browsers handle newlines that immediately follow <pre>
+ chunks = append(chunks, usableChunk{
+ Operation: chunk.Type(),
+ Content: content,
+ })
+ }
+ ufp = usableFilePatch{
+ Chunks: chunks,
+ From: fromFile,
+ To: toFile,
+ }
+ usableFilePatches = append(usableFilePatches, ufp)
+ }
+ return
+}
diff --git a/forged/internal/unsorted/http_handle_repo_contrib_index.go b/forged/internal/unsorted/http_handle_repo_contrib_index.go
new file mode 100644
index 0000000..5c68c08
--- /dev/null
+++ b/forged/internal/unsorted/http_handle_repo_contrib_index.go
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "net/http"
+
+ "github.com/jackc/pgx/v5"
+ "go.lindenii.runxiyu.org/forge/forged/internal/web"
+)
+
+// idTitleStatus describes the properties of a merge request that need to be
+// present in MR listings.
+type idTitleStatus struct {
+ ID int
+ Title string
+ Status string
+}
+
+// httpHandleRepoContribIndex provides an index to merge requests of a repo.
+func (s *Server) httpHandleRepoContribIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) {
+ var rows pgx.Rows
+ var result []idTitleStatus
+ var err error
+
+ if rows, err = s.database.Query(request.Context(),
+ "SELECT repo_local_id, COALESCE(title, 'Untitled'), status FROM merge_requests WHERE repo_id = $1",
+ params["repo_id"],
+ ); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error querying merge requests: "+err.Error())
+ return
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var mrID int
+ var mrTitle, mrStatus string
+ if err = rows.Scan(&mrID, &mrTitle, &mrStatus); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error scanning merge request: "+err.Error())
+ return
+ }
+ result = append(result, idTitleStatus{mrID, mrTitle, mrStatus})
+ }
+ if err = rows.Err(); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error ranging over merge requests: "+err.Error())
+ return
+ }
+ params["merge_requests"] = result
+
+ s.renderTemplate(writer, "repo_contrib_index", params)
+}
diff --git a/forged/internal/unsorted/http_handle_repo_contrib_one.go b/forged/internal/unsorted/http_handle_repo_contrib_one.go
new file mode 100644
index 0000000..1d733b0
--- /dev/null
+++ b/forged/internal/unsorted/http_handle_repo_contrib_one.go
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "net/http"
+ "strconv"
+
+ "github.com/go-git/go-git/v5"
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "go.lindenii.runxiyu.org/forge/forged/internal/web"
+)
+
+// httpHandleRepoContribOne provides an interface to each merge request of a
+// repo.
+func (s *Server) httpHandleRepoContribOne(writer http.ResponseWriter, request *http.Request, params map[string]any) {
+ var mrIDStr string
+ var mrIDInt int
+ var err error
+ var title, status, srcRefStr, dstBranchStr string
+ var repo *git.Repository
+ var srcRefHash plumbing.Hash
+ var dstBranchHash plumbing.Hash
+ var srcCommit, dstCommit, mergeBaseCommit *object.Commit
+ var mergeBases []*object.Commit
+
+ mrIDStr = params["mr_id"].(string)
+ mrIDInt64, err := strconv.ParseInt(mrIDStr, 10, strconv.IntSize)
+ if err != nil {
+ web.ErrorPage400(s.templates, writer, params, "Merge request ID not an integer")
+ return
+ }
+ mrIDInt = int(mrIDInt64)
+
+ if err = s.database.QueryRow(request.Context(),
+ "SELECT COALESCE(title, ''), status, source_ref, COALESCE(destination_branch, '') FROM merge_requests WHERE repo_id = $1 AND repo_local_id = $2",
+ params["repo_id"], mrIDInt,
+ ).Scan(&title, &status, &srcRefStr, &dstBranchStr); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error querying merge request: "+err.Error())
+ return
+ }
+
+ repo = params["repo"].(*git.Repository)
+
+ if srcRefHash, err = getRefHash(repo, "branch", srcRefStr); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting source ref hash: "+err.Error())
+ return
+ }
+ if srcCommit, err = repo.CommitObject(srcRefHash); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting source commit: "+err.Error())
+ return
+ }
+ params["source_commit"] = srcCommit
+
+ if dstBranchStr == "" {
+ dstBranchStr = "HEAD"
+ dstBranchHash, err = getRefHash(repo, "", "")
+ } else {
+ dstBranchHash, err = getRefHash(repo, "branch", dstBranchStr)
+ }
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting destination branch hash: "+err.Error())
+ return
+ }
+
+ if dstCommit, err = repo.CommitObject(dstBranchHash); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting destination commit: "+err.Error())
+ return
+ }
+ params["destination_commit"] = dstCommit
+
+ if mergeBases, err = srcCommit.MergeBase(dstCommit); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting merge base: "+err.Error())
+ return
+ }
+
+ if len(mergeBases) < 1 {
+ web.ErrorPage500(s.templates, writer, params, "No merge base found for this merge request; these two branches do not share any common history")
+ // TODO
+ return
+ }
+
+ mergeBaseCommit = mergeBases[0]
+ params["merge_base"] = mergeBaseCommit
+
+ patch, err := mergeBaseCommit.Patch(srcCommit)
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting patch: "+err.Error())
+ return
+ }
+ params["file_patches"] = makeUsableFilePatches(patch)
+
+ params["mr_title"], params["mr_status"], params["mr_source_ref"], params["mr_destination_branch"] = title, status, srcRefStr, dstBranchStr
+
+ s.renderTemplate(writer, "repo_contrib_one", params)
+}
diff --git a/forged/internal/unsorted/http_handle_repo_index.go b/forged/internal/unsorted/http_handle_repo_index.go
new file mode 100644
index 0000000..dd46dfe
--- /dev/null
+++ b/forged/internal/unsorted/http_handle_repo_index.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "net/http"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/git2c"
+ "go.lindenii.runxiyu.org/forge/forged/internal/render"
+ "go.lindenii.runxiyu.org/forge/forged/internal/web"
+)
+
+// httpHandleRepoIndex provides the front page of a repo using git2d.
+func (s *Server) httpHandleRepoIndex(w http.ResponseWriter, req *http.Request, params map[string]any) {
+ repoName := params["repo_name"].(string)
+ groupPath := params["group_path"].([]string)
+
+ _, repoPath, _, _, _, _, _ := s.getRepoInfo(req.Context(), groupPath, repoName, "") // TODO: Don't use getRepoInfo
+
+ client, err := git2c.NewClient(s.config.Git.Socket)
+ if err != nil {
+ web.ErrorPage500(s.templates, w, params, err.Error())
+ return
+ }
+ defer client.Close()
+
+ commits, readme, err := client.CmdIndex(repoPath)
+ if err != nil {
+ web.ErrorPage500(s.templates, w, params, err.Error())
+ return
+ }
+
+ params["commits"] = commits
+ params["readme_filename"] = readme.Filename
+ _, params["readme"] = render.Readme(readme.Content, readme.Filename)
+
+ s.renderTemplate(w, "repo_index", params)
+
+ // TODO: Caching
+}
diff --git a/forged/internal/unsorted/http_handle_repo_info.go b/forged/internal/unsorted/http_handle_repo_info.go
new file mode 100644
index 0000000..e23b1d2
--- /dev/null
+++ b/forged/internal/unsorted/http_handle_repo_info.go
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "os/exec"
+
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// httpHandleRepoInfo provides advertised refs of a repo for use in Git's Smart
+// HTTP protocol.
+//
+// TODO: Reject access from web browsers.
+func (s *Server) httpHandleRepoInfo(writer http.ResponseWriter, request *http.Request, params map[string]any) (err error) {
+ groupPath := params["group_path"].([]string)
+ repoName := params["repo_name"].(string)
+ var repoPath string
+
+ if err := s.database.QueryRow(request.Context(), `
+ WITH RECURSIVE group_path_cte AS (
+ -- Start: match the first name in the path where parent_group IS NULL
+ SELECT
+ id,
+ parent_group,
+ name,
+ 1 AS depth
+ FROM groups
+ WHERE name = ($1::text[])[1]
+ AND parent_group IS NULL
+
+ UNION ALL
+
+ -- Recurse: join next segment of the path
+ SELECT
+ g.id,
+ g.parent_group,
+ g.name,
+ group_path_cte.depth + 1
+ FROM groups g
+ JOIN group_path_cte ON g.parent_group = group_path_cte.id
+ WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
+ AND group_path_cte.depth + 1 <= cardinality($1::text[])
+ )
+ SELECT r.filesystem_path
+ FROM group_path_cte c
+ JOIN repos r ON r.group_id = c.id
+ WHERE c.depth = cardinality($1::text[])
+ AND r.name = $2
+ `,
+ pgtype.FlatArray[string](groupPath),
+ repoName,
+ ).Scan(&repoPath); err != nil {
+ return err
+ }
+
+ writer.Header().Set("Content-Type", "application/x-git-upload-pack-advertisement")
+ writer.WriteHeader(http.StatusOK)
+
+ cmd := exec.Command("git", "upload-pack", "--stateless-rpc", "--advertise-refs", repoPath)
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ _ = stdout.Close()
+ }()
+ cmd.Stderr = cmd.Stdout
+
+ if err = cmd.Start(); err != nil {
+ return err
+ }
+
+ if err = packLine(writer, "# service=git-upload-pack\n"); err != nil {
+ return err
+ }
+
+ if err = packFlush(writer); err != nil {
+ return
+ }
+
+ if _, err = io.Copy(writer, stdout); err != nil {
+ return err
+ }
+
+ if err = cmd.Wait(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Taken from https://github.com/icyphox/legit, MIT license.
+func packLine(w io.Writer, s string) error {
+ _, err := fmt.Fprintf(w, "%04x%s", len(s)+4, s)
+ return err
+}
+
+// Taken from https://github.com/icyphox/legit, MIT license.
+func packFlush(w io.Writer) error {
+ _, err := fmt.Fprint(w, "0000")
+ return err
+}
diff --git a/forged/internal/unsorted/http_handle_repo_log.go b/forged/internal/unsorted/http_handle_repo_log.go
new file mode 100644
index 0000000..5d90871
--- /dev/null
+++ b/forged/internal/unsorted/http_handle_repo_log.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "net/http"
+
+ "github.com/go-git/go-git/v5"
+ "github.com/go-git/go-git/v5/plumbing"
+ "go.lindenii.runxiyu.org/forge/forged/internal/web"
+)
+
+// httpHandleRepoLog provides a page with a complete Git log.
+//
+// TODO: This currently provides all commits in the branch. It should be
+// paginated and cached instead.
+func (s *Server) httpHandleRepoLog(writer http.ResponseWriter, req *http.Request, params map[string]any) {
+ var repo *git.Repository
+ var refHash plumbing.Hash
+ var err error
+
+ repo = params["repo"].(*git.Repository)
+
+ if refHash, err = getRefHash(repo, params["ref_type"].(string), params["ref_name"].(string)); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting ref hash: "+err.Error())
+ return
+ }
+
+ logOptions := git.LogOptions{From: refHash} //exhaustruct:ignore
+ commitIter, err := repo.Log(&logOptions)
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error getting recent commits: "+err.Error())
+ return
+ }
+ params["commits"], params["commits_err"] = commitIterSeqErr(req.Context(), commitIter)
+
+ s.renderTemplate(writer, "repo_log", params)
+}
diff --git a/forged/internal/unsorted/http_handle_repo_raw.go b/forged/internal/unsorted/http_handle_repo_raw.go
new file mode 100644
index 0000000..1127284
--- /dev/null
+++ b/forged/internal/unsorted/http_handle_repo_raw.go
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "fmt"
+ "html/template"
+ "net/http"
+ "strings"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/git2c"
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+ "go.lindenii.runxiyu.org/forge/forged/internal/web"
+)
+
+// httpHandleRepoRaw serves raw files, or directory listings that point to raw
+// files.
+func (s *Server) httpHandleRepoRaw(writer http.ResponseWriter, request *http.Request, params map[string]any) {
+ repoName := params["repo_name"].(string)
+ groupPath := params["group_path"].([]string)
+ rawPathSpec := params["rest"].(string)
+ pathSpec := strings.TrimSuffix(rawPathSpec, "/")
+ params["path_spec"] = pathSpec
+
+ _, repoPath, _, _, _, _, _ := s.getRepoInfo(request.Context(), groupPath, repoName, "")
+
+ client, err := git2c.NewClient(s.config.Git.Socket)
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, err.Error())
+ return
+ }
+ defer client.Close()
+
+ files, content, err := client.CmdTreeRaw(repoPath, pathSpec)
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, err.Error())
+ return
+ }
+
+ switch {
+ case files != nil:
+ params["files"] = files
+ params["readme_filename"] = "README.md"
+ params["readme"] = template.HTML("<p>README rendering here is WIP again</p>") // TODO
+ s.renderTemplate(writer, "repo_raw_dir", params)
+ case content != "":
+ if misc.RedirectNoDir(writer, request) {
+ return
+ }
+ writer.Header().Set("Content-Type", "application/octet-stream")
+ fmt.Fprint(writer, content)
+ default:
+ web.ErrorPage500(s.templates, writer, params, "Unknown error fetching repo raw data")
+ }
+}
diff --git a/forged/internal/unsorted/http_handle_repo_tree.go b/forged/internal/unsorted/http_handle_repo_tree.go
new file mode 100644
index 0000000..4799ccb
--- /dev/null
+++ b/forged/internal/unsorted/http_handle_repo_tree.go
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "html/template"
+ "net/http"
+ "strings"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/git2c"
+ "go.lindenii.runxiyu.org/forge/forged/internal/render"
+ "go.lindenii.runxiyu.org/forge/forged/internal/web"
+)
+
+// httpHandleRepoTree provides a friendly, syntax-highlighted view of
+// individual files, and provides directory views that link to these files.
+//
+// TODO: Do not highlight files that are too large.
+func (s *Server) httpHandleRepoTree(writer http.ResponseWriter, request *http.Request, params map[string]any) {
+ repoName := params["repo_name"].(string)
+ groupPath := params["group_path"].([]string)
+ rawPathSpec := params["rest"].(string)
+ pathSpec := strings.TrimSuffix(rawPathSpec, "/")
+ params["path_spec"] = pathSpec
+
+ _, repoPath, _, _, _, _, _ := s.getRepoInfo(request.Context(), groupPath, repoName, "")
+
+ client, err := git2c.NewClient(s.config.Git.Socket)
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, err.Error())
+ return
+ }
+ defer client.Close()
+
+ files, content, err := client.CmdTreeRaw(repoPath, pathSpec)
+ if err != nil {
+ web.ErrorPage500(s.templates, writer, params, err.Error())
+ return
+ }
+
+ switch {
+ case files != nil:
+ params["files"] = files
+ params["readme_filename"] = "README.md"
+ params["readme"] = template.HTML("<p>README rendering here is WIP again</p>") // TODO
+ s.renderTemplate(writer, "repo_tree_dir", params)
+ case content != "":
+ rendered := render.Highlight(pathSpec, content)
+ params["file_contents"] = rendered
+ s.renderTemplate(writer, "repo_tree_file", params)
+ default:
+ web.ErrorPage500(s.templates, writer, params, "Unknown object type, something is seriously wrong")
+ }
+}
diff --git a/forged/internal/unsorted/http_handle_repo_upload_pack.go b/forged/internal/unsorted/http_handle_repo_upload_pack.go
new file mode 100644
index 0000000..df8bef4
--- /dev/null
+++ b/forged/internal/unsorted/http_handle_repo_upload_pack.go
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "io"
+ "net/http"
+ "os"
+ "os/exec"
+
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// httpHandleUploadPack handles incoming Git fetches/pulls/clones over the Smart
+// HTTP protocol.
+func (s *Server) httpHandleUploadPack(writer http.ResponseWriter, request *http.Request, params map[string]any) (err error) {
+ var groupPath []string
+ var repoName string
+ var repoPath string
+ var stdout io.ReadCloser
+ var stdin io.WriteCloser
+ var cmd *exec.Cmd
+
+ groupPath, repoName = params["group_path"].([]string), params["repo_name"].(string)
+
+ if err := s.database.QueryRow(request.Context(), `
+ WITH RECURSIVE group_path_cte AS (
+ -- Start: match the first name in the path where parent_group IS NULL
+ SELECT
+ id,
+ parent_group,
+ name,
+ 1 AS depth
+ FROM groups
+ WHERE name = ($1::text[])[1]
+ AND parent_group IS NULL
+
+ UNION ALL
+
+ -- Recurse: join next segment of the path
+ SELECT
+ g.id,
+ g.parent_group,
+ g.name,
+ group_path_cte.depth + 1
+ FROM groups g
+ JOIN group_path_cte ON g.parent_group = group_path_cte.id
+ WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
+ AND group_path_cte.depth + 1 <= cardinality($1::text[])
+ )
+ SELECT r.filesystem_path
+ FROM group_path_cte c
+ JOIN repos r ON r.group_id = c.id
+ WHERE c.depth = cardinality($1::text[])
+ AND r.name = $2
+ `,
+ pgtype.FlatArray[string](groupPath),
+ repoName,
+ ).Scan(&repoPath); err != nil {
+ return err
+ }
+
+ writer.Header().Set("Content-Type", "application/x-git-upload-pack-result")
+ writer.Header().Set("Connection", "Keep-Alive")
+ writer.Header().Set("Transfer-Encoding", "chunked")
+ writer.WriteHeader(http.StatusOK)
+
+ cmd = exec.Command("git", "upload-pack", "--stateless-rpc", repoPath)
+ cmd.Env = append(os.Environ(), "LINDENII_FORGE_HOOKS_SOCKET_PATH="+s.config.Hooks.Socket)
+ if stdout, err = cmd.StdoutPipe(); err != nil {
+ return err
+ }
+ cmd.Stderr = cmd.Stdout
+ defer func() {
+ _ = stdout.Close()
+ }()
+
+ if stdin, err = cmd.StdinPipe(); err != nil {
+ return err
+ }
+ defer func() {
+ _ = stdin.Close()
+ }()
+
+ if err = cmd.Start(); err != nil {
+ return err
+ }
+
+ if _, err = io.Copy(stdin, request.Body); err != nil {
+ return err
+ }
+
+ if err = stdin.Close(); err != nil {
+ return err
+ }
+
+ if _, err = io.Copy(writer, stdout); err != nil {
+ return err
+ }
+
+ if err = cmd.Wait(); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/forged/internal/unsorted/http_handle_users.go b/forged/internal/unsorted/http_handle_users.go
new file mode 100644
index 0000000..b41ee44
--- /dev/null
+++ b/forged/internal/unsorted/http_handle_users.go
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "net/http"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/web"
+)
+
+// httpHandleUsers is a useless stub.
+func (s *Server) httpHandleUsers(writer http.ResponseWriter, _ *http.Request, params map[string]any) {
+ web.ErrorPage501(s.templates, writer, params)
+}
diff --git a/forged/internal/unsorted/http_server.go b/forged/internal/unsorted/http_server.go
new file mode 100644
index 0000000..f6a1794
--- /dev/null
+++ b/forged/internal/unsorted/http_server.go
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "errors"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5"
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+ "go.lindenii.runxiyu.org/forge/forged/internal/web"
+)
+
+// ServeHTTP handles all incoming HTTP requests and routes them to the correct
+// location.
+//
+// TODO: This function is way too large.
+func (s *Server) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
+ var remoteAddr string
+ if s.config.HTTP.ReverseProxy {
+ remoteAddrs, ok := request.Header["X-Forwarded-For"]
+ if ok && len(remoteAddrs) == 1 {
+ remoteAddr = remoteAddrs[0]
+ } else {
+ remoteAddr = request.RemoteAddr
+ }
+ } else {
+ remoteAddr = request.RemoteAddr
+ }
+ slog.Info("incoming http", "addr", remoteAddr, "method", request.Method, "uri", request.RequestURI)
+
+ var segments []string
+ var err error
+ var sepIndex int
+ params := make(map[string]any)
+
+ if segments, _, err = misc.ParseReqURI(request.RequestURI); err != nil {
+ web.ErrorPage400(s.templates, writer, params, "Error parsing request URI: "+err.Error())
+ return
+ }
+ dirMode := false
+ if segments[len(segments)-1] == "" {
+ dirMode = true
+ segments = segments[:len(segments)-1]
+ }
+
+ params["url_segments"] = segments
+ params["dir_mode"] = dirMode
+ params["global"] = s.globalData
+ var userID int // 0 for none
+ userID, params["username"], err = s.getUserFromRequest(request)
+ params["user_id"] = userID
+ if err != nil && !errors.Is(err, http.ErrNoCookie) && !errors.Is(err, pgx.ErrNoRows) {
+ web.ErrorPage500(s.templates, writer, params, "Error getting user info from request: "+err.Error())
+ return
+ }
+
+ if userID == 0 {
+ params["user_id_string"] = ""
+ } else {
+ params["user_id_string"] = strconv.Itoa(userID)
+ }
+
+ for _, v := range segments {
+ if strings.Contains(v, ":") {
+ web.ErrorPage400Colon(s.templates, writer, params)
+ return
+ }
+ }
+
+ if len(segments) == 0 {
+ s.httpHandleIndex(writer, request, params)
+ return
+ }
+
+ if segments[0] == "-" {
+ if len(segments) < 2 {
+ web.ErrorPage404(s.templates, writer, params)
+ return
+ } else if len(segments) == 2 && misc.RedirectDir(writer, request) {
+ return
+ }
+
+ switch segments[1] {
+ case "static":
+ s.staticHandler.ServeHTTP(writer, request)
+ return
+ case "source":
+ s.sourceHandler.ServeHTTP(writer, request)
+ return
+ }
+ }
+
+ if segments[0] == "-" {
+ switch segments[1] {
+ case "login":
+ s.httpHandleLogin(writer, request, params)
+ return
+ case "users":
+ s.httpHandleUsers(writer, request, params)
+ return
+ default:
+ web.ErrorPage404(s.templates, writer, params)
+ return
+ }
+ }
+
+ sepIndex = -1
+ for i, part := range segments {
+ if part == "-" {
+ sepIndex = i
+ break
+ }
+ }
+
+ params["separator_index"] = sepIndex
+
+ var groupPath []string
+ var moduleType string
+ var moduleName string
+
+ if sepIndex > 0 {
+ groupPath = segments[:sepIndex]
+ } else {
+ groupPath = segments
+ }
+ params["group_path"] = groupPath
+
+ switch {
+ case sepIndex == -1:
+ if misc.RedirectDir(writer, request) {
+ return
+ }
+ s.httpHandleGroupIndex(writer, request, params)
+ case len(segments) == sepIndex+1:
+ web.ErrorPage404(s.templates, writer, params)
+ return
+ case len(segments) == sepIndex+2:
+ web.ErrorPage404(s.templates, writer, params)
+ return
+ default:
+ moduleType = segments[sepIndex+1]
+ moduleName = segments[sepIndex+2]
+ switch moduleType {
+ case "repos":
+ params["repo_name"] = moduleName
+
+ if len(segments) > sepIndex+3 {
+ switch segments[sepIndex+3] {
+ case "info":
+ if err = s.httpHandleRepoInfo(writer, request, params); err != nil {
+ web.ErrorPage500(s.templates, writer, params, err.Error())
+ }
+ return
+ case "git-upload-pack":
+ if err = s.httpHandleUploadPack(writer, request, params); err != nil {
+ web.ErrorPage500(s.templates, writer, params, err.Error())
+ }
+ return
+ }
+ }
+
+ if params["ref_type"], params["ref_name"], err = misc.GetParamRefTypeName(request); err != nil {
+ if errors.Is(err, misc.ErrNoRefSpec) {
+ params["ref_type"] = ""
+ } else {
+ web.ErrorPage400(s.templates, writer, params, "Error querying ref type: "+err.Error())
+ return
+ }
+ }
+
+ if params["repo"], params["repo_description"], params["repo_id"], _, err = s.openRepo(request.Context(), groupPath, moduleName); err != nil {
+ web.ErrorPage500(s.templates, writer, params, "Error opening repo: "+err.Error())
+ return
+ }
+
+ repoURLRoot := "/"
+ for _, part := range segments[:sepIndex+3] {
+ repoURLRoot = repoURLRoot + url.PathEscape(part) + "/"
+ }
+ params["repo_url_root"] = repoURLRoot
+ params["repo_patch_mailing_list"] = repoURLRoot[1:len(repoURLRoot)-1] + "@" + s.config.LMTP.Domain
+ params["http_clone_url"] = s.genHTTPRemoteURL(groupPath, moduleName)
+ params["ssh_clone_url"] = s.genSSHRemoteURL(groupPath, moduleName)
+
+ if len(segments) == sepIndex+3 {
+ if misc.RedirectDir(writer, request) {
+ return
+ }
+ s.httpHandleRepoIndex(writer, request, params)
+ return
+ }
+
+ repoFeature := segments[sepIndex+3]
+ switch repoFeature {
+ case "tree":
+ if misc.AnyContain(segments[sepIndex+4:], "/") {
+ web.ErrorPage400(s.templates, writer, params, "Repo tree paths may not contain slashes in any segments")
+ return
+ }
+ if dirMode {
+ params["rest"] = strings.Join(segments[sepIndex+4:], "/") + "/"
+ } else {
+ params["rest"] = strings.Join(segments[sepIndex+4:], "/")
+ }
+ if len(segments) < sepIndex+5 && misc.RedirectDir(writer, request) {
+ return
+ }
+ s.httpHandleRepoTree(writer, request, params)
+ case "branches":
+ if misc.RedirectDir(writer, request) {
+ return
+ }
+ s.httpHandleRepoBranches(writer, request, params)
+ return
+ case "raw":
+ if misc.AnyContain(segments[sepIndex+4:], "/") {
+ web.ErrorPage400(s.templates, writer, params, "Repo tree paths may not contain slashes in any segments")
+ return
+ }
+ if dirMode {
+ params["rest"] = strings.Join(segments[sepIndex+4:], "/") + "/"
+ } else {
+ params["rest"] = strings.Join(segments[sepIndex+4:], "/")
+ }
+ if len(segments) < sepIndex+5 && misc.RedirectDir(writer, request) {
+ return
+ }
+ s.httpHandleRepoRaw(writer, request, params)
+ case "log":
+ if len(segments) > sepIndex+4 {
+ web.ErrorPage400(s.templates, writer, params, "Too many parameters")
+ return
+ }
+ if misc.RedirectDir(writer, request) {
+ return
+ }
+ s.httpHandleRepoLog(writer, request, params)
+ case "commit":
+ if len(segments) != sepIndex+5 {
+ web.ErrorPage400(s.templates, writer, params, "Incorrect number of parameters")
+ return
+ }
+ if misc.RedirectNoDir(writer, request) {
+ return
+ }
+ params["commit_id"] = segments[sepIndex+4]
+ s.httpHandleRepoCommit(writer, request, params)
+ case "contrib":
+ if misc.RedirectDir(writer, request) {
+ return
+ }
+ switch len(segments) {
+ case sepIndex + 4:
+ s.httpHandleRepoContribIndex(writer, request, params)
+ case sepIndex + 5:
+ params["mr_id"] = segments[sepIndex+4]
+ s.httpHandleRepoContribOne(writer, request, params)
+ default:
+ web.ErrorPage400(s.templates, writer, params, "Too many parameters")
+ }
+ default:
+ web.ErrorPage404(s.templates, writer, params)
+ return
+ }
+ default:
+ web.ErrorPage404(s.templates, writer, params)
+ return
+ }
+ }
+}
diff --git a/forged/internal/unsorted/http_template.go b/forged/internal/unsorted/http_template.go
new file mode 100644
index 0000000..db44e4c
--- /dev/null
+++ b/forged/internal/unsorted/http_template.go
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "log/slog"
+ "net/http"
+)
+
+// renderTemplate abstracts out the annoyances of reporting template rendering
+// errors.
+func (s *Server) renderTemplate(w http.ResponseWriter, templateName string, params map[string]any) {
+ if err := s.templates.ExecuteTemplate(w, templateName, params); err != nil {
+ http.Error(w, "error rendering template: "+err.Error(), http.StatusInternalServerError)
+ slog.Error("error rendering template", "error", err.Error())
+ }
+}
diff --git a/forged/internal/unsorted/lmtp_handle_patch.go b/forged/internal/unsorted/lmtp_handle_patch.go
new file mode 100644
index 0000000..b258bfc
--- /dev/null
+++ b/forged/internal/unsorted/lmtp_handle_patch.go
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+
+ "github.com/bluekeyes/go-gitdiff/gitdiff"
+ "github.com/go-git/go-git/v5"
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
+// lmtpHandlePatch applies an emailed patch to a repository: the mbox is
+// parsed as a git diff, each file diff is applied against the repo's HEAD
+// tree, the resulting blobs/trees/commit are written via upstream git
+// plumbing, and a new randomly-named refs/heads/contrib/ branch is pointed
+// at the new commit.
+//
+// NOTE(review): a patch that creates a brand-new file appears to fail at
+// headTree.File(diffFile.OldName) — confirm how gitdiff names creations.
+func (s *Server) lmtpHandlePatch(session *lmtpSession, groupPath []string, repoName string, mbox io.Reader) (err error) {
+	var diffFiles []*gitdiff.File
+	var preamble string
+	if diffFiles, preamble, err = gitdiff.Parse(mbox); err != nil {
+		return fmt.Errorf("failed to parse patch: %w", err)
+	}
+
+	// The preamble carries the mail headers (author, date, title/body)
+	// that become the commit metadata below.
+	var header *gitdiff.PatchHeader
+	if header, err = gitdiff.ParsePatchHeader(preamble); err != nil {
+		return fmt.Errorf("failed to parse patch headers: %w", err)
+	}
+
+	var repo *git.Repository
+	var fsPath string
+	repo, _, _, fsPath, err = s.openRepo(session.ctx, groupPath, repoName)
+	if err != nil {
+		return fmt.Errorf("failed to open repo: %w", err)
+	}
+
+	headRef, err := repo.Head()
+	if err != nil {
+		return fmt.Errorf("failed to get repo head hash: %w", err)
+	}
+	headCommit, err := repo.CommitObject(headRef.Hash())
+	if err != nil {
+		return fmt.Errorf("failed to get repo head commit: %w", err)
+	}
+	headTree, err := headCommit.Tree()
+	if err != nil {
+		return fmt.Errorf("failed to get repo head tree: %w", err)
+	}
+
+	headTreeHash := headTree.Hash.String()
+
+	// Maps repo-relative path to the new blob hash; a nil value marks the
+	// old path of a rename for deletion.
+	blobUpdates := make(map[string][]byte)
+	for _, diffFile := range diffFiles {
+		sourceFile, err := headTree.File(diffFile.OldName)
+		if err != nil {
+			return fmt.Errorf("failed to get file at old name %#v: %w", diffFile.OldName, err)
+		}
+		sourceString, err := sourceFile.Contents()
+		if err != nil {
+			return fmt.Errorf("failed to get contents: %w", err)
+		}
+
+		sourceBuf := bytes.NewReader(misc.StringToBytes(sourceString))
+		var patchedBuf bytes.Buffer
+		if err := gitdiff.Apply(&patchedBuf, sourceBuf, diffFile); err != nil {
+			return fmt.Errorf("failed to apply patch: %w", err)
+		}
+
+		var hashBuf bytes.Buffer
+
+		// It's really difficult to do this via go-git so we're just
+		// going to use upstream git for now.
+		// TODO
+		cmd := exec.CommandContext(session.ctx, "git", "hash-object", "-w", "-t", "blob", "--stdin")
+		cmd.Env = append(os.Environ(), "GIT_DIR="+fsPath)
+		cmd.Stdout = &hashBuf
+		cmd.Stdin = &patchedBuf
+		if err := cmd.Run(); err != nil {
+			return fmt.Errorf("failed to run git hash-object: %w", err)
+		}
+
+		newHashStr := strings.TrimSpace(hashBuf.String())
+		newHash, err := hex.DecodeString(newHashStr)
+		if err != nil {
+			return fmt.Errorf("failed to decode hex string from git: %w", err)
+		}
+
+		blobUpdates[diffFile.NewName] = newHash
+		if diffFile.NewName != diffFile.OldName {
+			blobUpdates[diffFile.OldName] = nil // Mark for deletion.
+		}
+	}
+
+	newTreeSha, err := buildTreeRecursive(session.ctx, fsPath, headTreeHash, blobUpdates)
+	if err != nil {
+		return fmt.Errorf("failed to recursively build a tree: %w", err)
+	}
+
+	commitMsg := header.Title
+	if header.Body != "" {
+		commitMsg += "\n\n" + header.Body
+	}
+
+	// Author identity/date come from the patch header; committer identity
+	// is left to git's defaults here — presumably the system/daemon
+	// identity. TODO confirm that is intended.
+	env := append(os.Environ(),
+		"GIT_DIR="+fsPath,
+		"GIT_AUTHOR_NAME="+header.Author.Name,
+		"GIT_AUTHOR_EMAIL="+header.Author.Email,
+		"GIT_AUTHOR_DATE="+header.AuthorDate.Format(time.RFC3339),
+	)
+	commitCmd := exec.CommandContext(session.ctx, "git", "commit-tree", newTreeSha, "-p", headCommit.Hash.String(), "-m", commitMsg)
+	commitCmd.Env = env
+
+	var commitOut bytes.Buffer
+	commitCmd.Stdout = &commitOut
+	if err := commitCmd.Run(); err != nil {
+		return fmt.Errorf("failed to commit tree: %w", err)
+	}
+	newCommitSha := strings.TrimSpace(commitOut.String())
+
+	// Random, URL-safe branch name (crypto/rand.Text, Go 1.24+).
+	newBranchName := rand.Text()
+
+	refCmd := exec.CommandContext(session.ctx, "git", "update-ref", "refs/heads/contrib/"+newBranchName, newCommitSha) //#nosec G204
+	refCmd.Env = append(os.Environ(), "GIT_DIR="+fsPath)
+	if err := refCmd.Run(); err != nil {
+		return fmt.Errorf("failed to update ref: %w", err)
+	}
+
+	return nil
+}
diff --git a/forged/internal/unsorted/lmtp_server.go b/forged/internal/unsorted/lmtp_server.go
new file mode 100644
index 0000000..1e94894
--- /dev/null
+++ b/forged/internal/unsorted/lmtp_server.go
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+// SPDX-FileCopyrightText: Copyright (c) 2024 Robin Jarry <robin@jarry.cc>
+
+package unsorted
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "net"
+ "strings"
+ "time"
+
+ "github.com/emersion/go-message"
+ "github.com/emersion/go-smtp"
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
+// lmtpHandler is the go-smtp backend that mints LMTP sessions.
+type lmtpHandler struct{}
+
+// lmtpSession holds per-connection LMTP envelope state plus a context
+// cancelled on logout.
+type lmtpSession struct {
+	from   string
+	to     []string
+	ctx    context.Context
+	cancel context.CancelFunc
+	// NOTE(review): s is a Server held by value and is never assigned in
+	// NewSession, so session.s.config reads elsewhere would see a
+	// zero-value Server — confirm where s is meant to be populated.
+	s Server
+}
+
+// Reset clears the envelope (sender and recipients) so the connection can
+// carry another transaction.
+func (session *lmtpSession) Reset() {
+	session.from = ""
+	session.to = nil
+}
+
+// Logout cancels the session context, releasing anything tied to it.
+func (session *lmtpSession) Logout() error {
+	session.cancel()
+	return nil
+}
+
+// AuthPlain accepts any credentials; the LMTP socket is not authenticated.
+func (session *lmtpSession) AuthPlain(_, _ string) error {
+	return nil
+}
+
+// Mail records the envelope sender for the current transaction.
+func (session *lmtpSession) Mail(from string, _ *smtp.MailOptions) error {
+	session.from = from
+	return nil
+}
+
+// Rcpt accumulates envelope recipients for the current transaction.
+func (session *lmtpSession) Rcpt(to string, _ *smtp.RcptOptions) error {
+	session.to = append(session.to, to)
+	return nil
+}
+
+// NewSession creates an LMTP session with a fresh cancellable context.
+//
+// NOTE(review): the session's s field is left at its zero value (the
+// handler carries no *Server) — confirm how sessions are supposed to reach
+// the server's configuration.
+func (*lmtpHandler) NewSession(_ *smtp.Conn) (smtp.Session, error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	session := &lmtpSession{
+		ctx:    ctx,
+		cancel: cancel,
+	}
+	return session, nil
+}
+
+// serveLMTP configures and runs an LMTP server on the given listener
+// (typically a UNIX domain socket), using the domain and timeouts from the
+// LMTP section of the configuration.
+func (s *Server) serveLMTP(listener net.Listener) error {
+	smtpServer := smtp.NewServer(&lmtpHandler{})
+	smtpServer.LMTP = true
+	smtpServer.Domain = s.config.LMTP.Domain
+	smtpServer.Addr = s.config.LMTP.Socket
+	smtpServer.WriteTimeout = time.Duration(s.config.LMTP.WriteTimeout) * time.Second
+	smtpServer.ReadTimeout = time.Duration(s.config.LMTP.ReadTimeout) * time.Second
+	smtpServer.EnableSMTPUTF8 = true
+	return smtpServer.Serve(listener)
+}
+
+// Data consumes the DATA phase of an LMTP transaction: it enforces the
+// configured maximum message size, parses the message, skips auto-generated
+// mail, and routes messages addressed to <group…>/-/repos/<repo>@domain to
+// the patch handler. Any failure is reported as a permanent 550.
+func (session *lmtpSession) Data(r io.Reader) error {
+	var (
+		email *message.Entity
+		from  string
+		to    []string
+		err   error
+		buf   bytes.Buffer
+		data  []byte
+		n     int64
+	)
+
+	n, err = io.CopyN(&buf, r, session.s.config.LMTP.MaxSize)
+	switch {
+	case n == session.s.config.LMTP.MaxSize:
+		// NOTE(review): a message of exactly MaxSize bytes is also
+		// rejected here; copying MaxSize+1 would disambiguate.
+		err = errors.New("Message too big.")
+		// drain whatever is left in the pipe
+		_, _ = io.Copy(io.Discard, r)
+		goto end
+	case errors.Is(err, io.EOF):
+		// message was smaller than max size
+		break
+	case err != nil:
+		goto end
+	}
+
+	data = buf.Bytes()
+
+	email, err = message.Read(bytes.NewReader(data))
+	// Unknown-charset errors are recoverable: go-message still returns a
+	// usable entity, so only bail on other parse errors. (The original
+	// condition was missing the negation, which let genuine parse errors
+	// fall through with a nil email and panic at the header reads below.)
+	if err != nil && !message.IsUnknownCharset(err) {
+		goto end
+	}
+
+	switch strings.ToLower(email.Header.Get("Auto-Submitted")) {
+	case "auto-generated", "auto-replied":
+		// Disregard automatic emails like OOO replies
+		slog.Info("ignoring automatic message",
+			"from", session.from,
+			"to", strings.Join(session.to, ","),
+			"message-id", email.Header.Get("Message-Id"),
+			"subject", email.Header.Get("Subject"),
+		)
+		goto end
+	}
+
+	slog.Info("message received",
+		"from", session.from,
+		"to", strings.Join(session.to, ","),
+		"message-id", email.Header.Get("Message-Id"),
+		"subject", email.Header.Get("Subject"),
+	)
+
+	// Make local copies of the values before to ensure the references will
+	// still be valid when the task is run.
+	from = session.from
+	to = session.to
+
+	_ = from
+
+	for _, to := range to {
+		if !strings.HasSuffix(to, "@"+session.s.config.LMTP.Domain) {
+			continue
+		}
+		localPart := to[:len(to)-len("@"+session.s.config.LMTP.Domain)]
+		var segments []string
+		segments, err = misc.PathToSegments(localPart)
+		if err != nil {
+			// TODO: Should the entire email fail or should we just
+			// notify them out of band?
+			err = fmt.Errorf("cannot parse path: %w", err)
+			goto end
+		}
+		// Find the "-" separator dividing the group path from the
+		// module type/name, mirroring the HTTP/SSH routing scheme.
+		sepIndex := -1
+		for i, part := range segments {
+			if part == "-" {
+				sepIndex = i
+				break
+			}
+		}
+		if segments[len(segments)-1] == "" {
+			segments = segments[:len(segments)-1] // We don't care about dir or not.
+		}
+		if sepIndex == -1 || len(segments) <= sepIndex+2 {
+			err = errors.New("illegal path")
+			goto end
+		}
+
+		// Wrap the raw message in a one-message mbox with LF line
+		// endings, which is what the patch parser consumes.
+		mbox := bytes.Buffer{}
+		if _, err = fmt.Fprint(&mbox, "From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001\r\n"); err != nil {
+			slog.Error("error handling patch... malloc???", "error", err)
+			goto end
+		}
+		data = bytes.ReplaceAll(data, []byte("\r\n"), []byte("\n"))
+		if _, err = mbox.Write(data); err != nil {
+			slog.Error("error handling patch... malloc???", "error", err)
+			goto end
+		}
+		// TODO: Is mbox's From escaping necessary here?
+
+		groupPath := segments[:sepIndex]
+		moduleType := segments[sepIndex+1]
+		moduleName := segments[sepIndex+2]
+		switch moduleType {
+		case "repos":
+			err = session.s.lmtpHandlePatch(session, groupPath, moduleName, &mbox)
+			if err != nil {
+				slog.Error("error handling patch", "error", err)
+				goto end
+			}
+		default:
+			err = errors.New("Emailing any endpoint other than repositories, is not supported yet.") // TODO
+			goto end
+		}
+	}
+
+end:
+	session.to = nil
+	session.from = ""
+	switch err {
+	case nil:
+		return nil
+	default:
+		// Report the failure as a permanent error to the LMTP client.
+		return &smtp.SMTPError{
+			Code:         550,
+			Message:      "Permanent failure: " + err.Error(),
+			EnhancedCode: [3]int{5, 7, 1},
+		}
+	}
+}
diff --git a/forged/internal/unsorted/remote_url.go b/forged/internal/unsorted/remote_url.go
new file mode 100644
index 0000000..f4d4c58
--- /dev/null
+++ b/forged/internal/unsorted/remote_url.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "net/url"
+ "strings"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
+// We don't use path.Join because it collapses multiple slashes into one.
+
+// genSSHRemoteURL generates SSH remote URLs from a given group path and repo
+// name. Group segments are escaped by SegmentsToURL and the repo name is
+// path-escaped separately.
+func (s *Server) genSSHRemoteURL(groupPath []string, repoName string) string {
+	return strings.TrimSuffix(s.config.SSH.Root, "/") + "/" + misc.SegmentsToURL(groupPath) + "/-/repos/" + url.PathEscape(repoName)
+}
+
+// genHTTPRemoteURL generates HTTP remote URLs from a given group path and repo
+// name. Mirrors genSSHRemoteURL but rooted at the HTTP root.
+func (s *Server) genHTTPRemoteURL(groupPath []string, repoName string) string {
+	return strings.TrimSuffix(s.config.HTTP.Root, "/") + "/" + misc.SegmentsToURL(groupPath) + "/-/repos/" + url.PathEscape(repoName)
+}
diff --git a/forged/internal/unsorted/resources.go b/forged/internal/unsorted/resources.go
new file mode 100644
index 0000000..692b454
--- /dev/null
+++ b/forged/internal/unsorted/resources.go
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "html/template"
+ "io/fs"
+
+ "github.com/tdewolff/minify/v2"
+ "github.com/tdewolff/minify/v2/html"
+ "go.lindenii.runxiyu.org/forge/forged/internal/embed"
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
+// loadTemplates minifies and loads HTML templates.
+//
+// Every file under forged/templates in the embedded resources is minified
+// with template-delimiter-aware HTML minification, then parsed into one
+// shared template set that has a few helper functions registered.
+func (s *Server) loadTemplates() (err error) {
+	minifier := minify.New()
+	minifierOptions := html.Minifier{
+		TemplateDelims:      [2]string{"{{", "}}"},
+		KeepDefaultAttrVals: true,
+	} //exhaustruct:ignore
+	minifier.Add("text/html", &minifierOptions)
+
+	s.templates = template.New("templates").Funcs(template.FuncMap{
+		"first_line":        misc.FirstLine,
+		"path_escape":       misc.PathEscape,
+		"query_escape":      misc.QueryEscape,
+		"dereference_error": misc.DereferenceOrZero[error],
+		"minus":             misc.Minus,
+	})
+
+	err = fs.WalkDir(embed.Resources, "forged/templates", func(path string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+		if !d.IsDir() {
+			content, err := fs.ReadFile(embed.Resources, path)
+			if err != nil {
+				return err
+			}
+
+			minified, err := minifier.Bytes("text/html", content)
+			if err != nil {
+				return err
+			}
+
+			// Templates are addressed by their own {{define}}
+			// names, not by file name.
+			_, err = s.templates.Parse(misc.BytesToString(minified))
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	return err
+}
diff --git a/forged/internal/unsorted/server.go b/forged/internal/unsorted/server.go
new file mode 100644
index 0000000..84379b0
--- /dev/null
+++ b/forged/internal/unsorted/server.go
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "errors"
+ "html/template"
+ "io/fs"
+ "log"
+ "log/slog"
+ "net"
+ "net/http"
+ _ "net/http/pprof"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "syscall"
+ "time"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/cmap"
+ "go.lindenii.runxiyu.org/forge/forged/internal/database"
+ "go.lindenii.runxiyu.org/forge/forged/internal/embed"
+ "go.lindenii.runxiyu.org/forge/forged/internal/irc"
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+ goSSH "golang.org/x/crypto/ssh"
+)
+
+// Server is the top-level forge server: it owns the configuration, the
+// database handle, template and static-asset handlers, the SSH host key
+// material, the hook-cookie map, and the IRC bot.
+type Server struct {
+	config Config
+
+	database database.Database
+
+	// sourceHandler serves the forge's own source at /-/source/;
+	// staticHandler serves embedded static assets at /-/static/.
+	sourceHandler http.Handler
+	staticHandler http.Handler
+
+	// globalData is passed as "global" when rendering HTML templates.
+	globalData map[string]any
+
+	// SSH host public key in several representations; populated in
+	// serveSSH after the private key is loaded.
+	serverPubkeyString string
+	serverPubkeyFP     string
+	serverPubkey       goSSH.PublicKey
+
+	// packPasses contains hook cookies mapped to their packPass.
+	packPasses cmap.Map[string, packPass]
+
+	// templates is the parsed-and-minified HTML template set.
+	templates *template.Template
+
+	ircBot *irc.Bot
+
+	// ready is set at the end of NewServer; Run refuses to start
+	// otherwise.
+	ready bool
+}
+
+// NewServer constructs a Server from the configuration file at configPath:
+// it wires the embedded static/source handlers, loads config, parses the
+// templates, and deploys the bundled git2d and hookc binaries to their
+// configured paths. Deployment failures panic (via NoneOrPanic) since the
+// server cannot run without them.
+func NewServer(configPath string) (*Server, error) {
+	s := &Server{
+		globalData: make(map[string]any),
+	} //exhaustruct:ignore
+
+	s.sourceHandler = http.StripPrefix(
+		"/-/source/",
+		http.FileServer(http.FS(embed.Source)),
+	)
+	staticFS, err := fs.Sub(embed.Resources, "forged/static")
+	if err != nil {
+		return s, err
+	}
+	s.staticHandler = http.StripPrefix("/-/static/", http.FileServer(http.FS(staticFS)))
+	// Note: this replaces the map allocated in the struct literal above.
+	// Pointer values are used so templates observe fields that are only
+	// populated later (during serveSSH).
+	s.globalData = map[string]any{
+		"server_public_key_string":      &s.serverPubkeyString,
+		"server_public_key_fingerprint": &s.serverPubkeyFP,
+		"forge_version":                 version,
+		// Some other ones are populated after config parsing
+	}
+
+	if err := s.loadConfig(configPath); err != nil {
+		return s, err
+	}
+
+	misc.NoneOrPanic(s.loadTemplates())
+	misc.NoneOrPanic(misc.DeployBinary(misc.FirstOrPanic(embed.Resources.Open("git2d/git2d")), s.config.Git.DaemonPath))
+	misc.NoneOrPanic(misc.DeployBinary(misc.FirstOrPanic(embed.Resources.Open("hookc/hookc")), filepath.Join(s.config.Hooks.Execs, "pre-receive")))
+	misc.NoneOrPanic(os.Chmod(filepath.Join(s.config.Hooks.Execs, "pre-receive"), 0o755))
+
+	s.ready = true
+
+	return s, nil
+}
+
+// Run starts every subsystem — the git2d daemon, the hooks UNIX socket, the
+// LMTP socket, the SSH listener, the HTTP listener, the pprof listener, and
+// the IRC bot — each in its own goroutine, then blocks forever. Startup
+// failures are fatal and call os.Exit(1) from the spawning goroutine.
+func (s *Server) Run() error {
+	if !s.ready {
+		return errors.New("not ready")
+	}
+
+	// Launch Git2D
+	go func() {
+		cmd := exec.Command(s.config.Git.DaemonPath, s.config.Git.Socket) //#nosec G204
+		cmd.Stderr = log.Writer()
+		cmd.Stdout = log.Writer()
+		if err := cmd.Run(); err != nil {
+			panic(err)
+		}
+	}()
+
+	// UNIX socket listener for hooks
+	{
+		// A stale socket file from an unclean shutdown yields
+		// EADDRINUSE; unlink it and retry once.
+		hooksListener, err := net.Listen("unix", s.config.Hooks.Socket)
+		if errors.Is(err, syscall.EADDRINUSE) {
+			slog.Warn("removing existing socket", "path", s.config.Hooks.Socket)
+			if err = syscall.Unlink(s.config.Hooks.Socket); err != nil {
+				slog.Error("removing existing socket", "path", s.config.Hooks.Socket, "error", err)
+				os.Exit(1)
+			}
+			if hooksListener, err = net.Listen("unix", s.config.Hooks.Socket); err != nil {
+				slog.Error("listening hooks", "error", err)
+				os.Exit(1)
+			}
+		} else if err != nil {
+			slog.Error("listening hooks", "error", err)
+			os.Exit(1)
+		}
+		slog.Info("listening hooks on unix", "path", s.config.Hooks.Socket)
+		go func() {
+			if err = s.serveGitHooks(hooksListener); err != nil {
+				slog.Error("serving hooks", "error", err)
+				os.Exit(1)
+			}
+		}()
+	}
+
+	// UNIX socket listener for LMTP
+	{
+		lmtpListener, err := net.Listen("unix", s.config.LMTP.Socket)
+		if errors.Is(err, syscall.EADDRINUSE) {
+			slog.Warn("removing existing socket", "path", s.config.LMTP.Socket)
+			if err = syscall.Unlink(s.config.LMTP.Socket); err != nil {
+				slog.Error("removing existing socket", "path", s.config.LMTP.Socket, "error", err)
+				os.Exit(1)
+			}
+			if lmtpListener, err = net.Listen("unix", s.config.LMTP.Socket); err != nil {
+				slog.Error("listening LMTP", "error", err)
+				os.Exit(1)
+			}
+		} else if err != nil {
+			slog.Error("listening LMTP", "error", err)
+			os.Exit(1)
+		}
+		slog.Info("listening LMTP on unix", "path", s.config.LMTP.Socket)
+		go func() {
+			if err = s.serveLMTP(lmtpListener); err != nil {
+				slog.Error("serving LMTP", "error", err)
+				os.Exit(1)
+			}
+		}()
+	}
+
+	// SSH listener
+	{
+		sshListener, err := net.Listen(s.config.SSH.Net, s.config.SSH.Addr)
+		if errors.Is(err, syscall.EADDRINUSE) && s.config.SSH.Net == "unix" {
+			slog.Warn("removing existing socket", "path", s.config.SSH.Addr)
+			if err = syscall.Unlink(s.config.SSH.Addr); err != nil {
+				slog.Error("removing existing socket", "path", s.config.SSH.Addr, "error", err)
+				os.Exit(1)
+			}
+			if sshListener, err = net.Listen(s.config.SSH.Net, s.config.SSH.Addr); err != nil {
+				slog.Error("listening SSH", "error", err)
+				os.Exit(1)
+			}
+		} else if err != nil {
+			slog.Error("listening SSH", "error", err)
+			os.Exit(1)
+		}
+		slog.Info("listening SSH on", "net", s.config.SSH.Net, "addr", s.config.SSH.Addr)
+		go func() {
+			if err = s.serveSSH(sshListener); err != nil {
+				slog.Error("serving SSH", "error", err)
+				os.Exit(1)
+			}
+		}()
+	}
+
+	// HTTP listener
+	{
+		httpListener, err := net.Listen(s.config.HTTP.Net, s.config.HTTP.Addr)
+		if errors.Is(err, syscall.EADDRINUSE) && s.config.HTTP.Net == "unix" {
+			slog.Warn("removing existing socket", "path", s.config.HTTP.Addr)
+			if err = syscall.Unlink(s.config.HTTP.Addr); err != nil {
+				slog.Error("removing existing socket", "path", s.config.HTTP.Addr, "error", err)
+				os.Exit(1)
+			}
+			if httpListener, err = net.Listen(s.config.HTTP.Net, s.config.HTTP.Addr); err != nil {
+				slog.Error("listening HTTP", "error", err)
+				os.Exit(1)
+			}
+		} else if err != nil {
+			slog.Error("listening HTTP", "error", err)
+			os.Exit(1)
+		}
+		server := http.Server{
+			Handler:     s,
+			ReadTimeout: time.Duration(s.config.HTTP.ReadTimeout) * time.Second,
+			// NOTE(review): Write/Idle timeouts are built from
+			// ReadTimeout — suspected copy-paste; confirm whether
+			// dedicated config fields were intended.
+			WriteTimeout: time.Duration(s.config.HTTP.ReadTimeout) * time.Second,
+			IdleTimeout:  time.Duration(s.config.HTTP.ReadTimeout) * time.Second,
+		} //exhaustruct:ignore
+		slog.Info("listening HTTP on", "net", s.config.HTTP.Net, "addr", s.config.HTTP.Addr)
+		go func() {
+			if err = server.Serve(httpListener); err != nil && !errors.Is(err, http.ErrServerClosed) {
+				slog.Error("serving HTTP", "error", err)
+				os.Exit(1)
+			}
+		}()
+	}
+
+	// Pprof listener
+	{
+		pprofListener, err := net.Listen(s.config.Pprof.Net, s.config.Pprof.Addr)
+		if err != nil {
+			slog.Error("listening pprof", "error", err)
+			os.Exit(1)
+		}
+
+		slog.Info("listening pprof on", "net", s.config.Pprof.Net, "addr", s.config.Pprof.Addr)
+		go func() {
+			// nil handler: uses http.DefaultServeMux, where
+			// net/http/pprof registered its routes via import.
+			if err := http.Serve(pprofListener, nil); err != nil {
+				slog.Error("serving pprof", "error", err)
+				os.Exit(1)
+			}
+		}()
+	}
+
+	s.ircBot = irc.NewBot(&s.config.IRC)
+	// IRC bot
+	go s.ircBot.ConnectLoop()
+
+	// Block forever; all serving happens in the goroutines above.
+	select {}
+}
diff --git a/forged/internal/unsorted/ssh_handle_receive_pack.go b/forged/internal/unsorted/ssh_handle_receive_pack.go
new file mode 100644
index 0000000..a354273
--- /dev/null
+++ b/forged/internal/unsorted/ssh_handle_receive_pack.go
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+
+ gliderSSH "github.com/gliderlabs/ssh"
+ "github.com/go-git/go-git/v5"
+)
+
+// packPass contains information known when handling incoming SSH connections
+// that then needs to be used in hook socket connection handlers. See hookc(1).
+// Instances live in Server.packPasses keyed by a random cookie for the
+// duration of one git-receive-pack invocation.
+type packPass struct {
+	session      gliderSSH.Session
+	repo         *git.Repository
+	pubkey       string
+	directAccess bool
+	repoPath     string
+	userID       int
+	userType     string
+	repoID       int
+	groupPath    []string
+	repoName     string
+	contribReq   string
+}
+
+// sshHandleRecvPack handles attempts to push to repos. It verifies the
+// repo's hook configuration, enforces the contribution policy for users
+// without direct access, registers a one-shot hook cookie so the
+// pre-receive hook can associate its socket connection with this push, and
+// finally delegates to git-receive-pack with stdio wired to the session.
+func (s *Server) sshHandleRecvPack(session gliderSSH.Session, pubkey, repoIdentifier string) (err error) {
+	groupPath, repoName, repoID, repoPath, directAccess, contribReq, userType, userID, err := s.getRepoInfo2(session.Context(), repoIdentifier, pubkey)
+	if err != nil {
+		return err
+	}
+	repo, err := git.PlainOpen(repoPath)
+	if err != nil {
+		return err
+	}
+
+	repoConf, err := repo.Config()
+	if err != nil {
+		return err
+	}
+
+	// Refuse repos whose hooksPath does not point at our hook
+	// executables; otherwise the pre-receive ACL could be bypassed.
+	repoConfCore := repoConf.Raw.Section("core")
+	if repoConfCore == nil {
+		return errors.New("repository has no core section in config")
+	}
+
+	hooksPath := repoConfCore.OptionAll("hooksPath")
+	if len(hooksPath) != 1 || hooksPath[0] != s.config.Hooks.Execs {
+		return errors.New("repository has hooksPath set to an unexpected value")
+	}
+
+	if !directAccess {
+		switch contribReq {
+		case "closed":
+			// This branch is only reachable when !directAccess, so
+			// the previous redundant inner check was removed.
+			return errors.New("you need direct access to push to this repo")
+		case "registered_user":
+			if userType != "registered" {
+				return errors.New("you need to be a registered user to push to this repo")
+			}
+		case "ssh_pubkey":
+			fallthrough
+		case "federated":
+			if pubkey == "" {
+				return errors.New("you need to have an SSH public key to push to this repo")
+			}
+			if userType == "" {
+				// Auto-register unknown keys so the push can
+				// be attributed to a user record.
+				userID, err = s.addUserSSH(session.Context(), pubkey)
+				if err != nil {
+					return err
+				}
+				fmt.Fprintln(session.Stderr(), "you are now registered as user ID", userID)
+				userType = "pubkey_only"
+			}
+
+		case "public":
+		default:
+			panic("unknown contrib_requirements value " + contribReq)
+		}
+	}
+
+	cookie, err := randomUrlsafeStr(16)
+	if err != nil {
+		// Bug fix: previously execution continued with an empty
+		// cookie, which would be stored in packPasses (colliding
+		// across failed sessions) and handed to the hook environment.
+		fmt.Fprintln(session.Stderr(), "Error while generating cookie:", err)
+		return err
+	}
+
+	s.packPasses.Store(cookie, packPass{
+		session:      session,
+		pubkey:       pubkey,
+		directAccess: directAccess,
+		repoPath:     repoPath,
+		userID:       userID,
+		repoID:       repoID,
+		groupPath:    groupPath,
+		repoName:     repoName,
+		repo:         repo,
+		contribReq:   contribReq,
+		userType:     userType,
+	})
+	defer s.packPasses.Delete(cookie)
+	// The Delete won't execute until proc.Wait returns unless something
+	// horribly wrong such as a panic occurs.
+
+	proc := exec.CommandContext(session.Context(), "git-receive-pack", repoPath)
+	proc.Env = append(os.Environ(),
+		"LINDENII_FORGE_HOOKS_SOCKET_PATH="+s.config.Hooks.Socket,
+		"LINDENII_FORGE_HOOKS_COOKIE="+cookie,
+	)
+	proc.Stdin = session
+	proc.Stdout = session
+	proc.Stderr = session.Stderr()
+
+	if err = proc.Start(); err != nil {
+		fmt.Fprintln(session.Stderr(), "Error while starting process:", err)
+		return err
+	}
+
+	err = proc.Wait()
+	if err != nil {
+		fmt.Fprintln(session.Stderr(), "Error while waiting for process:", err)
+	}
+
+	return err
+}
diff --git a/forged/internal/unsorted/ssh_handle_upload_pack.go b/forged/internal/unsorted/ssh_handle_upload_pack.go
new file mode 100644
index 0000000..735a053
--- /dev/null
+++ b/forged/internal/unsorted/ssh_handle_upload_pack.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+
+ glider_ssh "github.com/gliderlabs/ssh"
+)
+
+// sshHandleUploadPack handles clones/fetches. It just uses git-upload-pack
+// and has no ACL checks. The repo path is resolved via getRepoInfo2 and the
+// process's stdio is wired straight through to the SSH session.
+func (s *Server) sshHandleUploadPack(session glider_ssh.Session, pubkey, repoIdentifier string) (err error) {
+	var repoPath string
+	if _, _, _, repoPath, _, _, _, _, err = s.getRepoInfo2(session.Context(), repoIdentifier, pubkey); err != nil {
+		return err
+	}
+
+	proc := exec.CommandContext(session.Context(), "git-upload-pack", repoPath)
+	proc.Env = append(os.Environ(), "LINDENII_FORGE_HOOKS_SOCKET_PATH="+s.config.Hooks.Socket)
+	proc.Stdin = session
+	proc.Stdout = session
+	proc.Stderr = session.Stderr()
+
+	if err = proc.Start(); err != nil {
+		fmt.Fprintln(session.Stderr(), "Error while starting process:", err)
+		return err
+	}
+
+	err = proc.Wait()
+	if err != nil {
+		fmt.Fprintln(session.Stderr(), "Error while waiting for process:", err)
+	}
+
+	return err
+}
diff --git a/forged/internal/unsorted/ssh_server.go b/forged/internal/unsorted/ssh_server.go
new file mode 100644
index 0000000..43cc0c4
--- /dev/null
+++ b/forged/internal/unsorted/ssh_server.go
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "fmt"
+ "log/slog"
+ "net"
+ "os"
+ "strings"
+
+ gliderSSH "github.com/gliderlabs/ssh"
+ "go.lindenii.runxiyu.org/forge/forged/internal/ansiec"
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+ goSSH "golang.org/x/crypto/ssh"
+)
+
+// serveSSH serves SSH on a [net.Listener]. The listener should generally be a
+// TCP listener, although AF_UNIX SOCK_STREAM listeners may be appropriate in
+// rare cases. It loads the host key, publishes its string/fingerprint forms
+// on the Server for templates, and accepts git-upload-pack/git-receive-pack
+// commands from any client (authorization happens in the handlers).
+func (s *Server) serveSSH(listener net.Listener) error {
+	var hostKeyBytes []byte
+	var hostKey goSSH.Signer
+	var err error
+	var server *gliderSSH.Server
+
+	if hostKeyBytes, err = os.ReadFile(s.config.SSH.Key); err != nil {
+		return err
+	}
+
+	if hostKey, err = goSSH.ParsePrivateKey(hostKeyBytes); err != nil {
+		return err
+	}
+
+	s.serverPubkey = hostKey.PublicKey()
+	s.serverPubkeyString = misc.BytesToString(goSSH.MarshalAuthorizedKey(s.serverPubkey))
+	s.serverPubkeyFP = goSSH.FingerprintSHA256(s.serverPubkey)
+
+	server = &gliderSSH.Server{
+		Handler: func(session gliderSSH.Session) {
+			// Bug fix: use a session-local error variable. The
+			// original closed over serveSSH's shared err, which is
+			// written by every concurrent session — a data race.
+			var err error
+			clientPubkey := session.PublicKey()
+			var clientPubkeyStr string
+			if clientPubkey != nil {
+				clientPubkeyStr = strings.TrimSuffix(misc.BytesToString(goSSH.MarshalAuthorizedKey(clientPubkey)), "\n")
+			}
+
+			slog.Info("incoming ssh", "addr", session.RemoteAddr().String(), "key", clientPubkeyStr, "command", session.RawCommand())
+			fmt.Fprintln(session.Stderr(), ansiec.Blue+"Lindenii Forge "+version+", source at "+strings.TrimSuffix(s.config.HTTP.Root, "/")+"/-/source/"+ansiec.Reset+"\r")
+
+			cmd := session.Command()
+
+			if len(cmd) < 2 {
+				fmt.Fprintln(session.Stderr(), "Insufficient arguments\r")
+				return
+			}
+
+			switch cmd[0] {
+			case "git-upload-pack":
+				if len(cmd) > 2 {
+					fmt.Fprintln(session.Stderr(), "Too many arguments\r")
+					return
+				}
+				err = s.sshHandleUploadPack(session, clientPubkeyStr, cmd[1])
+			case "git-receive-pack":
+				if len(cmd) > 2 {
+					fmt.Fprintln(session.Stderr(), "Too many arguments\r")
+					return
+				}
+				err = s.sshHandleRecvPack(session, clientPubkeyStr, cmd[1])
+			default:
+				fmt.Fprintln(session.Stderr(), "Unsupported command: "+cmd[0]+"\r")
+				return
+			}
+			if err != nil {
+				fmt.Fprintln(session.Stderr(), err.Error())
+				return
+			}
+		},
+		PublicKeyHandler:           func(_ gliderSSH.Context, _ gliderSSH.PublicKey) bool { return true },
+		KeyboardInteractiveHandler: func(_ gliderSSH.Context, _ goSSH.KeyboardInteractiveChallenge) bool { return true },
+		// It is intentional that we do not check any credentials and accept all connections.
+		// This allows all users to connect and clone repositories. However, the public key
+		// is passed to handlers, so e.g. the push handler could check the key and reject the
+		// push if it needs to.
+	} //exhaustruct:ignore
+
+	server.AddHostKey(hostKey)
+
+	if err = server.Serve(listener); err != nil {
+		slog.Error("error serving SSH", "error", err.Error())
+		os.Exit(1)
+	}
+
+	return nil
+}
diff --git a/forged/internal/unsorted/ssh_utils.go b/forged/internal/unsorted/ssh_utils.go
new file mode 100644
index 0000000..6f50a87
--- /dev/null
+++ b/forged/internal/unsorted/ssh_utils.go
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/ansiec"
+ "go.lindenii.runxiyu.org/forge/forged/internal/misc"
+)
+
+var errIllegalSSHRepoPath = errors.New("illegal SSH repo path")
+
+// getRepoInfo2 also fetches repo information... it should be deprecated and
+// implemented in individual handlers.
+//
+// It parses an SSH repo path of the form
+//
+//	group[/subgroup...]/-/repos/<name>
+//
+// and delegates the actual lookup to getRepoInfo.
+func (s *Server) getRepoInfo2(ctx context.Context, sshPath, sshPubkey string) (groupPath []string, repoName string, repoID int, repoPath string, directAccess bool, contribReq, userType string, userID int, err error) {
+	var segments []string
+	var sepIndex int
+	var moduleType, moduleName string
+
+	segments, err = misc.PathToSegments(sshPath)
+	if err != nil {
+		return
+	}
+
+	for i, segment := range segments {
+		// Deliberately shadows the named return; errors here are
+		// returned explicitly below.
+		var err error
+		segments[i], err = url.PathUnescape(segment)
+		if err != nil {
+			return []string{}, "", 0, "", false, "", "", 0, err
+		}
+	}
+
+	// NOTE(review): segments[0] is read without an empty-slice check —
+	// confirm PathToSegments never returns zero segments.
+	if segments[0] == "-" {
+		return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath
+	}
+
+	// Locate the "-" separator between group path and module.
+	sepIndex = -1
+	for i, part := range segments {
+		if part == "-" {
+			sepIndex = i
+			break
+		}
+	}
+	// A trailing slash produces an empty final segment; drop it.
+	if segments[len(segments)-1] == "" {
+		segments = segments[:len(segments)-1]
+	}
+
+	switch {
+	case sepIndex == -1:
+		return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath
+	case len(segments) <= sepIndex+2:
+		return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath
+	}
+
+	groupPath = segments[:sepIndex]
+	moduleType = segments[sepIndex+1]
+	moduleName = segments[sepIndex+2]
+	repoName = moduleName
+	switch moduleType {
+	case "repos":
+		_1, _2, _3, _4, _5, _6, _7 := s.getRepoInfo(ctx, groupPath, moduleName, sshPubkey)
+		return groupPath, repoName, _1, _2, _3, _4, _5, _6, _7
+	default:
+		return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath
+	}
+}
+
+// writeRedError is a helper function that basically does a Fprintf but makes
+// the entire thing red, in terms of ANSI escape sequences. It's useful when
+// producing error messages on SSH connections. (Fprintln appends a trailing
+// newline after the reset sequence.)
+func writeRedError(w io.Writer, format string, args ...any) {
+	fmt.Fprintln(w, ansiec.Red+fmt.Sprintf(format, args...)+ansiec.Reset)
+}
diff --git a/forged/internal/unsorted/unsorted.go b/forged/internal/unsorted/unsorted.go
new file mode 100644
index 0000000..f26b0e4
--- /dev/null
+++ b/forged/internal/unsorted/unsorted.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// Package unsorted is where unsorted Go files from the old structure are kept.
+package unsorted
diff --git a/forged/internal/unsorted/users.go b/forged/internal/unsorted/users.go
new file mode 100644
index 0000000..0f72eed
--- /dev/null
+++ b/forged/internal/unsorted/users.go
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+import (
+ "context"
+
+ "github.com/jackc/pgx/v5"
+)
+
+// addUserSSH adds a new user solely based on their SSH public key.
+//
+// The 'pubkey_only' user row and the key row are inserted in a single
+// transaction; the deferred Rollback is a no-op once Commit has succeeded.
+//
+// TODO: Audit all users of this function.
+func (s *Server) addUserSSH(ctx context.Context, pubkey string) (userID int, err error) {
+	var txn pgx.Tx
+
+	if txn, err = s.database.Begin(ctx); err != nil {
+		return
+	}
+	defer func() {
+		_ = txn.Rollback(ctx)
+	}()
+
+	if err = txn.QueryRow(ctx, `INSERT INTO users (type) VALUES ('pubkey_only') RETURNING id`).Scan(&userID); err != nil {
+		return
+	}
+
+	if _, err = txn.Exec(ctx, `INSERT INTO ssh_public_keys (key_string, user_id) VALUES ($1, $2)`, pubkey, userID); err != nil {
+		return
+	}
+
+	err = txn.Commit(ctx)
+	return
+}
diff --git a/forged/internal/unsorted/version.go b/forged/internal/unsorted/version.go
new file mode 100644
index 0000000..52c0f32
--- /dev/null
+++ b/forged/internal/unsorted/version.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package unsorted
+
+var version = "unknown"
diff --git a/forged/internal/web/error_pages.go b/forged/internal/web/error_pages.go
new file mode 100644
index 0000000..2ba9a1a
--- /dev/null
+++ b/forged/internal/web/error_pages.go
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+package web
+
+import (
+ "html/template"
+ "net/http"
+)
+
+// ErrorPage404 renders a 404 Not Found error page using the "404" template.
+func ErrorPage404(templates *template.Template, w http.ResponseWriter, params map[string]any) {
+ w.WriteHeader(http.StatusNotFound)
+ _ = templates.ExecuteTemplate(w, "404", params)
+}
+
+// ErrorPage400 renders a 400 Bad Request error page using the "400" template.
+// The error message is passed via the "complete_error_msg" template param.
+func ErrorPage400(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) {
+ w.WriteHeader(http.StatusBadRequest)
+ params["complete_error_msg"] = msg
+ _ = templates.ExecuteTemplate(w, "400", params)
+}
+
+// ErrorPage400Colon renders a 400 Bad Request error page telling the user
+// that we migrated from : to -.
+func ErrorPage400Colon(templates *template.Template, w http.ResponseWriter, params map[string]any) {
+ w.WriteHeader(http.StatusBadRequest)
+ _ = templates.ExecuteTemplate(w, "400_colon", params)
+}
+
+// ErrorPage403 renders a 403 Forbidden error page using the "403" template.
+// The error message is passed via the "complete_error_msg" template param.
+func ErrorPage403(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) {
+ w.WriteHeader(http.StatusForbidden)
+ params["complete_error_msg"] = msg
+ _ = templates.ExecuteTemplate(w, "403", params)
+}
+
+// ErrorPage451 renders a 451 Unavailable For Legal Reasons error page using the "451" template.
+// The error message is passed via the "complete_error_msg" template param.
+func ErrorPage451(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) {
+ w.WriteHeader(http.StatusUnavailableForLegalReasons)
+ params["complete_error_msg"] = msg
+ _ = templates.ExecuteTemplate(w, "451", params)
+}
+
+// ErrorPage500 renders a 500 Internal Server Error page using the "500" template.
+// The error message is passed via the "complete_error_msg" template param.
+func ErrorPage500(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) {
+ w.WriteHeader(http.StatusInternalServerError)
+ params["complete_error_msg"] = msg
+ _ = templates.ExecuteTemplate(w, "500", params)
+}
+
+// ErrorPage501 renders a 501 Not Implemented error page using the "501" template.
+func ErrorPage501(templates *template.Template, w http.ResponseWriter, params map[string]any) {
+ w.WriteHeader(http.StatusNotImplemented)
+ _ = templates.ExecuteTemplate(w, "501", params)
+}
diff --git a/forged/internal/web/web.go b/forged/internal/web/web.go
new file mode 100644
index 0000000..f4d15f8
--- /dev/null
+++ b/forged/internal/web/web.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// Package web provides web-facing components of the forge.
+package web
diff --git a/forged/main.go b/forged/main.go
new file mode 100644
index 0000000..fde15d1
--- /dev/null
+++ b/forged/main.go
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+// The main entry point to the Lindenii Forge daemon.
+package main
+
+import (
+ "flag"
+
+ "go.lindenii.runxiyu.org/forge/forged/internal/unsorted"
+)
+
+func main() {
+ configPath := flag.String(
+ "config",
+ "/etc/lindenii/forge.scfg",
+ "path to configuration file",
+ )
+ flag.Parse()
+
+ s, err := unsorted.NewServer(*configPath)
+ if err != nil {
+ panic(err)
+ }
+
+ panic(s.Run())
+}
diff --git a/forged/static/.gitignore b/forged/static/.gitignore
new file mode 100644
index 0000000..812b75f
--- /dev/null
+++ b/forged/static/.gitignore
@@ -0,0 +1,2 @@
+# used for testing css without recompiling the server
+/index.html
diff --git a/forged/static/chroma.css b/forged/static/chroma.css
new file mode 100644
index 0000000..1f7219a
--- /dev/null
+++ b/forged/static/chroma.css
@@ -0,0 +1,152 @@
+/*
+ * SPDX-License-Identifier: MIT AND BSD-2-Clause
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2025 Pygments and Chroma authors
+ */
+
+@media (prefers-color-scheme: light) {
+ /* Background */ .bg { ; }
+ /* PreWrapper */ .chroma { ; }
+ /* Error */ .chroma .err { }
+ /* LineLink */ .chroma .lnlinks { outline: none; text-decoration: none; color: inherit }
+ /* LineTableTD */ .chroma .lntd { vertical-align: top; padding: 0; margin: 0; border: 0; }
+ /* LineTable */ .chroma .lntable { border-spacing: 0; padding: 0; margin: 0; border: 0; }
+ /* LineHighlight */ .chroma .hl { background-color: #e5e5e5 }
+ /* LineNumbersTable */ .chroma .lnt { white-space: pre; -webkit-user-select: none; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #7f7f7f }
+ /* LineNumbers */ .chroma .ln { white-space: pre; -webkit-user-select: none; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #7f7f7f }
+ /* Line */ .chroma .line { display: flex; }
+ /* Keyword */ .chroma .k { color: #008000; font-weight: bold }
+ /* KeywordConstant */ .chroma .kc { color: #008000; font-weight: bold }
+ /* KeywordDeclaration */ .chroma .kd { color: #008000; font-weight: bold }
+ /* KeywordNamespace */ .chroma .kn { color: #008000; font-weight: bold }
+ /* KeywordPseudo */ .chroma .kp { color: #008000 }
+ /* KeywordReserved */ .chroma .kr { color: #008000; font-weight: bold }
+ /* KeywordType */ .chroma .kt { color: #b00040 }
+ /* NameAttribute */ .chroma .na { color: #7d9029 }
+ /* NameBuiltin */ .chroma .nb { color: #008000 }
+ /* NameClass */ .chroma .nc { color: #0000ff; font-weight: bold }
+ /* NameConstant */ .chroma .no { color: #880000 }
+ /* NameDecorator */ .chroma .nd { color: #aa22ff }
+ /* NameEntity */ .chroma .ni { color: #999999; font-weight: bold }
+ /* NameException */ .chroma .ne { color: #d2413a; font-weight: bold }
+ /* NameFunction */ .chroma .nf { color: #0000ff }
+ /* NameLabel */ .chroma .nl { color: #a0a000 }
+ /* NameNamespace */ .chroma .nn { color: #0000ff; font-weight: bold }
+ /* NameTag */ .chroma .nt { color: #008000; font-weight: bold }
+ /* NameVariable */ .chroma .nv { color: #19177c }
+ /* LiteralString */ .chroma .s { color: #ba2121 }
+ /* LiteralStringAffix */ .chroma .sa { color: #ba2121 }
+ /* LiteralStringBacktick */ .chroma .sb { color: #ba2121 }
+ /* LiteralStringChar */ .chroma .sc { color: #ba2121 }
+ /* LiteralStringDelimiter */ .chroma .dl { color: #ba2121 }
+ /* LiteralStringDoc */ .chroma .sd { color: #ba2121; font-style: italic }
+ /* LiteralStringDouble */ .chroma .s2 { color: #ba2121 }
+ /* LiteralStringEscape */ .chroma .se { color: #bb6622; font-weight: bold }
+ /* LiteralStringHeredoc */ .chroma .sh { color: #ba2121 }
+ /* LiteralStringInterpol */ .chroma .si { color: #bb6688; font-weight: bold }
+ /* LiteralStringOther */ .chroma .sx { color: #008000 }
+ /* LiteralStringRegex */ .chroma .sr { color: #bb6688 }
+ /* LiteralStringSingle */ .chroma .s1 { color: #ba2121 }
+ /* LiteralStringSymbol */ .chroma .ss { color: #19177c }
+ /* LiteralNumber */ .chroma .m { color: #666666 }
+ /* LiteralNumberBin */ .chroma .mb { color: #666666 }
+ /* LiteralNumberFloat */ .chroma .mf { color: #666666 }
+ /* LiteralNumberHex */ .chroma .mh { color: #666666 }
+ /* LiteralNumberInteger */ .chroma .mi { color: #666666 }
+ /* LiteralNumberIntegerLong */ .chroma .il { color: #666666 }
+ /* LiteralNumberOct */ .chroma .mo { color: #666666 }
+ /* Operator */ .chroma .o { color: #666666 }
+ /* OperatorWord */ .chroma .ow { color: #aa22ff; font-weight: bold }
+ /* Comment */ .chroma .c { color: #408080; font-style: italic }
+ /* CommentHashbang */ .chroma .ch { color: #408080; font-style: italic }
+ /* CommentMultiline */ .chroma .cm { color: #408080; font-style: italic }
+ /* CommentSingle */ .chroma .c1 { color: #408080; font-style: italic }
+ /* CommentSpecial */ .chroma .cs { color: #408080; font-style: italic }
+ /* CommentPreproc */ .chroma .cp { color: #bc7a00 }
+ /* CommentPreprocFile */ .chroma .cpf { color: #bc7a00 }
+ /* GenericDeleted */ .chroma .gd { color: #a00000 }
+ /* GenericEmph */ .chroma .ge { font-style: italic }
+ /* GenericError */ .chroma .gr { color: #ff0000 }
+ /* GenericHeading */ .chroma .gh { color: #000080; font-weight: bold }
+ /* GenericInserted */ .chroma .gi { color: #00a000 }
+ /* GenericOutput */ .chroma .go { color: #888888 }
+ /* GenericPrompt */ .chroma .gp { color: #000080; font-weight: bold }
+ /* GenericStrong */ .chroma .gs { font-weight: bold }
+ /* GenericSubheading */ .chroma .gu { color: #800080; font-weight: bold }
+ /* GenericTraceback */ .chroma .gt { color: #0044dd }
+ /* GenericUnderline */ .chroma .gl { text-decoration: underline }
+ /* TextWhitespace */ .chroma .w { color: #bbbbbb }
+}
+@media (prefers-color-scheme: dark) {
+ /* Background */ .bg { color: #e6edf3; background-color: #000000; }
+ /* PreWrapper */ .chroma { color: #e6edf3; background-color: #000000; }
+ /* Error */ .chroma .err { color: #f85149 }
+ /* LineLink */ .chroma .lnlinks { outline: none; text-decoration: none; color: inherit }
+ /* LineTableTD */ .chroma .lntd { vertical-align: top; padding: 0; margin: 0; border: 0; }
+ /* LineTable */ .chroma .lntable { border-spacing: 0; padding: 0; margin: 0; border: 0; }
+ /* LineHighlight */ .chroma .hl { background-color: #6e7681 }
+ /* LineNumbersTable */ .chroma .lnt { white-space: pre; -webkit-user-select: none; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #737679 }
+ /* LineNumbers */ .chroma .ln { white-space: pre; -webkit-user-select: none; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #6e7681 }
+ /* Line */ .chroma .line { display: flex; }
+ /* Keyword */ .chroma .k { color: #ff7b72 }
+ /* KeywordConstant */ .chroma .kc { color: #79c0ff }
+ /* KeywordDeclaration */ .chroma .kd { color: #ff7b72 }
+ /* KeywordNamespace */ .chroma .kn { color: #ff7b72 }
+ /* KeywordPseudo */ .chroma .kp { color: #79c0ff }
+ /* KeywordReserved */ .chroma .kr { color: #ff7b72 }
+ /* KeywordType */ .chroma .kt { color: #ff7b72 }
+ /* NameClass */ .chroma .nc { color: #f0883e; font-weight: bold }
+ /* NameConstant */ .chroma .no { color: #79c0ff; font-weight: bold }
+ /* NameDecorator */ .chroma .nd { color: #d2a8ff; font-weight: bold }
+ /* NameEntity */ .chroma .ni { color: #ffa657 }
+ /* NameException */ .chroma .ne { color: #f0883e; font-weight: bold }
+ /* NameFunction */ .chroma .nf { color: #d2a8ff; font-weight: bold }
+ /* NameLabel */ .chroma .nl { color: #79c0ff; font-weight: bold }
+ /* NameNamespace */ .chroma .nn { color: #ff7b72 }
+ /* NameProperty */ .chroma .py { color: #79c0ff }
+ /* NameTag */ .chroma .nt { color: #7ee787 }
+ /* NameVariable */ .chroma .nv { color: #79c0ff }
+ /* Literal */ .chroma .l { color: #a5d6ff }
+ /* LiteralDate */ .chroma .ld { color: #79c0ff }
+ /* LiteralString */ .chroma .s { color: #a5d6ff }
+ /* LiteralStringAffix */ .chroma .sa { color: #79c0ff }
+ /* LiteralStringBacktick */ .chroma .sb { color: #a5d6ff }
+ /* LiteralStringChar */ .chroma .sc { color: #a5d6ff }
+ /* LiteralStringDelimiter */ .chroma .dl { color: #79c0ff }
+ /* LiteralStringDoc */ .chroma .sd { color: #a5d6ff }
+ /* LiteralStringDouble */ .chroma .s2 { color: #a5d6ff }
+ /* LiteralStringEscape */ .chroma .se { color: #79c0ff }
+ /* LiteralStringHeredoc */ .chroma .sh { color: #79c0ff }
+ /* LiteralStringInterpol */ .chroma .si { color: #a5d6ff }
+ /* LiteralStringOther */ .chroma .sx { color: #a5d6ff }
+ /* LiteralStringRegex */ .chroma .sr { color: #79c0ff }
+ /* LiteralStringSingle */ .chroma .s1 { color: #a5d6ff }
+ /* LiteralStringSymbol */ .chroma .ss { color: #a5d6ff }
+ /* LiteralNumber */ .chroma .m { color: #a5d6ff }
+ /* LiteralNumberBin */ .chroma .mb { color: #a5d6ff }
+ /* LiteralNumberFloat */ .chroma .mf { color: #a5d6ff }
+ /* LiteralNumberHex */ .chroma .mh { color: #a5d6ff }
+ /* LiteralNumberInteger */ .chroma .mi { color: #a5d6ff }
+ /* LiteralNumberIntegerLong */ .chroma .il { color: #a5d6ff }
+ /* LiteralNumberOct */ .chroma .mo { color: #a5d6ff }
+ /* Operator */ .chroma .o { color: #ff7b72; font-weight: bold }
+ /* OperatorWord */ .chroma .ow { color: #ff7b72; font-weight: bold }
+ /* Comment */ .chroma .c { color: #8b949e; font-style: italic }
+ /* CommentHashbang */ .chroma .ch { color: #8b949e; font-style: italic }
+ /* CommentMultiline */ .chroma .cm { color: #8b949e; font-style: italic }
+ /* CommentSingle */ .chroma .c1 { color: #8b949e; font-style: italic }
+ /* CommentSpecial */ .chroma .cs { color: #8b949e; font-weight: bold; font-style: italic }
+ /* CommentPreproc */ .chroma .cp { color: #8b949e; font-weight: bold; font-style: italic }
+ /* CommentPreprocFile */ .chroma .cpf { color: #8b949e; font-weight: bold; font-style: italic }
+ /* GenericDeleted */ .chroma .gd { color: #ffa198; background-color: #490202 }
+ /* GenericEmph */ .chroma .ge { font-style: italic }
+ /* GenericError */ .chroma .gr { color: #ffa198 }
+ /* GenericHeading */ .chroma .gh { color: #79c0ff; font-weight: bold }
+ /* GenericInserted */ .chroma .gi { color: #56d364; background-color: #0f5323 }
+ /* GenericOutput */ .chroma .go { color: #8b949e }
+ /* GenericPrompt */ .chroma .gp { color: #8b949e }
+ /* GenericStrong */ .chroma .gs { font-weight: bold }
+ /* GenericSubheading */ .chroma .gu { color: #79c0ff }
+ /* GenericTraceback */ .chroma .gt { color: #ff7b72 }
+ /* GenericUnderline */ .chroma .gl { text-decoration: underline }
+ /* TextWhitespace */ .chroma .w { color: #6e7681 }
+}
diff --git a/static/style.css b/forged/static/style.css
index e5398ce..51846a2 100644
--- a/static/style.css
+++ b/forged/static/style.css
@@ -1,15 +1,23 @@
/*
* SPDX-License-Identifier: AGPL-3.0-only
- * SPDX-FileContributor: Runxi Yu <https://runxiyu.org>
- * SPDX-FileContributor: luk3yx <https://luk3yx.github.io>
+ * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+ * SPDX-FileCopyrightText: Copyright (c) 2025 luk3yx <https://luk3yx.github.io>
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2025 Drew DeVault <https://drewdevault.com>
+ *
+ * Drew did not directly contribute here but we took significant portions of
+ * SourceHut's CSS.
*/
+* {
+ box-sizing: border-box;
+}
+
/* Base styles and variables */
html {
font-family: sans-serif;
background-color: var(--background-color);
color: var(--text-color);
- --radius-1: 0.32rem;
+ font-size: 1rem;
--background-color: hsl(0, 0%, 100%);
--text-color: hsl(0, 0%, 0%);
--link-color: hsl(320, 50%, 36%);
@@ -76,48 +84,12 @@ html, code, pre {
display: table-row-group;
}
-table.rounded, table.rounded-footed {
- overflow: hidden;
- border-spacing: 0;
- border-collapse: separate;
- border-radius: var(--radius-1);
- border: var(--lighter-border-color) solid 1px;
-}
-
-table.rounded th, table.rounded td,
-table.rounded-footed th, table.rounded-footed td {
- border: none;
-}
-
-table.rounded th:not(:last-child),
-table.rounded td:not(:last-child),
-table.rounded-footed th:not(:last-child),
-table.rounded-footed td:not(:last-child) {
- border-right: var(--lighter-border-color) solid 1px;
-}
-
-table.rounded>thead>tr>th,
-table.rounded>thead>tr>td,
-table.rounded>tbody>tr:not(:last-child)>th,
-table.rounded>tbody>tr:not(:last-child)>td {
- border-bottom: var(--lighter-border-color) solid 1px;
-}
-
-table.rounded-footed>thead>tr>th,
-table.rounded-footed>thead>tr>td,
-table.rounded-footed>tbody>tr>th,
-table.rounded-footed>tbody>tr>td,
-table.rounded-footed>tfoot>tr:not(:last-child)>th,
-table.rounded-footed>tfoot>tr:not(:last-child)>td {
- border-bottom: var(--lighter-border-color) solid 1px;
-}
-
-
/* Footer styles */
footer {
margin-top: 1rem;
margin-left: auto;
margin-right: auto;
+ margin-bottom: 1rem;
display: block;
padding: 0 5px;
width: fit-content;
@@ -128,14 +100,8 @@ footer a:link, footer a:visited {
color: inherit;
}
-/* Padding containers */
-.padding-wrapper {
- margin: 1rem auto;
- max-width: 60rem;
- padding: 0 5px;
-}
.padding {
- padding: 0 5px;
+ padding: 0 1rem;
}
/* Link styles */
@@ -303,7 +269,6 @@ textarea,
input[type=text],
input[type=password] {
font-family: sans-serif;
- font-size: smaller;
background-color: var(--lighter-box-background-color);
color: var(--text-color);
border: none;
@@ -372,9 +337,7 @@ input[type=file]::file-selector-button {
display: inline-block;
width: auto;
min-width: fit-content;
- border-radius: var(--radius-1);
padding: .1rem .75rem;
- font-size: 0.9rem;
transition: background .1s linear;
cursor: pointer;
}
@@ -384,18 +347,50 @@ a.btn, a.btn-white, a.btn-danger, a.btn-normal, a.btn-primary {
/* Header layout */
header#main-header {
- background-color: var(--lighter-box-background-color);
+ /* background-color: var(--lighter-box-background-color); */
display: flex;
+ flex-direction: row;
+ align-items: center;
justify-content: space-between;
+ flex-wrap: wrap;
+ padding-top: 1rem;
+ padding-bottom: 1rem;
+ gap: 0.5rem;
+}
+#main-header a, #main-header a:link, #main-header a:visited {
+ text-decoration: none;
+ color: inherit;
+}
+#main-header-forge-title {
+ white-space: nowrap;
+}
+#breadcrumb-nav {
+ display: flex;
align-items: center;
- padding: 10px;
+ flex: 1 1 auto;
+ min-width: 0;
+ overflow-x: auto;
+ gap: 0.25rem;
+ white-space: nowrap;
}
-header#main-header > div#main-header-forge-title {
- flex-grow: 1;
+.breadcrumb-separator {
+ margin: 0 0.25rem;
}
-header#main-header > div#main-header-user {
+#main-header-user {
display: flex;
align-items: center;
+ white-space: nowrap;
+}
+@media (max-width: 37.5rem) {
+ header#main-header {
+ flex-direction: column;
+ align-items: flex-start;
+ }
+
+ #breadcrumb-nav {
+ width: 100%;
+ overflow-x: auto;
+ }
}
/* Uncategorized */
@@ -408,3 +403,215 @@ td > ul {
margin-top: 0;
margin-bottom: 0;
}
+
+
+
+.complete-error-page hr {
+ border: 0;
+ border-bottom: 1px dashed;
+}
+
+
+
+
+
+
+.key-val-grid {
+ display: grid;
+ grid-template-columns: auto 1fr;
+ gap: 0;
+ border: var(--lighter-border-color) 1px solid;
+ overflow: auto;
+}
+
+.key-val-grid > .title-row {
+ grid-column: 1 / -1;
+ background-color: var(--lighter-box-background-color);
+ font-weight: bold;
+ padding: 3px 5px;
+ border-bottom: var(--lighter-border-color) 1px solid;
+}
+
+.key-val-grid > .row-label {
+ background-color: var(--lighter-box-background-color);
+ padding: 3px 5px;
+ border-bottom: var(--lighter-border-color) 1px solid;
+ border-right: var(--lighter-border-color) 1px solid;
+ text-align: left;
+ font-weight: normal;
+}
+
+.key-val-grid > .row-value {
+ padding: 3px 5px;
+ border-bottom: var(--lighter-border-color) 1px solid;
+ word-break: break-word;
+}
+
+.key-val-grid code {
+ font-family: monospace;
+}
+
+.key-val-grid ul {
+ margin: 0;
+ padding-left: 1.5rem;
+}
+
+.key-val-grid > .row-label:nth-last-of-type(2),
+.key-val-grid > .row-value:last-of-type {
+ border-bottom: none;
+}
+
+@media (max-width: 37.5rem) {
+ .key-val-grid {
+ grid-template-columns: 1fr;
+ }
+
+ .key-val-grid > .row-label {
+ border-right: none;
+ }
+}
+.key-val-grid > .title-row {
+ grid-column: 1 / -1;
+ background-color: var(--lighter-box-background-color);
+ font-weight: bold;
+ padding: 3px 5px;
+ border-bottom: var(--lighter-border-color) 1px solid;
+ margin: 0;
+ text-align: center;
+}
+
+.key-val-grid-wrapper {
+ max-width: 100%;
+ width: fit-content;
+}
+
+/* Tab navigation */
+
+.nav-tabs-standalone {
+ border: none;
+ list-style: none;
+ margin: 0;
+ flex-grow: 1;
+ display: inline-flex;
+ flex-wrap: nowrap;
+ padding: 0;
+ border-bottom: 0.25rem var(--darker-box-background-color) solid;
+ width: 100%;
+ max-width: 100%;
+ min-width: 100%;
+}
+
+.nav-tabs-standalone > li {
+ align-self: flex-end;
+}
+.nav-tabs-standalone > li > a {
+ padding: 0 0.75rem;
+}
+
+.nav-item a.active {
+ background-color: var(--darker-box-background-color);
+}
+
+.nav-item a, .nav-item a:link, .nav-item a:visited {
+ text-decoration: none;
+ color: inherit;
+}
+
+.repo-header-extension {
+ margin-bottom: 1rem;
+ background-color: var(--darker-box-background-color);
+}
+
+.repo-header > h2 {
+ display: inline;
+ margin: 0;
+ padding-right: 1rem;
+}
+
+.repo-header > .nav-tabs-standalone {
+ border: none;
+ margin: 0;
+ flex-grow: 1;
+ display: inline-flex;
+ flex-wrap: nowrap;
+ padding: 0;
+}
+
+.repo-header {
+ display: flex;
+ flex-wrap: nowrap;
+}
+
+.repo-header-extension-content {
+ padding-top: 0.3rem;
+ padding-bottom: 0.2rem;
+}
+
+.repo-header, .padding-wrapper, .repo-header-extension-content, #main-header, .readingwidth, .commit-list-small {
+ padding-left: 1rem;
+ padding-right: 1rem;
+ max-width: 60rem;
+ width: 100%;
+ margin-left: auto;
+ margin-right: auto;
+}
+
+.padding-wrapper {
+ margin-bottom: 1rem;
+}
+
+/* TODO */
+
+.commit-list-small .event {
+ background-color: var(--lighter-box-background-color);
+ padding: 0.5rem;
+ margin-bottom: 1rem;
+ max-width: 30rem;
+}
+
+.commit-list-small .event:last-child {
+ margin-bottom: 1rem;
+}
+
+.commit-list-small a {
+ color: var(--link-color);
+ text-decoration: none;
+ font-weight: 500;
+}
+
+.commit-list-small a:hover {
+ text-decoration: underline;
+ text-decoration-color: var(--text-decoration-color);
+}
+
+.commit-list-small .event > div {
+ font-size: 0.95rem;
+}
+
+.commit-list-small .pull-right {
+ float: right;
+ font-size: 0.85em;
+ margin-left: 1rem;
+}
+
+.commit-list-small pre.commit {
+ margin: 0.25rem 0 0 0;
+ padding: 0;
+ font-family: inherit;
+ font-size: 0.95rem;
+ color: var(--text-color);
+ white-space: pre-wrap;
+}
+
+.commit-list-small .commit-error {
+ color: var(--danger-color);
+ font-weight: bold;
+ margin-top: 1rem;
+}
+
+
+.breakable {
+ word-break: break-word;
+ /* overflow-wrap: break-word;
+ overflow: hidden; */
+}
diff --git a/forged/templates/400.tmpl b/forged/templates/400.tmpl
new file mode 100644
index 0000000..58ce768
--- /dev/null
+++ b/forged/templates/400.tmpl
@@ -0,0 +1,25 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "400" -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>400 Bad Request &ndash; {{ .global.forge_title }}</title>
+ </head>
+ <body class="400">
+ {{- template "header" . -}}
+ <div class="padding-wrapper complete-error-page">
+ <h1>400 Bad Request</h1>
+ <p>{{- .complete_error_msg -}}</p>
+ <hr />
+ <address>Lindenii Forge</address>
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/400_colon.tmpl b/forged/templates/400_colon.tmpl
new file mode 100644
index 0000000..470a685
--- /dev/null
+++ b/forged/templates/400_colon.tmpl
@@ -0,0 +1,26 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "400_colon" -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>400 Bad Request &ndash; {{ .global.forge_title }}</title>
+ </head>
+ <body class="400-colon">
+ {{- template "header" . -}}
+ <div class="padding-wrapper complete-error-page">
+ <h1>400 Bad Request</h1>
+ <p>We recently switched URL schemes. Previously &ldquo;<code>:</code>&rdquo; was used as our URL group separator, but because OpenSMTPD does not implement local-part address quoting properly, we&rsquo;re unable to include &ldquo;<code>:</code>&rdquo; in URLs properly, hence we use &ldquo;<code>-</code>&rdquo; now.</p>
+      <p>As a precaution in case visitors get confused, this page was set up. <strong>You should probably replace the &ldquo;<code>:</code>&rdquo;s with &ldquo;<code>-</code>&rdquo;s in the URL bar.</strong> If there are colons in the URL that <em>are not</em> the group separator&mdash;that&rsquo;s an edge case that we&rsquo;ll fix later.</p>
+ <hr />
+ <address>Lindenii Forge</address>
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/403.tmpl b/forged/templates/403.tmpl
new file mode 100644
index 0000000..86d5518
--- /dev/null
+++ b/forged/templates/403.tmpl
@@ -0,0 +1,25 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "403" -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>403 Forbidden &ndash; {{ .global.forge_title }}</title>
+ </head>
+ <body class="403">
+ {{- template "header" . -}}
+ <div class="padding-wrapper complete-error-page">
+ <h1>403 Forbidden</h1>
+ <p>{{- .complete_error_msg -}}</p>
+ <hr />
+ <address>Lindenii Forge</address>
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/404.tmpl b/forged/templates/404.tmpl
new file mode 100644
index 0000000..2eabb06
--- /dev/null
+++ b/forged/templates/404.tmpl
@@ -0,0 +1,24 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "404" -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>404 Not Found &ndash; {{ .global.forge_title }}</title>
+ </head>
+ <body class="404">
+ {{- template "header" . -}}
+ <div class="padding-wrapper complete-error-page">
+ <h1>404 Not Found</h1>
+ <hr />
+ <address>Lindenii Forge</address>
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/451.tmpl b/forged/templates/451.tmpl
new file mode 100644
index 0000000..ed6343c
--- /dev/null
+++ b/forged/templates/451.tmpl
@@ -0,0 +1,25 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "451" -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>451 Unavailable For Legal Reasons &ndash; {{ .global.forge_title }}</title>
+ </head>
+ <body class="451">
+ {{- template "header" . -}}
+ <div class="padding-wrapper complete-error-page">
+ <h1>451 Unavailable For Legal Reasons</h1>
+ <p>{{- .complete_error_msg -}}</p>
+ <hr />
+ <address>Lindenii Forge</address>
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/500.tmpl b/forged/templates/500.tmpl
new file mode 100644
index 0000000..3a540e6
--- /dev/null
+++ b/forged/templates/500.tmpl
@@ -0,0 +1,25 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "500" -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>500 Internal Server Error &ndash; {{ .global.forge_title }}</title>
+ </head>
+ <body class="500">
+ {{- template "header" . -}}
+ <div class="padding-wrapper complete-error-page">
+ <h1>500 Internal Server Error</h1>
+ <p>{{- .complete_error_msg -}}</p>
+ <hr />
+ <address>Lindenii Forge</address>
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/501.tmpl b/forged/templates/501.tmpl
new file mode 100644
index 0000000..b6ab2f0
--- /dev/null
+++ b/forged/templates/501.tmpl
@@ -0,0 +1,24 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "501" -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>501 Not Implemented &ndash; {{ .global.forge_title }}</title>
+ </head>
+ <body class="501">
+ {{- template "header" . -}}
+ <div class="padding-wrapper complete-error-page">
+ <h1>501 Not Implemented</h1>
+ <hr />
+ <address>Lindenii Forge</address>
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/_footer.tmpl b/forged/templates/_footer.tmpl
new file mode 100644
index 0000000..f71ea3e
--- /dev/null
+++ b/forged/templates/_footer.tmpl
@@ -0,0 +1,12 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "footer" -}}
+<a href="https://lindenii.runxiyu.org/forge/">Lindenii Forge</a>
+{{ .global.forge_version }}
+(<a href="/-/source/source.tar.gz">source</a>,
+<a href="https://forge.lindenii.runxiyu.org/forge/-/repos/server/">upstream</a>,
+<a href="/-/source/LICENSE">license</a>,
+<a href="https://webirc.runxiyu.org/kiwiirc/#lindenii">support</a>)
+{{- end -}}
diff --git a/forged/templates/_group_path.tmpl b/forged/templates/_group_path.tmpl
new file mode 100644
index 0000000..f5d3bf8
--- /dev/null
+++ b/forged/templates/_group_path.tmpl
@@ -0,0 +1,8 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "group_path_plain" -}}
+{{- $p := . -}}
+{{- range $i, $s := . -}}{{- $s -}}{{- if ne $i (minus (len $p) 1) -}}/{{- end -}}{{- end -}}
+{{- end -}}
diff --git a/forged/templates/_group_view.tmpl b/forged/templates/_group_view.tmpl
new file mode 100644
index 0000000..92b6639
--- /dev/null
+++ b/forged/templates/_group_view.tmpl
@@ -0,0 +1,56 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "group_view" -}}
+{{- if .subgroups -}}
+ <table class="wide">
+ <thead>
+ <tr>
+ <th colspan="2" class="title-row">Subgroups</th>
+ </tr>
+ <tr>
+ <th scope="col">Name</th>
+ <th scope="col">Description</th>
+ </tr>
+ </thead>
+ <tbody>
+ {{- range .subgroups -}}
+ <tr>
+ <td>
+ <a href="{{- .Name | path_escape -}}/">{{- .Name -}}</a>
+ </td>
+ <td>
+ {{- .Description -}}
+ </td>
+ </tr>
+ {{- end -}}
+ </tbody>
+ </table>
+{{- end -}}
+{{- if .repos -}}
+<table class="wide">
+ <thead>
+    <tr>
+      <th colspan="2" class="title-row">Repos</th>
+    </tr>
+    <tr>
+      <th scope="col">Name</th>
+      <th scope="col">Description</th>
+    </tr>
+ </thead>
+ <tbody>
+ {{- range .repos -}}
+ <tr>
+ <td>
+ <a href="-/repos/{{- .Name | path_escape -}}/">{{- .Name -}}</a>
+ </td>
+ <td>
+ {{- .Description -}}
+ </td>
+ </tr>
+ {{- end -}}
+ </tbody>
+</table>
+{{- end -}}
+{{- end -}}
diff --git a/forged/templates/_head.tmpl b/forged/templates/_head.tmpl
new file mode 100644
index 0000000..d6d6571
--- /dev/null
+++ b/forged/templates/_head.tmpl
@@ -0,0 +1,9 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "head_common" -}}
+<meta charset="utf-8" />
+<meta name="viewport" content="width=device-width, initial-scale=1" />
+<link rel="stylesheet" href="/-/static/style.css" />
+{{- end -}}
diff --git a/forged/templates/_header.tmpl b/forged/templates/_header.tmpl
new file mode 100644
index 0000000..340a2ac
--- /dev/null
+++ b/forged/templates/_header.tmpl
@@ -0,0 +1,35 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "header" -}}
+<header id="main-header">
+ <div id="main-header-forge-title">
+ <a href="/">{{- .global.forge_title -}}</a>
+ </div>
+ <nav id="breadcrumb-nav">
+ {{- $path := "" -}}
+ {{- $url_segments := .url_segments -}}
+ {{- $dir_mode := .dir_mode -}}
+ {{- $ref_type := .ref_type -}}
+ {{- $ref := .ref_name -}}
+ {{- $separator_index := .separator_index -}}
+ {{- if eq $separator_index -1 -}}
+ {{- $separator_index = len $url_segments -}}
+ {{- end -}}
+ {{- range $i := $separator_index -}}
+ {{- $segment := index $url_segments $i -}}
+ {{- $path = printf "%s/%s" $path $segment -}}
+ <span class="breadcrumb-separator">/</span>
+ <a href="{{ $path }}{{ if or (ne $i (minus (len $url_segments) 1)) $dir_mode }}/{{ end }}{{- if $ref_type -}}?{{- $ref_type -}}={{- $ref -}}{{- end -}}">{{ $segment }}</a>
+ {{- end -}}
+ </nav>
+ <div id="main-header-user">
+ {{- if ne .user_id_string "" -}}
+ <a href="/-/users/{{- .user_id_string -}}">{{- .username -}}</a>
+ {{- else -}}
+ <a href="/-/login/">Login</a>
+ {{- end -}}
+ </div>
+</header>
+{{- end -}}
diff --git a/forged/templates/_ref_query.tmpl b/forged/templates/_ref_query.tmpl
new file mode 100644
index 0000000..2f78955
--- /dev/null
+++ b/forged/templates/_ref_query.tmpl
@@ -0,0 +1,3 @@
+{{- define "ref_query" -}}
+{{- if .ref_type -}}?{{- .ref_type -}}={{- .ref_name -}}{{- end -}}
+{{- end -}}
diff --git a/forged/templates/group.tmpl b/forged/templates/group.tmpl
new file mode 100644
index 0000000..b15c316
--- /dev/null
+++ b/forged/templates/group.tmpl
@@ -0,0 +1,80 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "group" -}}
+{{- $group_path := .group_path -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+		<title>{{- range $i, $s := .group_path -}}{{- $s -}}{{- if ne $i (minus (len $group_path) 1) -}}/{{- end -}}{{- end }} &ndash; {{ .global.forge_title -}}</title>
+ </head>
+ <body class="group">
+ {{- template "header" . -}}
+ <div class="padding-wrapper">
+ {{- if .description -}}
+ <p>{{- .description -}}</p>
+ {{- end -}}
+ {{- template "group_view" . -}}
+ </div>
+ {{- if .direct_access -}}
+ <div class="padding-wrapper">
+ <form method="POST" enctype="application/x-www-form-urlencoded">
+ <table>
+ <thead>
+ <tr>
+ <th class="title-row" colspan="2">
+ Create repo
+ </th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th scope="row">Name</th>
+ <td class="tdinput">
+ <input id="repo-name-input" name="repo_name" type="text" />
+ </td>
+ </tr>
+ <tr>
+ <th scope="row">Description</th>
+ <td class="tdinput">
+ <input id="repo-desc-input" name="repo_desc" type="text" />
+ </td>
+ </tr>
+ <tr>
+ <th scope="row">Contrib</th>
+ <td class="tdinput">
+ <select id="repo-contrib-input" name="repo_contrib">
+ <option value="public">Public</option>
+ <option value="ssh_pubkey">SSH public key</option>
+ <option value="federated">Federated service</option>
+ <option value="registered_user">Registered user</option>
+ <option value="closed">Closed</option>
+ </select>
+ </td>
+ </tr>
+ </tbody>
+ <tfoot>
+ <tr>
+ <td class="th-like" colspan="2">
+ <div class="flex-justify">
+ <div class="left">
+ </div>
+ <div class="right">
+ <input class="btn-primary" type="submit" value="Create" />
+ </div>
+ </div>
+ </td>
+ </tr>
+ </tfoot>
+ </table>
+ </form>
+ </div>
+ {{- end -}}
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/index.tmpl b/forged/templates/index.tmpl
new file mode 100644
index 0000000..bd81f04
--- /dev/null
+++ b/forged/templates/index.tmpl
@@ -0,0 +1,63 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "index" -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>Index &ndash; {{ .global.forge_title -}}</title>
+ </head>
+ <body class="index">
+ {{- template "header" . -}}
+ <div class="padding-wrapper">
+ <table class="wide">
+ <thead>
+ <tr>
+ <th colspan="2" class="title-row">Groups</th>
+ </tr>
+ <tr>
+ <th scope="col">Name</th>
+ <th scope="col">Description</th>
+ </tr>
+ </thead>
+ <tbody>
+ {{- range .groups -}}
+ <tr>
+ <td>
+ <a href="{{- .Name | path_escape -}}/">{{- .Name -}}</a>
+ </td>
+ <td>
+ {{- .Description -}}
+ </td>
+ </tr>
+ {{- end -}}
+ </tbody>
+ </table>
+ <table class="wide">
+ <thead>
+ <tr>
+ <th colspan="2" class="title-row">
+ Info
+ </th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th scope="row">SSH public key</th>
+ <td><code class="breakable">{{- .global.server_public_key_string -}}</code></td>
+ </tr>
+ <tr>
+ <th scope="row">SSH fingerprint</th>
+ <td><code class="breakable">{{- .global.server_public_key_fingerprint -}}</code></td>
+ </tr>
+ </tbody>
+ </table>
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/login.tmpl b/forged/templates/login.tmpl
new file mode 100644
index 0000000..1e26c82
--- /dev/null
+++ b/forged/templates/login.tmpl
@@ -0,0 +1,59 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "login" -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>Login &ndash; {{ .global.forge_title -}}</title>
+ </head>
+ <body class="index">
+ {{- .login_error -}}
+ <div class="padding-wrapper">
+ <form method="POST" enctype="application/x-www-form-urlencoded">
+ <table>
+ <thead>
+ <tr>
+ <th class="title-row" colspan="2">
+ Password authentication
+ </th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th scope="row">Username</th>
+ <td class="tdinput">
+ <input id="usernameinput" name="username" type="text" />
+ </td>
+ </tr>
+ <tr>
+ <th scope="row">Password</th>
+ <td class="tdinput">
+ <input id="passwordinput" name="password" type="password" />
+ </td>
+ </tr>
+ </tbody>
+ <tfoot>
+ <tr>
+ <td class="th-like" colspan="2">
+ <div class="flex-justify">
+ <div class="left">
+ </div>
+ <div class="right">
+ <input class="btn-primary" type="submit" value="Submit" />
+ </div>
+ </div>
+ </td>
+ </tr>
+ </tfoot>
+ </table>
+ </form>
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/repo_branches.tmpl b/forged/templates/repo_branches.tmpl
new file mode 100644
index 0000000..55ea0a6
--- /dev/null
+++ b/forged/templates/repo_branches.tmpl
@@ -0,0 +1,71 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "repo_branches" -}}
+{{- $root := . -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>{{ .repo_name }} &ndash; {{ template "group_path_plain" .group_path }} &ndash; {{ .global.forge_title -}}</title>
+ </head>
+ <body class="repo-branches">
+ {{- template "header" . -}}
+ <div class="repo-header">
+ <h2>{{- .repo_name -}}</h2>
+ <ul class="nav-tabs-standalone">
+ <li class="nav-item">
+ <a class="nav-link" href="../{{- template "ref_query" $root -}}">Summary</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../tree/{{- template "ref_query" $root -}}">Tree</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../log/{{- template "ref_query" $root -}}">Log</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link active" href="../branches/">Branches</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../tags/">Tags</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../contrib/">Merge requests</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../settings/">Settings</a>
+ </li>
+ </ul>
+ </div>
+ <div class="repo-header-extension">
+ <div class="repo-header-extension-content">
+ {{- .repo_description -}}
+ </div>
+ </div>
+ <div class="padding-wrapper">
+ <p>
+ <strong>
+ Warning: Due to various recent migrations, viewing non-HEAD refs may be broken.
+ </strong>
+ </p>
+ <table id="branches">
+ <thead>
+ <tr class="title-row">
+ <th colspan="1">Branches</th>
+ </tr>
+ </thead>
+ <tbody>
+ {{- range .branches -}}
+ <tr>
+ <td>
+ <a href="../?branch={{ . }}">{{ . }}</a>
+ </td>
+ </tr>
+ {{- end -}}
+ </tbody>
+ </table>
+ </div>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/repo_commit.tmpl b/forged/templates/repo_commit.tmpl
new file mode 100644
index 0000000..470bba9
--- /dev/null
+++ b/forged/templates/repo_commit.tmpl
@@ -0,0 +1,117 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "repo_commit" -}}
+{{- $root := . -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>Commit {{ .commit_id }} &ndash; {{ .repo_name }} &ndash; {{ template "group_path_plain" .group_path }} &ndash; {{ .global.forge_title -}}</title>
+ </head>
+ <body class="repo-commit">
+ {{- template "header" . -}}
+ <div class="repo-header">
+ <h2>{{- .repo_name -}}</h2>
+ <ul class="nav-tabs-standalone">
+ <li class="nav-item">
+ <a class="nav-link" href="../{{- template "ref_query" $root -}}">Summary</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../tree/{{- template "ref_query" $root -}}">Tree</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../log/{{- template "ref_query" $root -}}">Log</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../branches/">Branches</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../tags/">Tags</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../contrib/">Merge requests</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../settings/">Settings</a>
+ </li>
+ </ul>
+ </div>
+ <div class="repo-header-extension">
+ <div class="repo-header-extension-content">
+ {{- .repo_description -}}
+ </div>
+ </div>
+ <div class="padding-wrapper scroll">
+ <div class="key-val-grid-wrapper">
+ <section id="commit-info" class="key-val-grid">
+ <div class="title-row">Commit info</div>
+ <div class="row-label">ID</div>
+ <div class="row-value">{{- .commit_id -}}</div>
+ <div class="row-label">Author</div>
+ <div class="row-value">
+ <span>{{- .commit_object.Author.Name -}}</span> <span>&lt;<a href="mailto:{{- .commit_object.Author.Email -}}">{{- .commit_object.Author.Email -}}</a>&gt;</span>
+ </div>
+ <div class="row-label">Author date</div>
+ <div class="row-value">{{- .commit_object.Author.When.Format "Mon, 02 Jan 2006 15:04:05 -0700" -}}</div>
+ <div class="row-label">Committer</div>
+ <div class="row-value">
+ <span>{{- .commit_object.Committer.Name -}}</span> <span>&lt;<a href="mailto:{{- .commit_object.Committer.Email -}}">{{- .commit_object.Committer.Email -}}</a>&gt;</span>
+ </div>
+ <div class="row-label">Committer date</div>
+ <div class="row-value">{{- .commit_object.Committer.When.Format "Mon, 02 Jan 2006 15:04:05 -0700" -}}</div>
+ <div class="row-label">Actions</div>
+ <div class="row-value">
+ <a href="{{- .commit_object.Hash -}}.patch">Get patch</a>
+ </div>
+ </section>
+ </div>
+ </div>
+
+ <div class="padding-wrapper scroll" id="this-commit-message">
+ <pre>{{- .commit_object.Message -}}</pre>
+ </div>
+ <div class="padding-wrapper">
+ {{- $parent_commit_hash := .parent_commit_hash -}}
+ {{- $commit_object := .commit_object -}}
+ {{- range .file_patches -}}
+ <div class="file-patch toggle-on-wrapper">
+ <input type="checkbox" id="toggle-{{- .From.Hash -}}{{- .To.Hash -}}" class="file-toggle toggle-on-toggle">
+ <label for="toggle-{{- .From.Hash -}}{{- .To.Hash -}}" class="file-header toggle-on-header">
+ <div>
+ {{- if eq .From.Path "" -}}
+ --- /dev/null
+ {{- else -}}
+ --- a/<a href="../tree/{{- .From.Path -}}?commit={{- $parent_commit_hash -}}">{{- .From.Path -}}</a> {{ .From.Mode -}}
+ {{- end -}}
+ <br />
+ {{- if eq .To.Path "" -}}
+ +++ /dev/null
+ {{- else -}}
+ +++ b/<a href="../tree/{{- .To.Path -}}?commit={{- $commit_object.Hash -}}">{{- .To.Path -}}</a> {{ .To.Mode -}}
+ {{- end -}}
+ </div>
+ </label>
+ <div class="file-content toggle-on-content scroll">
+ {{- range .Chunks -}}
+ {{- if eq .Operation 0 -}}
+ <pre class="chunk chunk-unchanged">{{ .Content }}</pre>
+ {{- else if eq .Operation 1 -}}
+ <pre class="chunk chunk-addition">{{ .Content }}</pre>
+ {{- else if eq .Operation 2 -}}
+ <pre class="chunk chunk-deletion">{{ .Content }}</pre>
+ {{- else -}}
+ <pre class="chunk chunk-unknown">{{ .Content }}</pre>
+ {{- end -}}
+ {{- end -}}
+ </div>
+ </div>
+ {{- end -}}
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/repo_contrib_index.tmpl b/forged/templates/repo_contrib_index.tmpl
new file mode 100644
index 0000000..172a079
--- /dev/null
+++ b/forged/templates/repo_contrib_index.tmpl
@@ -0,0 +1,82 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "repo_contrib_index" -}}
+{{- $root := . -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>Merge requests &ndash; {{ .repo_name }} &ndash; {{ template "group_path_plain" .group_path }} &ndash; {{ .global.forge_title -}}</title>
+ </head>
+ <body class="repo-contrib-index">
+ {{- template "header" . -}}
+ <div class="repo-header">
+ <h2>{{- .repo_name -}}</h2>
+ <ul class="nav-tabs-standalone">
+ <li class="nav-item">
+ <a class="nav-link" href="../{{- template "ref_query" $root -}}">Summary</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../tree/{{- template "ref_query" $root -}}">Tree</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../log/{{- template "ref_query" $root -}}">Log</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../branches/">Branches</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../tags/">Tags</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link active" href="../contrib/">Merge requests</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../settings/">Settings</a>
+ </li>
+ </ul>
+ </div>
+ <div class="repo-header-extension">
+ <div class="repo-header-extension-content">
+ {{- .repo_description -}}
+ </div>
+ </div>
+ <div class="padding-wrapper">
+ <h2>How to submit a merge request</h2>
+ <pre>git clone {{ .ssh_clone_url }}
+cd {{ .repo_name }}
+git checkout -b contrib/name_of_your_contribution
+# edit and commit stuff
+git push -u origin HEAD</pre>
+ <p>Pushes that update branches in other namespaces, or pushes to existing contribution branches belonging to other SSH keys, will be automatically
+rejected, unless you are an authenticated maintainer. Otherwise, a merge request is automatically opened, and the maintainers are notified via IRC.</p>
+ <p>Alternatively, you may <a href="https://git-send-email.io">email patches</a> to <a href="mailto:{{ .repo_patch_mailing_list }}">{{ .repo_patch_mailing_list }}</a>.</p>
+ </div>
+ <div class="padding-wrapper">
+ <table id="recent-merge_requests" class="wide">
+ <thead>
+ <tr>
+ <th scope="col">ID</th>
+ <th scope="col">Title</th>
+ <th scope="col">Status</th>
+ </tr>
+ </thead>
+ <tbody>
+ {{- range .merge_requests -}}
+ <tr>
+ <td class="merge_request-id">{{- .ID -}}</td>
+ <td class="merge_request-title"><a href="{{- .ID -}}/">{{- .Title -}}</a></td>
+ <td class="merge_request-status">{{- .Status -}}</td>
+ </tr>
+ {{- end -}}
+ </tbody>
+ </table>
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/repo_contrib_one.tmpl b/forged/templates/repo_contrib_one.tmpl
new file mode 100644
index 0000000..a5f35d3
--- /dev/null
+++ b/forged/templates/repo_contrib_one.tmpl
@@ -0,0 +1,123 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "repo_contrib_one" -}}
+{{- $root := . -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>Merge requests &ndash; {{ .repo_name }} &ndash; {{ template "group_path_plain" .group_path }} &ndash; {{ .global.forge_title -}}</title>
+ </head>
+ <body class="repo-contrib-one">
+ {{- template "header" . -}}
+ <div class="repo-header">
+ <h2>{{- .repo_name -}}</h2>
+ <ul class="nav-tabs-standalone">
+ <li class="nav-item">
+ <a class="nav-link" href="../{{- template "ref_query" $root -}}">Summary</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../tree/{{- template "ref_query" $root -}}">Tree</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../log/{{- template "ref_query" $root -}}">Log</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../branches/">Branches</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../tags/">Tags</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link active" href="../contrib/">Merge requests</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../settings/">Settings</a>
+ </li>
+ </ul>
+ </div>
+ <div class="repo-header-extension">
+ <div class="repo-header-extension-content">
+ {{- .repo_description -}}
+ </div>
+ </div>
+ <div class="padding-wrapper">
+ <table id="mr-info-table">
+ <thead>
+ <tr class="title-row">
+ <th colspan="2">Merge request info</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th scope="row">ID</th>
+ <td>{{- .mr_id -}}</td>
+ </tr>
+ <tr>
+ <th scope="row">Status</th>
+ <td>{{- .mr_status -}}</td>
+ </tr>
+ <tr>
+ <th scope="row">Title</th>
+ <td>{{- .mr_title -}}</td>
+ </tr>
+ <tr>
+ <th scope="row">Source ref</th>
+ <td>{{- .mr_source_ref -}}</td>
+ </tr>
+ <tr>
+ <th scope="row">Destination branch</th>
+ <td>{{- .mr_destination_branch -}}</td>
+ </tr>
+ <tr>
+ <th scope="row">Merge base</th>
+ <td>{{- .merge_base.Hash.String -}}</td>
+ </tr>
+ </tbody>
+ </table>
+ </div>
+ <div class="padding-wrapper">
+ {{- $merge_base := .merge_base -}}
+ {{- $source_commit := .source_commit -}}
+ {{- range .file_patches -}}
+ <div class="file-patch toggle-on-wrapper">
+ <input type="checkbox" id="toggle-{{- .From.Hash -}}{{- .To.Hash -}}" class="file-toggle toggle-on-toggle">
+ <label for="toggle-{{- .From.Hash -}}{{- .To.Hash -}}" class="file-header toggle-on-header">
+ <div>
+ {{- if eq .From.Path "" -}}
+ --- /dev/null
+ {{- else -}}
+ --- a/<a href="../../tree/{{- .From.Path -}}?commit={{- $merge_base.Hash -}}">{{- .From.Path -}}</a> {{ .From.Mode -}}
+ {{- end -}}
+ <br />
+ {{- if eq .To.Path "" -}}
+ +++ /dev/null
+ {{- else -}}
+ +++ b/<a href="../../tree/{{- .To.Path -}}?commit={{- $source_commit.Hash -}}">{{- .To.Path -}}</a> {{ .To.Mode -}}
+ {{- end -}}
+ </div>
+ </label>
+ <div class="file-content toggle-on-content scroll">
+ {{- range .Chunks -}}
+ {{- if eq .Operation 0 -}}
+ <pre class="chunk chunk-unchanged">{{ .Content }}</pre>
+ {{- else if eq .Operation 1 -}}
+ <pre class="chunk chunk-addition">{{ .Content }}</pre>
+ {{- else if eq .Operation 2 -}}
+ <pre class="chunk chunk-deletion">{{ .Content }}</pre>
+ {{- else -}}
+ <pre class="chunk chunk-unknown">{{ .Content }}</pre>
+ {{- end -}}
+ {{- end -}}
+ </div>
+ </div>
+ {{- end -}}
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/repo_index.tmpl b/forged/templates/repo_index.tmpl
new file mode 100644
index 0000000..d040f3a
--- /dev/null
+++ b/forged/templates/repo_index.tmpl
@@ -0,0 +1,94 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "repo_index" -}}
+{{- $root := . -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>{{ .repo_name }} &ndash; {{ template "group_path_plain" .group_path }} &ndash; {{ .global.forge_title -}}</title>
+ </head>
+ <body class="repo-index">
+ {{- template "header" . -}}
+ <div class="repo-header">
+ <h2>{{- .repo_name -}}</h2>
+ <ul class="nav-tabs-standalone">
+ <li class="nav-item">
+ <a class="nav-link active" href="./{{- template "ref_query" $root -}}">Summary</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="tree/{{- template "ref_query" $root -}}">Tree</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="log/{{- template "ref_query" $root -}}">Log</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="branches/">Branches</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="tags/">Tags</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="contrib/">Merge requests</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="settings/">Settings</a>
+ </li>
+ </ul>
+ </div>
+ <div class="repo-header-extension">
+ <div class="repo-header-extension-content">
+ {{- .repo_description -}}
+ </div>
+ </div>
+ {{- if .notes -}}
+		<div id="notes">Notes
+ <ul>
+ {{- range .notes -}}<li>{{- . -}}</li>{{- end -}}
+ </ul>
+ </div>
+ {{- end -}}
+ <p class="readingwidth"><code>{{- .ssh_clone_url -}}</code></p>
+ {{- if .ref_name -}}
+ <p class="readingwidth">
+ <strong>
+ Warning: Due to various recent migrations, viewing non-HEAD refs may be broken.
+ </strong>
+ </p>
+ {{- end -}}
+ {{- if .commits -}}
+ <div class="commit-list-small">
+ {{- range .commits -}}
+ <div class="event">
+ <div>
+ <a href="commit/{{- .Hash -}}" title="{{- .Hash -}}" rel="nofollow">
+ {{- .Hash | printf "%.8s" -}}
+ </a>
+ &nbsp;&mdash;&nbsp;<a href="mailto:{{- .Email -}}">{{- .Author -}}</a>
+ <small class="pull-right">
+ <span title="{{- .Date -}}">{{- .Date -}}</span>
+ </small>
+ </div>
+ <pre class="commit">{{- .Message | first_line -}}</pre>
+ </div>
+ {{- end -}}
+ {{- if dereference_error .commits_err -}}
+ <div class="commit-error">
+ Error while obtaining commit log: {{ .commits_err }}
+ </div>
+ {{- end -}}
+ </div>
+ {{- end -}}
+ {{- if .readme -}}
+ <div class="padding-wrapper" id="readme">
+ {{- .readme -}}
+ </div>
+ {{- end -}}
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/repo_log.tmpl b/forged/templates/repo_log.tmpl
new file mode 100644
index 0000000..2262902
--- /dev/null
+++ b/forged/templates/repo_log.tmpl
@@ -0,0 +1,90 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "repo_log" -}}
+{{- $root := . -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>Log &ndash; {{ .repo_name }} &ndash; {{ template "group_path_plain" .group_path }} &ndash; {{ .global.forge_title -}}</title>
+ </head>
+ <body class="repo-log">
+ {{- template "header" . -}}
+ <div class="repo-header">
+ <h2>{{- .repo_name -}}</h2>
+ <ul class="nav-tabs-standalone">
+ <li class="nav-item">
+ <a class="nav-link" href="../{{- template "ref_query" $root -}}">Summary</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../tree/{{- template "ref_query" $root -}}">Tree</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link active" href="../log/{{- template "ref_query" $root -}}">Log</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../branches/">Branches</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../tags/">Tags</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../contrib/">Merge requests</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="../settings/">Settings</a>
+ </li>
+ </ul>
+ </div>
+ <div class="repo-header-extension">
+ <div class="repo-header-extension-content">
+ {{- .repo_description -}}
+ </div>
+ </div>
+ <div class="scroll">
+ {{- if .ref_name -}}
+ <p>
+ <strong>
+ Warning: Due to various recent migrations, viewing non-HEAD refs may be broken.
+ </strong>
+ </p>
+ {{- end -}}
+ <table id="commits" class="wide">
+ <thead>
+ <tr class="title-row">
+ <th colspan="4">Commits {{ if .ref_name }} on {{ .ref_name }}{{ end -}}</th>
+ </tr>
+ <tr>
+ <th scope="col">ID</th>
+ <th scope="col">Title</th>
+ <th scope="col">Author</th>
+ <th scope="col">Author date</th>
+ </tr>
+ </thead>
+ <tbody>
+ {{- range .commits -}}
+ <tr>
+ <td class="commit-id"><a href="../commit/{{- .Hash -}}">{{- .Hash -}}</a></td>
+ <td class="commit-title">{{- .Message | first_line -}}</td>
+ <td class="commit-author">
+ <a class="email-name" href="mailto:{{- .Author.Email -}}">{{- .Author.Name -}}</a>
+ </td>
+ <td class="commit-time">
+ {{- .Author.When.Format "2006-01-02 15:04:05 -0700" -}}
+ </td>
+ </tr>
+ {{- end -}}
+ {{- if dereference_error .commits_err -}}
+ Error while obtaining commit log: {{ .commits_err }}
+ {{- end -}}
+ </tbody>
+ </table>
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/repo_raw_dir.tmpl b/forged/templates/repo_raw_dir.tmpl
new file mode 100644
index 0000000..a33da4a
--- /dev/null
+++ b/forged/templates/repo_raw_dir.tmpl
@@ -0,0 +1,88 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "repo_raw_dir" -}}
+{{- $root := . -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>/{{ .path_spec }}{{ if ne .path_spec "" }}/{{ end }} &ndash; {{ .repo_name }} &ndash; {{ template "group_path_plain" .group_path }} &ndash; {{ .global.forge_title -}}</title>
+ </head>
+ <body class="repo-raw-dir">
+ {{- template "header" . -}}
+ <div class="repo-header">
+ <h2>{{- .repo_name -}}</h2>
+ <ul class="nav-tabs-standalone">
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}{{- template "ref_query" $root -}}">Summary</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link active" href="{{- .repo_url_root -}}tree/{{- template "ref_query" $root -}}">Tree</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}log/{{- template "ref_query" $root -}}">Log</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}branches/">Branches</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}tags/">Tags</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}contrib/">Merge requests</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}settings/">Settings</a>
+ </li>
+ </ul>
+ </div>
+ <div class="repo-header-extension">
+ <div class="repo-header-extension-content">
+ {{- .repo_description -}}
+ </div>
+ </div>
+ <div class="padding-wrapper scroll">
+ {{- if .ref_name -}}
+ <p>
+ <strong>
+ Warning: Due to various recent migrations, viewing non-HEAD refs may be broken.
+ </strong>
+ </p>
+ {{- end -}}
+ <table id="file-tree" class="wide">
+ <thead>
+ <tr class="title-row">
+ <th colspan="3">
+ (Raw) /{{ .path_spec }}{{ if ne .path_spec "" }}/{{ end }}{{ if .ref_name }} on {{ .ref_name }}{{ end -}}
+ </th>
+ </tr>
+ <tr>
+ <th scope="col">Mode</th>
+ <th scope="col">Filename</th>
+ <th scope="col">Size</th>
+ </tr>
+ </thead>
+ <tbody>
+ {{- $path_spec := .path_spec -}}
+ {{- range .files -}}
+ <tr>
+ <td class="file-mode">{{- .Mode -}}</td>
+ <td class="file-name"><a href="{{- .Name -}}{{- if not .IsFile -}}/{{- end -}}{{- template "ref_query" $root -}}">{{- .Name -}}</a>{{- if not .IsFile -}}/{{- end -}}</td>
+ <td class="file-size">{{- .Size -}}</td>
+ </tr>
+ {{- end -}}
+ </tbody>
+ </table>
+ </div>
+ <div class="padding-wrapper">
+ <div id="refs">
+ </div>
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/repo_tree_dir.tmpl b/forged/templates/repo_tree_dir.tmpl
new file mode 100644
index 0000000..fc06646
--- /dev/null
+++ b/forged/templates/repo_tree_dir.tmpl
@@ -0,0 +1,93 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "repo_tree_dir" -}}
+{{- $root := . -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <title>/{{ .path_spec }}{{ if ne .path_spec "" }}/{{ end }} &ndash; {{ .repo_name }} &ndash; {{ template "group_path_plain" .group_path }} &ndash; {{ .global.forge_title -}}</title>
+ </head>
+ <body class="repo-tree-dir">
+ {{- template "header" . -}}
+ <div class="repo-header">
+ <h2>{{- .repo_name -}}</h2>
+ <ul class="nav-tabs-standalone">
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}{{- template "ref_query" $root -}}">Summary</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link active" href="{{- .repo_url_root -}}tree/{{- template "ref_query" $root -}}">Tree</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}log/{{- template "ref_query" $root -}}">Log</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}branches/">Branches</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}tags/">Tags</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}contrib/">Merge requests</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}settings/">Settings</a>
+ </li>
+ </ul>
+ </div>
+ <div class="repo-header-extension">
+ <div class="repo-header-extension-content">
+ {{- .repo_description -}}
+ </div>
+ </div>
+ <div class="padding-wrapper scroll">
+ {{- if .ref_name -}}
+ <p>
+ <strong>
+ Warning: Due to various recent migrations, viewing non-HEAD refs may be broken.
+ </strong>
+ </p>
+ {{- end -}}
+ <table id="file-tree" class="wide">
+ <thead>
+ <tr class="title-row">
+ <th colspan="3">
+ /{{ .path_spec }}{{ if ne .path_spec "" }}/{{ end }}{{ if .ref_name }} on {{ .ref_name }}{{ end -}}
+					</th>
+				</tr>
+				<tr>
+					<th scope="col">Mode</th>
+					<th scope="col">Filename</th>
+					<th scope="col">Size</th>
+				</tr>
+ </thead>
+ <tbody>
+ {{- $path_spec := .path_spec -}}
+ {{- range .files -}}
+ <tr>
+ <td class="file-mode">{{- .Mode -}}</td>
+ <td class="file-name"><a href="{{- .Name -}}{{- if not .IsFile -}}/{{- end -}}{{- template "ref_query" $root -}}">{{- .Name -}}</a>{{- if not .IsFile -}}/{{- end -}}</td>
+ <td class="file-size">{{- .Size -}}</td>
+ </tr>
+ {{- end -}}
+ </tbody>
+ </table>
+ </div>
+ <div class="padding-wrapper">
+ <div id="refs">
+ </div>
+ </div>
+ {{- if .readme -}}
+ <div class="padding-wrapper" id="readme">
+ {{- .readme -}}
+ </div>
+ {{- end -}}
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/forged/templates/repo_tree_file.tmpl b/forged/templates/repo_tree_file.tmpl
new file mode 100644
index 0000000..76404a9
--- /dev/null
+++ b/forged/templates/repo_tree_file.tmpl
@@ -0,0 +1,65 @@
+{{/*
+ SPDX-License-Identifier: AGPL-3.0-only
+ SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+*/}}
+{{- define "repo_tree_file" -}}
+{{- $root := . -}}
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ {{- template "head_common" . -}}
+ <link rel="stylesheet" href="/-/static/chroma.css" />
+ <title>/{{ .path_spec }} &ndash; {{ .repo_name }} &ndash; {{ template "group_path_plain" .group_path }} &ndash; {{ .global.forge_title -}}</title>
+ </head>
+ <body class="repo-tree-file">
+ {{- template "header" . -}}
+ <div class="repo-header">
+ <h2>{{- .repo_name -}}</h2>
+ <ul class="nav-tabs-standalone">
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}{{- template "ref_query" $root -}}">Summary</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link active" href="{{- .repo_url_root -}}tree/{{- template "ref_query" $root -}}">Tree</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}log/{{- template "ref_query" $root -}}">Log</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}branches/">Branches</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}tags/">Tags</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}contrib/">Merge requests</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" href="{{- .repo_url_root -}}settings/">Settings</a>
+ </li>
+ </ul>
+ </div>
+ <div class="repo-header-extension">
+ <div class="repo-header-extension-content">
+ {{- .repo_description -}}
+ </div>
+ </div>
+ <div class="padding">
+ {{- if .ref_name -}}
+ <p>
+ <strong>
+ Warning: Due to various recent migrations, viewing non-HEAD refs may be broken.
+ </strong>
+ </p>
+ {{- end -}}
+ <p>
+ /{{ .path_spec }} (<a href="/{{ template "group_path_plain" .group_path }}/-/repos/{{ .repo_name }}/raw/{{ .path_spec }}{{- template "ref_query" $root -}}">raw</a>)
+ </p>
+ {{- .file_contents -}}
+ </div>
+ <footer>
+ {{- template "footer" . -}}
+ </footer>
+ </body>
+</html>
+{{- end -}}
diff --git a/git2d/.gitignore b/git2d/.gitignore
new file mode 100644
index 0000000..635d84d
--- /dev/null
+++ b/git2d/.gitignore
@@ -0,0 +1 @@
+/git2d
diff --git a/git2d/bare.c b/git2d/bare.c
new file mode 100644
index 0000000..b580980
--- /dev/null
+++ b/git2d/bare.c
@@ -0,0 +1,309 @@
+/*-
+ * SPDX-License-Identifier: MIT
+ * SPDX-FileCopyrightText: Copyright (c) 2022 Frank Smit <https://61924.nl/>
+ */
+
+#include <string.h>
+#include <stdbool.h>
+
+#include "bare.h"
+
/* Mark a parameter as intentionally unused to silence compiler warnings. */
#define UNUSED(x) (void)(x)

/* Wire sizes, in bytes, of the BARE primitive encodings. */
enum {
	U8SZ = 1,
	U16SZ = 2,
	U32SZ = 4,
	U64SZ = 8,
	MAXVARINTSZ = 10,	/* worst case for a 64-bit varint: ceil(64/7) bytes */
};
+
+bare_error bare_put_uint(struct bare_writer *ctx, uint64_t x)
+{
+ uint64_t i = 0;
+ uint8_t b[MAXVARINTSZ];
+
+ while (x >= 0x80) {
+ b[i] = (uint8_t) x | 0x80;
+ x >>= 7;
+ i++;
+ }
+
+ b[i] = (uint8_t) x;
+ i++;
+
+ return ctx->write(ctx->buffer, b, i);
+}
+
/*
 * Decode a BARE unsigned varint into *x: each byte carries 7 data bits,
 * and a set high bit means another byte follows.
 *
 * NOTE(review): a varint whose 10th byte still has the continuation bit
 * set is not rejected, and data bits beyond bit 63 are silently
 * truncated by the final shift — malformed input can decode without an
 * error.  Confirm this laxity is acceptable for the protocol.
 */
bare_error bare_get_uint(struct bare_reader *ctx, uint64_t *x)
{
	bare_error err = BARE_ERROR_NONE;

	uint8_t shift = 0;
	uint64_t result = 0;

	for (uint8_t i = 0; i < 10; i++) {
		uint8_t b;

		/* Pull the next byte; stop on transport failure. */
		err = ctx->read(ctx->buffer, &b, U8SZ);
		if (err != BARE_ERROR_NONE) {
			break;
		}

		if (b < 0x80) {
			/* High bit clear: this is the final byte. */
			result |= (uint64_t) b << shift;
			break;
		} else {
			/* High bit set: take 7 bits and continue. */
			result |= ((uint64_t) b & 0x7f) << shift;
			shift += 7;
		}
	}

	/* On failure *x still gets the (partial) accumulator. */
	*x = result;

	return err;
}
+
+bare_error bare_put_int(struct bare_writer *ctx, int64_t x)
+{
+ uint64_t ux = (uint64_t) x << 1;
+
+ if (x < 0) {
+ ux = ~ux;
+ }
+
+ return bare_put_uint(ctx, ux);
+}
+
+bare_error bare_get_int(struct bare_reader *ctx, int64_t *x)
+{
+ uint64_t ux;
+
+ bare_error err = bare_get_uint(ctx, &ux);
+
+ if (err == BARE_ERROR_NONE) {
+ *x = (int64_t) (ux >> 1);
+
+ if ((ux & 1) != 0) {
+ *x = ~(*x);
+ }
+ }
+
+ return err;
+}
+
/* Write a single octet verbatim. */
bare_error bare_put_u8(struct bare_writer *ctx, uint8_t x)
{
	return ctx->write(ctx->buffer, &x, U8SZ);
}

/* Read a single octet into *x. */
bare_error bare_get_u8(struct bare_reader *ctx, uint8_t *x)
{
	return ctx->read(ctx->buffer, x, U8SZ);
}
+
+bare_error bare_put_u16(struct bare_writer *ctx, uint16_t x)
+{
+ return ctx->write(ctx->buffer, (uint8_t[U16SZ]) {
+ x, x >> 8}
+ , U16SZ);
+}
+
+bare_error bare_get_u16(struct bare_reader *ctx, uint16_t *x)
+{
+ bare_error err = ctx->read(ctx->buffer, x, U16SZ);
+
+ if (err == BARE_ERROR_NONE) {
+ *x = (uint16_t) ((uint8_t *) x)[0]
+ | (uint16_t) ((uint8_t *) x)[1] << 8;
+ }
+
+ return err;
+}
+
+bare_error bare_put_u32(struct bare_writer *ctx, uint32_t x)
+{
+ uint8_t buf[U32SZ];
+
+ buf[0] = (uint8_t) (x);
+ buf[1] = (uint8_t) (x >> 8);
+ buf[2] = (uint8_t) (x >> 16);
+ buf[3] = (uint8_t) (x >> 24);
+
+ return ctx->write(ctx->buffer, buf, U32SZ);
+}
+
+bare_error bare_get_u32(struct bare_reader *ctx, uint32_t *x)
+{
+ bare_error err = ctx->read(ctx->buffer, x, U32SZ);
+
+ if (err == BARE_ERROR_NONE) {
+ *x = (uint32_t) (((uint8_t *) x)[0])
+ | (uint32_t) (((uint8_t *) x)[1] << 8)
+ | (uint32_t) (((uint8_t *) x)[2] << 16)
+ | (uint32_t) (((uint8_t *) x)[3] << 24);
+ }
+
+ return err;
+}
+
+bare_error bare_put_u64(struct bare_writer *ctx, uint64_t x)
+{
+ uint8_t buf[U64SZ];
+
+ buf[0] = x;
+ buf[1] = x >> 8;
+ buf[2] = x >> 16;
+ buf[3] = x >> 24;
+ buf[4] = x >> 32;
+ buf[5] = x >> 40;
+ buf[6] = x >> 48;
+ buf[7] = x >> 56;
+
+ return ctx->write(ctx->buffer, buf, U64SZ);
+}
+
+bare_error bare_get_u64(struct bare_reader *ctx, uint64_t *x)
+{
+ bare_error err = ctx->read(ctx->buffer, x, U64SZ);
+
+ if (err == BARE_ERROR_NONE) {
+ *x = (uint64_t) ((uint8_t *) x)[0]
+ | (uint64_t) ((uint8_t *) x)[1] << 8
+ | (uint64_t) ((uint8_t *) x)[2] << 16
+ | (uint64_t) ((uint8_t *) x)[3] << 24
+ | (uint64_t) ((uint8_t *) x)[4] << 32
+ | (uint64_t) ((uint8_t *) x)[5] << 40
+ | (uint64_t) ((uint8_t *) x)[6] << 48
+ | (uint64_t) ((uint8_t *) x)[7] << 56;
+ }
+
+ return err;
+}
+
/*
 * Fixed-width signed integers travel as their two's-complement bit
 * patterns: each wrapper defers to the unsigned routine of the same
 * width.  The signed->unsigned conversions are well-defined (modular)
 * in C, and the pointer casts on the get side reinterpret the same
 * storage.
 */
bare_error bare_put_i8(struct bare_writer *ctx, int8_t x)
{
	return bare_put_u8(ctx, x);
}

bare_error bare_get_i8(struct bare_reader *ctx, int8_t *x)
{
	return bare_get_u8(ctx, (uint8_t *) x);
}

bare_error bare_put_i16(struct bare_writer *ctx, int16_t x)
{
	return bare_put_u16(ctx, x);
}

bare_error bare_get_i16(struct bare_reader *ctx, int16_t *x)
{
	return bare_get_u16(ctx, (uint16_t *) x);
}

bare_error bare_put_i32(struct bare_writer *ctx, int32_t x)
{
	return bare_put_u32(ctx, x);
}

bare_error bare_get_i32(struct bare_reader *ctx, int32_t *x)
{
	return bare_get_u32(ctx, (uint32_t *) x);
}

bare_error bare_put_i64(struct bare_writer *ctx, int64_t x)
{
	return bare_put_u64(ctx, x);
}

bare_error bare_get_i64(struct bare_reader *ctx, int64_t *x)
{
	return bare_get_u64(ctx, (uint64_t *) x);
}
+
+bare_error bare_put_f32(struct bare_writer *ctx, float x)
+{
+ uint32_t b;
+ memcpy(&b, &x, U32SZ);
+
+ return bare_put_u32(ctx, b);
+}
+
+bare_error bare_get_f32(struct bare_reader *ctx, float *x)
+{
+ return ctx->read(ctx->buffer, x, U32SZ);
+}
+
+bare_error bare_put_f64(struct bare_writer *ctx, double x)
+{
+ uint64_t b;
+ memcpy(&b, &x, U64SZ);
+
+ return bare_put_u64(ctx, b);
+}
+
+bare_error bare_get_f64(struct bare_reader *ctx, double *x)
+{
+ return ctx->read(ctx->buffer, x, U64SZ);
+}
+
/* Write a bool as a single octet (0 or 1). */
bare_error bare_put_bool(struct bare_writer *ctx, bool x)
{
	return bare_put_u8(ctx, (uint8_t) x);
}
+
+bare_error bare_get_bool(struct bare_reader *ctx, bool *x)
+{
+ return bare_get_u8(ctx, (uint8_t *) x);
+}
+
+bare_error
+bare_put_fixed_data(struct bare_writer *ctx, const uint8_t *src, uint64_t sz)
+{
+ return ctx->write(ctx->buffer, (void *)src, sz);
+}
+
+bare_error
+bare_get_fixed_data(struct bare_reader *ctx, uint8_t *dst, uint64_t sz)
+{
+ return ctx->read(ctx->buffer, dst, sz);
+}
+
+bare_error
+bare_put_data(struct bare_writer *ctx, const uint8_t *src, uint64_t sz)
+{
+ bare_error err = BARE_ERROR_NONE;
+
+ err = bare_put_uint(ctx, sz);
+
+ if (err == BARE_ERROR_NONE) {
+ err = bare_put_fixed_data(ctx, src, sz);
+ }
+
+ return err;
+}
+
+bare_error bare_get_data(struct bare_reader *ctx, uint8_t *dst, uint64_t sz)
+{
+ bare_error err = BARE_ERROR_NONE;
+ uint64_t ssz = 0;
+
+ err = bare_get_uint(ctx, &ssz);
+
+ if (err == BARE_ERROR_NONE) {
+ err = ssz <= sz ? bare_get_fixed_data(ctx, dst, ssz)
+ : BARE_ERROR_BUFFER_TOO_SMALL;
+ }
+
+ return err;
+}
+
/* Write a length-prefixed string of exactly sz bytes (no NUL included). */
bare_error bare_put_str(struct bare_writer *ctx, const char *src, uint64_t sz)
{
	return bare_put_data(ctx, (uint8_t *) src, sz);
}

/*
 * Read a length-prefixed string of at most sz bytes into dst.  No NUL
 * terminator is appended and UTF-8 validity is not checked (see note in
 * bare.h); callers must terminate/validate themselves.
 */
bare_error bare_get_str(struct bare_reader *ctx, char *dst, uint64_t sz)
{
	return bare_get_data(ctx, (uint8_t *) dst, sz);
}
diff --git a/git2d/bare.h b/git2d/bare.h
new file mode 100644
index 0000000..e813464
--- /dev/null
+++ b/git2d/bare.h
@@ -0,0 +1,72 @@
+/*-
+ * SPDX-License-Identifier: MIT
+ * SPDX-FileCopyrightText: Copyright (c) 2022 Frank Smit <https://61924.nl/>
+ */
+
+#ifndef BARE_H
+#define BARE_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
/* Status codes returned by every put/get routine in this library. */
typedef enum {
	BARE_ERROR_NONE,
	BARE_ERROR_WRITE_FAILED,
	BARE_ERROR_READ_FAILED,
	BARE_ERROR_BUFFER_TOO_SMALL,
	BARE_ERROR_INVALID_UTF8,
} bare_error;

/*
 * Transport callbacks.  `buffer` is an opaque context pointer owned by
 * the caller.  The decoders assume a callback transfers all sz bytes;
 * a short transfer must be reported as an error code.
 */
typedef bare_error (*bare_write_func)(void *buffer, const void *src, uint64_t sz);
typedef bare_error (*bare_read_func)(void *buffer, void *dst, uint64_t sz);

/* Serialization sink: opaque context plus its write callback. */
struct bare_writer {
	void *buffer;
	bare_write_func write;
};

/* Deserialization source: opaque context plus its read callback. */
struct bare_reader {
	void *buffer;
	bare_read_func read;
};

/* Unsigned integers; fixed-width forms are little-endian on the wire. */
bare_error bare_put_uint(struct bare_writer *ctx, uint64_t x);	/* varuint */
bare_error bare_get_uint(struct bare_reader *ctx, uint64_t *x);	/* varuint */
bare_error bare_put_u8(struct bare_writer *ctx, uint8_t x);
bare_error bare_get_u8(struct bare_reader *ctx, uint8_t *x);
bare_error bare_put_u16(struct bare_writer *ctx, uint16_t x);
bare_error bare_get_u16(struct bare_reader *ctx, uint16_t *x);
bare_error bare_put_u32(struct bare_writer *ctx, uint32_t x);
bare_error bare_get_u32(struct bare_reader *ctx, uint32_t *x);
bare_error bare_put_u64(struct bare_writer *ctx, uint64_t x);
bare_error bare_get_u64(struct bare_reader *ctx, uint64_t *x);

/* Signed integers; the varint form is ZigZag-encoded. */
bare_error bare_put_int(struct bare_writer *ctx, int64_t x);	/* varint */
bare_error bare_get_int(struct bare_reader *ctx, int64_t *x);	/* varint */
bare_error bare_put_i8(struct bare_writer *ctx, int8_t x);
bare_error bare_get_i8(struct bare_reader *ctx, int8_t *x);
bare_error bare_put_i16(struct bare_writer *ctx, int16_t x);
bare_error bare_get_i16(struct bare_reader *ctx, int16_t *x);
bare_error bare_put_i32(struct bare_writer *ctx, int32_t x);
bare_error bare_get_i32(struct bare_reader *ctx, int32_t *x);
bare_error bare_put_i64(struct bare_writer *ctx, int64_t x);
bare_error bare_get_i64(struct bare_reader *ctx, int64_t *x);

/* IEEE-754 floats, transported as their bit patterns. */
bare_error bare_put_f32(struct bare_writer *ctx, float x);
bare_error bare_get_f32(struct bare_reader *ctx, float *x);
bare_error bare_put_f64(struct bare_writer *ctx, double x);
bare_error bare_get_f64(struct bare_reader *ctx, double *x);

bare_error bare_put_bool(struct bare_writer *ctx, bool x);
bare_error bare_get_bool(struct bare_reader *ctx, bool *x);

/* fixed_data: raw bytes; data/str: varint length prefix then bytes. */
bare_error bare_put_fixed_data(struct bare_writer *ctx, const uint8_t *src, uint64_t sz);
bare_error bare_get_fixed_data(struct bare_reader *ctx, uint8_t *dst, uint64_t sz);
bare_error bare_put_data(struct bare_writer *ctx, const uint8_t *src, uint64_t sz);
bare_error bare_get_data(struct bare_reader *ctx, uint8_t *dst, uint64_t sz);
bare_error bare_put_str(struct bare_writer *ctx, const char *src, uint64_t sz);
bare_error bare_get_str(struct bare_reader *ctx, char *dst, uint64_t sz);

/* Note that the _str implementation here does not check for UTF-8 validity. */

#endif /* BARE_H */
diff --git a/git2d/cmd1.c b/git2d/cmd1.c
new file mode 100644
index 0000000..a7d8b07
--- /dev/null
+++ b/git2d/cmd1.c
@@ -0,0 +1,129 @@
+/*-
+ * SPDX-License-Identifier: AGPL-3.0-only
+ * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+ */
+
+#include "x.h"
+
+int cmd_index(git_repository *repo, struct bare_writer *writer)
+{
+ /* HEAD tree */
+
+ git_object *obj = NULL;
+ int err = git_revparse_single(&obj, repo, "HEAD^{tree}");
+ if (err != 0) {
+ bare_put_uint(writer, 4);
+ return -1;
+ }
+ git_tree *tree = (git_tree *) obj;
+
+ /* README */
+
+ git_tree_entry *entry = NULL;
+ err = git_tree_entry_bypath(&entry, tree, "README.md");
+ if (err != 0) {
+ bare_put_uint(writer, 5);
+ git_tree_free(tree);
+ return -1;
+ }
+ git_otype objtype = git_tree_entry_type(entry);
+ if (objtype != GIT_OBJECT_BLOB) {
+ bare_put_uint(writer, 6);
+ git_tree_entry_free(entry);
+ git_tree_free(tree);
+ return -1;
+ }
+ git_object *obj2 = NULL;
+ err = git_tree_entry_to_object(&obj2, repo, entry);
+ if (err != 0) {
+ bare_put_uint(writer, 7);
+ git_tree_entry_free(entry);
+ git_tree_free(tree);
+ return -1;
+ }
+ git_blob *blob = (git_blob *) obj2;
+ const void *content = git_blob_rawcontent(blob);
+ if (content == NULL) {
+ bare_put_uint(writer, 8);
+ git_blob_free(blob);
+ git_tree_entry_free(entry);
+ git_tree_free(tree);
+ return -1;
+ }
+ bare_put_uint(writer, 0);
+ bare_put_data(writer, content, git_blob_rawsize(blob));
+
+ /* Commits */
+
+ /* TODO BUG: This might be a different commit from the displayed README due to races */
+
+ git_revwalk *walker = NULL;
+ if (git_revwalk_new(&walker, repo) != 0) {
+ bare_put_uint(writer, 9);
+ git_blob_free(blob);
+ git_tree_entry_free(entry);
+ git_tree_free(tree);
+ return -1;
+ }
+
+ if (git_revwalk_push_head(walker) != 0) {
+ bare_put_uint(writer, 9);
+ git_revwalk_free(walker);
+ git_blob_free(blob);
+ git_tree_entry_free(entry);
+ git_tree_free(tree);
+ return -1;
+ }
+
+ int count = 0;
+ git_oid oid;
+ while (count < 3 && git_revwalk_next(&oid, walker) == 0) {
+ git_commit *commit = NULL;
+ if (git_commit_lookup(&commit, repo, &oid) != 0)
+ break;
+
+ const char *msg = git_commit_summary(commit);
+ const git_signature *author = git_commit_author(commit);
+
+ /* ID */
+ bare_put_data(writer, oid.id, GIT_OID_RAWSZ);
+
+ /* Title */
+ size_t msg_len = msg ? strlen(msg) : 0;
+ bare_put_data(writer, (const uint8_t *)(msg ? msg : ""),
+ msg_len);
+
+ /* Author's name */
+ const char *author_name = author ? author->name : "";
+ bare_put_data(writer, (const uint8_t *)author_name,
+ strlen(author_name));
+
+ /* Author's email */
+ const char *author_email = author ? author->email : "";
+ bare_put_data(writer, (const uint8_t *)author_email,
+ strlen(author_email));
+
+ /* Author's date */
+ /* TODO: Pass the integer instead of a string */
+ time_t time = git_commit_time(commit);
+ char timebuf[64];
+ struct tm *tm = localtime(&time);
+ if (tm)
+ strftime(timebuf, sizeof(timebuf), "%Y-%m-%d %H:%M:%S",
+ tm);
+ else
+ strcpy(timebuf, "unknown");
+ bare_put_data(writer, (const uint8_t *)timebuf,
+ strlen(timebuf));
+
+ git_commit_free(commit);
+ count++;
+ }
+
+ git_revwalk_free(walker);
+ git_blob_free(blob);
+ git_tree_entry_free(entry);
+ git_tree_free(tree);
+
+ return 0;
+}
diff --git a/git2d/cmd2.c b/git2d/cmd2.c
new file mode 100644
index 0000000..dd72ddb
--- /dev/null
+++ b/git2d/cmd2.c
@@ -0,0 +1,126 @@
+/*-
+ * SPDX-License-Identifier: AGPL-3.0-only
+ * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+ */
+
+#include "x.h"
+
/*
 * Command 2: raw tree/blob lookup.
 *
 * Reads a length-prefixed path (up to 4095 bytes) from the client and
 * resolves it against HEAD^{tree} ("" means the root tree).  Replies:
 *   - status 0, kind 1, entry count, then one (type, mode, size, name)
 *     record per entry, when the path names a tree;
 *   - status 0, kind 2, blob contents, when the path names a blob;
 *   - a nonzero status (3, 4, 7, 8, 11) otherwise.
 * Returns -1 only on the early failures (unreadable path, unresolvable
 * HEAD tree); all later failures reply to the client and return 0.
 */
int
cmd_treeraw(git_repository *repo, struct bare_reader *reader,
	    struct bare_writer *writer)
{
	/* Path */
	char path[4096] = { 0 };
	int err = bare_get_data(reader, (uint8_t *) path, sizeof(path) - 1);
	if (err != BARE_ERROR_NONE) {
		bare_put_uint(writer, 11);
		return -1;
	}
	/* Capacity is sizeof(path)-1, so this guarantees NUL termination. */
	path[sizeof(path) - 1] = '\0';

	/* HEAD^{tree} */
	git_object *head_obj = NULL;
	err = git_revparse_single(&head_obj, repo, "HEAD^{tree}");
	if (err != 0) {
		bare_put_uint(writer, 4);
		return -1;
	}
	git_tree *tree = (git_tree *) head_obj;

	/* Path in tree */
	git_tree_entry *entry = NULL;
	git_otype objtype;
	if (strlen(path) == 0) {
		/* Empty path: serve the root tree itself (no entry). */
		entry = NULL;
		objtype = GIT_OBJECT_TREE;
	} else {
		err = git_tree_entry_bypath(&entry, tree, path);
		if (err != 0) {
			bare_put_uint(writer, 3);
			git_tree_free(tree);
			return 0;
		}
		objtype = git_tree_entry_type(entry);
	}

	if (objtype == GIT_OBJECT_TREE) {
		/* Tree */
		git_object *tree_obj = NULL;
		if (entry == NULL) {
			/* Root: subtree aliases tree; freed once in cleanup. */
			tree_obj = (git_object *) tree;
		} else {
			err = git_tree_entry_to_object(&tree_obj, repo, entry);
			if (err != 0) {
				bare_put_uint(writer, 7);
				goto cleanup;
			}
		}
		git_tree *subtree = (git_tree *) tree_obj;

		size_t count = git_tree_entrycount(subtree);
		bare_put_uint(writer, 0);
		bare_put_uint(writer, 1);
		bare_put_uint(writer, count);
		for (size_t i = 0; i < count; i++) {
			const git_tree_entry *subentry =
			    git_tree_entry_byindex(subtree, i);
			const char *name = git_tree_entry_name(subentry);
			git_otype type = git_tree_entry_type(subentry);
			uint32_t mode = git_tree_entry_filemode(subentry);

			/* 1 = subtree, 2 = blob, 0 = anything else. */
			uint8_t entry_type = 0;
			uint64_t size = 0;

			if (type == GIT_OBJECT_TREE) {
				entry_type = 1;
			} else if (type == GIT_OBJECT_BLOB) {
				entry_type = 2;

				/* Load the blob just to report its size. */
				git_object *subobj = NULL;
				if (git_tree_entry_to_object
				    (&subobj, repo, subentry) == 0) {
					git_blob *b = (git_blob *) subobj;
					size = git_blob_rawsize(b);
					git_blob_free(b);
				}
			}

			bare_put_uint(writer, entry_type);
			bare_put_uint(writer, mode);
			bare_put_uint(writer, size);
			bare_put_data(writer, (const uint8_t *)name,
				      strlen(name));
		}
		/* Only free subtree when it is distinct from tree. */
		if (entry != NULL) {
			git_tree_free(subtree);
		}
	} else if (objtype == GIT_OBJECT_BLOB) {
		/* Blob */
		git_object *blob_obj = NULL;
		err = git_tree_entry_to_object(&blob_obj, repo, entry);
		if (err != 0) {
			bare_put_uint(writer, 7);
			goto cleanup;
		}
		git_blob *blob = (git_blob *) blob_obj;
		const void *content = git_blob_rawcontent(blob);
		if (content == NULL) {
			bare_put_uint(writer, 8);
			git_blob_free(blob);
			goto cleanup;
		}
		bare_put_uint(writer, 0);
		bare_put_uint(writer, 2);
		bare_put_data(writer, content, git_blob_rawsize(blob));
		git_blob_free(blob);
	} else {
		/* Unknown */
		/*
		 * NOTE(review): -1 converts to UINT64_MAX, so the client
		 * receives a 10-byte all-ones varint as the status —
		 * confirm the peer expects this sentinel.
		 */
		bare_put_uint(writer, -1);
	}

 cleanup:
	if (entry != NULL)
		git_tree_entry_free(entry);
	git_tree_free(tree);
	return 0;
}
diff --git a/git2d/main.c b/git2d/main.c
new file mode 100644
index 0000000..9140c1d
--- /dev/null
+++ b/git2d/main.c
@@ -0,0 +1,82 @@
+/*-
+ * SPDX-License-Identifier: AGPL-3.0-only
+ * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+ */
+
+/*
+ * TODO: Pool repositories (and take care of thread safety)
+ * libgit2 has a nice builtin per-repo cache that we could utilize this way.
+ */
+
+#include "x.h"
+
+int main(int argc, char **argv)
+{
+ if (argc != 2) {
+ errx(1, "provide one argument: the socket path");
+ }
+
+ signal(SIGPIPE, SIG_IGN);
+
+ git_libgit2_init();
+
+ int sock;
+ if ((sock = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0)) < 0)
+ err(1, "socket");
+
+ struct sockaddr_un addr;
+ memset(&addr, 0, sizeof(addr));
+ addr.sun_family = AF_UNIX;
+ strcpy(addr.sun_path, argv[1]);
+
+ umask(0077);
+
+ if (bind(sock, (struct sockaddr *)&addr, sizeof(struct sockaddr_un))) {
+ if (errno == EADDRINUSE) {
+ unlink(argv[1]);
+ if (bind
+ (sock, (struct sockaddr *)&addr,
+ sizeof(struct sockaddr_un)))
+ err(1, "bind");
+ } else {
+ err(1, "bind");
+ }
+ }
+
+ listen(sock, 128);
+
+ pthread_attr_t pthread_attr;
+
+ if (pthread_attr_init(&pthread_attr) != 0)
+ err(1, "pthread_attr_init");
+
+ if (pthread_attr_setdetachstate(&pthread_attr, PTHREAD_CREATE_DETACHED) != 0)
+ err(1, "pthread_attr_setdetachstate");
+
+ for (;;) {
+ int *conn = malloc(sizeof(int));
+ if (conn == NULL) {
+ warn("malloc");
+ continue;
+ }
+
+ *conn = accept(sock, 0, 0);
+ if (*conn == -1) {
+ free(conn);
+ warn("accept");
+ continue;
+ }
+
+ pthread_t thread;
+
+ if (pthread_create (&thread, &pthread_attr, session, (void *)conn) != 0) {
+ close(*conn);
+ free(conn);
+ warn("pthread_create");
+ }
+ }
+
+ close(sock);
+
+ git_libgit2_shutdown();
+}
diff --git a/git2d/rw.c b/git2d/rw.c
new file mode 100644
index 0000000..09398c2
--- /dev/null
+++ b/git2d/rw.c
@@ -0,0 +1,34 @@
+/*-
+ * SPDX-License-Identifier: AGPL-3.0-only
+ * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+ */
+
+#include "x.h"
+
+bare_error conn_read(void *buffer, void *dst, uint64_t sz)
+{
+ conn_io_t *io = buffer;
+ ssize_t rsz = read(io->fd, dst, sz);
+ return (rsz == (ssize_t) sz) ? BARE_ERROR_NONE : BARE_ERROR_READ_FAILED;
+}
+
+bare_error conn_write(void *buffer, const void *src, uint64_t sz)
+{
+ conn_io_t *io = buffer;
+ const uint8_t *data = src;
+ uint64_t total = 0;
+
+ while (total < sz) {
+ ssize_t written = write(io->fd, data + total, sz - total);
+ if (written < 0) {
+ if (errno == EINTR)
+ continue;
+ return BARE_ERROR_WRITE_FAILED;
+ }
+ if (written == 0)
+ break;
+ total += written;
+ }
+
+ return (total == sz) ? BARE_ERROR_NONE : BARE_ERROR_WRITE_FAILED;
+}
diff --git a/git2d/session.c b/git2d/session.c
new file mode 100644
index 0000000..0a945ee
--- /dev/null
+++ b/git2d/session.c
@@ -0,0 +1,78 @@
+/*-
+ * SPDX-License-Identifier: AGPL-3.0-only
+ * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+ */
+
+#include "x.h"
+
/*
 * Per-connection thread entry point (spawned detached from main()).
 *
 * Protocol, all BARE-encoded: the client first sends a repository path,
 * then a command number.  Command 1 answers a repository summary
 * (cmd_index); command 2 a tree/blob lookup (cmd_treeraw).  Status
 * codes written here: 1 = cannot open repo, 2 = failed to read the
 * command, 3 = unknown command.
 */
void *session(void *_conn)
{
	/* Take ownership of the heap-allocated fd from main() and free it. */
	int conn = *(int *)_conn;
	free((int *)_conn);

	int err;

	/* Hook the BARE reader/writer up to this socket. */
	conn_io_t io = {.fd = conn };
	struct bare_reader reader = {
		.buffer = &io,
		.read = conn_read,
	};
	struct bare_writer writer = {
		.buffer = &io,
		.write = conn_write,
	};

	/* Repo path */
	char path[4096] = { 0 };
	err = bare_get_data(&reader, (uint8_t *) path, sizeof(path) - 1);
	if (err != BARE_ERROR_NONE) {
		/* Can't even read the request: hang up without a status. */
		goto close;
	}
	path[sizeof(path) - 1] = '\0';

	/* Open repo, restricted to an exact bare-repository path. */
	git_repository *repo = NULL;
	err =
	    git_repository_open_ext(&repo, path,
				    GIT_REPOSITORY_OPEN_NO_SEARCH |
				    GIT_REPOSITORY_OPEN_BARE |
				    GIT_REPOSITORY_OPEN_NO_DOTGIT, NULL);
	if (err != 0) {
		bare_put_uint(&writer, 1);
		goto close;
	}

	/* Command */
	uint64_t cmd = 0;
	err = bare_get_uint(&reader, &cmd);
	if (err != BARE_ERROR_NONE) {
		bare_put_uint(&writer, 2);
		goto free_repo;
	}
	switch (cmd) {
	case 1:
		err = cmd_index(repo, &writer);
		if (err != 0)
			goto free_repo;
		break;
	case 2:
		err = cmd_treeraw(repo, &reader, &writer);
		if (err != 0)
			goto free_repo;
		break;
	case 0:
		/* 0 is reserved/invalid; answered like any unknown command. */
		bare_put_uint(&writer, 3);
		goto free_repo;
	default:
		bare_put_uint(&writer, 3);
		goto free_repo;
	}

 free_repo:
	git_repository_free(repo);

 close:
	close(conn);

	return NULL;
}
diff --git a/git2d/x.h b/git2d/x.h
new file mode 100644
index 0000000..a6da50f
--- /dev/null
+++ b/git2d/x.h
@@ -0,0 +1,38 @@
+/*-
+ * SPDX-License-Identifier: AGPL-3.0-only
+ * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+ */
+
+#ifndef X_H
+#define X_H
+
#include <err.h>
#include <errno.h>
#include <git2.h>
#include <pthread.h>
#include <signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/un.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include "bare.h"
+
/* Per-connection I/O state handed to the BARE transport callbacks. */
typedef struct {
	int fd;
} conn_io_t;


/* rw.c: blocking unix-socket transport for the BARE reader/writer. */
bare_error conn_read(void *buffer, void *dst, uint64_t sz);
bare_error conn_write(void *buffer, const void *src, uint64_t sz);

/* session.c: per-connection thread entry point. */
void * session(void *_conn);

/* cmd1.c / cmd2.c: protocol command handlers. */
int cmd_index(git_repository *repo, struct bare_writer *writer);
int cmd_treeraw(git_repository *repo, struct bare_reader *reader, struct bare_writer *writer);

#endif // X_H
diff --git a/global.ha b/global.ha
deleted file mode 100644
index ac5ac14..0000000
--- a/global.ha
+++ /dev/null
@@ -1,11 +0,0 @@
-let global: struct {
- title: str,
- version: str,
- ssh_pubkey: str,
- ssh_fp: str,
-} = struct {
- title: str = "Test Forge",
- version: str = VERSION,
- ssh_pubkey: str = "pubkey",
- ssh_fp: str = "fp",
-};
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..6a309f0
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,50 @@
+module go.lindenii.runxiyu.org/forge
+
+go 1.24.1
+
+require (
+ github.com/alecthomas/chroma/v2 v2.18.0
+ github.com/bluekeyes/go-gitdiff v0.8.1
+ github.com/emersion/go-message v0.18.2
+ github.com/emersion/go-smtp v0.22.0
+ github.com/gliderlabs/ssh v0.3.8
+ github.com/go-git/go-git/v5 v5.16.1
+ github.com/jackc/pgx/v5 v5.7.5
+ github.com/microcosm-cc/bluemonday v1.0.27
+ github.com/tdewolff/minify/v2 v2.23.8
+ github.com/yuin/goldmark v1.7.12
+ golang.org/x/crypto v0.39.0
+)
+
+require (
+ dario.cat/mergo v1.0.2 // indirect
+ github.com/Microsoft/go-winio v0.6.2 // indirect
+ github.com/ProtonMail/go-crypto v1.3.0 // indirect
+ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
+ github.com/aymerick/douceur v0.2.0 // indirect
+ github.com/cloudflare/circl v1.6.1 // indirect
+ github.com/cyphar/filepath-securejoin v0.4.1 // indirect
+ github.com/dlclark/regexp2 v1.11.5 // indirect
+ github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6 // indirect
+ github.com/emirpasic/gods v1.18.1 // indirect
+ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
+ github.com/go-git/go-billy/v5 v5.6.2 // indirect
+ github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
+ github.com/gorilla/css v1.0.1 // indirect
+ github.com/jackc/pgpassfile v1.0.0 // indirect
+ github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
+ github.com/jackc/puddle/v2 v2.2.2 // indirect
+ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
+ github.com/kevinburke/ssh_config v1.2.0 // indirect
+ github.com/pjbgf/sha1cd v0.3.2 // indirect
+ github.com/sergi/go-diff v1.4.0 // indirect
+ github.com/skeema/knownhosts v1.3.1 // indirect
+ github.com/tdewolff/parse/v2 v2.8.1 // indirect
+ github.com/xanzy/ssh-agent v0.3.3 // indirect
+ golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
+ golang.org/x/net v0.41.0 // indirect
+ golang.org/x/sync v0.15.0 // indirect
+ golang.org/x/sys v0.33.0 // indirect
+ golang.org/x/text v0.26.0 // indirect
+ gopkg.in/warnings.v0 v0.1.2 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..b21ede0
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,186 @@
+dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
+dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
+github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
+github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0=
+github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
+github.com/alecthomas/chroma/v2 v2.18.0 h1:6h53Q4hW83SuF+jcsp7CVhLsMozzvQvO8HBbKQW+gn4=
+github.com/alecthomas/chroma/v2 v2.18.0/go.mod h1:RVX6AvYm4VfYe/zsk7mjHueLDZor3aWCNE14TFlepBk=
+github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
+github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
+github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
+github.com/bluekeyes/go-gitdiff v0.8.1 h1:lL1GofKMywO17c0lgQmJYcKek5+s8X6tXVNOLxy4smI=
+github.com/bluekeyes/go-gitdiff v0.8.1/go.mod h1:WWAk1Mc6EgWarCrPFO+xeYlujPu98VuLW3Tu+B/85AE=
+github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
+github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
+github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
+github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
+github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=
+github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
+github.com/emersion/go-message v0.18.2 h1:rl55SQdjd9oJcIoQNhubD2Acs1E6IzlZISRTK7x/Lpg=
+github.com/emersion/go-message v0.18.2/go.mod h1:XpJyL70LwRvq2a8rVbHXikPgKj8+aI0kGdHlg16ibYA=
+github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6 h1:oP4q0fw+fOSWn3DfFi4EXdT+B+gTtzx8GC9xsc26Znk=
+github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6/go.mod h1:iL2twTeMvZnrg54ZoPDNfJaJaqy0xIQFuBdrLsmspwQ=
+github.com/emersion/go-smtp v0.22.0 h1:/d3HWxkZZ4riB+0kzfoODh9X+xyCrLEezMnAAa1LEMU=
+github.com/emersion/go-smtp v0.22.0/go.mod h1:ZtRRkbTyp2XTHCA+BmyTFTrj8xY4I+b4McvHxCU2gsQ=
+github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
+github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
+github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
+github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
+github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM=
+github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
+github.com/go-git/go-git/v5 v5.16.0 h1:k3kuOEpkc0DeY7xlL6NaaNg39xdgQbtH5mwCafHO9AQ=
+github.com/go-git/go-git/v5 v5.16.0/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
+github.com/go-git/go-git/v5 v5.16.1 h1:TuxMBWNL7R05tXsUGi0kh1vi4tq0WfXNLlIrAkXG1k8=
+github.com/go-git/go-git/v5 v5.16.1/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
+github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
+github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
+github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs=
+github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
+github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
+github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
+github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
+github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
+github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
+github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4=
+github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
+github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
+github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8=
+github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tdewolff/minify/v2 v2.23.8 h1:tvjHzRer46kwOfpdCBCWsDblCw3QtnLJRd61pTVkyZ8=
+github.com/tdewolff/minify/v2 v2.23.8/go.mod h1:VW3ISUd3gDOZuQ/jwZr4sCzsuX+Qvsx87FDMjk6Rvno=
+github.com/tdewolff/parse/v2 v2.8.1 h1:J5GSHru6o3jF1uLlEKVXkDxxcVx6yzOlIVIotK4w2po=
+github.com/tdewolff/parse/v2 v2.8.1/go.mod h1:Hwlni2tiVNKyzR1o6nUs4FOF07URA+JLBLd6dlIXYqo=
+github.com/tdewolff/test v1.0.11 h1:FdLbwQVHxqG16SlkGveC0JVyrJN62COWTRyUFzfbtBE=
+github.com/tdewolff/test v1.0.11/go.mod h1:XPuWBzvdUzhCuxWO1ojpXsyzsA5bFoS3tO/Q3kFuTG8=
+github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
+github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yuin/goldmark v1.7.12 h1:YwGP/rrea2/CnCtUHgjuolG/PnMxdQtPMO5PvaE2/nY=
+github.com/yuin/goldmark v1.7.12/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
+golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
+golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
+golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
+golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
+golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
+golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
+golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
+golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
+golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
+golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
+golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
+golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
+golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
+golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/hookc/.gitignore b/hookc/.gitignore
new file mode 100644
index 0000000..7348daa
--- /dev/null
+++ b/hookc/.gitignore
@@ -0,0 +1 @@
+/hookc
diff --git a/hookc/hookc.c b/hookc/hookc.c
new file mode 100644
index 0000000..15a36e3
--- /dev/null
+++ b/hookc/hookc.c
@@ -0,0 +1,310 @@
+/*-
+ * SPDX-License-Identifier: AGPL-3.0-only
+ * SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+ * SPDX-FileCopyrightText: Copyright (c) 2025 Test_User <hax@runxiyu.org>
+ */
+
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/stat.h>
+#include <string.h>
+#include <fcntl.h>
+#include <signal.h>
+#ifdef __linux__
+#include <linux/limits.h>
+#include <sys/sendfile.h>
+#define USE_SPLICE 1
+#else
+#define USE_SPLICE 0
+#endif
+
+int
+main(int argc, char *argv[])
+{
+ if (signal(SIGPIPE, SIG_IGN) == SIG_ERR) {
+ perror("signal");
+ return EXIT_FAILURE;
+ }
+
+ const char *socket_path = getenv("LINDENII_FORGE_HOOKS_SOCKET_PATH");
+ if (socket_path == NULL) {
+ dprintf(STDERR_FILENO, "environment variable LINDENII_FORGE_HOOKS_SOCKET_PATH undefined\n");
+ return EXIT_FAILURE;
+ }
+
+ const char *cookie = getenv("LINDENII_FORGE_HOOKS_COOKIE");
+ if (cookie == NULL) {
+ dprintf(STDERR_FILENO, "environment variable LINDENII_FORGE_HOOKS_COOKIE undefined\n");
+ return EXIT_FAILURE;
+ }
+ if (strlen(cookie) != 64) {
+ dprintf(STDERR_FILENO, "environment variable LINDENII_FORGE_HOOKS_COOKIE is not 64 characters long\n");
+ return EXIT_FAILURE;
+ }
+
+ /*
+ * All hooks in git (see builtin/receive-pack.c) use a pipe by
+ * setting .in = -1 on the child_process struct, which enables us to
+ * use splice(2) to move the data to the UNIX domain socket.
+ */
+
+ struct stat stdin_stat;
+ if (fstat(STDIN_FILENO, &stdin_stat) == -1) {
+ perror("fstat on stdin");
+ return EXIT_FAILURE;
+ }
+
+ if (!S_ISFIFO(stdin_stat.st_mode)) {
+ dprintf(STDERR_FILENO, "stdin must be a pipe\n");
+ return EXIT_FAILURE;
+ }
+
+#if USE_SPLICE
+ int stdin_pipe_size = fcntl(STDIN_FILENO, F_GETPIPE_SZ);
+ if (stdin_pipe_size == -1) {
+ perror("fcntl on stdin");
+ return EXIT_FAILURE;
+ }
+#else
+ int stdin_pipe_size = 65536;
+#endif
+
+ if (stdin_pipe_size == -1) {
+ perror("fcntl on stdin");
+ return EXIT_FAILURE;
+ }
+
+ /*
+ * Same for stderr.
+ */
+ struct stat stderr_stat;
+ if (fstat(STDERR_FILENO, &stderr_stat) == -1) {
+ perror("fstat on stderr");
+ return EXIT_FAILURE;
+ }
+ if (!S_ISFIFO(stderr_stat.st_mode)) {
+ dprintf(STDERR_FILENO, "stderr must be a pipe\n");
+ return EXIT_FAILURE;
+ }
+
+#if USE_SPLICE
+ int stderr_pipe_size = fcntl(STDERR_FILENO, F_GETPIPE_SZ);
+ if (stderr_pipe_size == -1) {
+ perror("fcntl on stderr");
+ return EXIT_FAILURE;
+ }
+#else
+ int stderr_pipe_size = 65536;
+#endif
+
+ if (stderr_pipe_size == -1) {
+ perror("fcntl on stderr");
+ return EXIT_FAILURE;
+ }
+
+ /* Connecting back to the main daemon */
+ int sock;
+ struct sockaddr_un addr;
+ sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (sock == -1) {
+ perror("internal socket creation");
+ return EXIT_FAILURE;
+ }
+ memset(&addr, 0, sizeof(struct sockaddr_un));
+ addr.sun_family = AF_UNIX;
+ strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path) - 1);
+ if (connect(sock, (struct sockaddr *)&addr, sizeof(struct sockaddr_un)) == -1) {
+ perror("internal socket connect");
+ close(sock);
+ return EXIT_FAILURE;
+ }
+
+ /*
+	 * Send the 64-byte cookie back.
+ */
+ ssize_t cookie_bytes_sent = send(sock, cookie, 64, 0);
+ switch (cookie_bytes_sent) {
+ case -1:
+ perror("send cookie");
+ close(sock);
+ return EXIT_FAILURE;
+ case 64:
+ break;
+ default:
+ dprintf(STDERR_FILENO, "send returned unexpected value on internal socket\n");
+ close(sock);
+ return EXIT_FAILURE;
+ }
+
+ /*
+ * Report arguments.
+ */
+ uint64_t argc64 = (uint64_t) argc;
+ ssize_t bytes_sent = send(sock, &argc64, sizeof(argc64), 0);
+ switch (bytes_sent) {
+ case -1:
+ perror("send argc");
+ close(sock);
+ return EXIT_FAILURE;
+ case sizeof(argc64):
+ break;
+ default:
+ dprintf(STDERR_FILENO, "send returned unexpected value on internal socket\n");
+ close(sock);
+ return EXIT_FAILURE;
+ }
+ for (int i = 0; i < argc; i++) {
+ unsigned long len = strlen(argv[i]) + 1;
+ bytes_sent = send(sock, argv[i], len, 0);
+ if (bytes_sent == -1) {
+ perror("send argv");
+ close(sock);
+ exit(EXIT_FAILURE);
+ } else if ((unsigned long)bytes_sent == len) {
+ } else {
+ dprintf(STDERR_FILENO, "send returned unexpected value on internal socket\n");
+ close(sock);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ /*
+ * Report GIT_* environment.
+ */
+ extern char **environ;
+ for (char **env = environ; *env != NULL; env++) {
+ if (strncmp(*env, "GIT_", 4) == 0) {
+ unsigned long len = strlen(*env) + 1;
+ bytes_sent = send(sock, *env, len, 0);
+ if (bytes_sent == -1) {
+ perror("send env");
+ close(sock);
+ exit(EXIT_FAILURE);
+ } else if ((unsigned long)bytes_sent == len) {
+ } else {
+ dprintf(STDERR_FILENO, "send returned unexpected value on internal socket\n");
+ close(sock);
+ exit(EXIT_FAILURE);
+ }
+ }
+ }
+ bytes_sent = send(sock, "", 1, 0);
+ if (bytes_sent == -1) {
+ perror("send env terminator");
+ close(sock);
+ exit(EXIT_FAILURE);
+ } else if (bytes_sent == 1) {
+ } else {
+ dprintf(STDERR_FILENO, "send returned unexpected value on internal socket\n");
+ close(sock);
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * Splice stdin to the daemon. For pre-receive it's just old/new/ref.
+ */
+#if USE_SPLICE
+ ssize_t stdin_bytes_spliced;
+ while ((stdin_bytes_spliced = splice(STDIN_FILENO, NULL, sock, NULL, stdin_pipe_size, SPLICE_F_MORE)) > 0) {
+ }
+ if (stdin_bytes_spliced == -1) {
+ perror("splice stdin to internal socket");
+ close(sock);
+ return EXIT_FAILURE;
+ }
+#else
+ char buf[65536];
+ ssize_t n;
+ while ((n = read(STDIN_FILENO, buf, sizeof(buf))) > 0) {
+ if (write(sock, buf, n) != n) {
+ perror("write to internal socket");
+ close(sock);
+ return EXIT_FAILURE;
+ }
+ }
+ if (n < 0) {
+ perror("read from stdin");
+ close(sock);
+ return EXIT_FAILURE;
+ }
+#endif
+
+ /*
+ * The sending part of the UNIX socket should be shut down, to let
+ * io.Copy on the Go side return.
+ */
+ if (shutdown(sock, SHUT_WR) == -1) {
+ perror("shutdown internal socket");
+ close(sock);
+ return EXIT_FAILURE;
+ }
+
+ /*
+ * The first byte of the response from the UNIX domain socket is the
+ * status code to return.
+ *
+ * FIXME: It doesn't make sense to require the return value to be
+ * sent before the log message. However, if we were to keep splicing,
+ * it's difficult to get the last byte before EOF. Perhaps we could
+ * hack together some sort of OOB message or ancillary data, or
+ * perhaps even use signals.
+ */
+ char status_buf[1];
+ ssize_t bytes_read = read(sock, status_buf, 1);
+ switch (bytes_read) {
+ case -1:
+ perror("read status code from internal socket");
+ close(sock);
+ return EXIT_FAILURE;
+ case 0:
+ dprintf(STDERR_FILENO, "unexpected EOF on internal socket\n");
+ close(sock);
+ return EXIT_FAILURE;
+ case 1:
+ break;
+ default:
+ dprintf(STDERR_FILENO, "read returned unexpected value on internal socket\n");
+ close(sock);
+ return EXIT_FAILURE;
+ }
+
+ /*
+ * Now we can splice data from the UNIX domain socket to stderr. This
+ * data is directly passed to the user (with "remote: " prepended).
+ *
+ * We usually don't actually use this as the daemon could easily
+ * write to the SSH connection's stderr directly anyway.
+ */
+
+#if USE_SPLICE
+ ssize_t stderr_bytes_spliced;
+ while ((stderr_bytes_spliced = splice(sock, NULL, STDERR_FILENO, NULL, stderr_pipe_size, SPLICE_F_MORE)) > 0) {
+ }
+ if (stderr_bytes_spliced == -1 && errno != ECONNRESET) {
+ perror("splice internal socket to stderr");
+ close(sock);
+ return EXIT_FAILURE;
+ }
+#else
+ while ((n = read(sock, buf, sizeof(buf))) > 0) {
+ if (write(STDERR_FILENO, buf, n) != n) {
+ perror("write to stderr");
+ close(sock);
+ return EXIT_FAILURE;
+ }
+ }
+ if (n < 0 && errno != ECONNRESET) {
+ perror("read from internal socket");
+ close(sock);
+ return EXIT_FAILURE;
+ }
+#endif
+
+ close(sock);
+ return *status_buf;
+}
diff --git a/main.ha b/main.ha
deleted file mode 100644
index fc41240..0000000
--- a/main.ha
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: AGPL-3.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
-// Adapted from template by Willow Barraco <contact@willowbarraco.fr>
-
-use fs;
-use getopt;
-use log;
-use net;
-use net::dial;
-use net::http;
-use net::ip;
-use net::tcp;
-use net::uri;
-use os;
-use memio;
-use io;
-use fmt;
-use bufio;
-
-const usage: [_]getopt::help = [
- "Lindenii Forge Server",
- ('c', "config", "path to configuration file")
-];
-
-let static_fs: nullable *fs::fs = null;
-
-export fn main() void = {
- const cmd = getopt::parse(os::args, usage...);
- defer getopt::finish(&cmd);
-
- let port: u16 = 8080;
- let ip_addr: ip::addr4 = [127, 0, 0, 1];
-
- for (let opt .. cmd.opts) {
- switch (opt.0) {
- case 'c' => yield; // TODO: actually handle the config
- case => abort("unreachable");
- };
- };
-
- static_fs = os::diropen("static")!;
-
- const server = match (http::listen(ip_addr, port, net::tcp::reuseport, net::tcp::reuseaddr)) {
- case let this: *http::server =>
- yield this;
- case => abort("failure while listening");
- };
- defer http::server_finish(server);
-
- for (true) {
- const serv_req = match (http::serve(server)) {
- case let this: *http::server_request =>
- yield this;
- case =>
- log::println("failure while serving");
- continue;
- };
- defer http::serve_finish(serv_req);
-
- match (handlereq(serv_req.socket, &serv_req.request)) {
- case void => yield;
- case => log::println("error while handling request");
- };
- };
-};
diff --git a/req.ha b/req.ha
deleted file mode 100644
index 59f6438..0000000
--- a/req.ha
+++ /dev/null
@@ -1,104 +0,0 @@
-// SPDX-License-Identifier: AGPL-3.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
-
-use fmt;
-use fs;
-use htmpl;
-use io;
-use mime;
-use net::http;
-use net::uri;
-use strconv;
-use strings;
-
-fn handlereq(conn: io::handle, request: *http::request) (void | io::error | nomem | fs::error) = {
- let segments = match(segments_from_path(request.target.raw_path)) {
- case let s: []str =>
- yield s;
- case uri::invalid =>
- start_response(conn, 400, "text/plain")?;
- fmt::fprintln(conn, "Invalid URI")?;
- return void;
- case nomem =>
- return nomem;
- case =>
- abort("unreachable");
- };
- defer strings::freeall(segments);
-
- let trailing_slash: bool = false;
-
- if (segments[len(segments) - 1] == "") {
- trailing_slash = true;
- free(segments[len(segments) - 1]);
- segments = segments[.. len(segments) - 1];
- };
-
- if (len(segments) == 0) {
- start_response(conn, 200, "text/html")?;
- return tp_index(conn);
- };
-
- if (segments[0] == ":") {
- if (len(segments) == 1) {
- start_response(conn, 404, "text/plain")?;
- fmt::fprintln(conn, "Error: Blank system endpoint")?;
- return;
- };
-
- switch (segments[1]) {
- case "static" =>
- if (len(segments) == 2) {
- start_response(conn, 404, "text/plain")?;
- fmt::fprintln(conn, "Error: Blank static endpoint")?;
- return;
- };
-
- let fs_segments = segments[2 ..];
- for (let fs_segment .. fs_segments) {
- if (strings::contains(fs_segment, "/")) {
- start_response(conn, 400, "text/plain")?;
- fmt::fprintln(conn, "Error: Slash found in filesystem path")?;
- return;
- };
- };
- let fs_segment_path = strings::join("/", fs_segments...)?;
- defer free(fs_segment_path);
-
- let file = match (fs::open(static_fs as *fs::fs, fs_segment_path)) {
- case let f: io::handle => yield f;
- case fs::error =>
- start_response(conn, 500, "text/plain")?;
- fmt::fprintln(conn, "Filesystem error")?;
- return;
- };
- defer io::close(file)!;
-
- let ext = strings::rcut(fs_segments[len(fs_segments) - 1], ".").1;
-
- let mimetype = match (mime::lookup_ext(ext)) {
- case let m: *mime::mimetype => yield m.mime;
- case null => yield "application/octet-stream";
- };
-
- start_response(conn, 200, mimetype)?;
- io::copy(conn, file)?;
-
- case =>
- start_response(conn, 404, "text/plain")?;
- fmt::fprintln(conn, "Error: Unknown system endpoint")?;
- };
- };
-};
-
-fn start_response(conn: io::handle, status: uint, content_type: str) (void | io::error | nomem) = { // TODO: add len and other headers
- fmt::fprint(conn, "HTTP/1.1 ")?;
- fmt::fprint(conn, strconv::utos(status))?;
- fmt::fprint(conn, " ")?;
- fmt::fprint(conn, http::status_reason(status))?;
- fmt::fprint(conn, "\r\n")?;
- fmt::fprint(conn, "Content-Type: ")?;
- fmt::fprint(conn, content_type)?;
- fmt::fprint(conn, "\r\n")?;
- fmt::fprint(conn, "\r\n")?;
-};
diff --git a/scripts/update_deps b/scripts/update_deps
new file mode 100755
index 0000000..8983a60
--- /dev/null
+++ b/scripts/update_deps
@@ -0,0 +1,9 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: AGPL-3.0-only
+# SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+set -eux
+
+go get -t -u -x -v ./...
+go mod tidy -x -v
diff --git a/sql/schema.sql b/sql/schema.sql
new file mode 100644
index 0000000..a6efc39
--- /dev/null
+++ b/sql/schema.sql
@@ -0,0 +1,195 @@
+-- SPDX-License-Identifier: AGPL-3.0-only
+-- SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
+
+CREATE TABLE groups (
+ id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+ name TEXT NOT NULL,
+ parent_group INTEGER REFERENCES groups(id) ON DELETE CASCADE,
+ description TEXT,
+ UNIQUE NULLS NOT DISTINCT (parent_group, name)
+);
+
+CREATE TABLE repos (
+ id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+ group_id INTEGER NOT NULL REFERENCES groups(id) ON DELETE RESTRICT, -- I mean, should be CASCADE but deleting Git repos on disk also needs to be considered
+ contrib_requirements TEXT NOT NULL CHECK (contrib_requirements IN ('closed', 'registered_user', 'federated', 'ssh_pubkey', 'public')),
+ name TEXT NOT NULL,
+ UNIQUE(group_id, name),
+ description TEXT,
+ filesystem_path TEXT
+);
+
+CREATE TABLE mailing_lists (
+ id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+ group_id INTEGER NOT NULL REFERENCES groups(id) ON DELETE RESTRICT,
+ name TEXT NOT NULL,
+ UNIQUE(group_id, name),
+ description TEXT
+);
+
+CREATE TABLE mailing_list_emails (
+ id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+ list_id INTEGER NOT NULL REFERENCES mailing_lists(id) ON DELETE CASCADE,
+ title TEXT NOT NULL,
+ sender TEXT NOT NULL,
+ date TIMESTAMP NOT NULL,
+ content BYTEA NOT NULL
+);
+
+CREATE TABLE users (
+ id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+ username TEXT UNIQUE,
+ type TEXT NOT NULL CHECK (type IN ('pubkey_only', 'federated', 'registered')),
+ password TEXT
+);
+
+CREATE TABLE ssh_public_keys (
+ id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+ key_string TEXT NOT NULL,
+ user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+ CONSTRAINT unique_key_string EXCLUDE USING HASH (key_string WITH =)
+);
+
+CREATE TABLE sessions (
+ user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+ session_id TEXT PRIMARY KEY NOT NULL,
+ UNIQUE(user_id, session_id)
+);
+
+CREATE TABLE user_group_roles (
+ group_id INTEGER NOT NULL REFERENCES groups(id) ON DELETE CASCADE,
+ user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+ PRIMARY KEY(user_id, group_id)
+);
+
+CREATE TABLE federated_identities (
+ user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+ service TEXT NOT NULL,
+ remote_username TEXT NOT NULL,
+ PRIMARY KEY(user_id, service)
+);
+
+-- Ticket tracking
+
+CREATE TABLE ticket_trackers (
+ id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+ group_id INTEGER NOT NULL REFERENCES groups(id) ON DELETE RESTRICT,
+ name TEXT NOT NULL,
+ description TEXT,
+ UNIQUE(group_id, name)
+);
+
+CREATE TABLE tickets (
+ id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+ tracker_id INTEGER NOT NULL REFERENCES ticket_trackers(id) ON DELETE CASCADE,
+ tracker_local_id INTEGER NOT NULL,
+ title TEXT NOT NULL,
+ description TEXT,
+ UNIQUE(tracker_id, tracker_local_id)
+);
+
+CREATE OR REPLACE FUNCTION create_tracker_ticket_sequence()
+RETURNS TRIGGER AS $$
+DECLARE
+ seq_name TEXT := 'tracker_ticket_seq_' || NEW.id;
+BEGIN
+ EXECUTE format('CREATE SEQUENCE %I', seq_name);
+ RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+CREATE TRIGGER after_insert_ticket_tracker
+AFTER INSERT ON ticket_trackers
+FOR EACH ROW
+EXECUTE FUNCTION create_tracker_ticket_sequence();
+
+CREATE OR REPLACE FUNCTION drop_tracker_ticket_sequence()
+RETURNS TRIGGER AS $$
+DECLARE
+ seq_name TEXT := 'tracker_ticket_seq_' || OLD.id;
+BEGIN
+ EXECUTE format('DROP SEQUENCE IF EXISTS %I', seq_name);
+ RETURN OLD;
+END;
+$$ LANGUAGE plpgsql;
+CREATE TRIGGER before_delete_ticket_tracker
+BEFORE DELETE ON ticket_trackers
+FOR EACH ROW
+EXECUTE FUNCTION drop_tracker_ticket_sequence();
+
+CREATE OR REPLACE FUNCTION assign_tracker_local_id()
+RETURNS TRIGGER AS $$
+DECLARE
+ seq_name TEXT := 'tracker_ticket_seq_' || NEW.tracker_id;
+BEGIN
+ IF NEW.tracker_local_id IS NULL THEN
+ EXECUTE format('SELECT nextval(%L)', seq_name)
+ INTO NEW.tracker_local_id;
+ END IF;
+ RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+CREATE TRIGGER before_insert_ticket
+BEFORE INSERT ON tickets
+FOR EACH ROW
+EXECUTE FUNCTION assign_tracker_local_id();
+
+-- Merge requests
+
+CREATE TABLE merge_requests (
+ id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+ repo_id INTEGER NOT NULL REFERENCES repos(id) ON DELETE CASCADE,
+ repo_local_id INTEGER NOT NULL,
+ title TEXT,
+ creator INTEGER REFERENCES users(id) ON DELETE SET NULL,
+ source_ref TEXT NOT NULL,
+ destination_branch TEXT,
+ status TEXT NOT NULL CHECK (status IN ('open', 'merged', 'closed')),
+ UNIQUE (repo_id, repo_local_id),
+ UNIQUE (repo_id, source_ref, destination_branch)
+);
+
+CREATE OR REPLACE FUNCTION create_repo_mr_sequence()
+RETURNS TRIGGER AS $$
+DECLARE
+ seq_name TEXT := 'repo_mr_seq_' || NEW.id;
+BEGIN
+ EXECUTE format('CREATE SEQUENCE %I', seq_name);
+ RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+CREATE TRIGGER after_insert_repo
+AFTER INSERT ON repos
+FOR EACH ROW
+EXECUTE FUNCTION create_repo_mr_sequence();
+
+CREATE OR REPLACE FUNCTION drop_repo_mr_sequence()
+RETURNS TRIGGER AS $$
+DECLARE
+ seq_name TEXT := 'repo_mr_seq_' || OLD.id;
+BEGIN
+ EXECUTE format('DROP SEQUENCE IF EXISTS %I', seq_name);
+ RETURN OLD;
+END;
+$$ LANGUAGE plpgsql;
+CREATE TRIGGER before_delete_repo
+BEFORE DELETE ON repos
+FOR EACH ROW
+EXECUTE FUNCTION drop_repo_mr_sequence();
+
+
+CREATE OR REPLACE FUNCTION assign_repo_local_id()
+RETURNS TRIGGER AS $$
+DECLARE
+ seq_name TEXT := 'repo_mr_seq_' || NEW.repo_id;
+BEGIN
+ IF NEW.repo_local_id IS NULL THEN
+ EXECUTE format('SELECT nextval(%L)', seq_name)
+ INTO NEW.repo_local_id;
+ END IF;
+ RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+CREATE TRIGGER before_insert_merge_request
+BEFORE INSERT ON merge_requests
+FOR EACH ROW
+EXECUTE FUNCTION assign_repo_local_id();
diff --git a/templates/_footer.htmpl b/templates/_footer.htmpl
deleted file mode 100644
index a0d1987..0000000
--- a/templates/_footer.htmpl
+++ /dev/null
@@ -1,9 +0,0 @@
-{{ define _tp_footer(handle: io::handle) (void | io::error | nomem) }}
-<a href="https://lindenii.runxiyu.org/forge/">Lindenii Forge</a>
-{{ " " }}
-{{ global.version }}
-{{ " " }}
-(<a href="/:/source/">source</a>,
-{{ " " }}
-<a href="https://forge.lindenii.runxiyu.org/lindenii/forge/:/repos/server/?branch=hare">upstream</a>)
-{{ end }}
diff --git a/templates/_head_common.htmpl b/templates/_head_common.htmpl
deleted file mode 100644
index bc14cb6..0000000
--- a/templates/_head_common.htmpl
+++ /dev/null
@@ -1,5 +0,0 @@
-{{ define _tp_head_common(handle: io::handle) (void | io::error | nomem) }}
-<meta charset="utf-8" />
-<meta name="viewport" content="width=device-width, initial-scale=1" />
-<link rel="stylesheet" href="/:/static/style.css" />
-{{ end }}
diff --git a/templates/_header.htmpl b/templates/_header.htmpl
deleted file mode 100644
index 2eb8d19..0000000
--- a/templates/_header.htmpl
+++ /dev/null
@@ -1,14 +0,0 @@
-{{ define _tp_header(handle: io::handle, user_id_str: str, username: str) (void | io::error | nomem) }}
-<header id="main-header">
- <div id="main-header-forge-title">
- <a href="/">{{ global.title }}</a>
- </div>
- <div id="main-header-user">
- {{ if user_id_str != "" }}
- <a href="/:/users/{{ user_id_str }}">{{ username }}</a>
- {{ else }}
- <a href="/:/login/">Login</a>
- {{ end }}
- </div>
-</header>
-{{ end }}
diff --git a/templates/index.htmpl b/templates/index.htmpl
deleted file mode 100644
index e67cc09..0000000
--- a/templates/index.htmpl
+++ /dev/null
@@ -1,50 +0,0 @@
-{{ define tp_index(handle: io::handle) (void | io::error | nomem) }}
-<!DOCTYPE html>
-<html lang="en">
-<head>
-{{ render _tp_head_common(handle) }}
-<title>Index &ndash; {{ global.title }}</title>
-</head>
-<body>
-{{ render _tp_header(handle, "test", "test") }}
-<div class="padding-wrapper">
-<table class="wide rounded">
- <thead>
- <tr>
- <th colspan="2" class="title-row">Groups</th>
- </tr>
- <tr>
- <th scope="col">Name</th>
- <th scope="col">Description</th>
- </tr>
- </thead>
- <tbody>
- </tbody>
-</table>
-<div class="padding-wrapper">
- <table class="wide rounded">
- <thead>
- <tr>
- <th colspan="2" class="title-row">
- Info
- </th>
- </tr>
- </thead>
- <tbody>
- <tr>
- <th scope="row">SSH public key</th>
- <td><code>{{ global.ssh_pubkey }}</code></td>
- </tr>
- <tr>
- <th scope="row">SSH fingerprint</th>
- <td><code>{{ global.ssh_fp }}</code></td>
- </tr>
- </tbody>
- </table>
-</div>
-<footer>
- {{ render _tp_footer(handle) }}
-</footer>
-</body>
-</html>
-{{ end }}
diff --git a/url.ha b/url.ha
deleted file mode 100644
index 1c511ba..0000000
--- a/url.ha
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: AGPL-3.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
-
-use strings;
-use net::uri;
-
-// The result, if not erroring out, must be freed with strings::freeall.
-fn segments_from_path(s: str) ([]str | nomem | uri::invalid) = {
- let sp: []str = strings::split(s, "/")?;
- for (let i = 1z; i < len(sp); i += 1) {
- match (uri::percent_decode(sp[i])) {
- case let s: str =>
- sp[i - 1] = s;
- case uri::invalid =>
- strings::freeall(sp[.. i - 1]);
- return uri::invalid;
- };
- };
- return sp[.. len(sp) - 1];
-};
diff --git a/utils/.gitignore b/utils/.gitignore
new file mode 100644
index 0000000..0d965ce
--- /dev/null
+++ b/utils/.gitignore
@@ -0,0 +1 @@
+/colb
diff --git a/utils/colb.c b/utils/colb.c
new file mode 100644
index 0000000..79ed06d
--- /dev/null
+++ b/utils/colb.c
@@ -0,0 +1,28 @@
+/*-
+ * SPDX-License-Identifier: GPL-3.0-only
+ * SPDX-FileCopyrightText: Copyright (c) 2021 June McEnroe <june@causal.agency>
+ */
+
+#include <locale.h>
+#include <stdio.h>
+#include <wchar.h>
+
+int
+main(void)
+{
+ wint_t next, prev = WEOF;
+
+ setlocale(LC_CTYPE, "C.UTF-8");
+
+ while (WEOF != (next = getwchar())) {
+ if (next == L'\b') {
+ prev = WEOF;
+ } else {
+ if (prev != WEOF)
+ putwchar(prev);
+ prev = next;
+ }
+ }
+ if (prev != WEOF)
+ putwchar(prev);
+}