diff --git a/.gitignore b/.gitignore
index 2abad62..3d82ed3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,5 +5,5 @@
/resources
/services
/services-src/eternity-web
-/config.json
+/config.conf
fulgens.log
\ No newline at end of file
diff --git a/config.conf.example b/config.conf.example
new file mode 100644
index 0000000..5019a73
--- /dev/null
+++ b/config.conf.example
@@ -0,0 +1,120 @@
+// NOTE: This is NOT a valid JSON file.
+// Comments are added here, which are stripped out by fulgens. This is not standard behavior for JSON and will only work with fulgens.
+
+{
+ // Global configuration
+ "global": {
+ // IP defines the IP address to bind to.
+ "ip": "0.0.0.0",
+ // httpPort defines the port to bind to for HTTP.
+ "httpPort": "8080",
+ // httpsPort defines the port to bind to for HTTPS (TLS).
+ "httpsPort": "8443",
+ // serviceDirectory defines the directory to look for services in.
+ "serviceDirectory": "./services",
+ // resourceDirectory defines the directory to look for resources in.
+ "resourceDirectory": "./resources",
+ // compression defines the compression settings on a global level - per-route settings override these.
+ "compression": {
+ // algorithm defines the compression algorithm to use, possible values are "gzip", "brotli" and "zstd".
+ "algorithm": "gzip",
+ // level defines the compression level to use, possible values are 1-9 for gzip, 0-11 for brotli and 1-22 for zstd.
+ "level": 5
+ },
+ // logging defines the logging settings.
+ "logging": {
+ // enabled defines whether logging is enabled.
+ "enabled": true,
+ // file defines the file to log to, relative to the working directory.
+ "file": "fulgens.log"
+ },
+ // database defines the database settings.
+ "database": {
+ // type defines the type of database to use, possible values are "sqlite" and "postgres".
+ "type": "sqlite",
+ // path defines the path to the directory to store database files in (sqlite only).
+ "path": "./databases",
+ // connectionString defines the connection string to use for the database (postgres only).
+ "connectionString": "postgres://user:password@localhost:5432/database"
+ }
+ },
+ // Routes define per-subdomain routing settings.
+ "routes": [
+ {
+ // none is a special subdomain that matches all requests without a subdomain (Host header).
+ "subdomain": "none",
+ // services defines the services to use for this route. Services must be defined on a per-subdomain basis.
+ // Each service may not be used more than once globally. The server will fail to start if this is violated.
+ "services": ["authentication"]
+ },
+ {
+ // any subdomain value that isn't "none" will match that specific subdomain.
+ "subdomain": "www.localhost",
+ // https defines the HTTPS settings for this route.
+ "https": {
+ // certificatePath defines the path to the certificate file.
+ "certificatePath": "./certs/localhost.crt",
+ // keyPath defines the path to the key file.
+ "keyPath": "./certs/localhost.key"
+ },
+ // paths defines per-path settings (NOT for services, which MUST be defined on a per-subdomain basis).
+ "paths": [
+ {
+ // path defines the path to match. They can contain wildcards.
+ "path": "/static/*",
+ // static defines the static file serving settings for this path. This conflicts with proxy.
+ // If both proxy and static are defined, static will take precedence.
+ "static": {
+ // root defines the root directory to serve static files from.
+ "root": "./static",
+ // directoryListing defines whether to show a directory listing when a directory is requested.
+ // if it is false or unset, a 403 Forbidden will be returned instead.
+ "directoryListing": true
+ }
+ },
+ {
+ // path defines the path to match. They can contain wildcards.
+ "path": "/proxy/*",
+ // proxy defines the proxy settings for this path. This conflicts with static.
+ // If both proxy and static are defined, static will take precedence.
+ "proxy": {
+ // url defines the URL to proxy requests to.
+ "url": "http://localhost:8000",
+ // stripPrefix defines whether to strip the prefix from the path before proxying.
+ "stripPrefix": true
+ // TODO: In a future update, passing X-Forwarded-For and X-Forwarded-Proto headers will be supported.
+ // TODO: In a future update, forbidding certain headers from being passed will be supported.
+ // TODO: In a future update, passing X-Powered-By and Server headers will be supported.
+ // TODO: In a future update, passing Host header will be supported.
+ }
+ }
+ ]
+ }
+ ],
+
+ // Services define the settings for services.
+ "services": {
+ // authentication defines the settings for the authentication service, which is built-in.
+ "authentication": {
+ // privacyPolicy defines the URL to the privacy policy.
+ "privacyPolicy": "https://git.ailur.dev/Paperwork/nucleus/src/commit/5d191eea87cffae8bdca42017ac26dc19e6cb3de/Privacy.md",
+ // url defines the publicly-facing URL of the service, in case of it being behind a reverse proxy.
+ "url": "http://localhost:8000",
+ // identifier defines the identifier for the service, in the form of [Identifier] Accounts.
+ "identifier": "Authenticator",
+ // adminKey defines the key to use for administrative operations, such as listing all users.
+ "adminKey": "supersecretkey",
+ // testAppIsInternalApp defines whether the test app is an internal app, which allows it to bypass the user consent screen.
+ "testAppIsInternalApp": true,
+ // testAppEnabled defines whether the test app is enabled, which is recommended for testing purposes.
+ "testAppEnabled": true
+ },
+ // storage defines the settings for the storage service, which is built-in.
+ "storage": {
+ // path defines the path to store blobs in.
+ "path": "./blob",
+ // defaultQuota defines the default quota for users in bytes.
+ "defaultQuota": 50000000
+ }
+ }
+}
\ No newline at end of file
diff --git a/config.json.example b/config.json.example
deleted file mode 100644
index cb5b3d1..0000000
--- a/config.json.example
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "global": {
- "ip": "0.0.0.0",
- "port": "8000",
- "serviceDirectory": "./services",
- "resourceDirectory": "./resources"
- },
- "logging": {
- "enabled": true,
- "file": "fulgens.log"
- },
- "database": {
- "databaseType": "sqlite",
- "databasePath": "./databases"
- },
- "services": {
- "storage": {
- "path": "./blob",
- "defaultQuota": 50000000
- },
- "authentication": {
- "privacyPolicy": "https://git.ailur.dev/Paperwork/nucleus/src/commit/5d191eea87cffae8bdca42017ac26dc19e6cb3de/Privacy.md",
- "url": "http://localhost:8000",
- "identifier": "Authenticator",
- "adminKey": "supersecretkey",
- "testAppIsInternalApp": true,
- "testAppEnabled": true
- }
- }
-}
diff --git a/go.mod b/go.mod
index 14947b3..56272a4 100644
--- a/go.mod
+++ b/go.mod
@@ -6,10 +6,10 @@ require (
git.ailur.dev/ailur/fg-library/v2 v2.1.1
git.ailur.dev/ailur/fg-nucleus-library v1.0.3
git.ailur.dev/ailur/pow v1.0.2
+ github.com/BurntSushi/toml v1.4.0
github.com/andybalholm/brotli v1.1.1
github.com/cespare/xxhash/v2 v2.3.0
github.com/go-chi/chi/v5 v5.1.0
- github.com/go-chi/hostrouter v0.2.0
github.com/go-playground/validator/v10 v10.22.1
github.com/golang-jwt/jwt/v5 v5.2.1
github.com/google/uuid v1.6.0
diff --git a/go.sum b/go.sum
index 368b098..36b34e0 100644
--- a/go.sum
+++ b/go.sum
@@ -4,8 +4,8 @@ git.ailur.dev/ailur/fg-nucleus-library v1.0.3 h1:C0xgfZg7bkULhh9Ci7ZoAcx4QIqxLh+
git.ailur.dev/ailur/fg-nucleus-library v1.0.3/go.mod h1:RbBVFRwtQgYvCWoru1mC3vUJ1dMftkNbvd7hVFtREFw=
git.ailur.dev/ailur/pow v1.0.2 h1:8tb6mXZdyQYjrKRW+AUmWMi5wJoHh9Ch3oRqiJr/ivs=
git.ailur.dev/ailur/pow v1.0.2/go.mod h1:fjFb1z5KtF6V14HRhGWiDmmJKggO8KyAP20Lr5OJI/g=
-github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
-github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
+github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
@@ -14,11 +14,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc=
github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc=
-github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
-github.com/go-chi/hostrouter v0.2.0 h1:GwC7TZz8+SlJN/tV/aeJgx4F+mI5+sp+5H1PelQUjHM=
-github.com/go-chi/hostrouter v0.2.0/go.mod h1:pJ49vWVmtsKRKZivQx0YMYv4h0aX+Gcn6V23Np9Wf1s=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
@@ -31,8 +28,6 @@ github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17w
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
-github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
@@ -45,6 +40,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
diff --git a/main.go b/main.go
index 396a065..24288da 100644
--- a/main.go
+++ b/main.go
@@ -5,25 +5,29 @@ import (
"errors"
"io"
- "io/fs"
"log"
+ "mime"
"os"
"plugin"
+ "regexp"
"sort"
+ "strconv"
"strings"
"sync"
"time"
"compress/gzip"
+ "crypto/tls"
"database/sql"
"encoding/json"
"log/slog"
"net/http"
+ "net/http/httputil"
+ "net/url"
"path/filepath"
"github.com/andybalholm/brotli"
"github.com/go-chi/chi/v5"
- "github.com/go-chi/hostrouter"
"github.com/go-playground/validator/v10"
"github.com/google/uuid"
"github.com/klauspost/compress/zstd"
@@ -34,28 +38,48 @@ import (
type Config struct {
Global struct {
- IP string `json:"ip" validate:"required,ip_addr"`
- Port string `json:"port" validate:"required"`
- ServiceDirectory string `json:"serviceDirectory" validate:"required"`
- ResourceDirectory string `json:"resourceDirectory" validate:"required"`
- Compression string `json:"compression" validate:"omitempty,oneof=gzip brotli zstd"`
- CompressionLevelJN json.Number `json:"compressionLevel" validate:"required_with=Compression"`
- CompressionLevel int
+ IP string `json:"ip" validate:"required,ip_addr"`
+ HTTPPort string `json:"httpPort" validate:"required"`
+ HTTPSPort string `json:"httpsPort" validate:"required"`
+ ServiceDirectory string `json:"serviceDirectory" validate:"required"`
+ ResourceDirectory string `json:"resourceDirectory" validate:"required"`
+ Compression struct {
+ Algorithm string `json:"algorithm" validate:"omitempty,oneof=gzip brotli zstd"`
+ Level float64 `json:"level" validate:"omitempty,min=1,max=22"`
+ } `json:"compression"`
+ Logging struct {
+ Enabled bool `json:"enabled"`
+ File string `json:"file" validate:"required_if=Enabled true"`
+ } `json:"logging"`
+ Database struct {
+ Type string `json:"type" validate:"required,oneof=sqlite postgres"`
+ ConnectionString string `json:"connectionString" validate:"required_if=Type postgres"`
+ Path string `json:"path" validate:"required_if=Type sqlite"`
+ } `json:"database" validate:"required"`
} `json:"global" validate:"required"`
- Logging struct {
- Enabled bool `json:"enabled"`
- File string `json:"file" validate:"required_if=Enabled true"`
- } `json:"logging"`
- Database struct {
- DatabaseType string `json:"databaseType" validate:"required,oneof=sqlite postgres"`
- ConnectionString string `json:"connectionString" validate:"required_if=DatabaseType postgres"`
- DatabasePath string `json:"databasePath" validate:"required_if=DatabaseType sqlite"`
- } `json:"database" validate:"required"`
- Static []struct {
- Subdomain string `json:"subdomain"`
- Directory string `json:"directory" validate:"required,isDirectory"`
- Pattern string `json:"pattern"`
- } `json:"static"`
+ Routes []struct {
+ Subdomain string `json:"subdomain" validate:"required"`
+ Services []string `json:"services"`
+ Paths []struct {
+ Path string `json:"path" validate:"required"`
+ Proxy struct {
+ URL string `json:"url" validate:"required"`
+ StripPrefix bool `json:"stripPrefix"`
+ } `json:"proxy" validate:"required_without=Static"`
+ Static struct {
+ Root string `json:"root" validate:"required,isDirectory"`
+ DirectoryListing bool `json:"directoryListing"`
+ } `json:"static" validate:"required_without=Proxy"`
+ } `json:"paths"`
+ HTTPS struct {
+ CertificatePath string `json:"certificatePath" validate:"required"`
+ KeyPath string `json:"keyPath" validate:"required"`
+ } `json:"https"`
+ Compression struct {
+ Algorithm string `json:"algorithm" validate:"omitempty,oneof=gzip brotli zstd"`
+ Level float64 `json:"level" validate:"omitempty,min=1,max=22"`
+ } `json:"compression"`
+ } `json:"routes"`
Services map[string]interface{} `json:"services"`
}
@@ -70,6 +94,11 @@ type ResponseWriterWrapper struct {
io.Writer
}
+type CompressionSettings struct {
+ Level int
+ Algorithm string
+}
+
func (w *ResponseWriterWrapper) WriteHeader(statusCode int) {
w.ResponseWriter.WriteHeader(statusCode)
}
@@ -78,115 +107,656 @@ func (w *ResponseWriterWrapper) Write(p []byte) (int, error) {
return w.Writer.Write(p)
}
+func checkCompressionAlgorithm(algorithm string, handler http.Handler) http.Handler {
+ switch algorithm {
+ case "gzip":
+ return gzipHandler(handler)
+ case "brotli":
+ return brotliHandler(handler)
+ case "zstd":
+ return zStandardHandler(handler)
+ default:
+ return handler
+ }
+}
+
+func logger(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ slog.Info(r.Method + " " + r.URL.Path)
+ })
+}
+
+func serverChanger(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Server", "Fulgens HTTP Server")
+ w.Header().Set("X-Powered-By", "Go net/http")
+ next.ServeHTTP(w, r)
+ })
+}
+
+func gzipHandler(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
+ var compressionLevel int
+ var host string
+ if r.Header.Get("Host") != "" {
+ host = r.Header.Get("Host")
+ } else {
+ host = "none"
+ }
+
+ compressionSettings, ok := compression[host]
+ if !ok {
+ compressionLevel = int(config.Global.Compression.Level)
+ } else {
+ compressionLevel = compressionSettings.Level
+ }
+
+ gzipWriter, err := gzip.NewWriterLevel(w, compressionLevel)
+ if err != nil {
+ slog.Error("Error creating gzip writer: " + err.Error())
+ next.ServeHTTP(w, r)
+ return
+ }
+ defer func() {
+ w.Header().Del("Content-Length")
+ err := gzipWriter.Close()
+ if errors.Is(err, http.ErrBodyNotAllowed) {
+ // This is fine, all it means is that they have it cached, and we don't need to send it
+ return
+ } else if err != nil {
+ slog.Error("Error closing gzip writer: " + err.Error())
+ }
+ }()
+ gzipResponseWriter := &ResponseWriterWrapper{ResponseWriter: w, Writer: gzipWriter}
+ if w.Header().Get("Content-Encoding") != "" {
+ w.Header().Set("Content-Encoding", w.Header().Get("Content-Encoding")+", gzip")
+ } else {
+ w.Header().Set("Content-Encoding", "gzip")
+ }
+ next.ServeHTTP(gzipResponseWriter, r)
+ } else {
+ next.ServeHTTP(w, r)
+ }
+ })
+}
+func brotliHandler(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if strings.Contains(r.Header.Get("Accept-Encoding"), "br") {
+ var compressionLevel int
+ var host string
+ if r.Header.Get("Host") != "" {
+ host = r.Header.Get("Host")
+ } else {
+ host = "none"
+ }
+
+ compressionSettings, ok := compression[host]
+ if !ok {
+ compressionLevel = int(config.Global.Compression.Level)
+ } else {
+ compressionLevel = compressionSettings.Level
+ }
+
+ brotliWriter := brotli.NewWriterV2(w, compressionLevel)
+ defer func() {
+ w.Header().Del("Content-Length")
+ err := brotliWriter.Close()
+ if errors.Is(err, http.ErrBodyNotAllowed) {
+ // This is fine, all it means is that they have it cached, and we don't need to send it
+ return
+ } else if err != nil {
+ slog.Error("Error closing Brotli writer: " + err.Error())
+ }
+ }()
+ brotliResponseWriter := &ResponseWriterWrapper{ResponseWriter: w, Writer: brotliWriter}
+ if w.Header().Get("Content-Encoding") != "" {
+ w.Header().Set("Content-Encoding", w.Header().Get("Content-Encoding")+", br")
+ } else {
+ w.Header().Set("Content-Encoding", "br")
+ }
+ next.ServeHTTP(brotliResponseWriter, r)
+ } else {
+ next.ServeHTTP(w, r)
+ }
+ })
+}
+
+func zStandardHandler(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if strings.Contains(r.Header.Get("Accept-Encoding"), "zstd") {
+ var compressionLevel int
+ var host string
+ if r.Header.Get("Host") != "" {
+ host = r.Header.Get("Host")
+ } else {
+ host = "none"
+ }
+
+ compressionSettings, ok := compression[host]
+ if !ok {
+ compressionLevel = int(config.Global.Compression.Level)
+ } else {
+ compressionLevel = compressionSettings.Level
+ }
+
+ zStandardWriter, err := zstd.NewWriter(w, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(compressionLevel)))
+ if err != nil {
+ slog.Error("Error creating ZStandard writer: " + err.Error())
+ next.ServeHTTP(w, r)
+ return
+ }
+ defer func() {
+ w.Header().Del("Content-Length")
+ err := zStandardWriter.Close()
+ if err != nil {
+ if errors.Is(err, http.ErrBodyNotAllowed) {
+ // This is fine, all it means is that they have it cached, and we don't need to send it
+ return
+ } else {
+ slog.Error("Error closing ZStandard writer: " + err.Error())
+ }
+ }
+ }()
+ gzipResponseWriter := &ResponseWriterWrapper{ResponseWriter: w, Writer: zStandardWriter}
+ if w.Header().Get("Content-Encoding") != "" {
+ w.Header().Set("Content-Encoding", w.Header().Get("Content-Encoding")+", zstd")
+ } else {
+ w.Header().Set("Content-Encoding", "zstd")
+ }
+ next.ServeHTTP(gzipResponseWriter, r)
+ } else {
+ next.ServeHTTP(w, r)
+ }
+ })
+}
+
+func listDirectory(w http.ResponseWriter, r *http.Request, root string) {
+ // Provide a directory listing
+ w.WriteHeader(200)
+ w.Header().Set("Content-Type", "text/html")
+ _, err := w.Write([]byte("<!DOCTYPE html><html><head><title>Directory listing</title></head><body><h1>Directory listing</h1><ul>"))
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error writing directory listing: " + err.Error())
+ return
+ }
+ err = filepath.Walk(filepath.Join(root, filepath.FromSlash(r.URL.Path)), func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ relPath, err := filepath.Rel(root, path)
+ if err != nil {
+ return err
+ }
+ if relPath == "." {
+ return nil
+ }
+ _, err = w.Write([]byte("<li><a href=\"" + relPath + "\">" + info.Name() + "</a></li>"))
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error writing directory: " + err.Error())
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error walking directory: " + err.Error())
+ return
+ }
+ _, err = w.Write([]byte("</ul></body></html>"))
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error writing directory listing: " + err.Error())
+ return
+ }
+}
+
+func parseEndRange(w http.ResponseWriter, file *os.File, end string) {
+ endI64, err := strconv.ParseInt(end, 10, 64)
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error parsing range: " + err.Error())
+ return
+ }
+ _, err = file.Seek(-endI64, io.SeekEnd)
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error seeking file: " + err.Error())
+ return
+ }
+ _, err = io.Copy(w, file)
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error writing file: " + err.Error())
+ return
+ }
+}
+
+func parseBeginningRange(w http.ResponseWriter, file *os.File, beginning string) {
+ beginningI64, err := strconv.ParseInt(beginning, 10, 64)
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error parsing range: " + err.Error())
+ return
+ }
+ _, err = file.Seek(beginningI64, io.SeekStart)
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error seeking file: " + err.Error())
+ return
+ }
+ _, err = io.Copy(w, file)
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error writing file: " + err.Error())
+ return
+ }
+}
+
+func parsePartRange(w http.ResponseWriter, file *os.File, beginning, end string) {
+ beginningI64, err := strconv.ParseInt(beginning, 10, 64)
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error parsing range: " + err.Error())
+ return
+ }
+ endI64, err := strconv.ParseInt(end, 10, 64)
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error parsing range: " + err.Error())
+ return
+ }
+ _, err = file.Seek(beginningI64, io.SeekStart)
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error seeking file: " + err.Error())
+ return
+ }
+ _, err = io.CopyN(w, file, endI64-beginningI64)
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error writing file: " + err.Error())
+ return
+ }
+}
+
+func newFileServer(root string, directoryListing bool) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ stat, err := os.Stat(filepath.Join(root, filepath.FromSlash(r.URL.Path)))
+ if err != nil {
+ serverError(w, 404)
+ return
+ }
+
+ if stat.IsDir() {
+ if directoryListing {
+ listDirectory(w, r, root)
+ } else {
+ serverError(w, 403)
+ }
+ return
+ }
+
+ file, err := os.Open(filepath.Join(root, filepath.FromSlash(r.URL.Path)))
+ if err != nil {
+ serverError(w, 500)
+ return
+ }
+
+ w.Header().Set("Content-Type", mime.TypeByExtension(filepath.Ext(r.URL.Path)))
+
+ if strings.HasPrefix(r.Header.Get("Range"), "bytes=") {
+ // Parse the range header. If there is an int-int, seek to the first int then return a limitedReader.
+ // If there is an int-, seek to the first int and return the rest of the file.
+ // If there is an -int, seek to the end of the file minus int and return the last int bytes.
+ for _, item := range strings.Split(strings.TrimPrefix(r.Header.Get("Range"), "bytes="), ", ") {
+ if strings.Contains(item, "-") {
+ beginning := strings.Split(item, "-")[0]
+ end := strings.Split(item, "-")[1]
+ if beginning == "" {
+ parseEndRange(w, file, end)
+ } else if end == "" {
+ parseBeginningRange(w, file, beginning)
+ } else {
+ parsePartRange(w, file, beginning, end)
+ }
+ } else {
+ serverError(w, 416)
+ return
+ }
+ }
+ } else {
+ _, err = io.Copy(w, file)
+ if err != nil {
+ serverError(w, 500)
+ slog.Error("Error writing file: " + err.Error())
+ return
+ }
+
+ err = file.Close()
+ if err != nil {
+ slog.Error("Error closing file: " + err.Error())
+ }
+ }
+ })
+}
+
+func serverError(w http.ResponseWriter, status int) {
+ w.Header().Set("Content-Type", "text/html")
+ w.WriteHeader(status)
+ _, err := w.Write([]byte("<html><body><h1>" + strconv.Itoa(status) + " " + http.StatusText(status) + "</h1><hr>Fulgens HTTP Server</body></html>"))
+ if err != nil {
+ slog.Error("Error writing " + strconv.Itoa(status) + ": " + err.Error())
+ return
+ }
+}
+
+func hostRouter(w http.ResponseWriter, r *http.Request) {
+ host := strings.Split(r.Host, ":")[0]
+ router, ok := subdomains[host]
+ if !ok {
+ router, ok = subdomains["none"]
+ if !ok {
+ serverError(w, 404)
+ slog.Error("No subdomain found for " + host)
+ }
+
+ }
+
+ compressionSettings, ok := compression[host]
+ if !ok {
+ checkCompressionAlgorithm(config.Global.Compression.Algorithm, router).ServeHTTP(w, r)
+ } else {
+ checkCompressionAlgorithm(compressionSettings.Algorithm, router).ServeHTTP(w, r)
+ }
+}
+
var (
- logger = func(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- next.ServeHTTP(w, r)
- slog.Info(r.Method + " " + r.URL.Path)
- })
+ validate *validator.Validate
+ services = make(map[uuid.UUID]Service)
+ lock sync.RWMutex
+ config Config
+ certificates = make(map[string]*tls.Certificate)
+ compression = make(map[string]CompressionSettings)
+ subdomains = make(map[string]*chi.Mux)
+ serviceSubdomains = make(map[string]string)
+)
+
+func loadTLSCertificate(certificatePath, keyPath string) (*tls.Certificate, error) {
+ certificate, err := tls.LoadX509KeyPair(certificatePath, keyPath)
+ if err != nil {
+ return nil, err
+ } else {
+ return &certificate, nil
}
- serverChanger = func(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Server", "Fulgens HTTP Server")
- next.ServeHTTP(w, r)
- })
+}
+
+func getTLSCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
+ cert, ok := certificates[hello.ServerName]
+ if !ok {
+ return nil, errors.New("no certificate found")
+ } else {
+ return cert, nil
}
- gzipHandler = func(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
- gzipWriter, err := gzip.NewWriterLevel(w, config.Global.CompressionLevel)
- if err != nil {
- slog.Error("Error creating gzip writer: ", err)
- next.ServeHTTP(w, r)
- return
+}
+
+func svInit(message library.InterServiceMessage) {
+ // Service database initialization message
+ // Check if the service has the necessary permissions
+ if services[message.ServiceID].ServiceMetadata.Permissions.Database {
+ // Check if we are using sqlite or postgres
+ if config.Global.Database.Type == "sqlite" {
+ // Open the database and return the connection
+ pluginConn, err := sql.Open("sqlite3", filepath.Join(config.Global.Database.Path, message.ServiceID.String()+".db"))
+ if err != nil {
+ // Report an error
+ services[message.ServiceID].Inbox <- library.InterServiceMessage{
+ ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
+ ForServiceID: message.ServiceID,
+ MessageType: 1,
+ SentAt: time.Now(),
+ Message: err,
}
- if w.Header().Get("Content-Encoding") != "" {
- w.Header().Set("Content-Encoding", w.Header().Get("Content-Encoding")+", gzip")
- } else {
- w.Header().Set("Content-Encoding", "gzip")
- }
- defer func() {
- w.Header().Del("Content-Length")
- err := gzipWriter.Close()
- if errors.Is(err, http.ErrBodyNotAllowed) {
- // This is fine, all it means is that they have it cached, and we don't need to send it
- return
- } else if err != nil {
- slog.Error("Error closing gzip writer: ", err)
- }
- }()
- gzipResponseWriter := &ResponseWriterWrapper{ResponseWriter: w, Writer: gzipWriter}
- next.ServeHTTP(gzipResponseWriter, r)
} else {
- next.ServeHTTP(w, r)
- }
- })
- }
- brotliHandler = func(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if strings.Contains(r.Header.Get("Accept-Encoding"), "br") {
- brotliWriter := brotli.NewWriterV2(w, config.Global.CompressionLevel)
- if w.Header().Get("Content-Encoding") != "" {
- w.Header().Set("Content-Encoding", w.Header().Get("Content-Encoding")+", br")
- } else {
- w.Header().Set("Content-Encoding", "br")
+ // Report a successful activation
+ services[message.ServiceID].Inbox <- library.InterServiceMessage{
+ ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
+ ForServiceID: message.ServiceID,
+ MessageType: 2,
+ SentAt: time.Now(),
+ Message: library.Database{
+ DB: pluginConn,
+ DBType: library.Sqlite,
+ },
+ }
+ }
+ } else if config.Global.Database.Type == "postgres" {
+ // Connect to the database
+ conn, err := sql.Open("postgres", config.Global.Database.ConnectionString)
+ if err != nil {
+ // Report an error
+ services[message.ServiceID].Inbox <- library.InterServiceMessage{
+ ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
+ ForServiceID: message.ServiceID,
+ MessageType: 1,
+ SentAt: time.Now(),
+ Message: err,
}
- defer func() {
- w.Header().Del("Content-Length")
- err := brotliWriter.Close()
- if errors.Is(err, http.ErrBodyNotAllowed) {
- // This is fine, all it means is that they have it cached, and we don't need to send it
- return
- } else if err != nil {
- slog.Error("Error closing Brotli writer: ", err)
- }
- }()
- brotliResponseWriter := &ResponseWriterWrapper{ResponseWriter: w, Writer: brotliWriter}
- next.ServeHTTP(brotliResponseWriter, r)
} else {
- next.ServeHTTP(w, r)
- }
- })
- }
- zStandardHandler = func(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if strings.Contains(r.Header.Get("Accept-Encoding"), "zstd") {
- zStandardWriter, err := zstd.NewWriter(w, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(config.Global.CompressionLevel)))
+ // Try to create the schema
+ _, err = conn.Exec("CREATE SCHEMA IF NOT EXISTS \"" + message.ServiceID.String() + "\"")
if err != nil {
- slog.Error("Error creating ZStandard writer: ", err)
- next.ServeHTTP(w, r)
- return
- }
- if w.Header().Get("Content-Encoding") != "" {
- w.Header().Set("Content-Encoding", w.Header().Get("Content-Encoding")+", zstd")
+ // Report an error
+ services[message.ServiceID].Inbox <- library.InterServiceMessage{
+ ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
+ ForServiceID: message.ServiceID,
+ MessageType: 1,
+ SentAt: time.Now(),
+ Message: err,
+ }
} else {
- w.Header().Set("Content-Encoding", "zstd")
- }
- defer func() {
- w.Header().Del("Content-Length")
- err := zStandardWriter.Close()
+ // Create a new connection to the database
+ var connectionString string
+ if strings.Contains(config.Global.Database.ConnectionString, "?") {
+ connectionString = config.Global.Database.ConnectionString + "&search_path=\"" + message.ServiceID.String() + "\""
+ } else {
+ connectionString = config.Global.Database.ConnectionString + "?search_path=\"" + message.ServiceID.String() + "\""
+ }
+ pluginConn, err := sql.Open("postgres", connectionString)
if err != nil {
- if errors.Is(err, http.ErrBodyNotAllowed) {
- // This is fine, all it means is that they have it cached, and we don't need to send it
- return
+ // Report an error
+ services[message.ServiceID].Inbox <- library.InterServiceMessage{
+ ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
+ ForServiceID: message.ServiceID,
+ MessageType: 1,
+ SentAt: time.Now(),
+ Message: err,
+ }
+ } else {
+ // Test the connection
+ err = pluginConn.Ping()
+ if err != nil {
+ // Report an error
+ services[message.ServiceID].Inbox <- library.InterServiceMessage{
+ ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
+ ForServiceID: message.ServiceID,
+ MessageType: 1,
+ SentAt: time.Now(),
+ Message: err,
+ }
} else {
- slog.Error("Error closing ZStandard writer: ", err)
+ // Report a successful activation
+ services[message.ServiceID].Inbox <- library.InterServiceMessage{
+ ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
+ ForServiceID: message.ServiceID,
+ MessageType: 2,
+ SentAt: time.Now(),
+ Message: library.Database{
+ DB: pluginConn,
+ DBType: library.Postgres,
+ },
+ }
}
}
- }()
- gzipResponseWriter := &ResponseWriterWrapper{ResponseWriter: w, Writer: zStandardWriter}
- next.ServeHTTP(gzipResponseWriter, r)
- } else {
- next.ServeHTTP(w, r)
+ }
}
- })
+ }
+ } else {
+ // Report an error
+ services[message.ServiceID].Inbox <- library.InterServiceMessage{
+ ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
+ ForServiceID: message.ServiceID,
+ MessageType: 1,
+ SentAt: time.Now(),
+ Message: errors.New("database access not permitted"),
+ }
}
- validate *validator.Validate
- services = make(map[uuid.UUID]Service)
- lock sync.RWMutex
- hostRouter = hostrouter.New()
- config Config
-)
+}
+
+func tryAuthAccess(message library.InterServiceMessage) {
+ // We need to check if the service is allowed to access the Authentication service
+ serviceMetadata, ok := services[message.ServiceID]
+ if ok && serviceMetadata.ServiceMetadata.Permissions.Authenticate {
+ // Send message to Authentication service
+ service, ok := services[uuid.MustParse("00000000-0000-0000-0000-000000000004")]
+ if ok {
+ service.Inbox <- message
+ } else if !ok {
+ // Send error message
+ service, ok := services[message.ServiceID]
+ if ok {
+ service.Inbox <- library.InterServiceMessage{
+ ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
+ ForServiceID: message.ServiceID,
+ MessageType: 1,
+ SentAt: time.Now(),
+ Message: errors.New("authentication service not found"),
+ }
+ } else {
+ // This should never happen
+ slog.Error("Bit flip error: Impossible service ID. Move away from radiation or use ECC memory.")
+ os.Exit(1)
+ }
+ } else {
+ // Send error message
+ service, ok := services[message.ServiceID]
+ if ok {
+ service.Inbox <- library.InterServiceMessage{
+ ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
+ ForServiceID: message.ServiceID,
+ MessageType: 1,
+ SentAt: time.Now(),
+ Message: errors.New("authentication service not yet available"),
+ }
+ } else {
+ // This should never happen
+ slog.Error("Bit flip error: Impossible service ID. Move away from radiation or use ECC memory.")
+ os.Exit(1)
+ }
+ }
+ } else {
+ // Send error message
+ service, ok := services[message.ServiceID]
+ if ok {
+ service.Inbox <- library.InterServiceMessage{
+ ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
+ ForServiceID: message.ServiceID,
+ MessageType: 1,
+ SentAt: time.Now(),
+ Message: errors.New("authentication not permitted"),
+ }
+ } else {
+ // This should never happen
+ slog.Error("Bit flip error: Impossible service ID. Move away from radiation or use ECC memory.")
+ os.Exit(1)
+ }
+ }
+}
+
+func tryStorageAccess(message library.InterServiceMessage) {
+ // We need to check if the service is allowed to access the Blob Storage service
+ serviceMetadata, ok := services[message.ServiceID]
+ if ok && serviceMetadata.ServiceMetadata.Permissions.BlobStorage {
+ // Send message to Blob Storage service
+ service, ok := services[uuid.MustParse("00000000-0000-0000-0000-000000000003")]
+ if ok {
+ service.Inbox <- message
+ } else if !ok {
+ // Send error message
+ service, ok := services[message.ServiceID]
+ if ok {
+ service.Inbox <- library.InterServiceMessage{
+ ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
+ ForServiceID: message.ServiceID,
+ MessageType: 1,
+ SentAt: time.Now(),
+ Message: errors.New("blob storage service not found"),
+ }
+ } else {
+ // This should never happen
+ slog.Error("Bit flip error: Impossible service ID. Move away from radiation or use ECC memory.")
+ os.Exit(1)
+ }
+ } else {
+ // Send error message
+ service, ok := services[message.ServiceID]
+ if ok {
+ service.Inbox <- library.InterServiceMessage{
+ ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
+ ForServiceID: message.ServiceID,
+ MessageType: 1,
+ SentAt: time.Now(),
+ Message: errors.New("blob storage is not yet available"),
+ }
+ } else {
+ // This should never happen
+ slog.Error("Bit flip error: Impossible service ID. Move away from radiation or use ECC memory.")
+ os.Exit(1)
+ }
+ }
+ } else {
+ // Send error message
+ service, ok := services[message.ServiceID]
+ if ok {
+ service.Inbox <- library.InterServiceMessage{
+ ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
+ ForServiceID: message.ServiceID,
+ MessageType: 1,
+ SentAt: time.Now(),
+ Message: errors.New("blob storage is not permitted"),
+ }
+ } else {
+ // This should never happen
+ slog.Error("Bit flip error: Impossible service ID. Move away from radiation or use ECC memory.")
+ os.Exit(1)
+ }
+ }
+}
+
+func tryLogger(message library.InterServiceMessage) {
+ // Logger service
+ service, ok := services[message.ServiceID]
+ if ok {
+ switch message.MessageType {
+ case 0:
+ // Log message
+ slog.Info(service.ServiceMetadata.Name + " says: " + message.Message.(string))
+ case 1:
+ // Warn message
+ slog.Warn(service.ServiceMetadata.Name + " warns: " + message.Message.(string))
+ case 2:
+ // Error message
+ slog.Error(service.ServiceMetadata.Name + " complains: " + message.Message.(string))
+ case 3:
+ // Fatal message
+ slog.Error(service.ServiceMetadata.Name + "'s dying wish: " + message.Message.(string))
+ os.Exit(1)
+ }
+ }
+}
func processInterServiceMessage(channel chan library.InterServiceMessage) {
for {
@@ -210,253 +780,14 @@ func processInterServiceMessage(channel chan library.InterServiceMessage) {
Message: true,
}
case 1:
- // Service database initialization message
- // Check if the service has the necessary permissions
- if services[message.ServiceID].ServiceMetadata.Permissions.Database {
- // Check if we are using sqlite or postgres
- if config.Database.DatabaseType == "sqlite" {
- // Open the database and return the connection
- pluginConn, err := sql.Open("sqlite3", filepath.Join(config.Database.DatabasePath, message.ServiceID.String()+".db"))
- if err != nil {
- // Report an error
- services[message.ServiceID].Inbox <- library.InterServiceMessage{
- ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
- ForServiceID: message.ServiceID,
- MessageType: 1,
- SentAt: time.Now(),
- Message: err,
- }
- } else {
- // Report a successful activation
- services[message.ServiceID].Inbox <- library.InterServiceMessage{
- ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
- ForServiceID: message.ServiceID,
- MessageType: 2,
- SentAt: time.Now(),
- Message: library.Database{
- DB: pluginConn,
- DBType: library.Sqlite,
- },
- }
- }
- } else if config.Database.DatabaseType == "postgres" {
- // Connect to the database
- conn, err := sql.Open("postgres", config.Database.ConnectionString)
- if err != nil {
- // Report an error
- services[message.ServiceID].Inbox <- library.InterServiceMessage{
- ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
- ForServiceID: message.ServiceID,
- MessageType: 1,
- SentAt: time.Now(),
- Message: err,
- }
- } else {
- // Try to create the schema
- _, err = conn.Exec("CREATE SCHEMA IF NOT EXISTS \"" + message.ServiceID.String() + "\"")
- if err != nil {
- // Report an error
- services[message.ServiceID].Inbox <- library.InterServiceMessage{
- ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
- ForServiceID: message.ServiceID,
- MessageType: 1,
- SentAt: time.Now(),
- Message: err,
- }
- } else {
- // Create a new connection to the database
- var connectionString string
- if strings.Contains(config.Database.ConnectionString, "?") {
- connectionString = config.Database.ConnectionString + "&search_path=\"" + message.ServiceID.String() + "\""
- } else {
- connectionString = config.Database.ConnectionString + "?search_path=\"" + message.ServiceID.String() + "\""
- }
- pluginConn, err := sql.Open("postgres", connectionString)
- if err != nil {
- // Report an error
- services[message.ServiceID].Inbox <- library.InterServiceMessage{
- ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
- ForServiceID: message.ServiceID,
- MessageType: 1,
- SentAt: time.Now(),
- Message: err,
- }
- } else {
- // Test the connection
- err = pluginConn.Ping()
- if err != nil {
- // Report an error
- services[message.ServiceID].Inbox <- library.InterServiceMessage{
- ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
- ForServiceID: message.ServiceID,
- MessageType: 1,
- SentAt: time.Now(),
- Message: err,
- }
- } else {
- // Report a successful activation
- services[message.ServiceID].Inbox <- library.InterServiceMessage{
- ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
- ForServiceID: message.ServiceID,
- MessageType: 2,
- SentAt: time.Now(),
- Message: library.Database{
- DB: pluginConn,
- DBType: library.Postgres,
- },
- }
- }
- }
- }
- }
- }
- } else {
- // Report an error
- services[message.ServiceID].Inbox <- library.InterServiceMessage{
- ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
- ForServiceID: message.ServiceID,
- MessageType: 1,
- SentAt: time.Now(),
- Message: errors.New("database access not permitted"),
- }
- }
+ svInit(message)
}
} else if message.ForServiceID == uuid.MustParse("00000000-0000-0000-0000-000000000002") {
- // Logger service
- service, ok := services[message.ServiceID]
- if ok {
- switch message.MessageType {
- case 0:
- // Log message
- slog.Info(service.ServiceMetadata.Name + " says: " + message.Message.(string))
- case 1:
- // Warn message
- slog.Warn(service.ServiceMetadata.Name + " warns: " + message.Message.(string))
- case 2:
- // Error message
- slog.Error(service.ServiceMetadata.Name + " complains: " + message.Message.(string))
- case 3:
- // Fatal message
- slog.Error(service.ServiceMetadata.Name + "'s dying wish: " + message.Message.(string))
- os.Exit(1)
- }
- }
+ tryLogger(message)
} else if message.ForServiceID == uuid.MustParse("00000000-0000-0000-0000-000000000003") {
- // We need to check if the service is allowed to access the Blob Storage service
- serviceMetadata, ok := services[message.ServiceID]
- if ok && serviceMetadata.ServiceMetadata.Permissions.BlobStorage {
- // Send message to Blob Storage service
- service, ok := services[uuid.MustParse("00000000-0000-0000-0000-000000000003")]
- if ok {
- service.Inbox <- message
- } else if !ok {
- // Send error message
- service, ok := services[message.ServiceID]
- if ok {
- service.Inbox <- library.InterServiceMessage{
- ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
- ForServiceID: message.ServiceID,
- MessageType: 1,
- SentAt: time.Now(),
- Message: errors.New("blob storage service not found"),
- }
- } else {
- // This should never happen
- slog.Error("Bit flip error: Impossible service ID. Move away from radiation or use ECC memory.")
- os.Exit(1)
- }
- } else {
- // Send error message
- service, ok := services[message.ServiceID]
- if ok {
- service.Inbox <- library.InterServiceMessage{
- ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
- ForServiceID: message.ServiceID,
- MessageType: 1,
- SentAt: time.Now(),
- Message: errors.New("blob storage is not yet available"),
- }
- } else {
- // This should never happen
- slog.Error("Bit flip error: Impossible service ID. Move away from radiation or use ECC memory.")
- os.Exit(1)
- }
- }
- } else {
- // Send error message
- service, ok := services[message.ServiceID]
- if ok {
- service.Inbox <- library.InterServiceMessage{
- ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
- ForServiceID: message.ServiceID,
- MessageType: 1,
- SentAt: time.Now(),
- Message: errors.New("blob storage is not permitted"),
- }
- } else {
- // This should never happen
- slog.Error("Bit flip error: Impossible service ID. Move away from radiation or use ECC memory.")
- os.Exit(1)
- }
- }
+ tryStorageAccess(message)
} else if message.ForServiceID == uuid.MustParse("00000000-0000-0000-0000-000000000004") {
- // We need to check if the service is allowed to access the Authentication service
- serviceMetadata, ok := services[message.ServiceID]
- if ok && serviceMetadata.ServiceMetadata.Permissions.Authenticate {
- // Send message to Authentication service
- service, ok := services[uuid.MustParse("00000000-0000-0000-0000-000000000004")]
- if ok {
- service.Inbox <- message
- } else if !ok {
- // Send error message
- service, ok := services[message.ServiceID]
- if ok {
- service.Inbox <- library.InterServiceMessage{
- ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
- ForServiceID: message.ServiceID,
- MessageType: 1,
- SentAt: time.Now(),
- Message: errors.New("authentication service not found"),
- }
- } else {
- // This should never happen
- slog.Error("Bit flip error: Impossible service ID. Move away from radiation or use ECC memory.")
- os.Exit(1)
- }
- } else {
- // Send error message
- service, ok := services[message.ServiceID]
- if ok {
- service.Inbox <- library.InterServiceMessage{
- ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
- ForServiceID: message.ServiceID,
- MessageType: 1,
- SentAt: time.Now(),
- Message: errors.New("authentication service not yet available"),
- }
- } else {
- // This should never happen
- slog.Error("Bit flip error: Impossible service ID. Move away from radiation or use ECC memory.")
- os.Exit(1)
- }
- }
- } else {
- // Send error message
- service, ok := services[message.ServiceID]
- if ok {
- service.Inbox <- library.InterServiceMessage{
- ServiceID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
- ForServiceID: message.ServiceID,
- MessageType: 1,
- SentAt: time.Now(),
- Message: errors.New("authentication not permitted"),
- }
- } else {
- // This should never happen
- slog.Error("Bit flip error: Impossible service ID. Move away from radiation or use ECC memory.")
- os.Exit(1)
- }
- }
+ tryAuthAccess(message)
} else {
serviceMetadata, ok := services[message.ServiceID]
if ok && serviceMetadata.ServiceMetadata.Permissions.InterServiceCommunication {
@@ -516,54 +847,45 @@ func parseConfig(path string) Config {
// Check if it is a directory
return fileInfo.IsDir()
})
+
if err != nil {
- slog.Error("Error registering custom validator: ", err)
+ slog.Error("Error registering custom validator: " + err.Error())
os.Exit(1)
}
// Parse the configuration file
- configFile, err := os.Open(path)
+ configFile, err := os.ReadFile(path)
if err != nil {
- slog.Error("Error reading configuration file: ", err)
+ slog.Error("Error reading configuration file: " + err.Error())
os.Exit(1)
}
// Parse the configuration file
var config Config
- decoder := json.NewDecoder(configFile)
+ decoder := json.NewDecoder(strings.NewReader(string(regexp.MustCompile(`(?m)^\s*//.*`).ReplaceAll(configFile, []byte("")))))
decoder.UseNumber()
err = decoder.Decode(&config)
if err != nil {
- slog.Error("Error parsing configuration file: ", err)
+ slog.Error("Error parsing configuration file: " + err.Error())
os.Exit(1)
}
- // Set the compression level
- if config.Global.Compression != "" {
- compressionLevelI64, err := config.Global.CompressionLevelJN.Int64()
- if err != nil {
- slog.Error("Error parsing compression level: ", err)
- os.Exit(1)
- }
- config.Global.CompressionLevel = int(compressionLevelI64)
- }
-
// Validate the configuration
err = validate.Struct(config)
if err != nil {
- slog.Error("Invalid configuration: ", err)
+ slog.Error("Invalid configuration: " + err.Error())
os.Exit(1)
}
// Check if we are logging to a file
- if config.Logging != (Config{}.Logging) && config.Logging.Enabled {
+ if config.Global.Logging != (Config{}.Global.Logging) && config.Global.Logging.Enabled {
// Check if the log file is set
- logFilePath := config.Logging.File
+ logFilePath := config.Global.Logging.File
// Set the log file
logFile, err := os.OpenFile(logFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
- slog.Error("Error opening log file: ", err)
+ slog.Error("Error opening log file: " + err.Error())
os.Exit(1)
}
@@ -573,16 +895,155 @@ func parseConfig(path string) Config {
return config
}
+func iterateThroughSubdomains() {
+ for _, route := range config.Routes {
+ var subdomainRouter *chi.Mux
+		// Create the subdomain router (NOTE(review): subdomainRouter is only assigned in the else branch below — when a compression level is configured it stays nil; confirm before it is stored and used)
+ if route.Compression.Level != 0 {
+ compression[route.Subdomain] = CompressionSettings{
+ Level: int(route.Compression.Level),
+ Algorithm: route.Compression.Algorithm,
+ }
+ } else {
+ subdomainRouter = chi.NewRouter()
+ subdomainRouter.NotFound(func(w http.ResponseWriter, r *http.Request) {
+ serverError(w, 404)
+ })
+ }
+
+ subdomains[route.Subdomain] = subdomainRouter
+ subdomains[route.Subdomain].Use(logger)
+ subdomains[route.Subdomain].Use(serverChanger)
+
+ // Check the services
+ if route.Services != nil {
+ // Iterate through the services
+ for _, service := range route.Services {
+ _, ok := serviceSubdomains[strings.ToLower(service)]
+ if !ok {
+ serviceSubdomains[strings.ToLower(service)] = route.Subdomain
+ } else {
+ slog.Error("Service " + service + " has multiple subdomains")
+ os.Exit(1)
+ }
+ }
+ }
+
+ // Iterate through the paths
+ for _, path := range route.Paths {
+ if path.Static.Root != "" {
+ // Serve the static directory
+ subdomainRouter.Handle(path.Path, http.StripPrefix(strings.TrimSuffix(path.Path, "*"), newFileServer(path.Static.Root, path.Static.DirectoryListing)))
+ slog.Info("Serving static directory " + path.Static.Root + " on subdomain " + route.Subdomain + " with pattern " + path.Path)
+ } else if path.Proxy.URL != "" {
+ // Parse the URL
+ proxyUrl, err := url.Parse(path.Proxy.URL)
+ if err != nil {
+ slog.Error("Error parsing URL: " + err.Error())
+ os.Exit(1)
+ }
+ // Create the proxy
+ if path.Proxy.StripPrefix {
+ subdomainRouter.Handle(path.Path, http.StripPrefix(strings.TrimSuffix(path.Path, "*"), httputil.NewSingleHostReverseProxy(proxyUrl)))
+ } else {
+ subdomainRouter.Handle(path.Path, httputil.NewSingleHostReverseProxy(proxyUrl))
+ }
+ }
+ }
+
+ // Add the TLS certificate
+ if route.HTTPS.CertificatePath != "" && route.HTTPS.KeyPath != "" {
+ certificate, err := loadTLSCertificate(route.HTTPS.CertificatePath, route.HTTPS.KeyPath)
+ if err != nil {
+ slog.Error("Error loading TLS certificate: " + err.Error())
+ os.Exit(1)
+ }
+ certificates[route.Subdomain] = certificate
+ }
+ }
+}
+
+func initializeService(keys []time.Time, plugins map[time.Time]string, globalOutbox chan library.InterServiceMessage) {
+ for _, k := range keys {
+ // Get the plugin path
+ pluginPath := plugins[k]
+
+ // Load the plugin
+ servicePlugin, err := plugin.Open(pluginPath)
+ if err != nil {
+ slog.Error("Could not load service: " + err.Error())
+ os.Exit(1)
+ }
+
+ // Load the service information
+ serviceInformationSymbol, err := servicePlugin.Lookup("ServiceInformation")
+ if err != nil {
+ slog.Error("Service lacks necessary information: " + err.Error())
+ os.Exit(1)
+ }
+
+ serviceInformation := *serviceInformationSymbol.(*library.Service)
+
+ // Load the main function
+ main, err := servicePlugin.Lookup("Main")
+ if err != nil {
+ slog.Error("Service lacks necessary main function: " + err.Error())
+ os.Exit(1)
+ }
+
+ // Initialize the service
+ var inbox = make(chan library.InterServiceMessage)
+ lock.Lock()
+ services[serviceInformation.ServiceID] = Service{
+ ServiceID: serviceInformation.ServiceID,
+ Inbox: inbox,
+ ServiceMetadata: serviceInformation,
+ }
+ lock.Unlock()
+
+ slog.Info("Activating service " + serviceInformation.Name + " with ID " + serviceInformation.ServiceID.String())
+
+ serviceInitializationInformation := library.ServiceInitializationInformation{
+ Domain: serviceInformation.Name,
+ Configuration: config.Services[strings.ToLower(serviceInformation.Name)].(map[string]interface{}),
+ Outbox: globalOutbox,
+ Inbox: inbox,
+ }
+
+ // Make finalRouter a subdomain router if necessary
+ serviceSubdomain, ok := serviceSubdomains[strings.ToLower(serviceInformation.Name)]
+ if ok {
+ serviceInitializationInformation.Router = subdomains[serviceSubdomain]
+ } else {
+ if serviceInformation.ServiceID != uuid.MustParse("00000000-0000-0000-0000-000000000003") {
+ slog.Warn("Service " + serviceInformation.Name + " does not have a subdomain, it will not be served")
+				// Give it a blank router so it doesn't hit a nil pointer dereference
+ serviceInitializationInformation.Router = chi.NewRouter()
+ }
+ }
+
+ // Check if they want a resource directory
+ if serviceInformation.Permissions.Resources {
+ serviceInitializationInformation.ResourceDir = os.DirFS(filepath.Join(config.Global.ResourceDirectory, serviceInformation.ServiceID.String()))
+ }
+
+ main.(func(library.ServiceInitializationInformation))(serviceInitializationInformation)
+
+ // Log the service activation
+ slog.Info("Service " + serviceInformation.Name + " activated with ID " + serviceInformation.ServiceID.String())
+ }
+}
+
func main() {
// Parse the configuration file
if len(os.Args) < 2 {
- info, err := os.Stat("config.json")
+ info, err := os.Stat("config.conf")
if err != nil {
if errors.Is(err, os.ErrNotExist) {
slog.Error("No configuration file provided")
os.Exit(1)
} else {
- slog.Error("Error reading configuration file: ", err)
+ slog.Error("Error reading configuration file: " + err.Error())
os.Exit(1)
}
}
@@ -592,50 +1053,22 @@ func main() {
os.Exit(1)
}
- config = parseConfig("config.json")
+ config = parseConfig("config.conf")
} else {
config = parseConfig(os.Args[1])
}
// If we are using sqlite, create the database directory if it does not exist
- if config.Database.DatabaseType == "sqlite" {
- err := os.MkdirAll(config.Database.DatabasePath, 0755)
+ if config.Global.Database.Type == "sqlite" {
+ err := os.MkdirAll(config.Global.Database.Path, 0755)
if err != nil {
- slog.Error("Error creating database directory: ", err)
+ slog.Error("Error creating database directory: " + err.Error())
os.Exit(1)
}
}
- // Create the router
- router := chi.NewRouter()
- router.Use(logger)
- router.Use(serverChanger)
-
- // Iterate through the service configurations and create routers for each unique subdomain
- subdomains := make(map[string]*chi.Mux)
- for _, service := range config.Services {
- if service.(map[string]interface{})["subdomain"] != nil {
- subdomain := service.(map[string]interface{})["subdomain"].(string)
- if subdomains[subdomain] == nil {
- subdomains[subdomain] = chi.NewRouter()
- slog.Info("Mapping subdomain " + subdomain)
- hostRouter.Map(subdomain, subdomains[subdomain])
- }
- }
- }
-
- // Iterate through the static configurations and create routers for each unique subdomain
- for _, static := range config.Static {
- // Check if it wants a subdomain
- if static.Subdomain != "" {
- // Check if the subdomain exists
- if subdomains[static.Subdomain] == nil {
- subdomains[static.Subdomain] = chi.NewRouter()
- slog.Info("Mapping subdomain " + static.Subdomain)
- hostRouter.Map(static.Subdomain, subdomains[static.Subdomain])
- }
- }
- }
+ // Iterate through the subdomains and create the routers as well as the compression levels and service maps
+ iterateThroughSubdomains()
var globalOutbox = make(chan library.InterServiceMessage)
@@ -668,8 +1101,9 @@ func main() {
return nil
})
+
if err != nil {
- slog.Error("Error walking the services directory: ", err)
+ slog.Error("Error walking the services directory: " + err.Error())
os.Exit(1)
}
@@ -683,116 +1117,28 @@ func main() {
return keys[i].Before(keys[j])
})
- for _, k := range keys {
- // Get the plugin path
- pluginPath := plugins[k]
-
- // Load the plugin
- servicePlugin, err := plugin.Open(pluginPath)
- if err != nil {
- slog.Error("Could not load service: ", err)
- os.Exit(1)
- }
-
- // Load the service information
- serviceInformationSymbol, err := servicePlugin.Lookup("ServiceInformation")
- if err != nil {
- slog.Error("Service lacks necessary information: ", err)
- os.Exit(1)
- }
-
- serviceInformation := *serviceInformationSymbol.(*library.Service)
-
- // Load the main function
- main, err := servicePlugin.Lookup("Main")
- if err != nil {
- slog.Error("Service lacks necessary main function: ", err)
- os.Exit(1)
- }
-
- // Initialize the service
- var inbox = make(chan library.InterServiceMessage)
- lock.Lock()
- services[serviceInformation.ServiceID] = Service{
- ServiceID: serviceInformation.ServiceID,
- Inbox: inbox,
- ServiceMetadata: serviceInformation,
- }
- lock.Unlock()
-
- slog.Info("Activating service " + serviceInformation.Name + " with ID " + serviceInformation.ServiceID.String())
-
- // Make finalRouter a subdomain router if necessary
- var finalRouter *chi.Mux
- if config.Services[strings.ToLower(serviceInformation.Name)].(map[string]interface{})["subdomain"] != nil {
- finalRouter = subdomains[config.Services[strings.ToLower(serviceInformation.Name)].(map[string]interface{})["subdomain"].(string)]
- } else {
- finalRouter = router
- }
-
- // Check if they want a resource directory
- var resourceDir fs.FS = nil
- if serviceInformation.Permissions.Resources {
- resourceDir = os.DirFS(filepath.Join(config.Global.ResourceDirectory, serviceInformation.ServiceID.String()))
- }
-
- main.(func(library.ServiceInitializationInformation))(library.ServiceInitializationInformation{
- Domain: serviceInformation.Name,
- Configuration: config.Services[strings.ToLower(serviceInformation.Name)].(map[string]interface{}),
- Outbox: globalOutbox,
- Inbox: inbox,
- ResourceDir: resourceDir,
- Router: finalRouter,
- })
-
- // Log the service activation
- slog.Info("Service " + serviceInformation.Name + " activated with ID " + serviceInformation.ServiceID.String())
- }
-
- // Mount the host router
- router.Mount("/", hostRouter)
- slog.Info("All subdomains mapped")
-
- // Initialize the static file servers
- for _, static := range config.Static {
- if static.Subdomain != "" {
- // Serve the static directory
- if static.Pattern != "" {
- subdomains[static.Subdomain].Handle(static.Pattern, http.FileServerFS(os.DirFS(static.Directory)))
- slog.Info("Serving static directory " + static.Directory + " on subdomain " + static.Subdomain + " with pattern " + static.Pattern)
- } else {
- subdomains[static.Subdomain].Handle("/*", http.FileServerFS(os.DirFS(static.Directory)))
- slog.Info("Serving static directory " + static.Directory + " on subdomain " + static.Subdomain)
- }
- } else {
- // Serve the static directory
- if static.Pattern != "" {
- router.Handle(static.Pattern, http.FileServerFS(os.DirFS(static.Directory)))
- slog.Info("Serving static directory " + static.Directory + " with pattern " + static.Pattern)
- } else {
- router.Handle("/*", http.FileServerFS(os.DirFS(static.Directory)))
- slog.Info("Serving static directory " + static.Directory)
- }
- }
- }
+ initializeService(keys, plugins, globalOutbox)
// Start the server
- slog.Info("Starting server on " + config.Global.IP + ":" + config.Global.Port)
- switch config.Global.Compression {
- case "":
- err = http.ListenAndServe(config.Global.IP+":"+config.Global.Port, router)
- case "gzip":
- slog.Info("GZip compression enabled")
- err = http.ListenAndServe(config.Global.IP+":"+config.Global.Port, gzipHandler(router))
- case "brotli":
- slog.Info("Brotli compression enabled")
- err = http.ListenAndServe(config.Global.IP+":"+config.Global.Port, brotliHandler(router))
- case "zstd":
- slog.Info("ZStandard compression enabled")
- err = http.ListenAndServe(config.Global.IP+":"+config.Global.Port, zStandardHandler(router))
- }
- if err != nil {
- slog.Error("Error starting server: ", err)
+ slog.Info("Starting server on " + config.Global.IP + " with ports " + config.Global.HTTPPort + " and " + config.Global.HTTPSPort)
+ go func() {
+ // Create the TLS server
+ server := &http.Server{
+ Handler: http.HandlerFunc(hostRouter),
+ Addr: config.Global.IP + ":" + config.Global.HTTPSPort,
+ TLSConfig: &tls.Config{
+ GetCertificate: getTLSCertificate,
+ },
+ }
+
+ // Start the TLS server
+ err = server.ListenAndServeTLS("", "")
+ slog.Error("Error starting HTTPS server: " + err.Error())
os.Exit(1)
- }
+ }()
+
+ // Start the HTTP server
+ err = http.ListenAndServe(config.Global.IP+":"+config.Global.HTTPPort, http.HandlerFunc(hostRouter))
+ slog.Error("Error starting server: " + err.Error())
+ os.Exit(1)
}