diff --git a/.deploy/db/init-user-db.sh b/.deploy/db/init-user-db.sh
new file mode 100644
index 000000000..28649b613
--- /dev/null
+++ b/.deploy/db/init-user-db.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+set -e
+
+# Create the Gauzy application database and grant the postgres role full
+# access; psql connects as $DB_USER to $DB_NAME and stops on the first error.
+psql -v ON_ERROR_STOP=1 --username "$DB_USER" --dbname "$DB_NAME" <<-EOSQL
+	CREATE DATABASE gauzy;
+	GRANT ALL PRIVILEGES ON DATABASE gauzy TO postgres;
+EOSQL
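+
+# Usage sketch (editorial assumption, not part of the original change): with
+# the official postgres image this script is typically mounted into
+# /docker-entrypoint-initdb.d/ so that it runs once, when the data volume is
+# first initialized, e.g. via a docker-compose volume entry such as:
+#   ./.deploy/db/init-user-db.sh:/docker-entrypoint-initdb.d/init-user-db.sh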
diff --git a/.deploy/jitsu/configurator/data/logs/.gitkeep b/.deploy/jitsu/configurator/data/logs/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/.deploy/jitsu/server/data/logs/.gitkeep b/.deploy/jitsu/server/data/logs/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/.deploy/redis/jitsu_users_recognition/data/.gitkeep b/.deploy/redis/jitsu_users_recognition/data/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/.deploy/redis/jitsu_users_recognition/redis.conf b/.deploy/redis/jitsu_users_recognition/redis.conf
new file mode 100644
index 000000000..e3a8b2607
--- /dev/null
+++ b/.deploy/redis/jitsu_users_recognition/redis.conf
@@ -0,0 +1,2054 @@
+
+# See https://github.com/jitsucom/jitsu/blob/master/compose-data/redis_users_recognition/redis.conf
+#
+# Note that in order to read the configuration file, Redis must be
+# started with the file path as first argument:
+#
+# ./redis-server /path/to/redis.conf
+
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# Note that option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config changes at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+
+################################## MODULES #####################################
+
+# Load modules at startup. If the server is not able to load modules
+# it will abort. It is possible to use multiple loadmodule directives.
+#
+# loadmodule /path/to/my_module.so
+# loadmodule /path/to/other_module.so
+
+################################## NETWORK #####################################
+
+# By default, if no "bind" configuration directive is specified, Redis listens
+# for connections from all available network interfaces on the host machine.
+# It is possible to listen to just one or multiple selected interfaces using
+# the "bind" configuration directive, followed by one or more IP addresses.
+# Each address can be prefixed by "-", which means that redis will not fail to
+# start if the address is not available. Being not available only refers to
+# addresses that do not correspond to any network interface. Addresses that
+# are already in use will always fail, and unsupported protocols will always be
+# silently skipped.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1     # listens on two specific IPv4 addresses
+# bind 127.0.0.1 ::1              # listens on loopback IPv4 and IPv6
+# bind * -::*                     # like the default, all available interfaces
+#
+# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
+# internet, binding to all the interfaces is dangerous and will expose the
+# instance to everybody on the internet. So by default we uncomment the
+# following bind directive, that will force Redis to listen only on the
+# IPv4 and IPv6 (if available) loopback interface addresses (this means Redis
+# will only be able to accept client connections from the same host that it is
+# running on).
+#
+# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
+# JUST COMMENT OUT THE FOLLOWING LINE.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#bind 127.0.0.1 -::1
+
+# Protected mode is a layer of security protection, in order to avoid that
+# Redis instances left open on the internet are accessed and exploited.
+#
+# When protected mode is on and if:
+#
+# 1) The server is not binding explicitly to a set of addresses using the
+#    "bind" directive.
+# 2) No password is configured.
+#
+# The server only accepts connections from clients connecting from the
+# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
+# sockets.
+#
+# By default protected mode is enabled. You should disable it only if
+# you are sure you want clients from other hosts to connect to Redis
+# even if no authentication is configured, nor a specific set of interfaces
+# are explicitly listed using the "bind" directive.
+protected-mode no
+
+# Accept connections on the specified port, default is 6379 (IANA #815344).
+# If port 0 is specified Redis will not listen on a TCP socket.
+port 6380
+
+# TCP listen() backlog.
+#
+# In high requests-per-second environments you need a high backlog in order
+# to avoid slow clients connection issues. Note that the Linux kernel
+# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
+# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
+# in order to get the desired effect.
+tcp-backlog 511
+
+# Unix socket.
+#
+# Specify the path for the Unix socket that will be used to listen for
+# incoming connections. There is no default, so Redis will not listen
+# on a unix socket when not specified.
+#
+# unixsocket /run/redis.sock
+# unixsocketperm 700
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication. This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Force network equipment in the middle to consider the connection to be
+#    alive.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection the double of the time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 300 seconds, which is the new
+# Redis default starting with Redis 3.2.1.
+tcp-keepalive 300
+
+################################# TLS/SSL #####################################
+
+# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration
+# directive can be used to define TLS-listening ports.
+# To enable TLS on the default port, use:
+#
+# port 0
+# tls-port 6379
+
+# Configure an X.509 certificate and private key to use for authenticating the
+# server to connected clients, masters or cluster peers. These files should be
+# PEM formatted.
+#
+# tls-cert-file redis.crt
+# tls-key-file redis.key
+#
+# If the key file is encrypted using a passphrase, it can be included here
+# as well.
+#
+# tls-key-file-pass secret
+
+# Normally Redis uses the same certificate for both server functions (accepting
+# connections) and client functions (replicating from a master, establishing
+# cluster bus connections, etc.).
+#
+# Sometimes certificates are issued with attributes that designate them as
+# client-only or server-only certificates. In that case it may be desired to
+# use different certificates for incoming (server) and outgoing (client)
+# connections. To do that, use the following directives:
+#
+# tls-client-cert-file client.crt
+# tls-client-key-file client.key
+#
+# If the key file is encrypted using a passphrase, it can be included here
+# as well.
+#
+# tls-client-key-file-pass secret
+
+# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange:
+#
+# tls-dh-params-file redis.dh
+
+# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
+# clients and peers. Redis requires an explicit configuration of at least one
+# of these, and will not implicitly use the system wide configuration.
+#
+# tls-ca-cert-file ca.crt
+# tls-ca-cert-dir /etc/ssl/certs
+
+# By default, clients (including replica servers) on a TLS port are required
+# to authenticate using valid client side certificates.
+#
+# If "no" is specified, client certificates are not required and not accepted.
+# If "optional" is specified, client certificates are accepted and must be
+# valid if provided, but are not required.
+#
+# tls-auth-clients no
+# tls-auth-clients optional
+
+# By default, a Redis replica does not attempt to establish a TLS connection
+# with its master.
+#
+# Use the following directive to enable TLS on replication links.
+#
+# tls-replication yes
+
+# By default, the Redis Cluster bus uses a plain TCP connection. To enable
+# TLS for the bus protocol, use the following directive:
+#
+# tls-cluster yes
+
+# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended
+# that older formally deprecated versions are kept disabled to reduce the
+# attack surface. You can explicitly specify TLS versions to support.
+# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2",
+# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination.
+# To enable only TLSv1.2 and TLSv1.3, use:
+#
+# tls-protocols "TLSv1.2 TLSv1.3"
+
+# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information
+# about the syntax of this string.
+#
+# Note: this configuration applies only to <= TLSv1.2.
+#
+# tls-ciphers DEFAULT:!MEDIUM
+
+# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for
+# more information about the syntax of this string, and specifically for
+# TLSv1.3 ciphersuites.
+#
+# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256
+
+# When choosing a cipher, use the server's preference instead of the client
+# preference. By default, the server follows the client's preference.
+#
+# tls-prefer-server-ciphers yes
+
+# By default, TLS session caching is enabled to allow faster and less expensive
+# reconnections by clients that support it. Use the following directive to
+# disable caching.
+#
+# tls-session-caching no
+
+# Change the default number of TLS sessions cached. A zero value sets the cache
+# to unlimited size. The default size is 20480.
+#
+# tls-session-cache-size 5000
+
+# Change the default timeout of cached TLS sessions. The default timeout is 300
+# seconds.
+#
+# tls-session-cache-timeout 60
+
+################################# GENERAL #####################################
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+# When Redis is supervised by upstart or systemd, this parameter has no impact.
+daemonize no
+
+# If you run Redis from upstart or systemd, Redis can interact with your
+# supervision tree. Options:
+#   supervised no      - no supervision interaction
+#   supervised upstart - signal upstart by putting Redis into SIGSTOP mode
+#                        requires "expect stop" in your upstart job config
+#   supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
+#                        on startup, and updating Redis status on a regular
+#                        basis.
+#   supervised auto    - detect upstart or systemd method based on
+#                        UPSTART_JOB or NOTIFY_SOCKET environment variables
+# Note: these supervision methods only signal "process is ready."
+#       They do not enable continuous pings back to your supervisor.
+#
+# The default is "no". To run under upstart/systemd, you can simply uncomment
+# the line below:
+#
+# supervised auto
+
+# If a pid file is specified, Redis writes it where specified at startup
+# and removes it at exit.
+#
+# When the server runs non daemonized, no pid file is created if none is
+# specified in the configuration. When the server is daemonized, the pid file
+# is used even if not specified, defaulting to "/var/run/redis.pid".
+#
+# Creating a pid file is best effort: if Redis is not able to create it
+# nothing bad happens, the server will start and run normally.
+#
+# Note that on modern Linux systems "/run/redis.pid" is more conforming
+# and should be used instead.
+pidfile /var/run/redis_6379.pid
+
+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (lots of rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel notice
+
+# Specify the log file name. Also the empty string can be used to force
+# Redis to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile ""
+
+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no
+
+# Specify the syslog identity.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0
+
+# To disable the built in crash log, which will possibly produce cleaner core
+# dumps when they are needed, uncomment the following:
+#
+# crash-log-enabled no
+
+# To disable the fast memory check that's run as part of the crash log, which
+# will possibly let redis terminate sooner, uncomment the following:
+#
+# crash-memcheck-enabled no
+
+# Set the number of databases.
+# The default database is DB 0, you can select a different one on a
+# per-connection basis using SELECT <dbid> where dbid is a number between 0
+# and 'databases'-1
+databases 16
+
+# By default Redis shows an ASCII art logo only when started to log to the
+# standard output and if the standard output is a TTY and syslog logging is
+# disabled. Basically this means that normally a logo is displayed only in
+# interactive sessions.
+#
+# However it is possible to force the pre-4.0 behavior and always show an
+# ASCII art logo in startup logs by setting the following option to yes.
+always-show-logo no
+
+# By default, Redis modifies the process title (as seen in 'top' and 'ps') to
+# provide some runtime information. It is possible to disable this and leave
+# the process name as executed by setting the following to no.
+set-proc-title yes
+
+# When changing the process title, Redis uses the following template to
+# construct the modified title.
+#
+# Template variables are specified in curly brackets. The following variables
+# are supported:
+#
+# {title}       Name of process as executed if parent, or type of child process.
+# {listen-addr} Bind address or '*' followed by TCP or TLS port listening on,
+#               or Unix socket if only that's available.
+# {server-mode} Special mode, i.e. "[sentinel]" or "[cluster]".
+# {port}        TCP port listening on, or 0.
+# {tls-port}    TLS port listening on, or 0.
+# {unixsocket}  Unix domain socket listening on, or "".
+# {config-file} Name of configuration file used.
+#
+proc-title-template "{title} {listen-addr} {server-mode}"
+
+################################ SNAPSHOTTING ################################
+
+# Save the DB to disk.
+#
+# save <seconds> <changes>
+#
+# Redis will save the DB if both the given number of seconds and the given
+# number of write operations against the DB occurred.
+#
+# Snapshotting can be completely disabled with a single empty string argument
+# as in following example:
+#
+# save ""
+#
+# Unless specified otherwise, by default Redis will save the DB:
+#   * After 3600 seconds (an hour) if at least 1 key changed
+#   * After 300 seconds (5 minutes) if at least 100 keys changed
+#   * After 60 seconds if at least 10000 keys changed
+#
+# You can set these explicitly by uncommenting the three following lines.
+#
+# save 3600 1
+# save 300 100
+# save 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process starts working again, Redis will
+# automatically allow writes again.
+#
+# However if you have set up proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
+# Compress string objects using LZF when dumping .rdb databases?
+# By default compression is enabled as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
+
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable
+# it for maximum performance.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
+# Enables or disables full sanitization checks for ziplist and listpack etc
+# when loading an RDB or RESTORE payload. This reduces the chances of an
+# assertion or crash later on while processing commands.
+# Options:
+#   no      - Never perform full sanitization
+#   yes     - Always perform full sanitization
+#   clients - Perform full sanitization only for user connections.
+#             Excludes: RDB files, RESTORE commands received from the master
+#             connection, and client connections which have the
+#             skip-sanitize-payload ACL flag.
+# The default should be 'clients' but since it currently affects cluster
+# resharding via MIGRATE, it is temporarily set to 'no' by default.
+#
+# sanitize-dump-payload no
+
+# The filename where to dump the DB
+dbfilename dump.rdb
+
+# Remove RDB files used by replication in instances without persistence
+# enabled. By default this option is disabled, however there are environments
+# where for regulations or other security concerns, RDB files persisted on
+# disk by masters in order to feed replicas, or stored on disk by replicas
+# in order to load them for the initial synchronization, should be deleted
+# ASAP. Note that this option ONLY WORKS in instances that have both AOF
+# and RDB persistence disabled, otherwise it is completely ignored.
+#
+# An alternative (and sometimes better) way to obtain the same effect is
+# to use diskless replication on both master and replicas instances. However
+# in the case of replicas, diskless is not always an option.
+rdb-del-sync-files no
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir ./
+
+################################# REPLICATION #################################
+
+# Master-Replica replication. Use replicaof to make a Redis instance a copy of
+# another Redis server. A few things to understand ASAP about Redis
+# replication.
+#
+#   +------------------+      +---------------+
+#   |      Master      | ---> |    Replica    |
+#   | (receive writes) |      |  (exact copy) |
+#   +------------------+      +---------------+
+#
+# 1) Redis replication is asynchronous, but you can configure a master to
+#    stop accepting writes if it appears to be not connected with at least
+#    a given number of replicas.
+# 2) Redis replicas are able to perform a partial resynchronization with the
+#    master if the replication link is lost for a relatively small amount of
+#    time. You may want to configure the replication backlog size (see the
+#    next sections of this file) with a sensible value depending on your needs.
+# 3) Replication is automatic and does not need user intervention. After a
+#    network partition replicas automatically try to reconnect to masters
+#    and resynchronize with them.
+#
+# replicaof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the replica to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the replica request.
+#
+# masterauth <master-password>
+#
+# However this is not enough if you are using Redis ACLs (for Redis version
+# 6 or greater), and the default user is not capable of running the PSYNC
+# command and/or other commands needed for replication. In this case it's
+# better to configure a special user to use with replication, and specify the
+# masteruser configuration as such:
+#
+# masteruser <username>
+#
+# When masteruser is specified, the replica will authenticate against its
+# master using the new AUTH form: AUTH <username> <password>.
+
+# When a replica loses its connection with the master, or when the replication
+# is still in progress, the replica can act in two different ways:
+#
+# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
+#    still reply to client requests, possibly with out of date data, or the
+#    data set may just be empty if this is the first synchronization.
+#
+# 2) If replica-serve-stale-data is set to 'no' the replica will reply with
+#    an error "SYNC with master in progress" to all commands except:
+#    INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE,
+#    UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST,
+#    HOST and LATENCY.
+#
+replica-serve-stale-data yes
+
+# You can configure a replica instance to accept writes or not. Writing against
+# a replica instance may be useful to store some ephemeral data (because data
+# written on a replica will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default replicas are read-only.
+#
+# Note: read only replicas are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only replica exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only replicas using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+replica-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# New replicas and reconnecting replicas that are not able to continue the
+# replication process just receiving differences, need to do what is called a
+# "full synchronization". An RDB file is transmitted from the master to the
+# replicas.
+#
+# The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+#                 file on disk. Later the file is transferred by the parent
+#                 process to the replicas incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+#              RDB file to replica sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more replicas
+# can be queued and served with the RDB file as soon as the current child
+# producing the RDB file finishes its work. With diskless replication instead
+# once the transfer starts, new replicas arriving will be queued and a new
+# transfer will start when the current one terminates.
+#
+# When diskless replication is used, the master waits a configurable amount of
+# time (in seconds) before starting the transfer in the hope that multiple
+# replicas will arrive and the transfer can be parallelized.
+#
+# With slow disks and fast (large bandwidth) networks, diskless replication
+# works better.
+repl-diskless-sync no
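+
+# Illustrative override (editorial sketch, not part of the upstream file): on
+# a fast network with slow disks, diskless sync could be enabled with a short
+# wait window for additional replicas:
+#
+# repl-diskless-sync yes
+# repl-diskless-sync-delay 5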
+
+# When diskless replication is enabled, it is possible to configure the delay
+# the server waits in order to spawn the child that transfers the RDB via
+# socket to the replicas.
+#
+# This is important since once the transfer starts, it is not possible to serve
+# new replicas arriving, which will be queued for the next RDB transfer, so the
+# server waits a delay in order to let more replicas arrive.
+#
+# The delay is specified in seconds, and by default is 5 seconds. To disable
+# it entirely just set it to 0 seconds and the transfer will start ASAP.
+repl-diskless-sync-delay 5
+
+# -----------------------------------------------------------------------------
+# WARNING: RDB diskless load is experimental. Since in this setup the replica
+# does not immediately store an RDB on disk, it may cause data loss during
+# failovers. RDB diskless load + Redis modules not handling I/O reads may also
+# cause Redis to abort in case of I/O errors during the initial synchronization
+# stage with the master. Use only if you know what you are doing.
+# -----------------------------------------------------------------------------
+#
+# Replica can load the RDB it reads from the replication link directly from the
+# socket, or store the RDB to a file and read that file after it was completely
+# received from the master.
+#
+# In many cases the disk is slower than the network, and storing and loading
+# the RDB file may increase replication time (and even increase the master's
+# Copy on Write memory and replica buffers).
+# However, parsing the RDB file directly from the socket may mean that we have
+# to flush the contents of the current database before the full rdb is
+# received. For this reason we have the following options:
+#
+# "disabled"    - Don't use diskless load (store the rdb file to the disk first)
+# "on-empty-db" - Use diskless load only when it is completely safe.
+# "swapdb"      - Keep a copy of the current db contents in RAM while parsing
+#                 the data directly from the socket. Note that this requires
+#                 sufficient memory; if you don't have it, you risk an OOM kill.
+repl-diskless-load disabled
+
+# Replicas send PINGs to server in a predefined interval. It's possible to
+# change this interval with the repl_ping_replica_period option. The default
+# value is 10 seconds.
+#
+# repl-ping-replica-period 10
+
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of replica.
+# 2) Master timeout from the point of view of replicas (data, pings).
+# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-replica-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the replica. The
+# default value is 60 seconds.
+#
+# repl-timeout 60
+
+# Disable TCP_NODELAY on the replica socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to replicas. But this can add a delay for
+# the data to appear on the replica side, up to 40 milliseconds with
+# Linux kernels using a default configuration.
+#
+# If you select "no" the delay for data to appear on the replica side will
+# be reduced but more bandwidth will be used for replication.
+#
+# By default we optimize for low latency, but in very high traffic conditions
+# or when the master and replicas are many hops away, turning this to "yes" may
+# be a good idea.
+repl-disable-tcp-nodelay no
+
+# Set the replication backlog size. The backlog is a buffer that accumulates
+# replica data when replicas are disconnected for some time, so that when a
+# replica wants to reconnect again, often a full resync is not needed, but a
+# partial resync is enough, just passing the portion of data the replica
+# missed while disconnected.
+#
+# The bigger the replication backlog, the longer the replica can endure the
+# disconnect and later be able to perform a partial resynchronization.
+#
+# The backlog is only allocated if there is at least one replica connected.
+#
+# repl-backlog-size 1mb
+
+# After a master has no connected replicas for some time, the backlog will be
+# freed. The following option configures the amount of seconds that need to
+# elapse, starting from the time the last replica disconnected, for the backlog
+# buffer to be freed.
+#
+# Note that replicas never free the backlog for timeout, since they may be
+# promoted to masters later, and should be able to correctly "partially
+# resynchronize" with other replicas: hence they should always accumulate
+# backlog.
+#
+# A value of 0 means to never release the backlog.
+#
+# repl-backlog-ttl 3600
+
+# The replica priority is an integer number published by Redis in the INFO
+# output. It is used by Redis Sentinel in order to select a replica to promote
+# into a master if the master is no longer working correctly.
+#
+# A replica with a low priority number is considered better for promotion, so
+# for instance if there are three replicas with priority 10, 100, 25 Sentinel
+# will pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the replica as not able to perform the
+# role of master, so a replica with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+replica-priority 100
+
+# -----------------------------------------------------------------------------
+# By default, Redis Sentinel includes all replicas in its reports. A replica
+# can be excluded from Redis Sentinel's announcements. An unannounced replica
+# will be ignored by the 'sentinel replicas <master>' command and won't be
+# exposed to Redis Sentinel's clients.
+#
+# This option does not change the behavior of replica-priority. Even with
+# replica-announced set to 'no', the replica can be promoted to master. To
+# prevent this behavior, set replica-priority to 0.
+#
+# replica-announced yes
+
+# It is possible for a master to stop accepting writes if there are less than
+# N replicas connected, with a lag less than or equal to M seconds.
+#
+# The N replicas need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the replica, that is usually sent every second.
+#
+# This option does not GUARANTEE that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough replicas
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 replicas with a lag <= 10 seconds use:
+#
+# min-replicas-to-write 3
+# min-replicas-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-replicas-to-write is set to 0 (feature disabled) and
+# min-replicas-max-lag is set to 10.
+
+# A Redis master is able to list the address and port of the attached
+# replicas in different ways. For example the "INFO replication" section
+# offers this information, which is used, among other tools, by
+# Redis Sentinel in order to discover replica instances.
+# Another place where this info is available is in the output of the
+# "ROLE" command of a master.
+#
+# The listed IP address and port normally reported by a replica is
+# obtained in the following way:
+#
+#   IP: The address is auto detected by checking the peer address
+#   of the socket used by the replica to connect with the master.
+#
+#   Port: The port is communicated by the replica during the replication
+#   handshake, and is normally the port that the replica is using to
+#   listen for connections.
+#
+# However when port forwarding or Network Address Translation (NAT) is
+# used, the replica may actually be reachable via different IP and port
+# pairs. The following two options can be used by a replica in order to
+# report to its master a specific set of IP and port, so that both INFO
+# and ROLE will report those values.
+#
+# There is no need to use both the options if you need to override just
+# the port or the IP address.
+#
+# replica-announce-ip 5.5.5.5
+# replica-announce-port 1234
+
+############################### KEYS TRACKING #################################
+
+# Redis implements server assisted support for client side caching of values.
+# This is implemented using an invalidation table that remembers, using
+# a radix tree indexed by key name, which clients have which keys. In turn
+# this is used in order to send invalidation messages to clients. Please
+# check this page to understand more about the feature:
+#
+#   https://redis.io/topics/client-side-caching
+#
+# When tracking is enabled for a client, all the read only queries are assumed
+# to be cached: this will force Redis to store information in the invalidation
+# table. When keys are modified, such information is flushed away, and
+# invalidation messages are sent to the clients. However if the workload is
+# heavily dominated by reads, Redis could use more and more memory in order
+# to track the keys fetched by many clients.
+#
+# For this reason it is possible to configure a maximum fill value for the
+# invalidation table. By default it is set to 1M of keys, and once this limit
+# is reached, Redis will start to evict keys in the invalidation table
+# even if they were not modified, just to reclaim memory: this will in turn
+# force the clients to invalidate the cached values. Basically the table
+# maximum size is a trade off between the memory you want to spend server
+# side to track information about who cached what, and the ability of clients
+# to retain cached objects in memory.
+#
+# If you set the value to 0, it means there are no limits, and Redis will
+# retain as many keys as needed in the invalidation table.
+# In the "stats" INFO section, you can find information about the number of
+# keys in the invalidation table at every given moment.
+#
+# Note: when key tracking is used in broadcasting mode, no memory is used
+# on the server side so this setting is useless.
+#
+# tracking-table-max-keys 1000000
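+
+# Illustrative check (editorial sketch, not in the upstream file): invalidation
+# can be observed from a RESP3 connection, e.g. with redis-cli:
+#
+# redis-cli -3 -p 6380
+# 127.0.0.1:6380> CLIENT TRACKING ON
+# 127.0.0.1:6380> GET some:key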
+
+################################## SECURITY ###################################
+
+# Warning: since Redis is pretty fast, an outside user can try up to
+# 1 million passwords per second against a modern box. This means that you
+# should use very strong passwords, otherwise they will be very easy to break.
+# Note that because the password is really a shared secret between the client
+# and the server, and should not be memorized by any human, the password
+# can be easily a long string from /dev/urandom or whatever, so by using a
+# long and unguessable password no brute force attack will be possible.
+
+# Redis ACL users are defined in the following format:
+#
+#   user <username> ... acl rules ...
+#
+# For example:
+#
+#   user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
+#
+# The special username "default" is used for new connections. If this user
+# has the "nopass" rule, then new connections will be immediately authenticated
+# as the "default" user without the need of any password provided via the
+# AUTH command. Otherwise if the "default" user is not flagged with "nopass"
+# the connections will start in not authenticated state, and will require
+# AUTH (or the HELLO command AUTH option) in order to be authenticated and
+# start to work.
+#
+# The ACL rules that describe what a user can do are the following:
+#
+#  on           Enable the user: it is possible to authenticate as this user.
+#  off          Disable the user: it's no longer possible to authenticate
+#               with this user, however the already authenticated connections
+#               will still work.
+#  skip-sanitize-payload    RESTORE dump-payload sanitization is skipped.
+#  sanitize-payload         RESTORE dump-payload is sanitized (default).
+#  +<command>   Allow the execution of that command
+#  -<command>   Disallow the execution of that command
+#  +@<category> Allow the execution of all the commands in such category,
+#               valid categories being @admin, @set, @sortedset, ...
+#               and so forth; see the full list in the server.c file where
+#               the Redis command table is described and defined.
+#               The special category @all means all the commands, both the
+#               ones currently present in the server and the ones that will
+#               be loaded in the future via modules.
+#  +<command>|subcommand    Allow a specific subcommand of an otherwise
+#               disabled command. Note that this form is not
+#               allowed as negative like -DEBUG|SEGFAULT, but
+#               only additive starting with "+".
+#  allcommands  Alias for +@all. Note that it implies the ability to execute
+#               all the future commands loaded via the modules system.
+#  nocommands   Alias for -@all.
+#  ~<pattern>   Add a pattern of keys that can be mentioned as part of
+#               commands. For instance ~* allows all the keys. The pattern
+#               is a glob-style pattern like the one of KEYS.
+#               It is possible to specify multiple patterns.
+#  allkeys      Alias for ~*
+#  resetkeys    Flush the list of allowed keys patterns.
+#  &<pattern>   Add a glob-style pattern of Pub/Sub channels that can be
+#               accessed by the user. It is possible to specify multiple
+#               channel patterns.
+#  allchannels  Alias for &*
+#  resetchannels            Flush the list of allowed channel patterns.
+#  ><password>  Add this password to the list of valid passwords for the user.
+#               For example >mypass will add "mypass" to the list.
+#               This directive clears the "nopass" flag (see later).
+#  <<password>  Remove this password from the list of valid passwords.
+#  nopass       All the set passwords of the user are removed, and the user
+#               is flagged as requiring no password: it means that every
+#               password will work against this user. If this directive is
+#               used for the default user, every new connection will be
+#               immediately authenticated with the default user without
+#               any explicit AUTH command required. Note that the "resetpass"
+#               directive will clear this condition.
+#  resetpass    Flush the list of allowed passwords. Moreover removes the
+#               "nopass" status.
After "resetpass" the user has no associated +# passwords and there is no way to authenticate without adding +# some password (or setting it as "nopass" later). +# reset Performs the following actions: resetpass, resetkeys, off, +# -@all. The user returns to the same state it has immediately +# after its creation. +# +# ACL rules can be specified in any order: for instance you can start with +# passwords, then flags, or key patterns. However note that the additive +# and subtractive rules will CHANGE MEANING depending on the ordering. +# For instance see the following example: +# +# user alice on +@all -DEBUG ~* >somepassword +# +# This will allow "alice" to use all the commands with the exception of the +# DEBUG command, since +@all added all the commands to the set of the commands +# alice can use, and later DEBUG was removed. However if we invert the order +# of two ACL rules the result will be different: +# +# user alice on -DEBUG +@all ~* >somepassword +# +# Now DEBUG was removed when alice had yet no commands in the set of allowed +# commands, later all the commands are added, so the user will be able to +# execute everything. +# +# Basically ACL rules are processed left-to-right. +# +# For more information about ACL configuration please refer to +# the Redis web site at https://redis.io/topics/acl + +# ACL LOG +# +# The ACL Log tracks failed commands and authentication events associated +# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked +# by ACLs. The ACL Log is stored in memory. You can reclaim memory with +# ACL LOG RESET. Define the maximum entry length of the ACL Log below. +acllog-max-len 128 + +# Using an external ACL file +# +# Instead of configuring users here in this file, it is possible to use +# a stand-alone file just listing users. The two methods cannot be mixed: +# if you configure users here and at the same time you activate the external +# ACL file, the server will refuse to start. +# +# The format of the external ACL user file is exactly the same as the +# format that is used inside redis.conf to describe users. +# +# aclfile /etc/redis/users.acl + +# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility +# layer on top of the new ACL system. The option effect will be just setting +# the password for the default user. Clients will still authenticate using +# AUTH as usually, or more explicitly with AUTH default +# if they follow the new protocol: both will work. +# +# The requirepass is not compatable with aclfile option and the ACL LOAD +# command, these will cause requirepass to be ignored. +# +# requirepass foobared + +# New users are initialized with restrictive permissions by default, via the +# equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it +# is possible to manage access to Pub/Sub channels with ACL rules as well. The +# default Pub/Sub channels permission if new users is controlled by the +# acl-pubsub-default configuration directive, which accepts one of these values: +# +# allchannels: grants access to all Pub/Sub channels +# resetchannels: revokes access to all Pub/Sub channels +# +# To ensure backward compatibility while upgrading Redis 6.0, acl-pubsub-default +# defaults to the 'allchannels' permission. +# +# Future compatibility note: it is very likely that in a future version of Redis +# the directive's default of 'allchannels' will be changed to 'resetchannels' in +# order to provide better out-of-the-box Pub/Sub security. 
+
+# Command renaming (DEPRECATED).
+#
+# ------------------------------------------------------------------------
+# WARNING: avoid using this option if possible. Instead use ACLs to remove
+# commands from the default user, and put them only in some admin user you
+# create for administrative purposes.
+# ------------------------------------------------------------------------
+#
+# It is possible to change the name of dangerous commands in a shared
+# environment. For instance the CONFIG command may be renamed into something
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
+#
+# Example:
+#
+# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+#
+# It is also possible to completely kill a command by renaming it into
+# an empty string:
+#
+# rename-command CONFIG ""
+#
+# Please note that changing the name of commands that are logged into the
+# AOF file or transmitted to replicas may cause problems.
+
+################################### CLIENTS ####################################
+
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
+# Once the limit is reached Redis will close all the new connections sending
+# an error 'max number of clients reached'.
+#
+# IMPORTANT: When Redis Cluster is used, the max number of connections is also
+# shared with the cluster bus: every node in the cluster will use two
+# connections, one incoming and another outgoing. It is important to size the
+# limit accordingly in case of very large clusters.
+#
+# maxclients 10000
+
+############################## MEMORY MANAGEMENT ################################
+
+# Set a memory usage limit to the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU or LFU cache, or to
+# set a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have replicas attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the replicas are subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, and in turn the output
+# buffer of replicas is full with DELs of keys evicted triggering the deletion
+# of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have replicas attached it is suggested that you set a
+# lower limit for maxmemory so that there is some free RAM on the system for
+# replica output buffers (but this is not needed if the policy is
+# 'noeviction').
+#
+maxmemory 5gb
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select one from the following behaviors:
+#
+# volatile-lru -> Evict using approximated LRU, only keys with an expire set.
+# allkeys-lru -> Evict any key using approximated LRU.
+# volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
+# allkeys-lfu -> Evict any key using approximated LFU.
+# volatile-random -> Remove a random key having an expire set.
+# allkeys-random -> Remove a random key, any key.
+# volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
+# noeviction -> Don't evict anything, just return an error on write operations.
+#
+# LRU means Least Recently Used
+# LFU means Least Frequently Used
+#
+# LRU, LFU and volatile-ttl are all implemented using approximated
+# randomized algorithms.
+#
+# Note: with any of the above policies, when there are no suitable keys for
+# eviction, Redis will return an error on write operations that require
+# more memory. These are usually commands that create new keys, add data or
+# modify existing keys. A few examples are: SET, INCR, HSET, LPUSH,
+# SUNIONSTORE, SORT (due to the STORE argument), and EXEC (if the transaction
+# includes any command that requires memory).
+#
+# The upstream default is 'noeviction'; this configuration uses:
+#
+maxmemory-policy allkeys-lru
+
+# LRU, LFU and minimal TTL algorithms are not precise algorithms but
+# approximated algorithms (in order to save memory), so you can tune them for
+# speed or accuracy. By default Redis will check five keys and pick the one
+# that was used least recently; you can change the sample size using the
+# following configuration directive.
+#
+# The default of 5 produces good enough results. 10 approximates true LRU very
+# closely but costs more CPU. 3 is faster but not very accurate.
+#
+# maxmemory-samples 5
+
+# Eviction processing is designed to function well with the default setting.
+# If there is an unusually large amount of write traffic, this value may need
+# to be increased. Decreasing this value may reduce latency at the risk of
+# less effective eviction processing.
+# 0 = minimum latency, 10 = default, 100 = process without regard to latency
+#
+# maxmemory-eviction-tenacity 10
+
+# Starting from Redis 5, by default a replica will ignore its maxmemory
+# setting (unless it is promoted to master after a failover or manually).
+# It means that the eviction of keys will be just handled by the master,
+# sending the DEL commands to the replica as keys are evicted on the master
+# side.
+#
+# This behavior ensures that masters and replicas stay consistent, and is
+# usually what you want, however if your replica is writable, or you want the
+# replica to have a different memory setting, and you are sure all the writes
+# performed to the replica are idempotent, then you may change this default
+# (but be sure to understand what you are doing).
+#
+# Note that since the replica by default does not evict, it may end up using
+# more memory than the one set via maxmemory (there are certain buffers that
+# may be larger on the replica, or data structures may sometimes take more
+# memory and so forth). So make sure you monitor your replicas and make sure
+# they have enough memory to never hit a real out-of-memory condition before
+# the master hits the configured maxmemory setting.
+#
+# replica-ignore-maxmemory yes
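+
+# Illustrative runtime check (editorial sketch): the effective memory ceiling
+# and eviction policy can be confirmed on a live instance with:
+#
+# redis-cli -p 6380 CONFIG GET "maxmemory*"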
+
+# Redis reclaims expired keys in two ways: upon access when those keys are
+# found to be expired, and also in background, in what is called the "active"
+# expire cycle. The key space is slowly and incrementally scanned looking for
+# expired keys to reclaim, so that it is possible to free memory of keys that
+# are expired and will never be accessed again in a short time.
+#
+# The default effort of the expire cycle will try to avoid having more than
+# ten percent of expired keys still in memory, and will try to avoid consuming
+# more than 25% of total memory or adding latency to the system. However
+# it is possible to increase the expire "effort" that is normally set to
+# "1", to a greater value, up to the value "10". At its maximum value the
+# system will use more CPU, longer cycles (and technically may introduce
+# more latency), and will tolerate less already expired keys still present
+# in the system. It's a tradeoff between memory, CPU and latency.
+#
+# active-expire-effort 1
+
+############################# LAZY FREEING ####################################
+
+# Redis has two primitives to delete keys. One is called DEL and is a blocking
+# deletion of the object. It means that the server stops processing new
+# commands in order to reclaim all the memory associated with an object in a
+# synchronous way. If the key deleted is associated with a small object, the
+# time needed in order to execute the DEL command is very small and comparable
+# to most other O(1) or O(log_N) commands in Redis. However if the key is
+# associated with an aggregated value containing millions of elements, the
+# server can block for a long time (even seconds) in order to complete the
+# operation.
+#
+# For the above reasons Redis also offers non blocking deletion primitives
+# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
+# FLUSHDB commands, in order to reclaim memory in background. Those commands
+# are executed in constant time. Another thread will incrementally free the
+# object in the background as fast as possible.
+#
+# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
+# It's up to the design of the application to understand when it is a good
+# idea to use one or the other. However the Redis server sometimes has to
+# delete keys or flush the whole database as a side effect of other
+# operations. Specifically Redis deletes objects independently of a user call
+# in the following scenarios:
+#
+# 1) On eviction, because of the maxmemory and maxmemory policy configurations,
+#    in order to make room for new data, without going over the specified
+#    memory limit.
+# 2) Because of expire: when a key with an associated time to live (see the
+#    EXPIRE command) must be deleted from memory.
+# 3) Because of a side effect of a command that stores data on a key that may
+#    already exist. For example the RENAME command may delete the old key
+#    content when it is replaced with another one. Similarly SUNIONSTORE
+#    or SORT with STORE option may delete existing keys. The SET command
+#    itself removes any old content of the specified key in order to replace
+#    it with the specified string.
+# 4) During replication, when a replica performs a full resynchronization with
+#    its master, the content of the whole database is removed in order to
+#    load the RDB file just transferred.
+#
+# In all the above cases the default is to delete objects in a blocking way,
+# as if DEL was called. However you can configure each case specifically
+# in order to instead release memory in a non-blocking way, as if UNLINK
+# was called, using the following configuration directives.
+
+lazyfree-lazy-eviction no
+lazyfree-lazy-expire no
+lazyfree-lazy-server-del no
+replica-lazy-flush no
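+
+# For example (illustrative, hypothetical key name): a large key can be
+# reclaimed without blocking the server by replacing a blocking DEL with:
+#
+# redis-cli -p 6380 UNLINK some:large:key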
+
+# It is also possible, for cases where replacing the user code's DEL calls
+# with UNLINK calls is not easy, to modify the default behavior of the DEL
+# command to act exactly like UNLINK, using the following configuration
+# directive:
+
+lazyfree-lazy-user-del no
+
+# FLUSHDB, FLUSHALL, and SCRIPT FLUSH support both asynchronous and synchronous
+# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
+# commands. When neither flag is passed, this directive will be used to
+# determine if the data should be deleted asynchronously.
+
+lazyfree-lazy-user-flush no
+
+################################ THREADED I/O #################################
+
+# Redis is mostly single threaded, however there are certain threaded
+# operations such as UNLINK, slow I/O accesses and other things that are
+# performed on side threads.
+#
+# Now it is also possible to handle Redis clients' socket reads and writes
+# in different I/O threads. Since writing in particular is slow, normally
+# Redis users use pipelining in order to speed up the Redis performance per
+# core, and spawn multiple instances in order to scale more. Using I/O
+# threads it is possible to easily speed Redis up by up to two times without
+# resorting to pipelining or sharding of the instance.
+#
+# By default threading is disabled; we suggest enabling it only on machines
+# that have at least 4 cores, leaving at least one spare core.
+# Using more than 8 threads is unlikely to help much. We also recommend using
+# threaded I/O only if you actually have performance problems, with Redis
+# instances being able to use a quite big percentage of CPU time, otherwise
+# there is no point in using this feature.
+#
+# So for instance if you have a four core box, try to use 2 or 3 I/O
+# threads; if you have 8 cores, try to use 6 threads. In order to
+# enable I/O threads use the following configuration directive:
+#
+# io-threads 4
+#
+# Setting io-threads to 1 will just use the main thread as usual.
+# When I/O threads are enabled, we only use threads for writes, that is
+# to thread the write(2) syscall and transfer the client buffers to the
+# socket. However it is also possible to enable threading of reads and
+# protocol parsing using the following configuration directive, by setting
+# it to yes:
+#
+# io-threads-do-reads no
+#
+# Usually threading reads doesn't help much.
+#
+# NOTE 1: This configuration directive cannot be changed at runtime via
+# CONFIG SET. Also this feature currently does not work when SSL is
+# enabled.
+#
+# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
+# sure you also run the benchmark itself in threaded mode, using the
+# --threads option to match the number of Redis threads, otherwise you'll not
+# be able to notice the improvements.
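+
+# For example (illustrative invocation, assumes the Redis 6+ redis-benchmark):
+# with io-threads 4 set above, a matching threaded benchmark run would be:
+#
+# redis-benchmark --threads 4 -p 6380 -t set,get -n 1000000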
+
+############################ KERNEL OOM CONTROL ##############################
+
+# On Linux, it is possible to hint the kernel OOM killer on what processes
+# should be killed first when out of memory.
+#
+# Enabling this feature makes Redis actively control the oom_score_adj value
+# for all its processes, depending on their role. The default scores will
+# attempt to have background child processes killed before all others, and
+# replicas killed before masters.
+#
+# Redis supports three options:
+#
+# no:       Don't make changes to oom-score-adj (default).
+# yes:      Alias to "relative" see below.
+# absolute: Values in oom-score-adj-values are written as is to the kernel.
+# relative: Values are used relative to the initial value of oom_score_adj when
+#           the server starts and are then clamped to a range of -1000 to 1000.
+#           Because typically the initial value is 0, they will often match the
+#           absolute values.
+oom-score-adj no
+
+# When oom-score-adj is used, this directive controls the specific values used
+# for master, replica and background child processes. Values range -2000 to
+# 2000 (higher means more likely to be killed).
+#
+# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities)
+# can freely increase their value, but not decrease it below its initial
+# settings. This means that setting oom-score-adj to "relative" and setting
+# the oom-score-adj-values to positive values will always succeed.
+oom-score-adj-values 0 200 800
+
+
+#################### KERNEL transparent hugepage CONTROL ######################
+
+# Usually the kernel Transparent Huge Pages control is set to "madvise" or
+# "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which
+# case this config has no effect. On systems in which it is set to "always",
+# redis will attempt to disable it specifically for the redis process in order
+# to avoid latency problems specifically with fork(2) and CoW.
+# If for some reason you prefer to keep it enabled, you can set this config to
+# "no" and the kernel global to "always".
+
+disable-thp yes
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result in a few minutes of lost writes (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# goes wrong with the Redis process itself while the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the best durability guarantees.
+#
+# Please check https://redis.io/topics/persistence for more information.
+
+appendonly no
+
+# The name of the append only file (default: "appendonly.aof")
+
+appendfilename "appendonly.aof"
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OSes will really
+# flush data to disk, while others will just try to do it ASAP.
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OS will really
+# flush data on disk, some other OS will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, Safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performance (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# For more details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync none". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file, implicitly calling
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size is
+# bigger by the specified percentage, the rewrite is triggered. Also
+# you need to specify a minimal size for the AOF file to be rewritten: this
+# is useful to avoid rewriting the AOF file even if the percentage increase
+# is reached but it is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
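+
+# Worked example (illustrative, not from the upstream file): with the
+# settings above, if the AOF measured 80mb after the last rewrite, the next
+# automatic BGREWRITEAOF fires once the file grows past 160mb (the 64mb
+# floor being already satisfied).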
+
+# An AOF file may be found to be truncated at the end during the Redis
+# startup process, when the AOF data gets loaded back into memory.
+# This may happen when the system where Redis is running
+# crashes, especially when an ext4 filesystem is mounted without the
+# data=ordered option (however this can't happen when Redis itself
+# crashes or aborts but the operating system still works correctly).
+#
+# Redis can either exit with an error when this happens, or load as much
+# data as possible (the default now) and start if the AOF file is found
+# to be truncated at the end. The following option controls this behavior.
+#
+# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+# the Redis server starts emitting a log to inform the user of the event.
+# Otherwise if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user is required
+# to fix the AOF file using the "redis-check-aof" utility before restarting
+# the server.
+#
+# Note that if the AOF file is found to be corrupted in the middle
+# the server will still exit with an error. This option only applies when
+# Redis tries to read more data from the AOF file but not enough bytes
+# are found.
+aof-load-truncated yes
+
+# When rewriting the AOF file, Redis is able to use an RDB preamble in the
+# AOF file for faster rewrites and recoveries. When this option is turned
+# on the rewritten AOF file is composed of two different stanzas:
+#
+# [RDB file][AOF tail]
+#
+# When loading, Redis recognizes that the AOF file starts with the "REDIS"
+# string and loads the prefixed RDB file, then continues loading the AOF
+# tail.
+aof-use-rdb-preamble yes
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that did not yet call any write commands. The second
+# is the only way to shut down the server in the case a write command was
+# already issued by the script but the user doesn't want to wait for the natural
+# termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
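+
+# For example (illustrative only): a script stuck past the limit that has
+# not yet performed any write can be stopped from a second connection with:
+#
+# redis-cli script kill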
+
+################################ REDIS CLUSTER ###############################
+
+# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node enable the cluster support uncommenting the following:
+#
+# cluster-enabled yes
+
+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running in the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf
+
+# Cluster node timeout is the amount of milliseconds a node must be unreachable
+# for it to be considered in failure state.
+# Most other internal time limits are a multiple of the node timeout.
+#
+# cluster-node-timeout 15000
+
+# A replica of a failing master will avoid starting a failover if its data
+# looks too old.
+#
+# There is no simple way for a replica to actually have an exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple replicas able to failover, they exchange messages
+# in order to try to give an advantage to the replica with the best
+# replication offset (more data from the master processed).
+# Replicas will try to get their rank by offset, and apply to the start
+# of the failover a delay proportional to their rank.
+#
+# 2) Every single replica computes the time of the last interaction with
+# its master. This can be the last ping or command received (if the master
+# is still in the "connected" state), or the time that elapsed since the
+# disconnection with the master (if the replication link is currently down).
+# If the last interaction is too old, the replica will not try to failover
+# at all.
+#
+# Point "2" can be tuned by the user. Specifically a replica will not perform
+# the failover if, since the last interaction with the master, the time
+# elapsed is greater than:
+#
+# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period
+#
+# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor
+# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
+# replica will not try to failover if it was not able to talk with the master
+# for longer than 310 seconds.
+#
+# A large cluster-replica-validity-factor may allow replicas with too old data to failover
+# a master, while a too small value may prevent the cluster from being able to
+# elect a replica at all.
+#
+# For maximum availability, it is possible to set the cluster-replica-validity-factor
+# to a value of 0, which means that replicas will always try to failover the
+# master regardless of the last time they interacted with the master.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+#
+# Zero is the only value able to guarantee that when all the partitions heal
+# the cluster will always be able to continue.
+#
+# cluster-replica-validity-factor 10
+
+# Cluster replicas are able to migrate to orphaned masters, that is masters
+# that are left without working replicas. This improves the cluster's ability
+# to resist failures, as otherwise an orphaned master can't be failed over
+# in case of failure if it has no working replicas.
+#
+# Replicas migrate to orphaned masters only if there are still at least a
+# given number of other working replicas for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a replica
+# will migrate only if there is at least 1 other working replica for its master
+# and so forth. It usually reflects the number of replicas you want for every
+# master in your cluster.
+#
+# Default is 1 (replicas migrate only if their masters remain with at least
+# one replica). To disable migration just set it to a very large value or
+# set cluster-allow-replica-migration to 'no'.
+# A value of 0 can be set but is useful only for debugging and dangerous
+# in production.
+#
+# cluster-migration-barrier 1
+
+# Turning off this option makes it possible to use less automatic cluster
+# configuration. It both disables migration to orphaned masters and migration
+# from masters that became empty.
+#
+# Default is 'yes' (allow automatic migrations).
+#
+# cluster-allow-replica-migration yes
+
+# By default Redis Cluster nodes stop accepting queries if they detect there
+# is at least a hash slot uncovered (no available node is serving it).
+# This way if the cluster is partially down (for example a range of hash slots
+# is no longer covered) the whole cluster eventually becomes unavailable.
+# It automatically becomes available again as soon as all the slots are
+# covered.
+#
+# However sometimes you want the subset of the cluster which is working
+# to continue to accept queries for the part of the key space that is still
+# covered.
+# In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# This option, when set to yes, prevents replicas from trying to fail over
+# their master during master failures. However a replica can still perform a
+# manual failover, if forced to do so.
+#
+# This is useful in different scenarios, especially in the case of multiple
+# data center operations, where we want one side to never be promoted except
+# in the case of a total DC failure.
+#
+# cluster-replica-no-failover no
+
+# This option, when set to yes, allows nodes to serve read traffic while the
+# cluster is in a down state, as long as it believes it owns the slots.
+#
+# This is useful for two cases. The first case is for when an application
+# doesn't require consistency of data during node failures or network partitions.
+# One example of this is a cache, where as long as the node has the data it
+# should be able to serve it.
+#
+# The second use case is for configurations that don't meet the recommended
+# three shards but want to enable cluster mode and scale later. A master
+# outage in a 1 or 2 shard configuration causes a read/write outage to the
+# entire cluster without this option set; with it set there is only a write
+# outage. Without a quorum of masters, slot ownership will not change
+# automatically.
+#
+# cluster-allow-reads-when-down no
+
+# In order to set up your cluster make sure to read the documentation
+# available at the https://redis.io web site.
+
+########################## CLUSTER DOCKER/NAT support ########################
+
+# In certain deployments, Redis Cluster nodes address discovery fails, because
+# addresses are NAT-ted or because ports are forwarded (the typical case is
+# Docker and other containers).
+#
+# In order to make Redis Cluster work in such environments, a static
+# configuration where each node knows its public address is needed. The
+# following four options are used for this purpose, and are:
+#
+# * cluster-announce-ip
+# * cluster-announce-port
+# * cluster-announce-tls-port
+# * cluster-announce-bus-port
+#
+# Each instructs the node about its address, client ports (for connections
+# without and with TLS) and cluster message bus port. The information is then
+# published in the header of the bus packets so that other nodes will be able to
+# correctly map the address of the node publishing the information.
+#
+# If cluster-tls is set to yes and cluster-announce-tls-port is omitted or set
+# to zero, then cluster-announce-port refers to the TLS port. Note also that
+# cluster-announce-tls-port has no effect if cluster-tls is set to no.
+#
+# If the above options are not used, the normal Redis Cluster auto-detection
+# will be used instead.
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# clients port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usual.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-tls-port 6379
+# cluster-announce-port 0
+# cluster-announce-bus-port 6380
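+#
+# A further sketch (illustrative only, hypothetical addresses): if Docker
+# maps container port 6379 to host port 7000 and the bus port 16379 to
+# 17000, the node could announce itself as:
+#
+# cluster-announce-ip 203.0.113.10
+# cluster-announce-port 7000
+# cluster-announce-bus-port 17000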
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal to or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact, that while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at https://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# t Stream commands
+# d Module key type events
+# m Key-miss events (Note: It is not included in the 'A' class)
+# A Alias for g$lshzxetd, so that the "AKE" string means all the events
+# (except key-miss events which are excluded from 'A' due to their
+# unique nature).
+#
+# The "notify-keyspace-events" directive takes as argument a string that is
+# composed of zero or multiple characters.
+# The empty string means that notifications
+# are disabled.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+# notify-keyspace-events Elg
+#
+# Example 2: to get the stream of the expired keys subscribing to channel
+# name __keyevent@0__:expired use:
+#
+# notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
+
+############################### GOPHER SERVER #################################
+
+# Redis contains an implementation of the Gopher protocol, as specified in
+# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
+#
+# The Gopher protocol was very popular in the late '90s. It is an alternative
+# to the web, and the implementation both server and client side is so simple
+# that the Redis server has just 100 lines of code in order to implement this
+# support.
+#
+# What do you do with Gopher nowadays? Well Gopher never *really* died, and
+# lately there is a movement to resurrect Gopher's more hierarchical content,
+# composed of just plain text documents. Some want a simpler
+# internet, others believe that the mainstream internet became too much
+# controlled, and it's cool to create an alternative space for people that
+# want a bit of fresh air.
+#
+# Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol
+# as a gift.
+#
+# --- HOW IT WORKS? ---
+#
+# The Redis Gopher support uses the inline protocol of Redis, and specifically
+# two kinds of inline requests that were anyway illegal: an empty request
+# or any request that starts with "/" (there are no Redis commands starting
+# with such a slash). Normal RESP2/RESP3 requests are completely out of the
+# path of the Gopher protocol implementation and are served as usual as well.
+#
+# If you open a connection to Redis when Gopher is enabled and send it
+# a string like "/foo", if there is a key named "/foo" it is served via the
+# Gopher protocol.
+#
+# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher
+# parlance), you likely need a script like the following:
+#
+# https://github.com/antirez/gopher2redis
+#
+# --- SECURITY WARNING ---
+#
+# If you plan to put Redis on the internet in a publicly accessible address
+# to serve Gopher pages MAKE SURE TO SET A PASSWORD for the instance.
+# Once a password is set:
+#
+# 1. The Gopher server (when enabled, not by default) will still serve
+# content via Gopher.
+# 2. However other commands cannot be called before the client
+# authenticates.
+#
+# So use the 'requirepass' option to protect your instance.
+#
+# Note that Gopher is not currently supported when 'io-threads-do-reads'
+# is enabled.
+#
+# To enable Gopher support, uncomment the following line and set the option
+# from no (the default) to yes.
+#
+# gopher-enabled no
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 2000
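+#
+# For example (illustrative only, not part of the upstream file): you can
+# verify from redis-cli whether a small hash is using the compact encoding:
+#
+# redis-cli hset myhash field value
+# redis-cli object encoding myhash
+#
+# which reports "ziplist" while the hash stays within the limits above.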
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb <-- not recommended for normal workloads
+# -4: max size: 32 Kb <-- not recommended
+# -3: max size: 16 Kb <-- probably not recommended
+# -2: max size: 8 Kb <-- good
+# -1: max size: 4 Kb <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-ziplist-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression. The head and tail of the list
+# are always uncompressed for fast push/pop operations. Settings are:
+# 0: disable all list compression
+# 1: depth 1 means "don't start compressing until after 1 node into the list,
+# going from either the head or tail"
+# So: [head]->node->node->...->node->[tail]
+# [head], [tail] will always be uncompressed; inner nodes will compress.
+# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
+# 2 here means: don't compress head or head->next or tail->prev or tail,
+# but compress all nodes between them.
+# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
+# etc.
+list-compress-depth 0
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit on the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+
+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16 bytes header. When a HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down too much PFADD,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000
+
+# Streams macro node max size / items. The stream data structure is a radix
+# tree of big nodes that encode multiple items inside. Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entries limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operations you run into a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
+#
+# Both the hard and the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
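+#
+# For example (illustrative values only): to disconnect pubsub clients
+# immediately at 64mb of pending output, or after holding more than 16mb
+# for 30 consecutive seconds, one could set:
+#
+# client-output-buffer-limit pubsub 64mb 16mb 30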
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to avoid that a protocol desynchronization (for
+# instance due to a bug in the client) will lead to unbound memory usage in
+# the query buffer. However you can configure it here if you have very special
+# needs, such as huge multi/exec requests or alike.
+#
+# client-query-buffer-limit 1gb
+
+# In the Redis protocol, bulk requests, that is, elements representing single
+# strings, are normally limited to 512 mb. However you can change this limit
+# here, but it must be 1mb or greater.
+#
+# proto-max-bulk-len 512mb
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients in timeout, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# Normally it is useful to have an HZ value which is proportional to the
+# number of clients connected. This is useful in order, for instance, to
+# avoid that too many clients are processed for each background task
+# invocation, which would cause latency spikes.
+#
+# Since the default HZ value is conservatively set to 10, Redis
+# offers, and enables by default, the ability to use an adaptive HZ value
+# which will temporarily raise when there are many connected clients.
+#
+# When dynamic HZ is enabled, the actual configured HZ will be used
+# as a baseline, but multiples of the configured HZ value will be actually
+# used as needed once more clients are connected. In this way an idle
+# instance will use very little CPU time while a busy instance will be
+# more responsive.
+dynamic-hz yes
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+# When redis saves an RDB file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+rdb-save-incremental-fsync yes
+
+# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
+# idea to start with the default settings and only change them after investigating
+# how to improve the performance and how the keys' LFU counters change over time,
+# which is possible to inspect via the OBJECT FREQ command.
+#
+# There are two tunable parameters in the Redis LFU implementation: the
+# counter logarithm factor and the counter decay time. It is important to
+# understand what the two parameters mean before changing them.
+#
+# The LFU counter is just 8 bits per key; its maximum value is 255, so Redis
+# uses a probabilistic increment with logarithmic behavior. Given the value
+# of the old counter, when a key is accessed, the counter is incremented in
+# this way:
+#
+# 1. A random number R between 0 and 1 is extracted.
+# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
+# 3. The counter is incremented only if R < P.
+#
+# The default lfu-log-factor is 10.
+# This is a table of how the frequency
+# counter changes with a different number of accesses with different
+# logarithmic factors:
+#
+# +--------+------------+------------+------------+------------+------------+
+# | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
+# +--------+------------+------------+------------+------------+------------+
+# | 0      | 104        | 255        | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 1      | 18         | 49         | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 10     | 10         | 18         | 142        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 100    | 8          | 11         | 49         | 143        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+#
+# NOTE: The above table was obtained by running the following commands:
+#
+# redis-benchmark -n 1000000 incr foo
+# redis-cli object freq foo
+#
+# NOTE 2: The counter initial value is 5 in order to give new objects a chance
+# to accumulate hits.
+#
+# The counter decay time is the time, in minutes, that must elapse in order
+# for the key counter to be divided by two (or decremented if it has a value
+# <= 10).
+#
+# The default value for the lfu-decay-time is 1. A special value of 0 means to
+# decay the counter every time it happens to be scanned.
+#
+# lfu-log-factor 10
+# lfu-decay-time 1
+
+########################### ACTIVE DEFRAGMENTATION #######################
+#
+# What is active defragmentation?
+# -------------------------------
+#
+# Active (online) defragmentation allows a Redis server to compact the
+# spaces left between small allocations and deallocations of data in memory,
+# thus allowing memory to be reclaimed.
+#
+# Fragmentation is a natural process that happens with every allocator (but
+# less so with Jemalloc, fortunately) and certain workloads. Normally a server
+# restart is needed in order to lower the fragmentation, or at least to flush
+# away all the data and create it again. However thanks to this feature
+# implemented by Oran Agra for Redis 4.0 this process can happen at runtime
+# in a "hot" way, while the server is running.
+#
+# Basically when the fragmentation is over a certain level (see the
+# configuration options below) Redis will start to create new copies of the
+# values in contiguous memory regions by exploiting certain specific Jemalloc
+# features (in order to understand if an allocation is causing fragmentation
+# and to allocate it in a better place), and at the same time, will release the
+# old copies of the data. This process, repeated incrementally for all the keys,
+# will cause the fragmentation to drop back to normal values.
+#
+# Important things to understand:
+#
+# 1. This feature is disabled by default, and only works if you compiled Redis
+# to use the copy of Jemalloc we ship with the source code of Redis.
+# This is the default with Linux builds.
+#
+# 2. You never need to enable this feature if you don't have fragmentation
+# issues.
+#
+# 3. Once you experience fragmentation, you can enable this feature when
+# needed with the command "CONFIG SET activedefrag yes".
+#
+# The configuration parameters are able to fine tune the behavior of the
+# defragmentation process. If you are not sure about what they mean it is
+# a good idea to leave the defaults untouched.
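+#
+# For example (illustrative only, not part of the upstream file):
+# fragmentation can be inspected and active defrag toggled at runtime:
+#
+# redis-cli info memory | grep mem_fragmentation_ratio
+# redis-cli config set activedefrag yes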
+
+# Enable active defragmentation
+# activedefrag no
+
+# Minimum amount of fragmentation waste to start active defrag
+# active-defrag-ignore-bytes 100mb
+
+# Minimum percentage of fragmentation to start active defrag
+# active-defrag-threshold-lower 10
+
+# Maximum percentage of fragmentation at which we use maximum effort
+# active-defrag-threshold-upper 100
+
+# Minimal effort for defrag in CPU percentage, to be used when the lower
+# threshold is reached
+# active-defrag-cycle-min 1
+
+# Maximal effort for defrag in CPU percentage, to be used when the upper
+# threshold is reached
+# active-defrag-cycle-max 25
+
+# Maximum number of set/hash/zset/list fields that will be processed from
+# the main dictionary scan
+# active-defrag-max-scan-fields 1000
+
+# Jemalloc background thread for purging will be enabled by default
+jemalloc-bg-thread yes
+
+# It is possible to pin different threads and processes of Redis to specific
+# CPUs in your system, in order to maximize the performance of the server.
+# This is useful both in order to pin different Redis threads to different
+# CPUs, and also in order to make sure that multiple Redis instances running
+# in the same host will be pinned to different CPUs.
+#
+# Normally you can do this using the "taskset" command, however it is also
+# possible to do this via Redis configuration directly, both in Linux and FreeBSD.
+#
+# You can pin the server/IO threads, bio threads, aof rewrite child process, and
+# the bgsave child process. The syntax to specify the cpu list is the same as
+# the taskset command:
+#
+# Set redis server/io threads to cpu affinity 0,2,4,6:
+# server_cpulist 0-7:2
+#
+# Set bio threads to cpu affinity 1,3:
+# bio_cpulist 1,3
+#
+# Set aof rewrite child process to cpu affinity 8,9,10,11:
+# aof_rewrite_cpulist 8-11
+#
+# Set bgsave child process to cpu affinity 1,10,11
+# bgsave_cpulist 1,10-11
+
+# In some cases redis will emit warnings and even refuse to start if it detects
+# that the system is in a bad state. It is possible to suppress these warnings
+# by setting the following config, which takes a space delimited list of
+# warnings to suppress:
+#
+# ignore-warnings ARM64-COW-BUG
diff --git a/.deploy/redis/main/data/.gitkeep b/.deploy/redis/main/data/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/.deploy/ssh/with-cloudflare/demo/docker-compose.api.demo.cloudflare.pre.yml b/.deploy/ssh/with-cloudflare/demo/docker-compose.api.demo.cloudflare.pre.yml
new file mode 100644
index 000000000..6d37ca08b
--- /dev/null
+++ b/.deploy/ssh/with-cloudflare/demo/docker-compose.api.demo.cloudflare.pre.yml
@@ -0,0 +1,22 @@
+version: '3.8'
+
+services:
+  nginx:
+    image: nginx:latest
+    volumes:
+      - ./nginx.demo.pre.cloudflare.conf:/etc/nginx/nginx.conf:ro
+      - ./ingress.api.crt:/etc/nginx/ssl/fullchain.pem
+      - ./ingress.api.key:/etc/nginx/ssl/privkey.pem
+    restart: unless-stopped
+    ports:
+      - "80:80"
+      - "443:443"
+    networks:
+      - with-cloudflare_overlay
+volumes:
+  certificates: {}
+
+networks:
+  with-cloudflare_overlay:
+    external: true
+
diff --git a/.deploy/ssh/with-cloudflare/demo/docker-compose.api.demo.template.yml b/.deploy/ssh/with-cloudflare/demo/docker-compose.api.demo.template.yml
new file mode 100644
index 000000000..7e220108f
--- /dev/null
+++ b/.deploy/ssh/with-cloudflare/demo/docker-compose.api.demo.template.yml
@@ -0,0 +1,127 @@
+version: '3.8'
+
+services:
+  api:
+    image: ghcr.io/ever-co/gauzy-api-demo:latest
+    deploy:
+      mode: replicated
+      replicas: 2
+    environment:
+      API_HOST: '0.0.0.0'
+      DEMO: 
'${DEMO:-true}' + NODE_ENV: '${NODE_ENV:-development}' + ADMIN_PASSWORD_RESET: '${ADMIN_PASSWORD_RESET:-}' + API_BASE_URL: '${API_BASE_URL:-http://localhost:3000}' + CLIENT_BASE_URL: '${CLIENT_BASE_URL:-http://localhost:4200}' + DB_TYPE: '${DB_TYPE:-better-sqlite3}' + DB_URI: '${DB_URI:-}' + DB_HOST: '${DB_HOST:-}' + DB_USER: '${DB_USER:-}' + DB_PASS: '${DB_PASS:-}' + DB_NAME: '${DB_NAME:-}' + DB_PORT: '${DB_PORT:-}' + DB_CA_CERT: '${DB_CA_CERT:-}' + DB_SSL_MODE: '${DB_SSL_MODE:-}' + DB_POOL_SIZE: '${DB_POOL_SIZE:-}' + DB_POOL_SIZE_KNEX: '${DB_POOL_SIZE_KNEX:-}' + REDIS_ENABLED: '${REDIS_ENABLED:-}' + REDIS_URL: '${REDIS_URL:-}' + CLOUD_PROVIDER: 'DO' + SENTRY_DSN: '${SENTRY_DSN:-}' + SENTRY_TRACES_SAMPLE_RATE: '${SENTRY_TRACES_SAMPLE_RATE:-}' + SENTRY_PROFILE_SAMPLE_RATE: '${SENTRY_PROFILE_SAMPLE_RATE:-}' + SENTRY_HTTP_TRACING_ENABLED: '${SENTRY_HTTP_TRACING_ENABLED:-}' + SENTRY_POSTGRES_TRACKING_ENABLED: '${SENTRY_POSTGRES_TRACKING_ENABLED:-}' + SENTRY_PROFILING_ENABLED: '${SENTRY_PROFILING_ENABLED:-}' + AWS_ACCESS_KEY_ID: '${AWS_ACCESS_KEY_ID:-}' + AWS_SECRET_ACCESS_KEY: '${AWS_SECRET_ACCESS_KEY:-}' + AWS_REGION: '${AWS_REGION:-}' + AWS_S3_BUCKET: '${AWS_S3_BUCKET:-}' + WASABI_ACCESS_KEY_ID: '${WASABI_ACCESS_KEY_ID:-}' + WASABI_SECRET_ACCESS_KEY: '${WASABI_SECRET_ACCESS_KEY:-}' + WASABI_REGION: '${WASABI_REGION:-}' + WASABI_SERVICE_URL: '${WASABI_SERVICE_URL:-}' + WASABI_S3_BUCKET: '${WASABI_S3_BUCKET:-}' + WASABI_S3_FORCE_PATH_STYLE: '${WASABI_S3_FORCE_PATH_STYLE:-}' + DIGITALOCEAN_ACCESS_KEY_ID: '${DIGITALOCEAN_ACCESS_KEY_ID:-}' + DIGITALOCEAN_SECRET_ACCESS_KEY: '${DIGITALOCEAN_SECRET_ACCESS_KEY:-}' + DIGITALOCEAN_REGION: '${DIGITALOCEAN_REGION:-}' + DIGITALOCEAN_SERVICE_URL: '${DIGITALOCEAN_SERVICE_URL:-}' + DIGITALOCEAN_CDN_URL: '${DIGITALOCEAN_CDN_URL:-}' + DIGITALOCEAN_S3_BUCKET: '${DIGITALOCEAN_S3_BUCKET:-}' + DIGITALOCEAN_S3_FORCE_PATH_STYLE: '${DIGITALOCEAN_S3_FORCE_PATH_STYLE:-}' + EXPRESS_SESSION_SECRET: '${EXPRESS_SESSION_SECRET:-}' + JWT_SECRET: '${JWT_SECRET:-}' + JWT_REFRESH_TOKEN_SECRET: '${JWT_REFRESH_TOKEN_SECRET:-}' + JWT_REFRESH_TOKEN_EXPIRATION_TIME: '${JWT_REFRESH_TOKEN_EXPIRATION_TIME:-}' + CLOUDINARY_API_KEY: '${CLOUDINARY_API_KEY:-}' + CLOUDINARY_API_SECRET: '${CLOUDINARY_API_SECRET:-}' + CLOUDINARY_CLOUD_NAME: '${CLOUDINARY_CLOUD_NAME:-}' + MAIL_FROM_ADDRESS: '${MAIL_FROM_ADDRESS:-}' + MAIL_HOST: '${MAIL_HOST:-}' + MAIL_PORT: '${MAIL_PORT:-}' + MAIL_USERNAME: '${MAIL_USERNAME:-}' + MAIL_PASSWORD: '${MAIL_PASSWORD:-}' + ALLOW_SUPER_ADMIN_ROLE: '${ALLOW_SUPER_ADMIN_ROLE:-}' + GOOGLE_CLIENT_ID: '${GOOGLE_CLIENT_ID:-}' + GOOGLE_CLIENT_SECRET: '${GOOGLE_CLIENT_SECRET:-}' + GOOGLE_CALLBACK_URL: '${GOOGLE_CALLBACK_URL:-}' + FACEBOOK_CLIENT_ID: '${FACEBOOK_CLIENT_ID:-}' + FACEBOOK_CLIENT_SECRET: '${FACEBOOK_CLIENT_SECRET:-}' + FACEBOOK_GRAPH_VERSION: '${FACEBOOK_GRAPH_VERSION:-}' + FACEBOOK_CALLBACK_URL: '${FACEBOOK_CALLBACK_URL:-}' + INTEGRATED_USER_DEFAULT_PASS: '${INTEGRATED_USER_DEFAULT_PASS:-}' + UPWORK_REDIRECT_URL: '${UPWORK_REDIRECT_URL:-}' + FILE_PROVIDER: '${FILE_PROVIDER:-}' + GAUZY_AI_GRAPHQL_ENDPOINT: '${GAUZY_AI_GRAPHQL_ENDPOINT:-}' + GAUZY_AI_REST_ENDPOINT: '${GAUZY_AI_REST_ENDPOINT:-}' + UNLEASH_APP_NAME: '${UNLEASH_APP_NAME:-}' + UNLEASH_API_URL: '${UNLEASH_API_URL:-}' + UNLEASH_INSTANCE_ID: '${UNLEASH_INSTANCE_ID:-}' + UNLEASH_REFRESH_INTERVAL: '${UNLEASH_REFRESH_INTERVAL:-}' + UNLEASH_METRICS_INTERVAL: '${UNLEASH_METRICS_INTERVAL:-}' + UNLEASH_API_KEY: '${UNLEASH_API_KEY:-}' + JITSU_SERVER_URL: '${JITSU_SERVER_URL:-}' + JITSU_SERVER_WRITE_KEY: 
'${JITSU_SERVER_WRITE_KEY:-}' + OTEL_ENABLED: '${OTEL_ENABLED:-}' + OTEL_PROVIDER: '${OTEL_PROVIDER:-}' + OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: '${OTEL_EXPORTER_OTLP_TRACES_ENDPOINT:-}' + OTEL_EXPORTER_OTLP_HEADERS: '${OTEL_EXPORTER_OTLP_HEADERS:-}' + GAUZY_GITHUB_CLIENT_ID: '${GAUZY_GITHUB_CLIENT_ID:-}' + GAUZY_GITHUB_CLIENT_SECRET: '${GAUZY_GITHUB_CLIENT_SECRET:-}' + GAUZY_GITHUB_APP_PRIVATE_KEY: '${GAUZY_GITHUB_APP_PRIVATE_KEY:-}' + GAUZY_GITHUB_WEBHOOK_URL: '${GAUZY_GITHUB_WEBHOOK_URL:-}' + GAUZY_GITHUB_WEBHOOK_SECRET: '${GAUZY_GITHUB_WEBHOOK_SECRET:-}' + GAUZY_GITHUB_APP_NAME: '${GAUZY_GITHUB_APP_NAME:-}' + GAUZY_GITHUB_REDIRECT_URL: '${GAUZY_GITHUB_REDIRECT_URL:-}' + GAUZY_GITHUB_POST_INSTALL_URL: '${GAUZY_GITHUB_POST_INSTALL_URL:-}' + GAUZY_GITHUB_APP_ID: '${GAUZY_GITHUB_APP_ID:-}' + GAUZY_GITHUB_OAUTH_CLIENT_ID: '${GAUZY_GITHUB_OAUTH_CLIENT_ID:-}' + GAUZY_GITHUB_OAUTH_CLIENT_SECRET: '${GAUZY_GITHUB_OAUTH_CLIENT_SECRET:-}' + GAUZY_GITHUB_OAUTH_CALLBACK_URL: '${GAUZY_GITHUB_OAUTH_CALLBACK_URL:-}' + JITSU_BROWSER_URL: '${JITSU_BROWSER_URL:-}' + JITSU_BROWSER_WRITE_KEY: '${JITSU_BROWSER_WRITE_KEY:-}' + MAGIC_CODE_EXPIRATION_TIME: '${MAGIC_CODE_EXPIRATION_TIME:-}' + APP_NAME: '${APP_NAME:-}' + APP_LOGO: '${APP_LOGO:-}' + APP_SIGNATURE: '${APP_SIGNATURE:-}' + APP_LINK: '${APP_LINK:-}' + APP_EMAIL_CONFIRMATION_URL: '${APP_EMAIL_CONFIRMATION_URL:-}' + APP_MAGIC_SIGN_URL: '${APP_MAGIC_SIGN_URL:-}' + COMPANY_LINK: '${COMPANY_LINK:-}' + COMPANY_NAME: '${COMPANY_NAME:-}' + + entrypoint: './entrypoint.prod.sh' + command: ['node', 'main.js'] + restart: on-failure + ports: + - '3000' + networks: + - overlay + +volumes: + certificates: {} + +networks: + overlay: + driver: bridge diff --git a/.deploy/ssh/with-cloudflare/demo/nginx.demo.pre.cloudflare.conf b/.deploy/ssh/with-cloudflare/demo/nginx.demo.pre.cloudflare.conf new file mode 100644 index 000000000..d27b009e7 --- /dev/null +++ b/.deploy/ssh/with-cloudflare/demo/nginx.demo.pre.cloudflare.conf @@ -0,0 +1,25 @@ +user nginx; +events { + worker_connections 1024; +} +http { + server { + listen 80; + server_name apidemodt.gauzy.co; + + location / { + return 301 https://$host$request_uri; + } + } + server { + listen 443 ssl; + server_name apidemodt.gauzy.co; + + ssl_certificate /etc/nginx/ssl/fullchain.pem; + ssl_certificate_key /etc/nginx/ssl/privkey.pem; + + location / { + proxy_pass http://api:3000; + } + } +} diff --git a/.deploy/ssh/with-cloudflare/prod/docker-compose.api.prod.cloudflare.pre.yml b/.deploy/ssh/with-cloudflare/prod/docker-compose.api.prod.cloudflare.pre.yml new file mode 100644 index 000000000..36553610a --- /dev/null +++ b/.deploy/ssh/with-cloudflare/prod/docker-compose.api.prod.cloudflare.pre.yml @@ -0,0 +1,22 @@ +version: '3.8' + +services: + nginx: + image: nginx:latest + volumes: + - ./nginx.prod.pre.cloudflare.conf:/etc/nginx/nginx.conf:ro + - ./ingress.api.crt:/etc/nginx/ssl/fullchain.pem + - ./ingress.api.key:/etc/nginx/ssl/privkey.pem + restart: unless-stopped + ports: + - "80:80" + - "443:443" + networks: + - with-cloudflare_overlay +volumes: + certificates: {} + +networks: + with-cloudflare_overlay: + external: true + diff --git a/.deploy/ssh/with-cloudflare/prod/docker-compose.api.prod.template.yml b/.deploy/ssh/with-cloudflare/prod/docker-compose.api.prod.template.yml new file mode 100644 index 000000000..32b1fa202 --- /dev/null +++ b/.deploy/ssh/with-cloudflare/prod/docker-compose.api.prod.template.yml @@ -0,0 +1,127 @@ +version: '3.8' + +services: + api: + image: ghcr.io/ever-co/gauzy-api:latest + deploy: + mode: 
replicated + replicas: 2 + environment: + API_HOST: '0.0.0.0' + DEMO: '${DEMO}' + NODE_ENV: '${NODE_ENV}' + ADMIN_PASSWORD_RESET: '${ADMIN_PASSWORD_RESET}' + API_BASE_URL: '${API_BASE_URL}' + CLIENT_BASE_URL: '${CLIENT_BASE_URL}' + DB_TYPE: '${DB_TYPE}' + DB_URI: '${DB_URI}' + DB_HOST: '${DB_HOST}' + DB_USER: '${DB_USER}' + DB_PASS: '${DB_PASS}' + DB_NAME: '${DB_NAME}' + DB_PORT: '${DB_PORT}' + DB_CA_CERT: '${DB_CA_CERT}' + DB_SSL_MODE: '${DB_SSL_MODE}' + DB_POOL_SIZE: '${DB_POOL_SIZE}' + DB_POOL_SIZE_KNEX: '${DB_POOL_SIZE_KNEX}' + REDIS_ENABLED: '${REDIS_ENABLED}' + REDIS_URL: '${REDIS_URL}' + CLOUD_PROVIDER: 'DO' + SENTRY_DSN: '${SENTRY_DSN}' + SENTRY_TRACES_SAMPLE_RATE: '${SENTRY_TRACES_SAMPLE_RATE}' + SENTRY_PROFILE_SAMPLE_RATE: '${SENTRY_PROFILE_SAMPLE_RATE}' + SENTRY_HTTP_TRACING_ENABLED: '${SENTRY_HTTP_TRACING_ENABLED}' + SENTRY_POSTGRES_TRACKING_ENABLED: '${SENTRY_POSTGRES_TRACKING_ENABLED}' + SENTRY_PROFILING_ENABLED: '${SENTRY_PROFILING_ENABLED}' + AWS_ACCESS_KEY_ID: '${AWS_ACCESS_KEY_ID}' + AWS_SECRET_ACCESS_KEY: '${AWS_SECRET_ACCESS_KEY}' + AWS_REGION: '${AWS_REGION}' + AWS_S3_BUCKET: '${AWS_S3_BUCKET}' + WASABI_ACCESS_KEY_ID: '${WASABI_ACCESS_KEY_ID}' + WASABI_SECRET_ACCESS_KEY: '${WASABI_SECRET_ACCESS_KEY}' + WASABI_REGION: '${WASABI_REGION}' + WASABI_SERVICE_URL: '${WASABI_SERVICE_URL}' + WASABI_S3_BUCKET: '${WASABI_S3_BUCKET}' + WASABI_S3_FORCE_PATH_STYLE: '${WASABI_S3_FORCE_PATH_STYLE:-}' + DIGITALOCEAN_ACCESS_KEY_ID: '${DIGITALOCEAN_ACCESS_KEY_ID:-}' + DIGITALOCEAN_SECRET_ACCESS_KEY: '${DIGITALOCEAN_SECRET_ACCESS_KEY:-}' + DIGITALOCEAN_REGION: '${DIGITALOCEAN_REGION:-}' + DIGITALOCEAN_SERVICE_URL: '${DIGITALOCEAN_SERVICE_URL:-}' + DIGITALOCEAN_CDN_URL: '${DIGITALOCEAN_CDN_URL:-}' + DIGITALOCEAN_S3_BUCKET: '${DIGITALOCEAN_S3_BUCKET:-}' + DIGITALOCEAN_S3_FORCE_PATH_STYLE: '${DIGITALOCEAN_S3_FORCE_PATH_STYLE:-}' + EXPRESS_SESSION_SECRET: '${EXPRESS_SESSION_SECRET}' + JWT_SECRET: '${JWT_SECRET}' + JWT_REFRESH_TOKEN_SECRET: '${JWT_REFRESH_TOKEN_SECRET}' + JWT_REFRESH_TOKEN_EXPIRATION_TIME: '${JWT_REFRESH_TOKEN_EXPIRATION_TIME}' + CLOUDINARY_API_KEY: '${CLOUDINARY_API_KEY}' + CLOUDINARY_API_SECRET: '${CLOUDINARY_API_SECRET}' + CLOUDINARY_CLOUD_NAME: '${CLOUDINARY_CLOUD_NAME}' + MAIL_FROM_ADDRESS: '${MAIL_FROM_ADDRESS}' + MAIL_HOST: '${MAIL_HOST}' + MAIL_PORT: '${MAIL_PORT}' + MAIL_USERNAME: '${MAIL_USERNAME}' + MAIL_PASSWORD: '${MAIL_PASSWORD}' + ALLOW_SUPER_ADMIN_ROLE: '${ALLOW_SUPER_ADMIN_ROLE}' + GOOGLE_CLIENT_ID: '${GOOGLE_CLIENT_ID}' + GOOGLE_CLIENT_SECRET: '${GOOGLE_CLIENT_SECRET}' + GOOGLE_CALLBACK_URL: '${GOOGLE_CALLBACK_URL}' + FACEBOOK_CLIENT_ID: '${FACEBOOK_CLIENT_ID}' + FACEBOOK_CLIENT_SECRET: '${FACEBOOK_CLIENT_SECRET}' + FACEBOOK_GRAPH_VERSION: '${FACEBOOK_GRAPH_VERSION}' + FACEBOOK_CALLBACK_URL: '${FACEBOOK_CALLBACK_URL}' + INTEGRATED_USER_DEFAULT_PASS: '${INTEGRATED_USER_DEFAULT_PASS}' + UPWORK_REDIRECT_URL: '${UPWORK_REDIRECT_URL}' + FILE_PROVIDER: '${FILE_PROVIDER}' + GAUZY_AI_GRAPHQL_ENDPOINT: '${GAUZY_AI_GRAPHQL_ENDPOINT}' + GAUZY_AI_REST_ENDPOINT: '${GAUZY_AI_REST_ENDPOINT}' + UNLEASH_APP_NAME: '${UNLEASH_APP_NAME}' + UNLEASH_API_URL: '${UNLEASH_API_URL}' + UNLEASH_INSTANCE_ID: '${UNLEASH_INSTANCE_ID}' + UNLEASH_REFRESH_INTERVAL: '${UNLEASH_REFRESH_INTERVAL}' + UNLEASH_METRICS_INTERVAL: '${UNLEASH_METRICS_INTERVAL}' + UNLEASH_API_KEY: '${UNLEASH_API_KEY}' + JITSU_SERVER_URL: '${JITSU_SERVER_URL}' + JITSU_SERVER_WRITE_KEY: '${JITSU_SERVER_WRITE_KEY}' + OTEL_ENABLED: '${OTEL_ENABLED}' + OTEL_PROVIDER: '${OTEL_PROVIDER}' + OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: 
'${OTEL_EXPORTER_OTLP_TRACES_ENDPOINT}' + OTEL_EXPORTER_OTLP_HEADERS: '${OTEL_EXPORTER_OTLP_HEADERS}' + GAUZY_GITHUB_CLIENT_ID: '${GAUZY_GITHUB_CLIENT_ID}' + GAUZY_GITHUB_CLIENT_SECRET: '${GAUZY_GITHUB_CLIENT_SECRET}' + GAUZY_GITHUB_APP_PRIVATE_KEY: '${GAUZY_GITHUB_APP_PRIVATE_KEY}' + GAUZY_GITHUB_WEBHOOK_URL: '${GAUZY_GITHUB_WEBHOOK_URL}' + GAUZY_GITHUB_WEBHOOK_SECRET: '${GAUZY_GITHUB_WEBHOOK_SECRET}' + GAUZY_GITHUB_APP_NAME: '${GAUZY_GITHUB_APP_NAME}' + GAUZY_GITHUB_REDIRECT_URL: '${GAUZY_GITHUB_REDIRECT_URL}' + GAUZY_GITHUB_POST_INSTALL_URL: '${GAUZY_GITHUB_POST_INSTALL_URL}' + GAUZY_GITHUB_APP_ID: '${GAUZY_GITHUB_APP_ID}' + GAUZY_GITHUB_OAUTH_CLIENT_ID: '${GAUZY_GITHUB_OAUTH_CLIENT_ID}' + GAUZY_GITHUB_OAUTH_CLIENT_SECRET: '${GAUZY_GITHUB_OAUTH_CLIENT_SECRET}' + GAUZY_GITHUB_OAUTH_CALLBACK_URL: '${GAUZY_GITHUB_OAUTH_CALLBACK_URL}' + JITSU_BROWSER_URL: '${JITSU_BROWSER_URL}' + JITSU_BROWSER_WRITE_KEY: '${JITSU_BROWSER_WRITE_KEY}' + MAGIC_CODE_EXPIRATION_TIME: '${MAGIC_CODE_EXPIRATION_TIME}' + APP_NAME: '${APP_NAME}' + APP_LOGO: '${APP_LOGO}' + APP_SIGNATURE: '${APP_SIGNATURE}' + APP_LINK: '${APP_LINK}' + APP_EMAIL_CONFIRMATION_URL: '${APP_EMAIL_CONFIRMATION_URL}' + APP_MAGIC_SIGN_URL: '${APP_MAGIC_SIGN_URL}' + COMPANY_LINK: '${COMPANY_LINK}' + COMPANY_NAME: '${COMPANY_NAME}' + + entrypoint: './entrypoint.prod.sh' + command: ['node', 'main.js'] + restart: on-failure + ports: + - '3000' + networks: + - overlay + +volumes: + certificates: {} + +networks: + overlay: + driver: bridge diff --git a/.deploy/ssh/with-cloudflare/prod/nginx.prod.pre.cloudflare.conf b/.deploy/ssh/with-cloudflare/prod/nginx.prod.pre.cloudflare.conf new file mode 100644 index 000000000..b18efd8ca --- /dev/null +++ b/.deploy/ssh/with-cloudflare/prod/nginx.prod.pre.cloudflare.conf @@ -0,0 +1,25 @@ +user nginx; +events { + worker_connections 1024; +} +http { + server { + listen 80; + server_name apidt.gauzy.co; + + location / { + return 301 https://$host$request_uri; + } + } + server { + listen 443 ssl; + server_name apidt.gauzy.co; + + ssl_certificate /etc/nginx/ssl/fullchain.pem; + ssl_certificate_key /etc/nginx/ssl/privkey.pem; + + location / { + proxy_pass http://api:3000; + } + } +} diff --git a/.deploy/ssh/with-cloudflare/stage/docker-compose.api.stage.cloudflare.pre.yml b/.deploy/ssh/with-cloudflare/stage/docker-compose.api.stage.cloudflare.pre.yml new file mode 100644 index 000000000..44ce1eaa9 --- /dev/null +++ b/.deploy/ssh/with-cloudflare/stage/docker-compose.api.stage.cloudflare.pre.yml @@ -0,0 +1,22 @@ +version: '3.8' + +services: + nginx: + image: nginx:latest + volumes: + - ./nginx.stage.pre.cloudflare.conf:/etc/nginx/nginx.conf:ro + - ./ingress.api.crt:/etc/nginx/ssl/fullchain.pem + - ./ingress.api.key:/etc/nginx/ssl/privkey.pem + restart: unless-stopped + ports: + - "80:80" + - "443:443" + networks: + - with-cloudflare_overlay +volumes: + certificates: {} + +networks: + with-cloudflare_overlay: + external: true + diff --git a/.deploy/ssh/with-cloudflare/stage/docker-compose.api.stage.template.yml b/.deploy/ssh/with-cloudflare/stage/docker-compose.api.stage.template.yml new file mode 100644 index 000000000..0b9fe433d --- /dev/null +++ b/.deploy/ssh/with-cloudflare/stage/docker-compose.api.stage.template.yml @@ -0,0 +1,127 @@ +version: '3.8' + +services: + api: + image: ghcr.io/ever-co/gauzy-api-stage:latest + deploy: + mode: replicated + replicas: 2 + environment: + API_HOST: '0.0.0.0' + DEMO: '${DEMO}' + NODE_ENV: '${NODE_ENV}' + ADMIN_PASSWORD_RESET: '${ADMIN_PASSWORD_RESET}' + API_BASE_URL: 
'${API_BASE_URL}' + CLIENT_BASE_URL: '${CLIENT_BASE_URL}' + DB_TYPE: '${DB_TYPE}' + DB_URI: '${DB_URI}' + DB_HOST: '${DB_HOST}' + DB_USER: '${DB_USER}' + DB_PASS: '${DB_PASS}' + DB_NAME: '${DB_NAME}' + DB_PORT: '${DB_PORT}' + DB_CA_CERT: '${DB_CA_CERT}' + DB_SSL_MODE: '${DB_SSL_MODE}' + DB_POOL_SIZE: '${DB_POOL_SIZE}' + DB_POOL_SIZE_KNEX: '${DB_POOL_SIZE_KNEX}' + REDIS_ENABLED: '${REDIS_ENABLED}' + REDIS_URL: '${REDIS_URL}' + CLOUD_PROVIDER: 'DO' + SENTRY_DSN: '${SENTRY_DSN}' + SENTRY_TRACES_SAMPLE_RATE: '${SENTRY_TRACES_SAMPLE_RATE}' + SENTRY_PROFILE_SAMPLE_RATE: '${SENTRY_PROFILE_SAMPLE_RATE}' + SENTRY_HTTP_TRACING_ENABLED: '${SENTRY_HTTP_TRACING_ENABLED}' + SENTRY_POSTGRES_TRACKING_ENABLED: '${SENTRY_POSTGRES_TRACKING_ENABLED}' + SENTRY_PROFILING_ENABLED: '${SENTRY_PROFILING_ENABLED}' + AWS_ACCESS_KEY_ID: '${AWS_ACCESS_KEY_ID}' + AWS_SECRET_ACCESS_KEY: '${AWS_SECRET_ACCESS_KEY}' + AWS_REGION: '${AWS_REGION}' + AWS_S3_BUCKET: '${AWS_S3_BUCKET}' + WASABI_ACCESS_KEY_ID: '${WASABI_ACCESS_KEY_ID}' + WASABI_SECRET_ACCESS_KEY: '${WASABI_SECRET_ACCESS_KEY}' + WASABI_REGION: '${WASABI_REGION}' + WASABI_SERVICE_URL: '${WASABI_SERVICE_URL}' + WASABI_S3_BUCKET: '${WASABI_S3_BUCKET}' + WASABI_S3_FORCE_PATH_STYLE: '${WASABI_S3_FORCE_PATH_STYLE:-}' + DIGITALOCEAN_ACCESS_KEY_ID: '${DIGITALOCEAN_ACCESS_KEY_ID:-}' + DIGITALOCEAN_SECRET_ACCESS_KEY: '${DIGITALOCEAN_SECRET_ACCESS_KEY:-}' + DIGITALOCEAN_REGION: '${DIGITALOCEAN_REGION:-}' + DIGITALOCEAN_SERVICE_URL: '${DIGITALOCEAN_SERVICE_URL:-}' + DIGITALOCEAN_CDN_URL: '${DIGITALOCEAN_CDN_URL:-}' + DIGITALOCEAN_S3_BUCKET: '${DIGITALOCEAN_S3_BUCKET:-}' + DIGITALOCEAN_S3_FORCE_PATH_STYLE: '${DIGITALOCEAN_S3_FORCE_PATH_STYLE:-}' + EXPRESS_SESSION_SECRET: '${EXPRESS_SESSION_SECRET}' + JWT_SECRET: '${JWT_SECRET}' + JWT_REFRESH_TOKEN_SECRET: '${JWT_REFRESH_TOKEN_SECRET}' + JWT_REFRESH_TOKEN_EXPIRATION_TIME: '${JWT_REFRESH_TOKEN_EXPIRATION_TIME}' + CLOUDINARY_API_KEY: '${CLOUDINARY_API_KEY}' + CLOUDINARY_API_SECRET: '${CLOUDINARY_API_SECRET}' + CLOUDINARY_CLOUD_NAME: '${CLOUDINARY_CLOUD_NAME}' + MAIL_FROM_ADDRESS: '${MAIL_FROM_ADDRESS}' + MAIL_HOST: '${MAIL_HOST}' + MAIL_PORT: '${MAIL_PORT}' + MAIL_USERNAME: '${MAIL_USERNAME}' + MAIL_PASSWORD: '${MAIL_PASSWORD}' + ALLOW_SUPER_ADMIN_ROLE: '${ALLOW_SUPER_ADMIN_ROLE}' + GOOGLE_CLIENT_ID: '${GOOGLE_CLIENT_ID}' + GOOGLE_CLIENT_SECRET: '${GOOGLE_CLIENT_SECRET}' + GOOGLE_CALLBACK_URL: '${GOOGLE_CALLBACK_URL}' + FACEBOOK_CLIENT_ID: '${FACEBOOK_CLIENT_ID}' + FACEBOOK_CLIENT_SECRET: '${FACEBOOK_CLIENT_SECRET}' + FACEBOOK_GRAPH_VERSION: '${FACEBOOK_GRAPH_VERSION}' + FACEBOOK_CALLBACK_URL: '${FACEBOOK_CALLBACK_URL}' + INTEGRATED_USER_DEFAULT_PASS: '${INTEGRATED_USER_DEFAULT_PASS}' + UPWORK_REDIRECT_URL: '${UPWORK_REDIRECT_URL}' + FILE_PROVIDER: '${FILE_PROVIDER}' + GAUZY_AI_GRAPHQL_ENDPOINT: '${GAUZY_AI_GRAPHQL_ENDPOINT}' + GAUZY_AI_REST_ENDPOINT: '${GAUZY_AI_REST_ENDPOINT}' + UNLEASH_APP_NAME: '${UNLEASH_APP_NAME}' + UNLEASH_API_URL: '${UNLEASH_API_URL}' + UNLEASH_INSTANCE_ID: '${UNLEASH_INSTANCE_ID}' + UNLEASH_REFRESH_INTERVAL: '${UNLEASH_REFRESH_INTERVAL}' + UNLEASH_METRICS_INTERVAL: '${UNLEASH_METRICS_INTERVAL}' + UNLEASH_API_KEY: '${UNLEASH_API_KEY}' + JITSU_SERVER_URL: '${JITSU_SERVER_URL}' + JITSU_SERVER_WRITE_KEY: '${JITSU_SERVER_WRITE_KEY}' + OTEL_ENABLED: '${OTEL_ENABLED}' + OTEL_PROVIDER: '${OTEL_PROVIDER}' + OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: '${OTEL_EXPORTER_OTLP_TRACES_ENDPOINT}' + OTEL_EXPORTER_OTLP_HEADERS: '${OTEL_EXPORTER_OTLP_HEADERS}' + GAUZY_GITHUB_CLIENT_ID: '${GAUZY_GITHUB_CLIENT_ID}' + 
GAUZY_GITHUB_CLIENT_SECRET: '${GAUZY_GITHUB_CLIENT_SECRET}' + GAUZY_GITHUB_APP_PRIVATE_KEY: '${GAUZY_GITHUB_APP_PRIVATE_KEY}' + GAUZY_GITHUB_WEBHOOK_URL: '${GAUZY_GITHUB_WEBHOOK_URL}' + GAUZY_GITHUB_WEBHOOK_SECRET: '${GAUZY_GITHUB_WEBHOOK_SECRET}' + GAUZY_GITHUB_APP_NAME: '${GAUZY_GITHUB_APP_NAME}' + GAUZY_GITHUB_REDIRECT_URL: '${GAUZY_GITHUB_REDIRECT_URL}' + GAUZY_GITHUB_POST_INSTALL_URL: '${GAUZY_GITHUB_POST_INSTALL_URL}' + GAUZY_GITHUB_APP_ID: '${GAUZY_GITHUB_APP_ID}' + GAUZY_GITHUB_OAUTH_CLIENT_ID: '${GAUZY_GITHUB_OAUTH_CLIENT_ID}' + GAUZY_GITHUB_OAUTH_CLIENT_SECRET: '${GAUZY_GITHUB_OAUTH_CLIENT_SECRET}' + GAUZY_GITHUB_OAUTH_CALLBACK_URL: '${GAUZY_GITHUB_OAUTH_CALLBACK_URL}' + JITSU_BROWSER_URL: '${JITSU_BROWSER_URL}' + JITSU_BROWSER_WRITE_KEY: '${JITSU_BROWSER_WRITE_KEY}' + MAGIC_CODE_EXPIRATION_TIME: '${MAGIC_CODE_EXPIRATION_TIME}' + APP_NAME: '${APP_NAME}' + APP_LOGO: '${APP_LOGO}' + APP_SIGNATURE: '${APP_SIGNATURE}' + APP_LINK: '${APP_LINK}' + APP_EMAIL_CONFIRMATION_URL: '${APP_EMAIL_CONFIRMATION_URL}' + APP_MAGIC_SIGN_URL: '${APP_MAGIC_SIGN_URL}' + COMPANY_LINK: '${COMPANY_LINK}' + COMPANY_NAME: '${COMPANY_NAME}' + + entrypoint: './entrypoint.prod.sh' + command: ['node', 'main.js'] + restart: on-failure + ports: + - '3000' + networks: + - overlay + +volumes: + certificates: {} + +networks: + overlay: + driver: bridge diff --git a/.deploy/ssh/with-cloudflare/stage/nginx.stage.pre.cloudflare.conf b/.deploy/ssh/with-cloudflare/stage/nginx.stage.pre.cloudflare.conf new file mode 100644 index 000000000..74aa59560 --- /dev/null +++ b/.deploy/ssh/with-cloudflare/stage/nginx.stage.pre.cloudflare.conf @@ -0,0 +1,25 @@ +user nginx; +events { + worker_connections 1024; +} +http { + server { + listen 80; + server_name apistagedt.gauzy.co; + + location / { + return 301 https://$host$request_uri; + } + } + server { + listen 443 ssl; + server_name apistagedt.gauzy.co; + + ssl_certificate /etc/nginx/ssl/fullchain.pem; + ssl_certificate_key /etc/nginx/ssl/privkey.pem; + + location / { + proxy_pass http://api:3000; + } + } +} diff --git a/.deploy/ssh/with-letsencrypt/demo/docker-compose.api.demo.letsencrypt.pre.yml b/.deploy/ssh/with-letsencrypt/demo/docker-compose.api.demo.letsencrypt.pre.yml new file mode 100644 index 000000000..88acc0b30 --- /dev/null +++ b/.deploy/ssh/with-letsencrypt/demo/docker-compose.api.demo.letsencrypt.pre.yml @@ -0,0 +1,25 @@ +version: '3.8' + +services: + proxy: + image: jonasal/nginx-certbot:latest + restart: always + environment: + CERTBOT_EMAIL: 'ever@ever.co' + env_file: + - ./nginx-certbot.env + ports: + - '80:80' + - '443:443' + networks: + - demo_overlay + volumes: + - nginx_secrets:/etc/letsencrypt + - ./user_conf.d:/etc/nginx/user_conf.d +volumes: + nginx_secrets: {} + certificates: {} + +networks: + demo_overlay: + external: true diff --git a/.deploy/ssh/with-letsencrypt/demo/docker-compose.api.demo.template.yml b/.deploy/ssh/with-letsencrypt/demo/docker-compose.api.demo.template.yml new file mode 100644 index 000000000..7e220108f --- /dev/null +++ b/.deploy/ssh/with-letsencrypt/demo/docker-compose.api.demo.template.yml @@ -0,0 +1,127 @@ +version: '3.8' + +services: + api: + image: ghcr.io/ever-co/gauzy-api-demo:latest + deploy: + mode: replicated + replicas: 2 + environment: + API_HOST: '0.0.0.0' + DEMO: '${DEMO:-true}' + NODE_ENV: '${NODE_ENV:-development}' + ADMIN_PASSWORD_RESET: '${ADMIN_PASSWORD_RESET:-}' + API_BASE_URL: '${API_BASE_URL:-http://localhost:3000}' + CLIENT_BASE_URL: '${CLIENT_BASE_URL:-http://localhost:4200}' + DB_TYPE: 
'${DB_TYPE:-better-sqlite3}' + DB_URI: '${DB_URI:-}' + DB_HOST: '${DB_HOST:-}' + DB_USER: '${DB_USER:-}' + DB_PASS: '${DB_PASS:-}' + DB_NAME: '${DB_NAME:-}' + DB_PORT: '${DB_PORT:-}' + DB_CA_CERT: '${DB_CA_CERT:-}' + DB_SSL_MODE: '${DB_SSL_MODE:-}' + DB_POOL_SIZE: '${DB_POOL_SIZE:-}' + DB_POOL_SIZE_KNEX: '${DB_POOL_SIZE_KNEX:-}' + REDIS_ENABLED: '${REDIS_ENABLED:-}' + REDIS_URL: '${REDIS_URL:-}' + CLOUD_PROVIDER: 'DO' + SENTRY_DSN: '${SENTRY_DSN:-}' + SENTRY_TRACES_SAMPLE_RATE: '${SENTRY_TRACES_SAMPLE_RATE:-}' + SENTRY_PROFILE_SAMPLE_RATE: '${SENTRY_PROFILE_SAMPLE_RATE:-}' + SENTRY_HTTP_TRACING_ENABLED: '${SENTRY_HTTP_TRACING_ENABLED:-}' + SENTRY_POSTGRES_TRACKING_ENABLED: '${SENTRY_POSTGRES_TRACKING_ENABLED:-}' + SENTRY_PROFILING_ENABLED: '${SENTRY_PROFILING_ENABLED:-}' + AWS_ACCESS_KEY_ID: '${AWS_ACCESS_KEY_ID:-}' + AWS_SECRET_ACCESS_KEY: '${AWS_SECRET_ACCESS_KEY:-}' + AWS_REGION: '${AWS_REGION:-}' + AWS_S3_BUCKET: '${AWS_S3_BUCKET:-}' + WASABI_ACCESS_KEY_ID: '${WASABI_ACCESS_KEY_ID:-}' + WASABI_SECRET_ACCESS_KEY: '${WASABI_SECRET_ACCESS_KEY:-}' + WASABI_REGION: '${WASABI_REGION:-}' + WASABI_SERVICE_URL: '${WASABI_SERVICE_URL:-}' + WASABI_S3_BUCKET: '${WASABI_S3_BUCKET:-}' + WASABI_S3_FORCE_PATH_STYLE: '${WASABI_S3_FORCE_PATH_STYLE:-}' + DIGITALOCEAN_ACCESS_KEY_ID: '${DIGITALOCEAN_ACCESS_KEY_ID:-}' + DIGITALOCEAN_SECRET_ACCESS_KEY: '${DIGITALOCEAN_SECRET_ACCESS_KEY:-}' + DIGITALOCEAN_REGION: '${DIGITALOCEAN_REGION:-}' + DIGITALOCEAN_SERVICE_URL: '${DIGITALOCEAN_SERVICE_URL:-}' + DIGITALOCEAN_CDN_URL: '${DIGITALOCEAN_CDN_URL:-}' + DIGITALOCEAN_S3_BUCKET: '${DIGITALOCEAN_S3_BUCKET:-}' + DIGITALOCEAN_S3_FORCE_PATH_STYLE: '${DIGITALOCEAN_S3_FORCE_PATH_STYLE:-}' + EXPRESS_SESSION_SECRET: '${EXPRESS_SESSION_SECRET:-}' + JWT_SECRET: '${JWT_SECRET:-}' + JWT_REFRESH_TOKEN_SECRET: '${JWT_REFRESH_TOKEN_SECRET:-}' + JWT_REFRESH_TOKEN_EXPIRATION_TIME: '${JWT_REFRESH_TOKEN_EXPIRATION_TIME:-}' + CLOUDINARY_API_KEY: '${CLOUDINARY_API_KEY:-}' + CLOUDINARY_API_SECRET: '${CLOUDINARY_API_SECRET:-}' + CLOUDINARY_CLOUD_NAME: '${CLOUDINARY_CLOUD_NAME:-}' + MAIL_FROM_ADDRESS: '${MAIL_FROM_ADDRESS:-}' + MAIL_HOST: '${MAIL_HOST:-}' + MAIL_PORT: '${MAIL_PORT:-}' + MAIL_USERNAME: '${MAIL_USERNAME:-}' + MAIL_PASSWORD: '${MAIL_PASSWORD:-}' + ALLOW_SUPER_ADMIN_ROLE: '${ALLOW_SUPER_ADMIN_ROLE:-}' + GOOGLE_CLIENT_ID: '${GOOGLE_CLIENT_ID:-}' + GOOGLE_CLIENT_SECRET: '${GOOGLE_CLIENT_SECRET:-}' + GOOGLE_CALLBACK_URL: '${GOOGLE_CALLBACK_URL:-}' + FACEBOOK_CLIENT_ID: '${FACEBOOK_CLIENT_ID:-}' + FACEBOOK_CLIENT_SECRET: '${FACEBOOK_CLIENT_SECRET:-}' + FACEBOOK_GRAPH_VERSION: '${FACEBOOK_GRAPH_VERSION:-}' + FACEBOOK_CALLBACK_URL: '${FACEBOOK_CALLBACK_URL:-}' + INTEGRATED_USER_DEFAULT_PASS: '${INTEGRATED_USER_DEFAULT_PASS:-}' + UPWORK_REDIRECT_URL: '${UPWORK_REDIRECT_URL:-}' + FILE_PROVIDER: '${FILE_PROVIDER:-}' + GAUZY_AI_GRAPHQL_ENDPOINT: '${GAUZY_AI_GRAPHQL_ENDPOINT:-}' + GAUZY_AI_REST_ENDPOINT: '${GAUZY_AI_REST_ENDPOINT:-}' + UNLEASH_APP_NAME: '${UNLEASH_APP_NAME:-}' + UNLEASH_API_URL: '${UNLEASH_API_URL:-}' + UNLEASH_INSTANCE_ID: '${UNLEASH_INSTANCE_ID:-}' + UNLEASH_REFRESH_INTERVAL: '${UNLEASH_REFRESH_INTERVAL:-}' + UNLEASH_METRICS_INTERVAL: '${UNLEASH_METRICS_INTERVAL:-}' + UNLEASH_API_KEY: '${UNLEASH_API_KEY:-}' + JITSU_SERVER_URL: '${JITSU_SERVER_URL:-}' + JITSU_SERVER_WRITE_KEY: '${JITSU_SERVER_WRITE_KEY:-}' + OTEL_ENABLED: '${OTEL_ENABLED:-}' + OTEL_PROVIDER: '${OTEL_PROVIDER:-}' + OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: '${OTEL_EXPORTER_OTLP_TRACES_ENDPOINT:-}' + OTEL_EXPORTER_OTLP_HEADERS: 
'${OTEL_EXPORTER_OTLP_HEADERS:-}' + GAUZY_GITHUB_CLIENT_ID: '${GAUZY_GITHUB_CLIENT_ID:-}' + GAUZY_GITHUB_CLIENT_SECRET: '${GAUZY_GITHUB_CLIENT_SECRET:-}' + GAUZY_GITHUB_APP_PRIVATE_KEY: '${GAUZY_GITHUB_APP_PRIVATE_KEY:-}' + GAUZY_GITHUB_WEBHOOK_URL: '${GAUZY_GITHUB_WEBHOOK_URL:-}' + GAUZY_GITHUB_WEBHOOK_SECRET: '${GAUZY_GITHUB_WEBHOOK_SECRET:-}' + GAUZY_GITHUB_APP_NAME: '${GAUZY_GITHUB_APP_NAME:-}' + GAUZY_GITHUB_REDIRECT_URL: '${GAUZY_GITHUB_REDIRECT_URL:-}' + GAUZY_GITHUB_POST_INSTALL_URL: '${GAUZY_GITHUB_POST_INSTALL_URL:-}' + GAUZY_GITHUB_APP_ID: '${GAUZY_GITHUB_APP_ID:-}' + GAUZY_GITHUB_OAUTH_CLIENT_ID: '${GAUZY_GITHUB_OAUTH_CLIENT_ID:-}' + GAUZY_GITHUB_OAUTH_CLIENT_SECRET: '${GAUZY_GITHUB_OAUTH_CLIENT_SECRET:-}' + GAUZY_GITHUB_OAUTH_CALLBACK_URL: '${GAUZY_GITHUB_OAUTH_CALLBACK_URL:-}' + JITSU_BROWSER_URL: '${JITSU_BROWSER_URL:-}' + JITSU_BROWSER_WRITE_KEY: '${JITSU_BROWSER_WRITE_KEY:-}' + MAGIC_CODE_EXPIRATION_TIME: '${MAGIC_CODE_EXPIRATION_TIME:-}' + APP_NAME: '${APP_NAME:-}' + APP_LOGO: '${APP_LOGO:-}' + APP_SIGNATURE: '${APP_SIGNATURE:-}' + APP_LINK: '${APP_LINK:-}' + APP_EMAIL_CONFIRMATION_URL: '${APP_EMAIL_CONFIRMATION_URL:-}' + APP_MAGIC_SIGN_URL: '${APP_MAGIC_SIGN_URL:-}' + COMPANY_LINK: '${COMPANY_LINK:-}' + COMPANY_NAME: '${COMPANY_NAME:-}' + + entrypoint: './entrypoint.prod.sh' + command: ['node', 'main.js'] + restart: on-failure + ports: + - '3000' + networks: + - overlay + +volumes: + certificates: {} + +networks: + overlay: + driver: bridge diff --git a/.deploy/ssh/with-letsencrypt/demo/nginx-certbot.env b/.deploy/ssh/with-letsencrypt/demo/nginx-certbot.env new file mode 100644 index 000000000..1e81781c0 --- /dev/null +++ b/.deploy/ssh/with-letsencrypt/demo/nginx-certbot.env @@ -0,0 +1,16 @@ +# Required +CERTBOT_EMAIL="ever@ever.co" + +# Optional (Defaults) +DHPARAM_SIZE=2048 +ELLIPTIC_CURVE=secp256r1 +RENEWAL_INTERVAL=8d +RSA_KEY_SIZE=2048 +STAGING=0 +USE_ECDSA=1 + +# Advanced (Defaults) +CERTBOT_AUTHENTICATOR=webroot +CERTBOT_DNS_PROPAGATION_SECONDS="" +DEBUG=0 +USE_LOCAL_CA=0 diff --git a/.deploy/ssh/with-letsencrypt/demo/user_conf.d/nginx.conf b/.deploy/ssh/with-letsencrypt/demo/user_conf.d/nginx.conf new file mode 100644 index 000000000..3fa20e8d7 --- /dev/null +++ b/.deploy/ssh/with-letsencrypt/demo/user_conf.d/nginx.conf @@ -0,0 +1,15 @@ +server { + listen 443 ssl; + server_name apidemodts.gauzy.co; + ssl_certificate /etc/letsencrypt/live/apidemodts.gauzy.co/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/apidemodts.gauzy.co/privkey.pem; + ssl_trusted_certificate /etc/letsencrypt/live/apidemodts.gauzy.co/chain.pem; + + # Load the Diffie-Hellman parameter. 
+ ssl_dhparam /etc/letsencrypt/dhparams/dhparam.pem; + + location / { + proxy_pass http://api:3000; + } + +} diff --git a/.deploy/ssh/with-letsencrypt/prod/docker-compose.api.prod.letsencrypt.pre.yml b/.deploy/ssh/with-letsencrypt/prod/docker-compose.api.prod.letsencrypt.pre.yml new file mode 100644 index 000000000..7708b6dda --- /dev/null +++ b/.deploy/ssh/with-letsencrypt/prod/docker-compose.api.prod.letsencrypt.pre.yml @@ -0,0 +1,25 @@ +version: '3.8' + +services: + proxy: + image: jonasal/nginx-certbot:latest + restart: always + environment: + CERTBOT_EMAIL: 'ever@ever.co' + env_file: + - ./nginx-certbot.env + ports: + - '80:80' + - '443:443' + networks: + - prod_overlay + volumes: + - nginx_secrets:/etc/letsencrypt + - ./user_conf.d:/etc/nginx/user_conf.d +volumes: + nginx_secrets: {} + certificates: {} + +networks: + prod_overlay: + external: true diff --git a/.deploy/ssh/with-letsencrypt/prod/docker-compose.api.prod.template.yml b/.deploy/ssh/with-letsencrypt/prod/docker-compose.api.prod.template.yml new file mode 100644 index 000000000..9fa35660b --- /dev/null +++ b/.deploy/ssh/with-letsencrypt/prod/docker-compose.api.prod.template.yml @@ -0,0 +1,126 @@ +version: '3.8' + +services: + api: + image: ghcr.io/ever-co/gauzy-api:latest + deploy: + mode: replicated + replicas: 2 + environment: + API_HOST: '0.0.0.0' + DEMO: '${DEMO}' + NODE_ENV: '${NODE_ENV}' + ADMIN_PASSWORD_RESET: '${ADMIN_PASSWORD_RESET}' + API_BASE_URL: '${API_BASE_URL}' + CLIENT_BASE_URL: '${CLIENT_BASE_URL}' + DB_TYPE: '${DB_TYPE}' + DB_URI: '${DB_URI}' + DB_HOST: '${DB_HOST}' + DB_USER: '${DB_USER}' + DB_PASS: '${DB_PASS}' + DB_NAME: '${DB_NAME}' + DB_PORT: '${DB_PORT}' + DB_CA_CERT: '${DB_CA_CERT}' + DB_SSL_MODE: '${DB_SSL_MODE}' + DB_POOL_SIZE: '${DB_POOL_SIZE}' + DB_POOL_SIZE_KNEX: '${DB_POOL_SIZE_KNEX}' + REDIS_ENABLED: '${REDIS_ENABLED}' + REDIS_URL: '${REDIS_URL}' + CLOUD_PROVIDER: 'DO' + SENTRY_DSN: '${SENTRY_DSN}' + SENTRY_TRACES_SAMPLE_RATE: '${SENTRY_TRACES_SAMPLE_RATE}' + SENTRY_PROFILE_SAMPLE_RATE: '${SENTRY_PROFILE_SAMPLE_RATE}' + SENTRY_HTTP_TRACING_ENABLED: '${SENTRY_HTTP_TRACING_ENABLED}' + SENTRY_POSTGRES_TRACKING_ENABLED: '${SENTRY_POSTGRES_TRACKING_ENABLED}' + SENTRY_PROFILING_ENABLED: '${SENTRY_PROFILING_ENABLED}' + AWS_ACCESS_KEY_ID: '${AWS_ACCESS_KEY_ID}' + AWS_SECRET_ACCESS_KEY: '${AWS_SECRET_ACCESS_KEY}' + AWS_REGION: '${AWS_REGION}' + AWS_S3_BUCKET: '${AWS_S3_BUCKET}' + WASABI_ACCESS_KEY_ID: '${WASABI_ACCESS_KEY_ID}' + WASABI_SECRET_ACCESS_KEY: '${WASABI_SECRET_ACCESS_KEY}' + WASABI_REGION: '${WASABI_REGION}' + WASABI_SERVICE_URL: '${WASABI_SERVICE_URL}' + WASABI_S3_BUCKET: '${WASABI_S3_BUCKET}' + WASABI_S3_FORCE_PATH_STYLE: '${WASABI_S3_FORCE_PATH_STYLE:-}' + DIGITALOCEAN_ACCESS_KEY_ID: '${DIGITALOCEAN_ACCESS_KEY_ID:-}' + DIGITALOCEAN_SECRET_ACCESS_KEY: '${DIGITALOCEAN_SECRET_ACCESS_KEY:-}' + DIGITALOCEAN_REGION: '${DIGITALOCEAN_REGION:-}' + DIGITALOCEAN_SERVICE_URL: '${DIGITALOCEAN_SERVICE_URL:-}' + DIGITALOCEAN_CDN_URL: '${DIGITALOCEAN_CDN_URL:-}' + DIGITALOCEAN_S3_BUCKET: '${DIGITALOCEAN_S3_BUCKET:-}' + DIGITALOCEAN_S3_FORCE_PATH_STYLE: '${DIGITALOCEAN_S3_FORCE_PATH_STYLE:-}' + EXPRESS_SESSION_SECRET: '${EXPRESS_SESSION_SECRET}' + JWT_SECRET: '${JWT_SECRET}' + JWT_REFRESH_TOKEN_SECRET: '${JWT_REFRESH_TOKEN_SECRET}' + JWT_REFRESH_TOKEN_EXPIRATION_TIME: '${JWT_REFRESH_TOKEN_EXPIRATION_TIME}' + CLOUDINARY_API_KEY: '${CLOUDINARY_API_KEY}' + CLOUDINARY_API_SECRET: '${CLOUDINARY_API_SECRET}' + CLOUDINARY_CLOUD_NAME: '${CLOUDINARY_CLOUD_NAME}' + MAIL_FROM_ADDRESS: '${MAIL_FROM_ADDRESS}' + 
MAIL_HOST: '${MAIL_HOST}' + MAIL_PORT: '${MAIL_PORT}' + MAIL_USERNAME: '${MAIL_USERNAME}' + MAIL_PASSWORD: '${MAIL_PASSWORD}' + ALLOW_SUPER_ADMIN_ROLE: '${ALLOW_SUPER_ADMIN_ROLE}' + GOOGLE_CLIENT_ID: '${GOOGLE_CLIENT_ID}' + GOOGLE_CLIENT_SECRET: '${GOOGLE_CLIENT_SECRET}' + GOOGLE_CALLBACK_URL: '${GOOGLE_CALLBACK_URL}' + FACEBOOK_CLIENT_ID: '${FACEBOOK_CLIENT_ID}' + FACEBOOK_CLIENT_SECRET: '${FACEBOOK_CLIENT_SECRET}' + FACEBOOK_GRAPH_VERSION: '${FACEBOOK_GRAPH_VERSION}' + FACEBOOK_CALLBACK_URL: '${FACEBOOK_CALLBACK_URL}' + INTEGRATED_USER_DEFAULT_PASS: '${INTEGRATED_USER_DEFAULT_PASS}' + UPWORK_REDIRECT_URL: '${UPWORK_REDIRECT_URL}' + FILE_PROVIDER: '${FILE_PROVIDER}' + GAUZY_AI_GRAPHQL_ENDPOINT: '${GAUZY_AI_GRAPHQL_ENDPOINT}' + GAUZY_AI_REST_ENDPOINT: '${GAUZY_AI_REST_ENDPOINT}' + UNLEASH_APP_NAME: '${UNLEASH_APP_NAME}' + UNLEASH_API_URL: '${UNLEASH_API_URL}' + UNLEASH_INSTANCE_ID: '${UNLEASH_INSTANCE_ID}' + UNLEASH_REFRESH_INTERVAL: '${UNLEASH_REFRESH_INTERVAL}' + UNLEASH_METRICS_INTERVAL: '${UNLEASH_METRICS_INTERVAL}' + UNLEASH_API_KEY: '${UNLEASH_API_KEY}' + JITSU_SERVER_URL: '${JITSU_SERVER_URL}' + JITSU_SERVER_WRITE_KEY: '${JITSU_SERVER_WRITE_KEY}' + OTEL_ENABLED: '${OTEL_ENABLED}' + OTEL_PROVIDER: '${OTEL_PROVIDER}' + OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: '${OTEL_EXPORTER_OTLP_TRACES_ENDPOINT}' + OTEL_EXPORTER_OTLP_HEADERS: '${OTEL_EXPORTER_OTLP_HEADERS}' + GAUZY_GITHUB_CLIENT_ID: '${GAUZY_GITHUB_CLIENT_ID}' + GAUZY_GITHUB_CLIENT_SECRET: '${GAUZY_GITHUB_CLIENT_SECRET}' + GAUZY_GITHUB_APP_PRIVATE_KEY: '${GAUZY_GITHUB_APP_PRIVATE_KEY}' + GAUZY_GITHUB_WEBHOOK_URL: '${GAUZY_GITHUB_WEBHOOK_URL}' + GAUZY_GITHUB_WEBHOOK_SECRET: '${GAUZY_GITHUB_WEBHOOK_SECRET}' + GAUZY_GITHUB_APP_NAME: '${GAUZY_GITHUB_APP_NAME}' + GAUZY_GITHUB_REDIRECT_URL: '${GAUZY_GITHUB_REDIRECT_URL}' + GAUZY_GITHUB_POST_INSTALL_URL: '${GAUZY_GITHUB_POST_INSTALL_URL}' + GAUZY_GITHUB_APP_ID: '${GAUZY_GITHUB_APP_ID}' + GAUZY_GITHUB_OAUTH_CLIENT_ID: '${GAUZY_GITHUB_OAUTH_CLIENT_ID}' + GAUZY_GITHUB_OAUTH_CLIENT_SECRET: '${GAUZY_GITHUB_OAUTH_CLIENT_SECRET}' + GAUZY_GITHUB_OAUTH_CALLBACK_URL: '${GAUZY_GITHUB_OAUTH_CALLBACK_URL}' + JITSU_BROWSER_URL: '${JITSU_BROWSER_URL}' + JITSU_BROWSER_WRITE_KEY: '${JITSU_BROWSER_WRITE_KEY}' + MAGIC_CODE_EXPIRATION_TIME: '${MAGIC_CODE_EXPIRATION_TIME}' + APP_NAME: '${APP_NAME}' + APP_LOGO: '${APP_LOGO}' + APP_SIGNATURE: '${APP_SIGNATURE}' + APP_LINK: '${APP_LINK}' + APP_EMAIL_CONFIRMATION_URL: '${APP_EMAIL_CONFIRMATION_URL}' + APP_MAGIC_SIGN_URL: '${APP_MAGIC_SIGN_URL}' + COMPANY_LINK: '${COMPANY_LINK}' + COMPANY_NAME: '${COMPANY_NAME}' + + entrypoint: './entrypoint.prod.sh' + command: ['node', 'main.js'] + restart: on-failure + ports: + - '3000' + networks: + - overlay +volumes: + certificates: {} + +networks: + overlay: + driver: bridge diff --git a/.deploy/ssh/with-letsencrypt/prod/nginx-certbot.env b/.deploy/ssh/with-letsencrypt/prod/nginx-certbot.env new file mode 100644 index 000000000..1e81781c0 --- /dev/null +++ b/.deploy/ssh/with-letsencrypt/prod/nginx-certbot.env @@ -0,0 +1,16 @@ +# Required +CERTBOT_EMAIL="ever@ever.co" + +# Optional (Defaults) +DHPARAM_SIZE=2048 +ELLIPTIC_CURVE=secp256r1 +RENEWAL_INTERVAL=8d +RSA_KEY_SIZE=2048 +STAGING=0 +USE_ECDSA=1 + +# Advanced (Defaults) +CERTBOT_AUTHENTICATOR=webroot +CERTBOT_DNS_PROPAGATION_SECONDS="" +DEBUG=0 +USE_LOCAL_CA=0 diff --git a/.deploy/ssh/with-letsencrypt/prod/user_conf.d/nginx.conf b/.deploy/ssh/with-letsencrypt/prod/user_conf.d/nginx.conf new file mode 100644 index 000000000..4c5299293 --- /dev/null +++ 
b/.deploy/ssh/with-letsencrypt/prod/user_conf.d/nginx.conf @@ -0,0 +1,15 @@ +server { + listen 443 ssl; + server_name apidts.gauzy.co; + ssl_certificate /etc/letsencrypt/live/apidts.gauzy.co/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/apidts.gauzy.co/privkey.pem; + ssl_trusted_certificate /etc/letsencrypt/live/apidts.gauzy.co/chain.pem; + + # Load the Diffie-Hellman parameter. + ssl_dhparam /etc/letsencrypt/dhparams/dhparam.pem; + + location / { + proxy_pass http://api:3000; + } + +} diff --git a/.deploy/ssh/with-letsencrypt/stage/docker-compose.api.stage.letsencrypt.pre.yml b/.deploy/ssh/with-letsencrypt/stage/docker-compose.api.stage.letsencrypt.pre.yml new file mode 100644 index 000000000..3c9b424e8 --- /dev/null +++ b/.deploy/ssh/with-letsencrypt/stage/docker-compose.api.stage.letsencrypt.pre.yml @@ -0,0 +1,25 @@ +version: '3.8' + +services: + proxy: + image: jonasal/nginx-certbot:latest + restart: always + environment: + CERTBOT_EMAIL: 'ever@ever.co' + env_file: + - ./nginx-certbot.env + ports: + - '80:80' + - '443:443' + networks: + - stage_overlay + volumes: + - nginx_secrets:/etc/letsencrypt + - ./user_conf.d:/etc/nginx/user_conf.d +volumes: + nginx_secrets: {} + certificates: {} + +networks: + stage_overlay: + external: true diff --git a/.deploy/ssh/with-letsencrypt/stage/docker-compose.api.stage.template.yml b/.deploy/ssh/with-letsencrypt/stage/docker-compose.api.stage.template.yml new file mode 100644 index 000000000..eca8f6b92 --- /dev/null +++ b/.deploy/ssh/with-letsencrypt/stage/docker-compose.api.stage.template.yml @@ -0,0 +1,126 @@ +version: '3.8' + +services: + api: + image: ghcr.io/ever-co/gauzy-api-stage:latest + deploy: + mode: replicated + replicas: 2 + environment: + API_HOST: '0.0.0.0' + DEMO: '${DEMO}' + NODE_ENV: '${NODE_ENV}' + ADMIN_PASSWORD_RESET: '${ADMIN_PASSWORD_RESET}' + API_BASE_URL: '${API_BASE_URL}' + CLIENT_BASE_URL: '${CLIENT_BASE_URL}' + DB_TYPE: '${DB_TYPE}' + DB_URI: '${DB_URI}' + DB_HOST: '${DB_HOST}' + DB_USER: '${DB_USER}' + DB_PASS: '${DB_PASS}' + DB_NAME: '${DB_NAME}' + DB_PORT: '${DB_PORT}' + DB_CA_CERT: '${DB_CA_CERT}' + DB_SSL_MODE: '${DB_SSL_MODE}' + DB_POOL_SIZE: '${DB_POOL_SIZE}' + DB_POOL_SIZE_KNEX: '${DB_POOL_SIZE_KNEX}' + REDIS_ENABLED: '${REDIS_ENABLED}' + REDIS_URL: '${REDIS_URL}' + CLOUD_PROVIDER: 'DO' + SENTRY_DSN: '${SENTRY_DSN}' + SENTRY_TRACES_SAMPLE_RATE: '${SENTRY_TRACES_SAMPLE_RATE}' + SENTRY_PROFILE_SAMPLE_RATE: '${SENTRY_PROFILE_SAMPLE_RATE}' + SENTRY_HTTP_TRACING_ENABLED: '${SENTRY_HTTP_TRACING_ENABLED}' + SENTRY_POSTGRES_TRACKING_ENABLED: '${SENTRY_POSTGRES_TRACKING_ENABLED}' + SENTRY_PROFILING_ENABLED: '${SENTRY_PROFILING_ENABLED}' + AWS_ACCESS_KEY_ID: '${AWS_ACCESS_KEY_ID}' + AWS_SECRET_ACCESS_KEY: '${AWS_SECRET_ACCESS_KEY}' + AWS_REGION: '${AWS_REGION}' + AWS_S3_BUCKET: '${AWS_S3_BUCKET}' + WASABI_ACCESS_KEY_ID: '${WASABI_ACCESS_KEY_ID}' + WASABI_SECRET_ACCESS_KEY: '${WASABI_SECRET_ACCESS_KEY}' + WASABI_REGION: '${WASABI_REGION}' + WASABI_SERVICE_URL: '${WASABI_SERVICE_URL}' + WASABI_S3_BUCKET: '${WASABI_S3_BUCKET}' + WASABI_S3_FORCE_PATH_STYLE: '${WASABI_S3_FORCE_PATH_STYLE:-}' + DIGITALOCEAN_ACCESS_KEY_ID: '${DIGITALOCEAN_ACCESS_KEY_ID:-}' + DIGITALOCEAN_SECRET_ACCESS_KEY: '${DIGITALOCEAN_SECRET_ACCESS_KEY:-}' + DIGITALOCEAN_REGION: '${DIGITALOCEAN_REGION:-}' + DIGITALOCEAN_SERVICE_URL: '${DIGITALOCEAN_SERVICE_URL:-}' + DIGITALOCEAN_CDN_URL: '${DIGITALOCEAN_CDN_URL:-}' + DIGITALOCEAN_S3_BUCKET: '${DIGITALOCEAN_S3_BUCKET:-}' + DIGITALOCEAN_S3_FORCE_PATH_STYLE: '${DIGITALOCEAN_S3_FORCE_PATH_STYLE:-}' + 
EXPRESS_SESSION_SECRET: '${EXPRESS_SESSION_SECRET}' + JWT_SECRET: '${JWT_SECRET}' + JWT_REFRESH_TOKEN_SECRET: '${JWT_REFRESH_TOKEN_SECRET}' + JWT_REFRESH_TOKEN_EXPIRATION_TIME: '${JWT_REFRESH_TOKEN_EXPIRATION_TIME}' + CLOUDINARY_API_KEY: '${CLOUDINARY_API_KEY}' + CLOUDINARY_API_SECRET: '${CLOUDINARY_API_SECRET}' + CLOUDINARY_CLOUD_NAME: '${CLOUDINARY_CLOUD_NAME}' + MAIL_FROM_ADDRESS: '${MAIL_FROM_ADDRESS}' + MAIL_HOST: '${MAIL_HOST}' + MAIL_PORT: '${MAIL_PORT}' + MAIL_USERNAME: '${MAIL_USERNAME}' + MAIL_PASSWORD: '${MAIL_PASSWORD}' + ALLOW_SUPER_ADMIN_ROLE: '${ALLOW_SUPER_ADMIN_ROLE}' + GOOGLE_CLIENT_ID: '${GOOGLE_CLIENT_ID}' + GOOGLE_CLIENT_SECRET: '${GOOGLE_CLIENT_SECRET}' + GOOGLE_CALLBACK_URL: '${GOOGLE_CALLBACK_URL}' + FACEBOOK_CLIENT_ID: '${FACEBOOK_CLIENT_ID}' + FACEBOOK_CLIENT_SECRET: '${FACEBOOK_CLIENT_SECRET}' + FACEBOOK_GRAPH_VERSION: '${FACEBOOK_GRAPH_VERSION}' + FACEBOOK_CALLBACK_URL: '${FACEBOOK_CALLBACK_URL}' + INTEGRATED_USER_DEFAULT_PASS: '${INTEGRATED_USER_DEFAULT_PASS}' + UPWORK_REDIRECT_URL: '${UPWORK_REDIRECT_URL}' + FILE_PROVIDER: '${FILE_PROVIDER}' + GAUZY_AI_GRAPHQL_ENDPOINT: '${GAUZY_AI_GRAPHQL_ENDPOINT}' + GAUZY_AI_REST_ENDPOINT: '${GAUZY_AI_REST_ENDPOINT}' + UNLEASH_APP_NAME: '${UNLEASH_APP_NAME}' + UNLEASH_API_URL: '${UNLEASH_API_URL}' + UNLEASH_INSTANCE_ID: '${UNLEASH_INSTANCE_ID}' + UNLEASH_REFRESH_INTERVAL: '${UNLEASH_REFRESH_INTERVAL}' + UNLEASH_METRICS_INTERVAL: '${UNLEASH_METRICS_INTERVAL}' + UNLEASH_API_KEY: '${UNLEASH_API_KEY}' + JITSU_SERVER_URL: '${JITSU_SERVER_URL}' + JITSU_SERVER_WRITE_KEY: '${JITSU_SERVER_WRITE_KEY}' + OTEL_ENABLED: '${OTEL_ENABLED}' + OTEL_PROVIDER: '${OTEL_PROVIDER}' + OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: '${OTEL_EXPORTER_OTLP_TRACES_ENDPOINT}' + OTEL_EXPORTER_OTLP_HEADERS: '${OTEL_EXPORTER_OTLP_HEADERS}' + GAUZY_GITHUB_CLIENT_ID: '${GAUZY_GITHUB_CLIENT_ID}' + GAUZY_GITHUB_CLIENT_SECRET: '${GAUZY_GITHUB_CLIENT_SECRET}' + GAUZY_GITHUB_APP_PRIVATE_KEY: '${GAUZY_GITHUB_APP_PRIVATE_KEY}' + GAUZY_GITHUB_WEBHOOK_URL: '${GAUZY_GITHUB_WEBHOOK_URL}' + GAUZY_GITHUB_WEBHOOK_SECRET: '${GAUZY_GITHUB_WEBHOOK_SECRET}' + GAUZY_GITHUB_APP_NAME: '${GAUZY_GITHUB_APP_NAME}' + GAUZY_GITHUB_REDIRECT_URL: '${GAUZY_GITHUB_REDIRECT_URL}' + GAUZY_GITHUB_POST_INSTALL_URL: '${GAUZY_GITHUB_POST_INSTALL_URL}' + GAUZY_GITHUB_APP_ID: '${GAUZY_GITHUB_APP_ID}' + GAUZY_GITHUB_OAUTH_CLIENT_ID: '${GAUZY_GITHUB_OAUTH_CLIENT_ID}' + GAUZY_GITHUB_OAUTH_CLIENT_SECRET: '${GAUZY_GITHUB_OAUTH_CLIENT_SECRET}' + GAUZY_GITHUB_OAUTH_CALLBACK_URL: '${GAUZY_GITHUB_OAUTH_CALLBACK_URL}' + JITSU_BROWSER_URL: '${JITSU_BROWSER_URL}' + JITSU_BROWSER_WRITE_KEY: '${JITSU_BROWSER_WRITE_KEY}' + MAGIC_CODE_EXPIRATION_TIME: '${MAGIC_CODE_EXPIRATION_TIME}' + APP_NAME: '${APP_NAME}' + APP_LOGO: '${APP_LOGO}' + APP_SIGNATURE: '${APP_SIGNATURE}' + APP_LINK: '${APP_LINK}' + APP_EMAIL_CONFIRMATION_URL: '${APP_EMAIL_CONFIRMATION_URL}' + APP_MAGIC_SIGN_URL: '${APP_MAGIC_SIGN_URL}' + COMPANY_LINK: '${COMPANY_LINK}' + COMPANY_NAME: '${COMPANY_NAME}' + + entrypoint: './entrypoint.prod.sh' + command: ['node', 'main.js'] + restart: on-failure + ports: + - '3000' + networks: + - overlay +volumes: + certificates: {} + +networks: + overlay: + driver: bridge diff --git a/.deploy/ssh/with-letsencrypt/stage/nginx-certbot.env b/.deploy/ssh/with-letsencrypt/stage/nginx-certbot.env new file mode 100644 index 000000000..1e81781c0 --- /dev/null +++ b/.deploy/ssh/with-letsencrypt/stage/nginx-certbot.env @@ -0,0 +1,16 @@ +# Required +CERTBOT_EMAIL="ever@ever.co" + +# Optional (Defaults) +DHPARAM_SIZE=2048 
+ELLIPTIC_CURVE=secp256r1 +RENEWAL_INTERVAL=8d +RSA_KEY_SIZE=2048 +STAGING=0 +USE_ECDSA=1 + +# Advanced (Defaults) +CERTBOT_AUTHENTICATOR=webroot +CERTBOT_DNS_PROPAGATION_SECONDS="" +DEBUG=0 +USE_LOCAL_CA=0 diff --git a/.deploy/ssh/with-letsencrypt/stage/user_conf.d/nginx.conf b/.deploy/ssh/with-letsencrypt/stage/user_conf.d/nginx.conf new file mode 100644 index 000000000..301fdf0b3 --- /dev/null +++ b/.deploy/ssh/with-letsencrypt/stage/user_conf.d/nginx.conf @@ -0,0 +1,15 @@ +server { + listen 443 ssl; + server_name apistagedts.gauzy.co; + ssl_certificate /etc/letsencrypt/live/apistagedts.gauzy.co/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/apistagedts.gauzy.co/privkey.pem; + ssl_trusted_certificate /etc/letsencrypt/live/apistagedts.gauzy.co/chain.pem; + + # Load the Diffie-Hellman parameter. + ssl_dhparam /etc/letsencrypt/dhparams/dhparam.pem; + + location / { + proxy_pass http://api:3000; + } + +} diff --git a/.env.compose b/.env.compose new file mode 100644 index 000000000..0f9f93b75 --- /dev/null +++ b/.env.compose @@ -0,0 +1,6 @@ +# Docker Compose sample .env file for Production + +NODE_ENV=production + +# set true if running inside Docker container +IS_DOCKER=true diff --git a/.env.demo.compose b/.env.demo.compose new file mode 100644 index 000000000..0f9f93b75 --- /dev/null +++ b/.env.demo.compose @@ -0,0 +1,6 @@ +# Docker Compose sample .env file for Production + +NODE_ENV=production + +# set true if running inside Docker container +IS_DOCKER=true diff --git a/.env.docker b/.env.docker new file mode 100644 index 000000000..0f9f93b75 --- /dev/null +++ b/.env.docker @@ -0,0 +1,6 @@ +# Docker Compose sample .env file for Production + +NODE_ENV=production + +# set true if running inside Docker container +IS_DOCKER=true diff --git a/.gitignore b/.gitignore index 73bd06141..387c89f2d 100644 --- a/.gitignore +++ b/.gitignore @@ -8,9 +8,14 @@ # lerna files /lerna-debug.log +# sqlite3 database +*.sqlite3 +*.sqlite3-journal +*.sqlite3-shm +*.sqlite3-wal # dependencies -/node_modules +node_modules/ /.pnp .pnp.js .idea @@ -39,10 +44,10 @@ /build # misc -.DS_Store -*.pem - -# misc +/.sass-cache +/connect.lock +/coverage +/libpeerconnection.log npm-debug.log* yarn-debug.log* yarn-error.log* @@ -63,6 +68,7 @@ testem.log # System Files .DS_Store Thumbs.db +*.pem # Cache File /.cache @@ -95,3 +101,11 @@ apps/server-web/src/assets/icons/desktop_logo_512x512.png # Generated desktop icon menu apps/server-web/src/assets/icons/menu + +megalinter-reports/ + +/.deploy/jitsu/configurator/data/logs/*.log +/.deploy/jitsu/server/data/logs/*.log +/.deploy/redis/data/*.rdb +/.deploy/redis/jitsu_users_recognition/data/*.rdb +/.deploy/jitsu/server/data/logs/events diff --git a/README.md b/README.md index 5fdf6a7fc..4dcd098b9 100644 --- a/README.md +++ b/README.md @@ -92,16 +92,54 @@ Please refer to our official [Platform Documentation](https://docs.ever.team) (W -### Quick Start with our public live APIs +### Run with Docker Compose + +- Clone repo. +- Make sure you have the latest Docker Compose [installed locally](https://docs.docker.com/compose/install). Important: you need a minimum [v2.20](https://docs.docker.com/compose/release-notes/#2200). +- Run `docker-compose -f docker-compose.demo.yml up`, if you want to run the platform in basic configuration (e.g. for Demo / explore functionality / quick run) using our prebuilt Docker images. Check `.env.demo.compose` file for different settings (optionally). 
_(Note: Docker Compose will use the latest images, pre-built automatically from the head of the `master` branch using GitHub CI/CD.)_ +- Run `docker-compose up`, if you want to run the platform in production configuration using our prebuilt Docker images. Check the `.env.compose` file for different settings (optional). _(Note: Docker Compose will use the latest images, pre-built automatically from the head of the `master` branch using GitHub CI/CD.)_ +- Run `docker-compose -f docker-compose.build.yml up`, if you want to build everything (code and Docker images) locally. Check the `.env.compose` file for different settings (optional). _(Note: this can be a long process because it builds the whole platform locally. The other options above are much faster!)_ +- :coffee: time... It might take some time for the first Docker Compose run, even if you used prebuilt Docker images. +- Open in your browser, register a new account, and start using Ever Teams! +- Enjoy! + +_Notes:_ +- _You can execute the `docker-compose` command with the `-d` option to run it in "detached" mode (containers run in the background, separate from the terminal)._ +- _By default, the Ever Teams web frontend connects to our production [Ever Gauzy](https://github.com/ever-co/ever-gauzy) API endpoint. You can change it in the environment variables `GAUZY_API_SERVER_URL` and `NEXT_PUBLIC_GAUZY_API_SERVER_URL`; see more in the section about how to run with a Self-hosted Backend._ + +### Run with Docker + +#### Build & Run + +Run with Public Images: +- You can pull our public Docker image with the `docker pull everco/ever-teams-webapp` command. +- You can run the Docker image with the following command: `docker run -p 127.0.0.1:3030:3030/tcp everco/ever-teams-webapp`. +- Open in your browser, register a new account, and start using Ever Teams! + +_Note: To build such images on each release (push to our master branch), we use the relevant [GitHub Action](https://github.com/ever-co/ever-teams/blob/develop/.github/workflows/docker-build-publish-prod.yml)._ + +Build and Run Locally: +- If you want to build an image locally from our source code (after cloning the repo locally), please run the following command (from the root of the mono-repo): `docker build . -t ever-teams-webapp -f Dockerfile`. +- To run the locally built image, please run the following command: `docker run -p 127.0.0.1:3030:3030/tcp ever-teams-webapp`. +- Open in your browser, register a new account, and start using Ever Teams! + +_Note: By default, the Ever Teams web frontend connects to our production [Ever Gauzy](https://github.com/ever-co/ever-gauzy) API endpoint. You can change it in the environment variables `GAUZY_API_SERVER_URL` and `NEXT_PUBLIC_GAUZY_API_SERVER_URL`; see more in the section about how to run with a Self-hosted Backend._ + +#### Images + +We publish Ever Teams Docker images at: +- https://hub.docker.com/u/everco?page=1&search=ever-teams +- https://github.com/orgs/ever-co/packages?tab=packages&q=ever-teams + +### Quick Start to manually build & run locally 1. Clone this repo 2. Run `yarn install` 3. Run `yarn build:web && yarn start:web` OR `yarn start:web:dev` 4. Open in your browser -Notes: - -- by default, Ever Teams web frontend will be connected to our production [Ever Gauzy API](https://github.com/ever-co/ever-gauzy) API endpoint . You can change it in environment variables `GAUZY_API_SERVER_URL` and `NEXT_PUBLIC_GAUZY_API_SERVER_URL`, see below how to run with a Self-hosted Backend.
+_Notes:_ +- _By default, the Ever Teams web frontend connects to our production [Ever Gauzy](https://github.com/ever-co/ever-gauzy) API endpoint. You can change it in the environment variables `GAUZY_API_SERVER_URL` and `NEXT_PUBLIC_GAUZY_API_SERVER_URL`; see below for how to run with a Self-hosted Backend._ ### Run with a Self-hosted Backend @@ -128,10 +166,6 @@ DevContainers for VSCode are supported (WIP). [Click here to get started.](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/ever-co/ever-teams) -### Run in Docker & Docker Compose - -WIP ## 🚗 Self Hosting ### DigitalOcean diff --git a/apps/web/app/[locale]/all-teams/component.tsx index a62530752..4e0d8a149 100644 --- a/apps/web/app/[locale]/all-teams/component.tsx +++ b/apps/web/app/[locale]/all-teams/component.tsx @@ -39,19 +39,21 @@ function AllTeamsPage() { showTimer={isTrackingEnabled} className="items-start" mainHeaderSlot={ -
-
- -
-
- + +
+
+ +
+
+ +
+
-
-
- {view == IssuesView.CARDS && } -
+ {view == IssuesView.CARDS && } +
+ } > diff --git a/apps/web/app/[locale]/layout.tsx b/apps/web/app/[locale]/layout.tsx index 7353f53b4..85dc858c7 100644 --- a/apps/web/app/[locale]/layout.tsx +++ b/apps/web/app/[locale]/layout.tsx @@ -17,6 +17,7 @@ import { PropsWithChildren, useEffect } from 'react'; import { useCheckAPI } from '@app/hooks/useCheckAPI'; import GlobalSkeleton from '@components/ui/global-skeleton'; +import OfflineWrapper from '@components/offline-wrapper'; import { JitsuOptions } from '@jitsu/jitsu-react/dist/useJitsu'; import { PHProvider } from './integration/posthog/provider'; @@ -145,14 +146,16 @@ const LocaleLayout = ({ children, params: { locale }, pageProps }: PropsWithChil enableSystem disableTransitionOnChange > - {loading && !pathname?.startsWith('/auth') ? ( - - ) : ( - <> - - {children} - - )} + + {loading && !pathname?.startsWith('/auth') ? ( + + ) : ( + <> + + {children} + + )} + diff --git a/apps/web/app/[locale]/page-component.tsx b/apps/web/app/[locale]/page-component.tsx index 2a1ab3151..f8c5f696d 100644 --- a/apps/web/app/[locale]/page-component.tsx +++ b/apps/web/app/[locale]/page-component.tsx @@ -2,7 +2,7 @@ 'use client'; import React, { useEffect, useState } from 'react'; -import { useOrganizationTeams, useTimerView } from '@app/hooks'; +import { useOrganizationTeams } from '@app/hooks'; import { clsxm } from '@app/utils'; import NoTeam from '@components/pages/main/no-team'; import { withAuthentication } from 'lib/app/authenticator'; @@ -10,8 +10,6 @@ import { Breadcrumb, Card, Container } from 'lib/components'; import { AuthUserTaskInput, TeamInvitations, TeamMembers, Timer, UnverifiedEmail } from 'lib/features'; import { MainLayout } from 'lib/layout'; import { IssuesView } from '@app/constants'; -import { useNetworkState } from '@uidotdev/usehooks'; -import Offline from '@components/pages/offline'; import { useTranslations } from 'next-intl'; import { Analytics } from '@vercel/analytics/react'; @@ -34,7 +32,6 @@ function MainPage() { const t = useTranslations(); const [headerSize] = useState(10); const { isTeamMember, isTrackingEnabled, activeTeam } = useOrganizationTeams(); - const { timerStatus } = useTimerView(); const [fullWidth, setFullWidth] = useAtom(fullWidthState); const [view, setView] = useAtom(headerTabs); @@ -44,7 +41,7 @@ function MainPage() { { title: activeTeam?.name || '', href: '/' }, { title: t(`common.${view}`), href: `/` } ]; - const { online } = useNetworkState(); + useEffect(() => { if (view == IssuesView.KANBAN && path == '/') { setView(IssuesView.CARDS); @@ -57,13 +54,10 @@ function MainPage() { setFullWidth(JSON.parse(window?.localStorage.getItem('conf-fullWidth-mode') || 'true')); }, [setFullWidth]); - if (!online) { - return ; - } return ( <>
- {/*
*/} + {/*
*/}
-
+
-
+
@@ -100,7 +94,7 @@ function MainPage() { footerClassName={clsxm('')} > -
{isTeamMember ? +
{isTeamMember ? diff --git a/apps/web/app/[locale]/settings/personal/page.tsx b/apps/web/app/[locale]/settings/personal/page.tsx index 63bb4889d..cc38d86e4 100644 --- a/apps/web/app/[locale]/settings/personal/page.tsx +++ b/apps/web/app/[locale]/settings/personal/page.tsx @@ -6,6 +6,7 @@ import { Accordian } from 'lib/components/accordian'; import Link from 'next/link'; import { useTranslations } from 'next-intl'; import { SyncZone } from 'lib/settings/sync.zone'; +import { WorkingHours } from '@/lib/settings/working-hours'; const Personal = () => { const t = useTranslations(); @@ -28,6 +29,12 @@ const Personal = () => { + + + * updateFormState('taskId', value.id)} + onChange={(value) => { + updateFormState('taskId', value) + }} classNameGroup='h-[40vh]' ariaLabel='Task issues' className='w-full font-medium' diff --git a/apps/web/app/[locale]/timesheet/[memberId]/components/CalendarView.tsx b/apps/web/app/[locale]/timesheet/[memberId]/components/CalendarView.tsx index 57a3f0bea..a3c003e5a 100644 --- a/apps/web/app/[locale]/timesheet/[memberId]/components/CalendarView.tsx +++ b/apps/web/app/[locale]/timesheet/[memberId]/components/CalendarView.tsx @@ -11,8 +11,9 @@ import { cn } from "@/lib/utils"; import MonthlyTimesheetCalendar from "./MonthlyTimesheetCalendar"; import { useTimelogFilterOptions } from "@/app/hooks"; import WeeklyTimesheetCalendar from "./WeeklyTimesheetCalendar"; -import { IUser } from "@/app/interfaces"; +import { IUser, TimesheetLog } from "@/app/interfaces"; import TimesheetSkeleton from "@components/shared/skeleton/TimesheetSkeleton"; +import { Checkbox } from "@components/ui/checkbox"; interface BaseCalendarDataViewProps { t: TranslationHooks data: GroupedTimesheet[]; @@ -51,7 +52,6 @@ export function CalendarView({ data, loading, user }: { data?: GroupedTimesheet[
); } - return (
{(() => { @@ -70,7 +70,7 @@ export function CalendarView({ data, loading, user }: { data?: GroupedTimesheet[ } const CalendarDataView = ({ data, t }: { data?: GroupedTimesheet[], t: TranslationHooks }) => { - const { getStatusTimesheet } = useTimesheet({}); + const { getStatusTimesheet, handleSelectRowTimesheet, selectTimesheetId } = useTimesheet({}); return (
@@ -134,9 +134,9 @@ const CalendarDataView = ({ data, t }: { data?: GroupedTimesheet[], t: Translati }} className={cn( - 'border-l-4 rounded-l flex flex-col p-2 gap-2 items-start space-x-4 ', + 'group/item border-l-4 rounded-l flex flex-col p-2 gap-2 items-start space-x-4', )}> -
+
-
- {task.project?.imageUrl && ( - - )} - - {task.project?.name ?? 'No Project'} - +
+
+ {task.project?.imageUrl && ( + + )} + + {task.project?.name ?? 'No Project'} + +
+
))} @@ -188,7 +194,7 @@ const CalendarDataView = ({ data, t }: { data?: GroupedTimesheet[], t: Translati } const BaseCalendarDataView = ({ data, daysLabels, t, CalendarComponent }: BaseCalendarDataViewProps) => { - const { getStatusTimesheet } = useTimesheet({}); + const { getStatusTimesheet, handleSelectRowTimesheet, selectTimesheetId } = useTimesheet({}); return (
@@ -263,16 +269,23 @@ const BaseCalendarDataView = ({ data, daysLabels, t, CalendarComponent }: BaseCa dash taskNumberClassName="text-sm" /> -
- {task.project?.imageUrl && ( - - )} - - {task.project?.name ?? 'No Project'} - +
+
+ {task.project?.imageUrl && ( + + )} + + {task.project?.name ?? 'No Project'} + +
+
))} @@ -299,3 +312,14 @@ const MonthlyCalendarDataView = (props: { data: GroupedTimesheet[], t: Translati const WeeklyCalendarDataView = (props: { data: GroupedTimesheet[], t: TranslationHooks, daysLabels?: string[] }) => ( ); + + +export const CheckBoxTimesheet = ({ selectTimesheetId, timesheet, handleSelectRowTimesheet }: { selectTimesheetId: TimesheetLog[], timesheet: TimesheetLog, handleSelectRowTimesheet: (items: TimesheetLog) => void }) => { + return <Checkbox + onCheckedChange={() => handleSelectRowTimesheet(timesheet)} + checked={selectTimesheetId.includes(timesheet)} + /> +} diff --git a/apps/web/app/[locale]/timesheet/[memberId]/components/EditTaskModal.tsx index 66a19e4de..14ad3f596 100644 --- a/apps/web/app/[locale]/timesheet/[memberId]/components/EditTaskModal.tsx +++ b/apps/web/app/[locale]/timesheet/[memberId]/components/EditTaskModal.tsx @@ -193,7 +193,9 @@ export function EditTaskModal({ isOpen, closeModal, dataTimesheet }: IEditTaskMo className="border border-transparent hover:border-transparent dark:hover:border-transparent" options={activeTeam?.members || []} value={timesheetData.employeeId} - onChange={(value) => setTimesheetData({ ...timesheetData, employeeId: value.employeeId })} + onChange={(value) => { + setTimesheetData({ ...timesheetData, employeeId: value }) + }} renderOption={(option) => (
{option.employee.fullName} diff --git a/apps/web/app/[locale]/timesheet/[memberId]/components/RejectSelectedModal.tsx b/apps/web/app/[locale]/timesheet/[memberId]/components/RejectSelectedModal.tsx index 6e82b567b..80ff2f1f2 100644 --- a/apps/web/app/[locale]/timesheet/[memberId]/components/RejectSelectedModal.tsx +++ b/apps/web/app/[locale]/timesheet/[memberId]/components/RejectSelectedModal.tsx @@ -1,6 +1,7 @@ import { useTimesheet } from '@/app/hooks/features/useTimesheet'; import { clsxm } from '@/app/utils'; import { Modal } from '@/lib/components'; +import { ReloadIcon } from '@radix-ui/react-icons'; import { useTranslations } from 'next-intl'; import { useState } from 'react'; export interface IRejectSelectedModalProps { @@ -33,7 +34,7 @@ export function RejectSelectedModal({ }: IRejectSelectedModalProps) { const [isSubmitting, setIsSubmitting] = useState(false); const [reason, setReason] = useState(''); - const { updateTimesheetStatus, setSelectTimesheetId } = useTimesheet({}); + const { updateTimesheetStatus, loadingUpdateTimesheetStatus, setSelectTimesheetId } = useTimesheet({}); const t = useTranslations(); const handleSubmit = async (e: React.FormEvent) => { @@ -44,8 +45,8 @@ export function RejectSelectedModal({ status: 'DENIED', ids: selectTimesheetId || [], }).then(() => { - closeModal(); setSelectTimesheetId([]) + closeModal(); }).catch((error) => console.error(error)); } finally { setIsSubmitting(false); @@ -97,7 +98,7 @@ export function RejectSelectedModal({
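In `RejectSelectedModal` above, the PR reorders the success path so the shared selection is cleared before the modal closes. A minimal sketch of that submit flow, assuming the prop shapes visible in this diff (`updateTimesheetStatus`, `setSelectTimesheetId`, `closeModal`); the real handler also tracks `isSubmitting` and the rejection `reason`, and the parameter types here are simplified stand-ins:

```ts
// Sketch only: mirrors the handleSubmit logic in RejectSelectedModal.
// The parameter types are simplified stand-ins for the hook/prop shapes.
async function submitRejection(
  ids: string[],
  updateTimesheetStatus: (params: { status: 'DENIED'; ids: string[] }) => Promise<void>,
  setSelectTimesheetId: (ids: string[]) => void,
  closeModal: () => void
): Promise<void> {
  try {
    await updateTimesheetStatus({ status: 'DENIED', ids });
    // Clear the selection before closing: once closeModal() unmounts the
    // modal, later state updates would target an unmounted component.
    setSelectTimesheetId([]);
    closeModal();
  } catch (error) {
    console.error(error);
  }
}
```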
diff --git a/apps/web/app/[locale]/timesheet/[memberId]/components/SelectionBar.tsx b/apps/web/app/[locale]/timesheet/[memberId]/components/SelectionBar.tsx index 5cdf3bfa1..bac09e440 100644 --- a/apps/web/app/[locale]/timesheet/[memberId]/components/SelectionBar.tsx +++ b/apps/web/app/[locale]/timesheet/[memberId]/components/SelectionBar.tsx @@ -1,5 +1,7 @@ +import { ID, TimesheetLog, TimesheetStatus } from "@/app/interfaces"; import { cn } from "@/lib/utils"; import { useTranslations } from "next-intl"; +import { useCallback } from "react"; type ActionButtonProps = { label: string; @@ -70,3 +72,77 @@ export const SelectionBar = ({
) } + + +interface SelectedTimesheetProps { + selectTimesheetId: TimesheetLog[]; + updateTimesheetStatus: ({ status, ids }: { status: TimesheetStatus, ids: ID[] | ID }) => Promise; + deleteTaskTimesheet: ({ logIds }: { logIds: string[] }) => Promise; + setSelectTimesheetId: React.Dispatch>; + fullWidth: boolean; +} + + +/** + * SelectedTimesheet + * + * A component that renders a selection bar to handle tasks in the timesheet. + * It provides buttons to approve, reject, delete and clear the selected tasks. + * + * @param selectTimesheetId - The selected timesheet logs. + * @param updateTimesheetStatus - A function to update the status of the selected timesheet logs. + * @param deleteTaskTimesheet - A function to delete the selected timesheet logs. + * @param setSelectTimesheetId - A function to set the selected timesheet logs. + * @param fullWidth - A boolean to indicate if the component should be rendered in full width. + * @returns {React.ReactElement} - The rendered timesheet component. + */ +export const SelectedTimesheet: React.FC = ({ selectTimesheetId, updateTimesheetStatus, deleteTaskTimesheet, setSelectTimesheetId, fullWidth }) => { + const handleApprove = useCallback(async () => { + try { + updateTimesheetStatus({ + status: 'APPROVED', + ids: selectTimesheetId.map((select) => select.timesheet.id).filter((id) => id !== undefined) + }).then(() => { + setSelectTimesheetId([]); + }); + } catch (error) { + console.error(error); + } + }, [selectTimesheetId, updateTimesheetStatus]); + + const handleReject = useCallback(async () => { + try { + updateTimesheetStatus({ + status: 'DENIED', + ids: selectTimesheetId.map((select) => select.timesheet.id).filter((id) => id !== undefined) + }).then(() => { + setSelectTimesheetId([]); + }); + } catch (error) { + console.error(error); + } + }, [selectTimesheetId, updateTimesheetStatus]); + + const handleDelete = useCallback(async () => { + try { + deleteTaskTimesheet({ + logIds: selectTimesheetId?.map((select) => select.timesheet.id).filter((id) => id !== undefined) + }).then(() => { + setSelectTimesheetId([]); + }); + } catch (error) { + console.error(error); + } + }, [selectTimesheetId, deleteTaskTimesheet, setSelectTimesheetId]); + + return ( + setSelectTimesheetId([])} + fullWidth={fullWidth} + /> + ) +} diff --git a/apps/web/app/[locale]/timesheet/[memberId]/components/TimesheetFilter.tsx b/apps/web/app/[locale]/timesheet/[memberId]/components/TimesheetFilter.tsx index d4cd7aba5..61c353a79 100644 --- a/apps/web/app/[locale]/timesheet/[memberId]/components/TimesheetFilter.tsx +++ b/apps/web/app/[locale]/timesheet/[memberId]/components/TimesheetFilter.tsx @@ -41,7 +41,7 @@ export function TimesheetFilter({ closeModal, isOpen, openModal, t, initDate, fi
- + {isManage && ( <> diff --git a/apps/web/app/[locale]/timesheet/[memberId]/components/TimesheetFilterDate.tsx b/apps/web/app/[locale]/timesheet/[memberId]/components/TimesheetFilterDate.tsx index 83e1f8a91..c0052aaab 100644 --- a/apps/web/app/[locale]/timesheet/[memberId]/components/TimesheetFilterDate.tsx +++ b/apps/web/app/[locale]/timesheet/[memberId]/components/TimesheetFilterDate.tsx @@ -5,13 +5,14 @@ import { DatePicker } from '@components/ui/DatePicker'; import { Button } from '@components/ui/button'; import { Popover, PopoverContent, PopoverTrigger } from '@components/ui/popover'; import { CalendarIcon } from '@radix-ui/react-icons'; -import { format } from 'date-fns'; +import { format, isAfter, isToday, startOfToday } from 'date-fns'; import { TranslationHooks } from 'next-intl'; import { MdKeyboardArrowRight } from 'react-icons/md'; import { PiCalendarDotsThin } from 'react-icons/pi'; import React, { Dispatch, useEffect, useState, SetStateAction, useCallback, useMemo, memo } from 'react'; import moment from 'moment'; import { ChevronDown } from 'lucide-react'; +import { TimesheetLog } from '@/app/interfaces'; interface DatePickerInputProps { @@ -25,6 +26,7 @@ export interface TimesheetFilterDateProps { minDate?: Date; maxDate?: Date; t: TranslationHooks; + data?: TimesheetLog[] } export function TimesheetFilterDate({ @@ -32,12 +34,25 @@ export function TimesheetFilterDate({ initialRange, minDate, maxDate, + data, t }: Readonly) { - const [dateRange, setDateRange] = React.useState<{ from: Date | null; to: Date | null }>({ - from: initialRange?.from ?? new Date(), - to: initialRange?.to ?? new Date() - }); + const today = startOfToday() + + const adjustedInitialRange = React.useMemo(() => { + if (!initialRange) { + return { + from: today, + to: today, + } + } + return { + from: initialRange.from, + to: initialRange.to && isAfter(initialRange.to, today) ? today : initialRange.to, + } + }, [initialRange, today]) + + const [dateRange, setDateRange] = React.useState<{ from: Date | null; to: Date | null }>(adjustedInitialRange); const [isVisible, setIsVisible] = useState(false); @@ -49,6 +64,7 @@ export function TimesheetFilterDate({ onChange?.({ ...dateRange, from: fromDate }); }; + const handleToChange = (toDate: Date | null) => { if (dateRange.from && toDate && toDate < dateRange.from) { return; @@ -129,12 +145,13 @@ export function TimesheetFilterDate({ {isVisible && (
- +
@@ -215,14 +232,17 @@ export function DatePickerFilter({ date, setDate, minDate, - maxDate + maxDate, + timesheet, }: { label: string; date: Date | null; setDate: (date: Date | null) => void; minDate?: Date | null; maxDate?: Date | null; + timesheet?: TimesheetLog[], }) { + const isDateDisabled = React.useCallback( (date: Date) => { if (minDate && date < minDate) return true; @@ -232,14 +252,64 @@ export function DatePickerFilter({ [minDate, maxDate] ); + const datesWithEntries = React.useMemo(() => { + return new Set(timesheet?.map((entry) => { + if (!entry.timesheet?.createdAt) { + console.warn('Skipping entry with missing timesheet or createdAt:', entry); + return null; + } + return format(new Date(entry.timesheet.createdAt), "yyyy-MM-dd"); + }).filter(Boolean)); + }, [timesheet]) + + const entriesByDate = React.useMemo(() => { + const map = new Map(); + timesheet?.forEach(entry => { + if (!entry.timesheet?.createdAt) { + console.warn('Skipping entry with missing timesheet or createdAt:', entry); + return; + } + const dateKey = format(new Date(entry.timesheet.createdAt), "yyyy-MM-dd"); + if (!map.has(dateKey)) { + map.set(dateKey, []); + } + map.get(dateKey)?.push(entry); + }); + return map; + }, [timesheet]); + + const getEntriesForDate = (date: Date) => { + const dateKey = format(date, "yyyy-MM-dd"); + return entriesByDate.get(dateKey) || []; + }; + const hasTimeEntry = (date: Date) => { + return datesWithEntries.has(format(date, "yyyy-MM-dd")) + } + + + const handleSelect = (day: Date) => { + if (day && !isDateDisabled(day)) { + setDate(day); + } + }; + return (
} mode="single" @@ -247,26 +317,72 @@ export function DatePickerFilter({ initialFocus defaultMonth={date ?? new Date()} selected={date ?? new Date()} - onSelect={(selectedDate) => { - if (selectedDate && !isDateDisabled(selectedDate)) { - setDate(selectedDate); - } + onSelect={(date) => date && handleSelect(date)} + modifiers={{ + hasEntry: (date) => hasTimeEntry(date), + today: (day) => isToday(day), }} modifiersClassNames={{ - booked: clsxm( - 'relative after:absolute after:bottom-0 after:left-1/2 after:-translate-x-1/2 after:w-1.5 after:h-1.5 after:bg-primary after:rounded-full' - ), - selected: clsxm('bg-primary after:hidden text-white !rounded-full'), - pastDay: clsxm( - 'relative after:absolute after:bottom-0 after:left-1/2 after:-translate-x-1/2 after:w-1.5 after:h-1.5 after:bg-yellow-600 after:rounded-full' - ), - today: clsxm('border-2 !border-yellow-700 rounded') + selected: clsxm("bg-primary after:hidden text-white !rounded-full"), + today: clsxm("border-2 !border-yellow-700 rounded"), + }} + disabled={[ + ...(minDate ? [{ before: minDate }] : []), + ...(maxDate ? [{ after: maxDate }] : []), + { + before: new Date(new Date().getFullYear(), new Date().getMonth(), 1), + }, + ]} + components={{ + Day: ({ date: dayDate, ...props }) => { + const isSelected = date?.getTime() === dayDate.getTime(); + + const isDayDisabled = isDateDisabled(dayDate); + return ( + + ); + }, }} - disabled={[...(minDate ? [{ before: minDate }] : []), ...(maxDate ? [{ after: maxDate }] : [])]} />
); } +const DayIndicators = ({ entries }: { entries: TimesheetLog[] }) => { + if (entries.length === 1) { + return ; + } + return ( +
+ {[...Array(3)].map((_, index) => ( + + ))} +
+ ); +}; + interface ICalendarProps { setSelectedPlan: Dispatch>; diff --git a/apps/web/app/[locale]/timesheet/[memberId]/page.tsx b/apps/web/app/[locale]/timesheet/[memberId]/page.tsx index d79febb07..5b3156f80 100644 --- a/apps/web/app/[locale]/timesheet/[memberId]/page.tsx +++ b/apps/web/app/[locale]/timesheet/[memberId]/page.tsx @@ -14,7 +14,7 @@ import { fullWidthState } from '@app/stores/fullWidth'; import { useAtomValue } from 'jotai'; import { ArrowLeftIcon } from 'assets/svg'; -import { CalendarView, CalendarViewIcon, FilterStatus, ListViewIcon, MemberWorkIcon, MenHoursIcon, PendingTaskIcon, TimesheetCard, TimesheetFilter, TimesheetView } from './components'; +import { CalendarView, CalendarViewIcon, FilterStatus, ListViewIcon, MemberWorkIcon, MenHoursIcon, PendingTaskIcon, SelectedTimesheet, TimesheetCard, TimesheetFilter, TimesheetView } from './components'; import { GoSearch } from 'react-icons/go'; import { differenceBetweenHours, getGreeting, secondsToTime } from '@/app/helpers'; @@ -55,7 +55,16 @@ const TimeSheet = React.memo(function TimeSheetPage({ params }: { params: { memb to: endOfMonth(new Date()), }); - const { timesheet: filterDataTimesheet, statusTimesheet, loadingTimesheet, isManage, timesheetGroupByDays } = useTimesheet({ + const { + timesheet: filterDataTimesheet, + statusTimesheet, loadingTimesheet, + isManage, + timesheetGroupByDays, + selectTimesheetId, + setSelectTimesheetId, + updateTimesheetStatus, + deleteTaskTimesheet + } = useTimesheet({ startDate: dateRange.from!, endDate: dateRange.to!, timesheetViewMode: timesheetNavigator, @@ -171,7 +180,7 @@ const TimeSheet = React.memo(function TimeSheetPage({ params }: { params: { memb }} /> } @@ -258,15 +267,25 @@ const TimeSheet = React.memo(function TimeSheetPage({ params }: { params: { memb loading={loadingTimesheet} /> ) : ( - + + {selectTimesheetId.length > 0 && } - loading={loadingTimesheet} - /> + )} {shouldRenderPagination && ( )} +
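The timesheet page above now renders `SelectedTimesheet` only while `selectTimesheetId.length > 0`, and the bulk actions map the selected `TimesheetLog` entries to their parent timesheet ids before calling `updateTimesheetStatus` or `deleteTaskTimesheet`. A condensed sketch of that id-extraction pattern (types reduced to the fields used here; the real interfaces live in `@/app/interfaces`):

```ts
// Sketch under stated assumptions: TimesheetLog is narrowed to the nested id
// this flow touches, and updateTimesheetStatus matches the useTimesheet shape.
type TimesheetStatus = 'PENDING' | 'APPROVED' | 'DENIED';

interface TimesheetLog {
  timesheet: { id?: string };
}

async function bulkUpdateStatus(
  selected: TimesheetLog[],
  status: TimesheetStatus,
  updateTimesheetStatus: (params: { status: TimesheetStatus; ids: string[] }) => Promise<void>,
  clearSelection: () => void
): Promise<void> {
  // Collect parent timesheet ids, dropping entries without one (the same
  // map/filter used by handleApprove and handleReject in SelectedTimesheet).
  const ids = selected
    .map((log) => log.timesheet.id)
    .filter((id): id is string => id !== undefined);
  if (ids.length === 0) return;
  await updateTimesheetStatus({ status, ids });
  clearSelection(); // mirrors setSelectTimesheetId([])
}
```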
diff --git a/apps/web/app/hooks/features/useTaskStatus.ts b/apps/web/app/hooks/features/useTaskStatus.ts index 20fc56417..ad221e416 100644 --- a/apps/web/app/hooks/features/useTaskStatus.ts +++ b/apps/web/app/hooks/features/useTaskStatus.ts @@ -18,7 +18,6 @@ import { useCallback, useEffect } from 'react'; import { useAtom, useAtomValue } from 'jotai'; import { useFirstLoad } from '../useFirstLoad'; import { useQuery } from '../useQuery'; -import isEqual from 'lodash/isEqual'; import { getActiveTeamIdCookie } from '@app/helpers'; export function useTaskStatus() { @@ -64,16 +63,21 @@ export function useTaskStatus() { user?.employee?.organizationId as string, activeTeamId || teamId || null ).then((res) => { - if (!isEqual(res.data?.items || [], taskStatus)) { - setTaskStatus(res.data?.items || []); - } + setTaskStatus(res.data?.items || []); return res; }); - }, [user, activeTeamId, setTaskStatus, taskStatus, queryCall, loadingRef]); + }, [user, activeTeamId, setTaskStatus, queryCall, loadingRef]); + + useEffect(() => { + if (user?.tenantId && (activeTeamId || getActiveTeamIdCookie())) { + loadTaskStatusData(); + } + }, [user?.tenantId, activeTeamId, loadTaskStatusData]); useEffect(() => { - if (!firstLoad) return; - loadTaskStatusData(); + if (firstLoad) { + loadTaskStatusData(); + } }, [loadTaskStatusData, firstLoad]); const createTaskStatus = useCallback( diff --git a/apps/web/app/hooks/features/useTimelogFilterOptions.ts b/apps/web/app/hooks/features/useTimelogFilterOptions.ts index 242352156..d10519559 100644 --- a/apps/web/app/hooks/features/useTimelogFilterOptions.ts +++ b/apps/web/app/hooks/features/useTimelogFilterOptions.ts @@ -1,5 +1,5 @@ import { IUser, RoleNameEnum, TimesheetLog } from '@/app/interfaces'; -import { timesheetDeleteState, timesheetGroupByDayState, timesheetFilterEmployeeState, timesheetFilterProjectState, timesheetFilterStatusState, timesheetFilterTaskState, timesheetUpdateStatus } from '@/app/stores'; +import { timesheetDeleteState, timesheetGroupByDayState, timesheetFilterEmployeeState, timesheetFilterProjectState, timesheetFilterStatusState, timesheetFilterTaskState, timesheetUpdateStatus, selectTimesheetIdState } from '@/app/stores'; import { useAtom } from 'jotai'; import React from 'react'; @@ -13,7 +13,9 @@ export function useTimelogFilterOptions() { const [timesheetGroupByDays, setTimesheetGroupByDays] = useAtom(timesheetGroupByDayState); const [puTimesheetStatus, setPuTimesheetStatus] = useAtom(timesheetUpdateStatus) const [selectedItems, setSelectedItems] = React.useState<{ status: string; date: string }[]>([]); - const [selectTimesheetId, setSelectTimesheetId] = React.useState([]) + // const [selectTimesheetId, setSelectTimesheetId] = React.useState([]) + const [selectTimesheetId, setSelectTimesheetId] = useAtom(selectTimesheetIdState) + const employee = employeeState; const project = projectState; diff --git a/apps/web/app/hooks/features/useTimesheet.ts b/apps/web/app/hooks/features/useTimesheet.ts index d06e4db2b..f9ba5f78a 100644 --- a/apps/web/app/hooks/features/useTimesheet.ts +++ b/apps/web/app/hooks/features/useTimesheet.ts @@ -7,6 +7,7 @@ import { deleteTaskTimesheetLogsApi, getTaskTimesheetLogsApi, updateStatusTimesh import moment from 'moment'; import { ID, TimesheetLog, TimesheetStatus, UpdateTimesheet } from '@/app/interfaces'; import { useTimelogFilterOptions } from './useTimelogFilterOptions'; +import axios from 'axios'; interface TimesheetParams { startDate?: Date | string; @@ -23,29 +24,47 @@ export interface GroupedTimesheet { const 
groupByDate = (items: TimesheetLog[]): GroupedTimesheet[] => { if (!items?.length) return []; - type GroupedMap = Record; - const groupedByDate = items.reduce((acc, item) => { - if (!item?.timesheet?.createdAt) { + + // First, group by timesheetId + const groupedByTimesheet = items.reduce((acc, item) => { + if (!item?.timesheet?.id || !item?.timesheet.createdAt) { console.warn('Skipping item with missing timesheet or createdAt:', item); return acc; } - try { - const date = new Date(item.timesheet.createdAt).toISOString().split('T')[0]; - if (!acc[date]) acc[date] = []; - acc[date].push(item); - } catch (error) { - console.error( - `Failed to process date for timesheet ${item.timesheet.id}:`, - { createdAt: item.timesheet.createdAt, error } - ); + const timesheetId = item.timesheet.id; + if (!acc[timesheetId]) { + acc[timesheetId] = []; } + acc[timesheetId].push(item); return acc; - }, {}); + }, {} as Record); - return Object.entries(groupedByDate) - .map(([date, tasks]) => ({ date, tasks })) - .sort((a, b) => b.date.localeCompare(a.date)); -} + // Then, for each timesheet group, group by date and merge all results + const result: GroupedTimesheet[] = []; + Object.values(groupedByTimesheet).forEach(timesheetLogs => { + const groupedByDate = timesheetLogs.reduce((acc, item) => { + try { + const date = new Date(item.timesheet.createdAt).toISOString().split('T')[0]; + if (!acc[date]) acc[date] = []; + acc[date].push(item); + } catch (error) { + console.error( + `Failed to process date for timesheet ${item.timesheet.id}:`, + { createdAt: item.timesheet.createdAt, error } + ); + } + return acc; + }, {} as Record); + + // Convert grouped dates to array format and add to results + Object.entries(groupedByDate).forEach(([date, tasks]) => { + result.push({ date, tasks }); + }); + }); + + // Sort by date in descending order + return result.sort((a, b) => b.date.localeCompare(a.date)); +}; const getWeekYearKey = (date: Date): string => { const startOfYear = new Date(date.getFullYear(), 0, 1); const daysSinceStart = Math.floor((date.getTime() - startOfYear.getTime()) / (1000 * 60 * 60 * 24)); @@ -53,41 +72,59 @@ const getWeekYearKey = (date: Date): string => { return `${date.getFullYear()}-W${week}`; }; +const getMonthKey = (date: Date): string => { + return `${date.getFullYear()}-${(date.getMonth() + 1).toString().padStart(2, '0')}`; +}; type GroupingKeyFunction = (date: Date) => string; const createGroupingFunction = (getKey: GroupingKeyFunction) => (items: TimesheetLog[]): GroupedTimesheet[] => { if (!items?.length) return []; - type GroupedMap = Record; - const grouped = items.reduce((acc, item) => { - if (!item?.timesheet?.createdAt) { + // First, group by timesheetId + const groupedByTimesheet = items.reduce((acc, item) => { + if (!item?.timesheet?.id || !item?.timesheet?.createdAt) { console.warn('Skipping item with missing timesheet or createdAt:', item); return acc; } - try { - const date = new Date(item.timesheet.createdAt); - const key = getKey(date); - if (!acc[key]) acc[key] = []; - acc[key].push(item); - } catch (error) { - console.error( - `Failed to process date for timesheet ${item.timesheet.id}:`, - { createdAt: item.timesheet.createdAt, error } - ); + const timesheetId = item.timesheet.id; + if (!acc[timesheetId]) { + acc[timesheetId] = []; } + acc[timesheetId].push(item); return acc; - }, {}); + }, {} as Record); - return Object.entries(grouped) - .map(([key, tasks]) => ({ date: key, tasks })) - .sort((a, b) => b.date.localeCompare(a.date)); + // Then, for each timesheet group, 
group by date and merge all results + const result: GroupedTimesheet[] = []; + Object.values(groupedByTimesheet).forEach(timesheetLogs => { + const groupedByDate = timesheetLogs.reduce((acc, item) => { + try { + const date = new Date(item.timesheet.createdAt); + const key = getKey(date); + if (!acc[key]) acc[key] = []; + acc[key].push(item); + } catch (error) { + console.error( + `Failed to process date for timesheet ${item.timesheet.id}:`, + { createdAt: item.timesheet.createdAt, error } + ); + } + return acc; + }, {} as Record); + + // Convert grouped dates to array format and add to results + Object.entries(groupedByDate).forEach(([key, tasks]) => { + result.push({ date: key, tasks }); + }); + }); + + // Sort by date in descending order + return result.sort((a, b) => b.date.localeCompare(a.date)); }; const groupByWeek = createGroupingFunction(date => getWeekYearKey(date)); -const groupByMonth = createGroupingFunction(date => - `${date.getFullYear()}-${(date.getMonth() + 1).toString().padStart(2, '0')}` -); +const groupByMonth = createGroupingFunction(getMonthKey); /** * @function useTimesheet @@ -128,7 +165,7 @@ export function useTimesheet({ }: TimesheetParams) { const { user } = useAuthenticateUser(); const [timesheet, setTimesheet] = useAtom(timesheetRapportState); - const { employee, project, task, statusState, timesheetGroupByDays, puTimesheetStatus, isUserAllowedToAccess, normalizeText, setSelectTimesheetId } = useTimelogFilterOptions(); + const { employee, project, task, statusState, timesheetGroupByDays, puTimesheetStatus, isUserAllowedToAccess, normalizeText, setSelectTimesheetId, selectTimesheetId, handleSelectRowByStatusAndDate, handleSelectRowTimesheet } = useTimelogFilterOptions(); const { loading: loadingTimesheet, queryCall: queryTimesheet } = useQuery(getTaskTimesheetLogsApi); const { loading: loadingDeleteTimesheet, queryCall: queryDeleteTimesheet } = useQuery(deleteTaskTimesheetLogsApi); const { loading: loadingUpdateTimesheetStatus, queryCall: queryUpdateTimesheetStatus } = useQuery(updateStatusTimesheetFromApi) @@ -160,15 +197,7 @@ export function useTimesheet({ console.error('Error fetching timesheet:', error); }); }, - [ - user, - queryTimesheet, - setTimesheet, - employee, - project, - task, - statusState - ] + [user, queryTimesheet, isManage, employee, project, task, statusState, setTimesheet] ); const createTimesheet = useCallback( @@ -177,40 +206,56 @@ export function useTimesheet({ throw new Error("User not authenticated"); } try { - const response = await queryCreateTimesheet(timesheetParams); - setTimesheet((prevTimesheet) => [ - response.data, - ...(prevTimesheet || []) - ]); + const response = queryCreateTimesheet(timesheetParams).then((res) => { + return res.data + }); + return response } catch (error) { - console.error('Error:', error); + if (axios.isAxiosError(error)) { + console.error('Axios Error:', { + status: error.response?.status, + statusText: error.response?.statusText, + data: error.response?.data + }); + throw new Error(`Request failed: ${error.message}`); + } + console.error('Error:', error instanceof Error ? 
 
 /**
  * @function useTimesheet
@@ -128,7 +165,7 @@ export function useTimesheet({
 }: TimesheetParams) {
 	const { user } = useAuthenticateUser();
 	const [timesheet, setTimesheet] = useAtom(timesheetRapportState);
-	const { employee, project, task, statusState, timesheetGroupByDays, puTimesheetStatus, isUserAllowedToAccess, normalizeText, setSelectTimesheetId } = useTimelogFilterOptions();
+	const { employee, project, task, statusState, timesheetGroupByDays, puTimesheetStatus, isUserAllowedToAccess, normalizeText, setSelectTimesheetId, selectTimesheetId, handleSelectRowByStatusAndDate, handleSelectRowTimesheet } = useTimelogFilterOptions();
 	const { loading: loadingTimesheet, queryCall: queryTimesheet } = useQuery(getTaskTimesheetLogsApi);
 	const { loading: loadingDeleteTimesheet, queryCall: queryDeleteTimesheet } = useQuery(deleteTaskTimesheetLogsApi);
 	const { loading: loadingUpdateTimesheetStatus, queryCall: queryUpdateTimesheetStatus } = useQuery(updateStatusTimesheetFromApi)
@@ -160,15 +197,7 @@ export function useTimesheet({
 				console.error('Error fetching timesheet:', error);
 			});
 		},
-		[
-			user,
-			queryTimesheet,
-			setTimesheet,
-			employee,
-			project,
-			task,
-			statusState
-		]
+		[user, queryTimesheet, isManage, employee, project, task, statusState, setTimesheet]
 	);
 
 	const createTimesheet = useCallback(
@@ -177,40 +206,56 @@ export function useTimesheet({
 			throw new Error("User not authenticated");
 		}
 		try {
-			const response = await queryCreateTimesheet(timesheetParams);
-			setTimesheet((prevTimesheet) => [
-				response.data,
-				...(prevTimesheet || [])
-			]);
+			const response = queryCreateTimesheet(timesheetParams).then((res) => {
+				return res.data
+			});
+			return response
 		} catch (error) {
-			console.error('Error:', error);
+			if (axios.isAxiosError(error)) {
+				console.error('Axios Error:', {
+					status: error.response?.status,
+					statusText: error.response?.statusText,
+					data: error.response?.data
+				});
+				throw new Error(`Request failed: ${error.message}`);
+			}
+			console.error('Error:', error instanceof Error ? error.message : error);
+			throw error;
 		}
 		},
-		[queryCreateTimesheet, setTimesheet, user]
+		[queryCreateTimesheet, user]
 	);
-
-
-	const updateTimesheet = useCallback<(params: UpdateTimesheet) => Promise<void>>(
-		async ({ ...timesheet }: UpdateTimesheet) => {
+	const updateTimesheet = useCallback(
+		async (timesheet: UpdateTimesheet) => {
 			if (!user) {
-				throw new Error("User not authenticated");
+				console.warn("User not authenticated!");
+				return;
 			}
 			try {
 				const response = await queryUpdateTimesheet(timesheet);
-				setTimesheet((prevTimesheet) => {
-					const updatedTimesheets = prevTimesheet.map((item) =>
-						item.id === response.data.id
-							? { ...item, ...response.data }
-							: item
+				if (response?.data?.id) {
+					setTimesheet((prevTimesheet) =>
+						prevTimesheet.map((item) =>
+							item.id === response.data.id
+								? { ...item, ...response.data }
+								: item
+						)
 					);
-					return updatedTimesheets;
-				});
+				} else {
+					console.warn(
+						"Unexpected structure of the response. No update performed.",
+						response
+					);
+				}
 			} catch (error) {
-				console.error('Error updating timesheet:', error);
+				console.error("Error updating the timesheet:", error);
 				throw error;
 			}
-		}, [queryUpdateTimesheet, setTimesheet, user])
+		},
+		[queryUpdateTimesheet, setTimesheet, user]
+	);
+
 	const updateTimesheetStatus = useCallback(
@@ -320,7 +365,19 @@ export function useTimesheet({
 				)
 			);
 		});
-	}, [timesheet, inputSearch]);
+	}, [timesheet, inputSearch, normalizeText, statusState]);
+
+	const reGroupByDate = (groupedTimesheets: GroupedTimesheet[]): GroupedTimesheet[] => {
+		return groupedTimesheets.reduce((acc, { date, tasks }) => {
+			const existingGroup = acc.find(group => group.date === date);
+			if (existingGroup) {
+				existingGroup.tasks = existingGroup.tasks.concat(tasks);
+			} else {
+				acc.push({ date, tasks });
+			}
+			return acc;
+		}, [] as GroupedTimesheet[]);
+	};
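+
+	// Illustrative merge (assumed data): reGroupByDate collapses the duplicate dates
+	// produced by the per-timesheet grouping above:
+	//   [{ date: '2024-01-15', tasks: [logA] }, { date: '2024-01-15', tasks: [logB] }]
+	//   -> [{ date: '2024-01-15', tasks: [logA, logB] }]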
 
 	const timesheetElementGroup = useMemo(() => {
 		if (!timesheet) {
@@ -328,20 +385,28 @@ export function useTimesheet({
 		}
 
 		if (timesheetViewMode === 'ListView') {
+			const groupedTimesheets = groupByDate(filterDataTimesheet);
+			const reGroupedByDate = reGroupByDate(groupedTimesheets);
 			switch (timesheetGroupByDays) {
 				case 'Daily':
-					return groupByDate(filterDataTimesheet);
+					return reGroupedByDate;
 				case 'Weekly':
 					return groupByWeek(filterDataTimesheet);
 				case 'Monthly':
 					return groupByMonth(filterDataTimesheet);
 				default:
-					return groupByDate(filterDataTimesheet);
+					return reGroupedByDate;
 			}
 		}
+		return reGroupByDate(groupByDate(filterDataTimesheet));
+	}, [timesheet, timesheetViewMode, filterDataTimesheet, timesheetGroupByDays]);
 
-		return groupByDate(filterDataTimesheet);
-	}, [timesheetGroupByDays, timesheetViewMode, timesheet]);
+	const rowsToObject = (rows: TimesheetLog[]): Record<string, { task: TimesheetLog; status: TimesheetLog['timesheet']['status'] }> => {
+		return rows.reduce((acc, row) => {
+			acc[row.timesheet.id] = { task: row, status: row.timesheet.status }
+			return acc;
+		}, {} as Record<string, { task: TimesheetLog; status: TimesheetLog['timesheet']['status'] }>);
+	};
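+
+	// Illustrative shape (ids assumed): rowsToObject keys each row by its timesheet id:
+	//   rowsToObject([logA]) -> { 'timesheet-1': { task: logA, status: logA.timesheet.status } }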
 
 	useEffect(() => {
@@ -367,6 +432,10 @@ export function useTimesheet({
 		groupByDate,
 		isManage,
 		normalizeText,
-		setSelectTimesheetId
+		setSelectTimesheetId,
+		selectTimesheetId,
+		handleSelectRowByStatusAndDate,
+		handleSelectRowTimesheet,
+		rowsToObject
 	};
 }
diff --git a/apps/web/app/hooks/useLeftSettingData.ts b/apps/web/app/hooks/useLeftSettingData.ts
index 60172af88..940e8e408 100644
--- a/apps/web/app/hooks/useLeftSettingData.ts
+++ b/apps/web/app/hooks/useLeftSettingData.ts
@@ -8,6 +8,11 @@ export const useLeftSettingData = () => {
 			color: '#7E7991',
 			href: '#general'
 		},
+		{
+			title: 'Working hours',
+			color: '#7E7991',
+			href: '#working-hours',
+		},
 		// {
 		// 	title: t('pages.settingsPersonal.WORK_SCHEDULE'),
 		// 	color: '#7E7991',
diff --git a/apps/web/app/interfaces/IBaseModel.ts b/apps/web/app/interfaces/IBaseModel.ts
index debe7f323..e5b7f3b06 100644
--- a/apps/web/app/interfaces/IBaseModel.ts
+++ b/apps/web/app/interfaces/IBaseModel.ts
@@ -38,6 +38,6 @@ export interface IBasePerTenantAndOrganizationEntity extends IBasePerTenant {
 
 export interface IBasePerTenantAndOrganizationEntityMutationInput
 	extends Pick,
-		Partial {
+		Partial {
 	organization?: Pick;
 }
diff --git a/apps/web/app/stores/time-logs.ts b/apps/web/app/stores/time-logs.ts
index 19ad5da66..514fc7e89 100644
--- a/apps/web/app/stores/time-logs.ts
+++ b/apps/web/app/stores/time-logs.ts
@@ -21,3 +21,4 @@ export const timesheetDeleteState = atom([]);
 export const timesheetGroupByDayState = atom('Daily')
 export const timesheetUpdateStatus = atom([])
 export const timesheetUpdateState = atom(null)
+export const selectTimesheetIdState = atom([])
diff --git a/apps/web/components/offline-wrapper/index.tsx b/apps/web/components/offline-wrapper/index.tsx
new file mode 100644
index 000000000..d56797850
--- /dev/null
+++ b/apps/web/components/offline-wrapper/index.tsx
@@ -0,0 +1,37 @@
+'use client';
+
+import { useNetworkState } from '@uidotdev/usehooks';
+import Offline from '@components/pages/offline';
+import { useTimerView } from '@app/hooks';
+import { usePathname } from 'next/navigation';
+
+interface OfflineWrapperProps {
+	children: React.ReactNode;
+}
+
+/**
+ * A wrapper component that conditionally renders the Offline component if the user is not online.
+ * The Offline component is not shown on authentication pages (paths starting with /auth).
+ * When the user is offline, the Offline component is rendered with the showTimer prop set to
+ * whether the timer is running or not.
+ *
+ * @example
+ * <OfflineWrapper>
+ *   <App />
+ * </OfflineWrapper>
+ *
+ * @param {React.ReactNode} children - The children components to render when the user is online
+ * @returns {React.ReactElement} - The Offline component if the user is offline (except on auth pages), or the children components if the user is online
+ */
+export default function OfflineWrapper({ children }: OfflineWrapperProps) {
+	const { online } = useNetworkState();
+	const { timerStatus } = useTimerView();
+	const pathname = usePathname();
+
+	const isAuthPage = pathname?.startsWith('/auth');
+
+	if (!online && !isAuthPage) {
+		return <Offline showTimer={timerStatus?.running} />;
+	}
+
+	return <>{children}</>;
+}
diff --git a/apps/web/components/pages/offline/index.tsx b/apps/web/components/pages/offline/index.tsx
index 198e689df..dec2f2754 100644
--- a/apps/web/components/pages/offline/index.tsx
+++ b/apps/web/components/pages/offline/index.tsx
@@ -3,11 +3,14 @@ import { cn } from '@/lib/utils';
 import SadCry from '@components/ui/svgs/sad-cry';
 import { Text } from 'lib/components';
 import { useTranslations } from 'next-intl';
+
 interface IPropsOffline {
 	showTimer?: boolean
 }
+
 function Offline({ showTimer }: IPropsOffline) {
 	const t = useTranslations();
+
 	return (
diff --git a/apps/web/components/ui/time-picker.tsx b/apps/web/components/ui/time-picker.tsx
new file mode 100644
index 000000000..472cfa3fd
--- /dev/null
+++ b/apps/web/components/ui/time-picker.tsx
@@ -0,0 +1,125 @@
+import React, { useState } from 'react';
+import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from './select';
+import { Input } from './input';
+
+interface TimePickerProps {
+	value: string;
+	onChange: (value: string) => void;
+	disabled?: boolean;
+}
+
+// Builds the dropdown options: every 10 minutes across 24 hours ('00:00' ... '23:50')
+const generateTimeOptions = () => {
+	const options: string[] = [];
+	for (let hour = 0; hour < 24; hour++) {
+		for (let minute = 0; minute < 60; minute += 10) {
+			const formattedHour = hour.toString().padStart(2, '0');
+			const formattedMinute = minute.toString().padStart(2, '0');
+			options.push(`${formattedHour}:${formattedMinute}`);
+		}
+	}
+	return options;
+};
+
+// Accepts H:MM or HH:MM in 24-hour time
+const isValidTimeFormat = (time: string): boolean => {
+	const timeRegex = /^([0-1]?[0-9]|2[0-3]):[0-5][0-9]$/;
+	return timeRegex.test(time);
+};
+
+const formatTime = (time: string): string => {
+	if (!time) return '';
+	const [hours, minutes] = time.split(':');
+	const formattedHours = hours.padStart(2, '0');
+	const formattedMinutes = minutes ? minutes.padStart(2, '0') : '00';
+	return `${formattedHours}:${formattedMinutes}`;
+};
+
+export const TimePicker: React.FC<TimePickerProps> = ({ value, onChange, disabled }) => {
+	const [inputValue, setInputValue] = useState(value);
+	const [isEditing, setIsEditing] = useState(false);
+	const timeOptions = generateTimeOptions();
+
+	const handleInputChange = (e: React.ChangeEvent<HTMLInputElement>) => {
+		const newValue = e.target.value;
+		setInputValue(newValue);
+
+		// Auto-format as user types
+		if (newValue.length === 2 && !newValue.includes(':')) {
+			setInputValue(newValue + ':');
+		}
+	};
+
+	const handleInputBlur = () => {
+		setIsEditing(false);
+		if (isValidTimeFormat(inputValue)) {
+			const formattedTime = formatTime(inputValue);
+			setInputValue(formattedTime);
+			onChange(formattedTime);
+		} else {
+			setInputValue(value);
+		}
+	};
+
+	const handleSelectChange = (newValue: string) => {
+		setInputValue(newValue);
+		onChange(newValue);
+	};
+
+	return (
+		<div>
+			{isEditing ? (
+				<Input
+					value={inputValue}
+					onChange={handleInputChange}
+					onBlur={handleInputBlur}
+					disabled={disabled}
+				/>
+			) : (
+				<Select value={inputValue} onValueChange={handleSelectChange} disabled={disabled}>
+					{/* The editing toggle is assumed: the original trigger markup was lost */}
+					<SelectTrigger onClick={() => setIsEditing(true)}>
+						<SelectValue />
+					</SelectTrigger>
+					<SelectContent>
+						{timeOptions.map((time) => (
+							<SelectItem key={time} value={time}>
+								{time}
+							</SelectItem>
+						))}
+					</SelectContent>
+				</Select>
+			)}
+		</div>
+	);
+};
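+
+// Illustrative usage (parent component and state are assumed):
+//   const [start, setStart] = useState('09:00');
+//   <TimePicker value={start} onChange={setStart} />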
+ ); +}; diff --git a/apps/web/lib/components/color-picker.tsx b/apps/web/lib/components/color-picker.tsx index d06dbef3a..a5f0d5130 100644 --- a/apps/web/lib/components/color-picker.tsx +++ b/apps/web/lib/components/color-picker.tsx @@ -67,7 +67,7 @@ export const ColorPicker = ({ }, []); return fullWidthInput ? ( - + e.stopPropagation()}> {() => ( <>
@@ -149,7 +149,7 @@ export function DataSyncToggler({ className }: IClassName) {
 				className={clsxm(
 					'flex flex-row justify-center items-center p-2 w-8 h-8 rounded-[60px] mr-[-2px]',
 					!dataSync &&
-					'bg-red-400 shadow-md dark:bg-transparent dark:bg-red-400'
+						'bg-red-400 shadow-md dark:bg-transparent dark:bg-red-400'
 				)}
 			>
@@ -186,7 +186,7 @@ export function DataSyncModeToggler({ className }: IClassName) {
 				className={clsxm(
 					'flex flex-row justify-center items-center p-2 w-8 h-8 rounded-[60px] ml-[-2px]',
 					dataSyncMode == 'REAL_TIME' &&
-					'bg-white text-primary shadow-md dark:bg-transparent dark:bg-[#3B4454]'
+						'bg-white text-primary shadow-md dark:bg-transparent dark:bg-[#3B4454]'
 				)}
 			>
@@ -197,7 +197,7 @@ export function DataSyncModeToggler({ className }: IClassName) {
 				className={clsxm(
 					'flex flex-row justify-center items-center p-2 w-8 h-8 rounded-[60px] mr-[-2px]',
 					dataSyncMode == 'PULL' &&
-					'bg-white shadow-md dark:bg-transparent dark:bg-[#3B4454]'
+						'bg-white shadow-md dark:bg-transparent dark:bg-[#3B4454]'
 				)}
 			>
@@ -234,9 +234,8 @@ export function CommonToggle({
 			>