
Commit 6e801e0

Fix workflows
1 parent b660bad commit 6e801e0

12 files changed, +570 -10 lines

.github/workflows/rails_graphql_federation_books_ci.yml

+1

@@ -9,6 +9,7 @@ on:

 env:
   rails_app_root: rails_graphql_federation/books
+  CI: true

 jobs:
   scan_ruby:

.github/workflows/rails_graphql_federation_users_ci.yml

+1

@@ -9,6 +9,7 @@ on:

 env:
   rails_app_root: rails_graphql_federation/users
+  CI: true

 jobs:
   scan_ruby:
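
Because `CI: true` is set at the workflow level, it is exported to every job and step in these workflows, so any Ruby process they start can read it. A minimal, hypothetical sanity check (for example run via `bin/rails runner`), not part of the commit:

# Prints whatever the workflow exported; under these workflows it should print "true".
puts ENV.fetch("CI", "<unset>")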

rails_graphql_federation/books/app/models/application_record.rb

-3
This file was deleted.

rails_graphql_federation/books/config/initializers/phobos.rb

+5 -1

@@ -1,5 +1,9 @@
 Rails.application.configure do
-  Phobos.configure("config/phobos.yml")
+  if ENV["CI"].present?
+    Phobos.configure("config/phobos.ci.yml")
+  else
+    Phobos.configure("config/phobos.yml")
+  end

   group_id = SecureRandom.uuid
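The guard above simply picks the CI-specific Phobos config when the workflow exports CI. As an equivalent sketch only (not part of the commit), the same logic collapses into a ternary:

Rails.application.configure do
  # Load config/phobos.ci.yml on CI, config/phobos.yml everywhere else.
  Phobos.configure(ENV["CI"].present? ? "config/phobos.ci.yml" : "config/phobos.yml")
end
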
rails_graphql_federation/books/config/phobos.ci.yml (new file)

+137

@@ -0,0 +1,137 @@
logger:
  # Optional log file, set to false or remove to disable it
  file: log/phobos.log
  # Optional output format for stdout, default is false (human readable).
  # Set to true to enable json output.
  stdout_json: false
  level: info
  # Comment the block to disable ruby-kafka logs
  ruby_kafka:
    level: error

kafka:
  # identifier for this application
  client_id: books
  # timeout setting for connecting to brokers
  connect_timeout:
  # timeout setting for socket connections
  socket_timeout:
  # PEM encoded CA cert to use with an SSL connection (string)
  ssl_ca_cert:
  # PEM encoded client cert to use with an SSL connection (string)
  # Must be used in combination with ssl_client_cert_key
  ssl_client_cert:
  # PEM encoded client cert key to use with an SSL connection (string)
  # Must be used in combination with ssl_client_cert
  ssl_client_cert_key:
  # list of brokers used to initialize the client ("port:protocol")
  seed_brokers:
    - kafka:9092

producer:
  # number of seconds a broker can wait for replicas to acknowledge
  # a write before responding with a timeout
  ack_timeout: 5
  # number of replicas that must acknowledge a write, or `:all`
  # if all in-sync replicas must acknowledge
  required_acks: :all
  # number of retries that should be attempted before giving up sending
  # messages to the cluster. Does not include the original attempt
  max_retries: 2
  # number of seconds to wait between retries
  retry_backoff: 1
  # number of messages allowed in the buffer before new writes will
  # raise {BufferOverflow} exceptions
  max_buffer_size: 1000
  # maximum size of the buffer in bytes. Attempting to produce messages
  # when the buffer reaches this size will result in {BufferOverflow} being raised
  max_buffer_bytesize: 10000000
  # name of the compression codec to use, or nil if no compression should be performed.
  # Valid codecs: `:snappy` and `:gzip`
  compression_codec:
  # number of messages that needs to be in a message set before it should be compressed.
  # Note that message sets are per-partition rather than per-topic or per-producer
  compression_threshold: 1
  # maximum number of messages allowed in the queue. Only used for async_producer
  max_queue_size: 1000
  # if greater than zero, the number of buffered messages that will automatically
  # trigger a delivery. Only used for async_producer
  delivery_threshold: 0
  # if greater than zero, the number of seconds between automatic message
  # deliveries. Only used for async_producer
  delivery_interval: 0
  # Set this to true to keep the producer connection between publish calls.
  # This can speed up subsequent messages by around 30%, but it does mean
  # that you need to manually call sync_producer_shutdown before exiting,
  # similar to async_producer_shutdown.
  persistent_connections: false
  # kafka here supports the same parameters as the top-level, allowing custom connection
  # configuration details for producers
  kafka:
    connect_timeout: 120

consumer:
  # number of seconds after which, if a client hasn't contacted the Kafka cluster,
  # it will be kicked out of the group
  session_timeout: 30
  # interval between offset commits, in seconds
  offset_commit_interval: 10
  # number of messages that can be processed before their offsets are committed.
  # If zero, offset commits are not triggered by message processing
  offset_commit_threshold: 0
  # the time period that committed offsets will be retained, in seconds. Defaults to the broker setting.
  offset_retention_time:
  # interval between heartbeats; must be less than the session window
  heartbeat_interval: 10
  # kafka here supports the same parameters as the top-level, allowing custom connection
  # configuration details for consumers
  kafka:
    connect_timeout: 130

backoff:
  min_ms: 1000
  max_ms: 60000

listeners:
  - handler: Phobos::EchoHandler
    topic: test
    # id of the group that the consumer should join
    group_id: test-1
    # Number of threads created for this listener, each thread will behave as an independent consumer.
    # They don't share any state
    max_concurrency: 1
    # Once the consumer group has checkpointed its progress in the topic's partitions,
    # the consumers will always start from the checkpointed offsets, regardless of config
    # As such, this setting only applies when the consumer initially starts consuming from a topic
    start_from_beginning: true
    # maximum amount of data fetched from a single partition at a time
    max_bytes_per_partition: 524288 # 512 KB
    # Minimum number of bytes to read before returning messages from the server; if `max_wait_time` is reached, this is ignored.
    min_bytes: 1
    # Maximum duration of time to wait before returning messages from the server, in seconds
    max_wait_time: 5
    # Apply this encoding to the message payload, if blank it uses the original encoding. This property accepts values
    # defined by the ruby Encoding class (https://ruby-doc.org/core-2.3.0/Encoding.html). Ex: UTF_8, ASCII_8BIT, etc
    force_encoding:
    # Specify the delivery method for a listener.
    # Possible values: [`message`, `batch` (default)]
    # - `message` will yield individual messages from Ruby Kafka using `each_message` and will commit/heartbeat at every consumed message.
    #   This is overall a bit slower than using batch, but easier to configure.
    # - `batch` will yield batches from Ruby Kafka using `each_batch`, and commit at every consumed batch. It will
    #   still heartbeat after every message if necessary (using the heartbeat_interval, below).
    # - `inline_batch` also uses `each_batch`, but will pass the entire batch to your handler instead
    #   of one message at a time. To use this method, you should include Phobos::BatchHandler
    #   instead of Phobos::Handler so that you can make use of the `consume_batch` etc. methods.
    # Note: Ultimately commit/heartbeat will depend on the offset commit options and the heartbeat interval.
    delivery: batch
    # Use this if custom backoff is required for a listener
    backoff:
      min_ms: 500
      max_ms: 10000
    # session_timeout, offset_commit_interval, offset_commit_threshold, offset_retention_time, and heartbeat_interval
    # can be customized per listener if desired
    session_timeout: 30
    offset_commit_interval: 15
    offset_commit_threshold: 5
    offset_retention_time: 172800
    heartbeat_interval: 20
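
The listener above wires the `test` topic to Phobos::EchoHandler. For context, a minimal custom handler compatible with this listener would look like the sketch below; the class name BookEventsHandler is hypothetical and not part of this commit. With `delivery: batch`, the handler is still handed one message at a time (only `inline_batch` requires Phobos::BatchHandler, per the comments above).

# Hypothetical handler that could replace Phobos::EchoHandler in the listener above.
class BookEventsHandler
  include Phobos::Handler

  def consume(payload, metadata)
    # payload is the raw message value; metadata carries topic, partition, offset, etc.
    Rails.logger.info("books consumed #{payload.inspect} from #{metadata[:topic]}")
  end
end
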
rails_graphql_federation/books/config/phobos_spec.ci.yml (new file)

+137

@@ -0,0 +1,137 @@
logger:
  # Optional log file, set to false or remove to disable it
  # file: log/phobos.log
  # Optional output format for stdout, default is false (human readable).
  # Set to true to enable json output.
  stdout_json: false
  level: error
  # Comment the block to disable ruby-kafka logs
  ruby_kafka:
    level: error

kafka:
  # identifier for this application
  client_id: books
  # timeout setting for connecting to brokers
  connect_timeout:
  # timeout setting for socket connections
  socket_timeout:
  # PEM encoded CA cert to use with an SSL connection (string)
  ssl_ca_cert:
  # PEM encoded client cert to use with an SSL connection (string)
  # Must be used in combination with ssl_client_cert_key
  ssl_client_cert:
  # PEM encoded client cert key to use with an SSL connection (string)
  # Must be used in combination with ssl_client_cert
  ssl_client_cert_key:
  # list of brokers used to initialize the client ("port:protocol")
  seed_brokers:
    - kafka:9092

producer:
  # number of seconds a broker can wait for replicas to acknowledge
  # a write before responding with a timeout
  ack_timeout: 5
  # number of replicas that must acknowledge a write, or `:all`
  # if all in-sync replicas must acknowledge
  required_acks: :all
  # number of retries that should be attempted before giving up sending
  # messages to the cluster. Does not include the original attempt
  max_retries: 2
  # number of seconds to wait between retries
  retry_backoff: 1
  # number of messages allowed in the buffer before new writes will
  # raise {BufferOverflow} exceptions
  max_buffer_size: 1000
  # maximum size of the buffer in bytes. Attempting to produce messages
  # when the buffer reaches this size will result in {BufferOverflow} being raised
  max_buffer_bytesize: 10000000
  # name of the compression codec to use, or nil if no compression should be performed.
  # Valid codecs: `:snappy` and `:gzip`
  compression_codec:
  # number of messages that needs to be in a message set before it should be compressed.
  # Note that message sets are per-partition rather than per-topic or per-producer
  compression_threshold: 1
  # maximum number of messages allowed in the queue. Only used for async_producer
  max_queue_size: 1000
  # if greater than zero, the number of buffered messages that will automatically
  # trigger a delivery. Only used for async_producer
  delivery_threshold: 0
  # if greater than zero, the number of seconds between automatic message
  # deliveries. Only used for async_producer
  delivery_interval: 0
  # Set this to true to keep the producer connection between publish calls.
  # This can speed up subsequent messages by around 30%, but it does mean
  # that you need to manually call sync_producer_shutdown before exiting,
  # similar to async_producer_shutdown.
  persistent_connections: false
  # kafka here supports the same parameters as the top-level, allowing custom connection
  # configuration details for producers
  kafka:
    connect_timeout: 120

consumer:
  # number of seconds after which, if a client hasn't contacted the Kafka cluster,
  # it will be kicked out of the group
  session_timeout: 30
  # interval between offset commits, in seconds
  offset_commit_interval: 10
  # number of messages that can be processed before their offsets are committed.
  # If zero, offset commits are not triggered by message processing
  offset_commit_threshold: 0
  # the time period that committed offsets will be retained, in seconds. Defaults to the broker setting.
  offset_retention_time:
  # interval between heartbeats; must be less than the session window
  heartbeat_interval: 10
  # kafka here supports the same parameters as the top-level, allowing custom connection
  # configuration details for consumers
  kafka:
    connect_timeout: 130

backoff:
  min_ms: 1000
  max_ms: 60000

listeners:
  - handler: Phobos::EchoHandler
    topic: test
    # id of the group that the consumer should join
    group_id: test-1
    # Number of threads created for this listener, each thread will behave as an independent consumer.
    # They don't share any state
    max_concurrency: 1
    # Once the consumer group has checkpointed its progress in the topic's partitions,
    # the consumers will always start from the checkpointed offsets, regardless of config
    # As such, this setting only applies when the consumer initially starts consuming from a topic
    start_from_beginning: true
    # maximum amount of data fetched from a single partition at a time
    max_bytes_per_partition: 524288 # 512 KB
    # Minimum number of bytes to read before returning messages from the server; if `max_wait_time` is reached, this is ignored.
    min_bytes: 1
    # Maximum duration of time to wait before returning messages from the server, in seconds
    max_wait_time: 5
    # Apply this encoding to the message payload, if blank it uses the original encoding. This property accepts values
    # defined by the ruby Encoding class (https://ruby-doc.org/core-2.3.0/Encoding.html). Ex: UTF_8, ASCII_8BIT, etc
    force_encoding:
    # Specify the delivery method for a listener.
    # Possible values: [`message`, `batch` (default)]
    # - `message` will yield individual messages from Ruby Kafka using `each_message` and will commit/heartbeat at every consumed message.
    #   This is overall a bit slower than using batch, but easier to configure.
    # - `batch` will yield batches from Ruby Kafka using `each_batch`, and commit at every consumed batch. It will
    #   still heartbeat after every message if necessary (using the heartbeat_interval, below).
    # - `inline_batch` also uses `each_batch`, but will pass the entire batch to your handler instead
    #   of one message at a time. To use this method, you should include Phobos::BatchHandler
    #   instead of Phobos::Handler so that you can make use of the `consume_batch` etc. methods.
    # Note: Ultimately commit/heartbeat will depend on the offset commit options and the heartbeat interval.
    delivery: batch
    # Use this if custom backoff is required for a listener
    backoff:
      min_ms: 500
      max_ms: 10000
    # session_timeout, offset_commit_interval, offset_commit_threshold, offset_retention_time, and heartbeat_interval
    # can be customized per listener if desired
    session_timeout: 30
    offset_commit_interval: 15
    offset_commit_threshold: 5
    offset_retention_time: 172800
    heartbeat_interval: 20

rails_graphql_federation/books/spec/spec_helper.rb

+5 -1

@@ -97,6 +97,10 @@
   # Include Phobos helper
   config.include Phobos::Test::Helper
   config.before(:each) do
-    Phobos.configure('config/phobos_spec.yml')
+    if ENV["CI"].present?
+      Phobos.configure('config/phobos_spec.ci.yml')
+    else
+      Phobos.configure('config/phobos_spec.yml')
+    end
   end
 end
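
Since the hook above reconfigures Phobos before each example, a spec can read the loaded settings back through Phobos.config. A hedged, hypothetical example (not part of the commit; both YAML files list the same seed broker, so the expectation holds with or without CI set):

require "spec_helper"

# Hypothetical spec: Phobos.config exposes whichever YAML the before(:each) hook loaded.
RSpec.describe "Phobos configuration" do
  it "points at the kafka:9092 seed broker" do
    expect(Phobos.config.kafka.seed_brokers).to include("kafka:9092")
  end
end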

rails_graphql_federation/users/app/models/application_record.rb

-3
This file was deleted.

rails_graphql_federation/users/config/initializers/phobos.rb

+5 -1

@@ -1,5 +1,9 @@
 Rails.application.configure do
-  Phobos.configure("config/phobos.yml")
+  if ENV["CI"].present?
+    Phobos.configure("config/phobos.ci.yml")
+  else
+    Phobos.configure("config/phobos.yml")
+  end

   group_id = SecureRandom.uuid
