import Config
require Logger
# config/runtime.exs is executed for all environments, including
# during releases. It is executed after compilation and before the
# system starts, so it is typically used to load production configuration
# and secrets from environment variables or elsewhere. Do not define
# any compile-time configuration in here, as it won't be applied.

{worker, webserver} =
  case config_env() do
    :prod ->
      {
        System.get_env("WORKER") || raise("expected the WORKER environment variable to be set"),
        System.get_env("WEBSERVER") || raise("expected the WEBSERVER environment variable to be set")
      }

    :dev ->
      # By default in dev, the application is both a worker and a webserver
      {
        System.get_env("WORKER", "1"),
        System.get_env("WEBSERVER", "1")
      }

    :test ->
      {"0", "0"}
  end

worker = worker == "1"
webserver = webserver == "1"
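
# For illustration: WORKER=1 WEBSERVER=0 boots a node that only processes jobs,
# while WORKER=0 WEBSERVER=1 boots a web-only node; in dev, both default to "1"
# so a single node plays both roles.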

# Expose the result so that the application can configure itself from there
config :transport,
  worker: worker,
  webserver: webserver

config :unlock,
  enforce_ttl: webserver

# Inside IEx, we do not want jobs to start processing, nor plugins to run.
# Jobs can be heavy and, for instance in production, one person could
# unknowingly create duplicate RAM-heavy jobs. With this trick, we can still
# enqueue jobs from IEx, but only the real worker will process them.
# See https://github.com/sorentwo/oban/issues/520#issuecomment-883416363
iex_started? = Code.ensure_loaded?(IEx) && IEx.started?()
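
# For illustration (hypothetical job module): even with queues and plugins
# disabled in an IEx session, enqueueing still works, because Oban.insert/1
# only writes a row to the database; processing is left to the real worker:
#
#     Transport.Jobs.SomeJob.new(%{some: "arg"}) |> Oban.insert()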

# Scheduled jobs (via Quantum at this point) are run in production only, and only on the first worker node.
# https://www.clever-cloud.com/doc/reference/reference-environment-variables/#set-by-the-deployment-process
# They should not run in an IEx session either.
if config_env() == :prod && !iex_started? && worker && System.fetch_env!("INSTANCE_NUMBER") == "0" do
  config :transport, Transport.Scheduler, jobs: Transport.Scheduler.scheduled_jobs()
end
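
# For illustration: per the Clever Cloud docs linked above, instances are
# numbered from 0, so this clause matches only the first worker instance and
# the Quantum crontab is registered exactly once.
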
# Make sure that APP_ENV is set in production to distinguish
# production and staging (both running with MIX_ENV=prod)
# See https://github.com/etalab/transport-site/issues/1945
app_env = System.get_env("APP_ENV", "") |> String.to_atom()
app_env_is_valid = Enum.member?([:production, :staging], app_env)

if config_env() == :prod and not app_env_is_valid do
  raise("APP_ENV must be set to production or staging while in production")
end
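
# For illustration: APP_ENV=production yields :production and APP_ENV=staging
# yields :staging; an unset APP_ENV becomes :"" and triggers the raise above
# when running with MIX_ENV=prod.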

config :transport,
  app_env: app_env

# Override configuration specific to staging
if app_env == :staging do
  config :transport,
    s3_buckets: %{
      history: "resource-history-staging",
      on_demand_validation: "on-demand-validation-staging"
    }
end

base_oban_conf = [repo: DB.Repo]

# Oban jobs that should run in every deployed environment (staging, prod)
# but not in dev or test.
# Be careful: "app_env == :production" contrasts with :staging (i.e. the production
# website vs prochainement), while "config_env() == :prod" contrasts with :dev and :test.
oban_crontab_all_envs =
  case config_env() do
    :prod ->
      [
        {"0 */6 * * *", Transport.Jobs.ResourceHistoryDispatcherJob},
        {"30 */6 * * *", Transport.Jobs.GtfsToGeojsonConverterJob},
        # every 6 hours, but not at the same time as the other jobs
        {"0 3,9,15,21 * * *", Transport.Jobs.GtfsToNetexConverterJob},
        {"20 8 * * *", Transport.Jobs.CleanOrphanConversionsJob},
        {"0 * * * *", Transport.Jobs.ResourcesUnavailableDispatcherJob},
        {"*/10 * * * *", Transport.Jobs.ResourcesUnavailableDispatcherJob, args: %{only_unavailable: true}},
        {"20 */2 * * *", Transport.Jobs.GTFSRTEntitiesDispatcherJob}
      ]

    :dev ->
      []

    :test ->
      []
  end
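
# Cron syntax reminder: "0 */6 * * *" fires at minute 0 of every sixth hour
# (00:00, 06:00, 12:00, 18:00), interpreted in UTC by default by Oban.Plugins.Cron.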

# Oban jobs that should *not* be run in staging (i.e. on prochainement) by the crontab
non_staging_crontab =
  if app_env == :staging do
    []
  else
    # Oban jobs that should run in all envs *except* staging
    []
  end

extra_oban_conf =
  if not worker || iex_started? || config_env() == :test do
    [queues: false, plugins: false]
  else
    [
      queues: [default: 2, heavy: 1, on_demand_validation: 1],
      plugins: [
        {Oban.Plugins.Pruner, max_age: 60 * 60 * 24},
        {Oban.Plugins.Cron, crontab: List.flatten(oban_crontab_all_envs, non_staging_crontab)}
      ]
    ]
  end

config :transport, Oban, Keyword.merge(base_oban_conf, extra_oban_conf)
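
# For illustration: on a production worker node, the merge above yields roughly
#
#     [repo: DB.Repo, queues: [default: 2, ...], plugins: [{Oban.Plugins.Pruner, ...}, ...]]
#
# Keyword.merge/2 is shallow: a key present in extra_oban_conf replaces the
# corresponding base_oban_conf entry wholesale.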

# Here we only override specific keys. As documented in https://hexdocs.pm/elixir/master/Config.html#config/2,
# keyword values are recursively deep-merged, which works nicely here.
if config_env() == :dev do
  config :transport, TransportWeb.Endpoint,
    # Optionally overriding the port makes it possible to run 2 nodes locally without conflict
    http: [port: System.get_env("PORT", "5000")],
    # We also make sure to start the assets watcher only if the webserver is up, to avoid cluttering the logs
    watchers: if(webserver, do: [npm: ["run", "--prefix", "apps/transport/client", "watch"]], else: [])
end
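
# For illustration (hypothetical compile-time values): if config/dev.exs had set
#
#     config :transport, TransportWeb.Endpoint, http: [port: 4000, ip: {127, 0, 0, 1}]
#
# the override above would deep-merge the :http keyword lists, replacing :port
# while keeping :ip.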

email_host_name =
  case config_env() do
    :dev ->
      "localhost"

    :test ->
      # Used to make sure we replace the app host name with the email host name
      # when they differ, in some email tests
      "email.localhost"

    :prod ->
      # NOTE: it would be best to configure this via an EMAIL_HOST_NAME env var instead,
      # but this will do for today.
      case app_env do
        :staging -> "prochainement.transport.data.gouv.fr"
        :production -> "transport.data.gouv.fr"
      end
  end

config :transport, :email_host_name, email_host_name

if config_env() == :prod do
  pool_size =
    case app_env do
      :production -> 15
      :staging -> 6
    end

  config :db, DB.Repo,
    # Parentheses matter here: without them, |> binds tighter than || and the
    # prefix replacement would only ever apply to the empty-string fallback
    url:
      (System.get_env("POSTGRESQL_ADDON_DIRECT_URI") || System.get_env("POSTGRESQL_ADDON_URI") || "")
      |> String.replace_prefix("postgresql", "ecto"),
    # NOTE: we must be careful with this; both the front-end and the worker consume connections from the pool
    pool_size: pool_size,
    # See https://hexdocs.pm/db_connection/DBConnection.html#start_link/2-queue-config
    # [Ecto.Repo] :pool_timeout is no longer supported in favor of a new queue system described in
    # DBConnection.start_link/2 under "Queue config". For most users, configuring :timeout is enough,
    # as it now includes both queue and query time
    timeout: 15_000
end
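
# For illustration (hypothetical URI): with the parentheses above, a value such as
# "postgresql://user:pass@host:5432/db" becomes "ecto://user:pass@host:5432/db",
# matching the URL scheme used in Ecto's own configuration examples.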