@@ -77,42 +77,63 @@ Adjusted application properties have to be provided. An application properties t
 
 # Environment where application will be running
 environment=Local
+# Maximum number of workflows that can be triggered in bulk (Default value 10)
+application.maximumNumberOfWorkflowsInBulkRun=10
+```
+```
+# Unique app id, for easier application identification
+appUniqueId=
+```
+```
+# Health check settings
+health.databaseConnection.timeoutMillis=120000
+health.yarnConnection.testEndpoint=/cluster/cluster
+health.yarnConnection.timeoutMillis=120000
 ```
 ```
 # How will users authenticate. Available options: inmemory, ldap
 auth.mechanism=inmemory
+# If set, users without the admin role will not have access to admin-protected endpoints
+auth.admin.role=ROLE_ADMIN
 # INMEMORY authentication: username and password defined here will be used for authentication.
-auth.inmemory.user=user
-auth.inmemory.password=password
+auth.inmemory.user=hyperdriver-user
+auth.inmemory.password=hyperdriver-password
+auth.inmemory.admin.user=hyperdriver-admin-user
+auth.inmemory.admin.password=hyperdriver-admin-password
 # LDAP authentication: props template that has to be defined in case of LDAP authentication
 auth.ad.domain=
 auth.ad.server=
 auth.ldap.search.base=
 auth.ldap.search.filter=
 ```
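When `auth.mechanism=ldap`, the Active Directory properties above have to be filled in. A hypothetical setup might look as follows; the domain, server URL, search base, and filter are illustrative values, not project defaults:

```
# Illustrative LDAP values only; adjust to your directory
auth.mechanism=ldap
auth.ad.domain=corp.example.com
auth.ad.server=ldaps://ldap.corp.example.com:636
auth.ldap.search.base=DC=corp,DC=example,DC=com
auth.ldap.search.filter=(&(objectClass=user)(sAMAccountName={1}))
```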
 ```
-# Unique app id, for easier application identification
-appUniqueId=
-```
-```
 # Core properties.
 # How many threads to use for each part of the "scheduler".
 # Heart beat interval in milliseconds.
 # Lag threshold in milliseconds, after which an instance is deactivated by another instance.
+scheduler.autostart=true
 scheduler.thread.pool.size=10
 scheduler.sensors.thread.pool.size=20
+scheduler.sensors.changedSensorsChunkQuerySize=100
 scheduler.executors.thread.pool.size=30
 scheduler.jobs.parallel.number=100
 scheduler.heart.beat=5000
 scheduler.lag.threshold=20000
 ```
 ```
+# Properties used to send notifications to users.
+notification.enabled=false
+notification.sender.address=
+spring.mail.host=
+spring.mail.port=
+```
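The mail settings are standard Spring Boot `spring.mail.*` properties. A sketch for a hypothetical SMTP relay (host, port, and sender address are illustrative):

```
# Illustrative values only; point these at your own SMTP server
notification.enabled=true
notification.sender.address=hyperdrive-trigger@example.com
spring.mail.host=smtp.example.com
spring.mail.port=587
```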
+```
 #Kafka sensor properties. Not all are required. Adjust according to your use case.
-kafkaSource.group.id=hyper_drive_${appUniqueId}
-kafkaSource.key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
-kafkaSource.value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
+kafkaSource.group.id.prefix=hyper_drive_${appUniqueId}
 kafkaSource.poll.duration=500
-kafkaSource.max.poll.records=3
+kafkaSource.properties.key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
+kafkaSource.properties.value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
+kafkaSource.properties.max.poll.records=3
 kafkaSource.properties.enable.auto.commit=false
 kafkaSource.properties.auto.offset.reset=latest
 kafkaSource.properties.security.protocol=
@@ -126,13 +147,20 @@ kafkaSource.properties.sasl.mechanism=
 kafkaSource.properties.sasl.jaas.config=
 ```
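The `kafkaSource.properties.*` keys mirror standard Kafka consumer configuration, so the usual client security settings apply. A sketch for a SASL_SSL-secured cluster; the mechanism and credentials are illustrative assumptions:

```
# Illustrative values for a SASL_SSL cluster
kafkaSource.properties.security.protocol=SASL_SSL
kafkaSource.properties.sasl.mechanism=PLAIN
kafkaSource.properties.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="<user>" password="<password>";
```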
 ```
-#Spark yarn sink properties. Properties used to deploy and run Spark job in Yarn. Not all are required. Adjust according to your use case.
-spark.submit.thread.pool.size=
-sparkYarnSink.hadoopResourceManagerUrlBase=
-sparkYarnSink.hadoopConfDir=
-sparkYarnSink.sparkHome=
-sparkYarnSink.master=yarn
+# Recurring sensor properties.
+recurringSensor.maxJobsPerDuration=8
+recurringSensor.duration=1h
+```
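Taken together, these two values presumably cap a recurring sensor at eight triggered jobs per one-hour window; that reading is an assumption, as the exact semantics are not documented here.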
+```
+# Spark properties. Properties used to deploy and run a Spark job. Not all are required. Adjust according to your use case.
+# Where Spark jobs will be executed. Available options: yarn, emr.
+spark.submitApi=yarn
+
+# Submit API = YARN
 sparkYarnSink.submitTimeout=160000
+sparkYarnSink.hadoopConfDir=/opt/hadoop
+sparkYarnSink.sparkHome=/opt/spark
+sparkYarnSink.master=yarn
 sparkYarnSink.filesToDeploy=
 sparkYarnSink.additionalConfs.spark.ui.port=
 sparkYarnSink.additionalConfs.spark.executor.extraJavaOptions=
@@ -145,6 +173,16 @@ sparkYarnSink.additionalConfs.spark.yarn.keytab=
 sparkYarnSink.additionalConfs.spark.yarn.principal=
 sparkYarnSink.additionalConfs.spark.shuffle.service.enabled=true
 sparkYarnSink.additionalConfs.spark.dynamicAllocation.enabled=true
+
+# Submit API = EMR
+spark.emr.clusterId=
+spark.emr.filesToDeploy=
+spark.emr.additionalConfs=
+
+# Common properties for Submit API = YARN and EMR
+sparkYarnSink.hadoopResourceManagerUrlBase=
+sparkYarnSink.userUsedToKillJob=
+spark.submit.thread.pool.size=10
 ```
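For the EMR path, Amazon EMR cluster ids follow the `j-` prefix format. A hypothetical configuration; the cluster id and resource manager URL are illustrative, not defaults:

```
# Illustrative values; replace with your own cluster details
spark.submitApi=emr
spark.emr.clusterId=j-1ABC2DEF3GHIJ
sparkYarnSink.hadoopResourceManagerUrlBase=http://emr-master.example.com:8088
spark.submit.thread.pool.size=10
```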
 ```
 #PostgreSQL properties for connection to trigger database
@@ -155,7 +193,9 @@ db.password=
 db.keepAliveConnection=true
 db.connectionPool=HikariCP
 db.numThreads=4
+
 db.skip.liquibase=false
+spring.liquibase.change-log=classpath:/db_scripts/liquibase/db.changelog.yml
 ```
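The connection keys sit just above this hunk and are not shown here. A hypothetical local setup, assuming `db.url` and `db.user` key names alongside the `db.password` visible in the hunk header:

```
# Hypothetical local connection; db.url and db.user are assumed key names
db.url=jdbc:postgresql://localhost:5432/hyperdriver
db.user=hyperdriver
db.password=changeme
db.skip.liquibase=false
```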
 
 ## Embedded Tomcat