diff --git a/obp-api/pom.xml b/obp-api/pom.xml
index 846196e01..6ab9afa82 100644
--- a/obp-api/pom.xml
+++ b/obp-api/pom.xml
@@ -342,6 +342,11 @@
flexmark-util-options
0.64.0
+
+ com.zaxxer
+ HikariCP
+ 4.0.3
+
diff --git a/obp-api/src/main/resources/props/sample.props.template b/obp-api/src/main/resources/props/sample.props.template
index 109008268..106550bc6 100644
--- a/obp-api/src/main/resources/props/sample.props.template
+++ b/obp-api/src/main/resources/props/sample.props.template
@@ -182,10 +182,6 @@ write_connector_metrics=true
db.driver=org.h2.Driver
db.url=jdbc:h2:./lift_proto.db;NON_KEYWORDS=VALUE;DB_CLOSE_ON_EXIT=FALSE
-#the default max database connection pool size is 30
-db.maxPoolSize=30
-
-
#If you want to use the postgres , be sure to create your database and update the line below!
#db.driver=org.postgresql.Driver
#db.url=jdbc:postgresql://localhost:5432/dbname?user=dbusername&password=thepassword
diff --git a/obp-api/src/main/scala/bootstrap/liftweb/Boot.scala b/obp-api/src/main/scala/bootstrap/liftweb/Boot.scala
index 3d419a8f3..89e825585 100644
--- a/obp-api/src/main/scala/bootstrap/liftweb/Boot.scala
+++ b/obp-api/src/main/scala/bootstrap/liftweb/Boot.scala
@@ -108,7 +108,7 @@ import code.productfee.ProductFee
import code.products.MappedProduct
import code.ratelimiting.RateLimiting
import code.remotedata.RemotedataActors
-import code.scheduler.{DatabaseDriverScheduler, JobScheduler, MetricsArchiveScheduler, DatabaseConnectionPoolScheduler}
+import code.scheduler.{DatabaseDriverScheduler, JobScheduler, MetricsArchiveScheduler}
import code.scope.{MappedScope, MappedUserScope}
import code.snippet.{OAuthAuthorisation, OAuthWorkedThanks}
import code.socialmedia.MappedSocialMedia
@@ -231,42 +231,132 @@ class Boot extends MdcLoggable {
def boot {
- // set up the way to connect to the relational DB we're using (ok if other connector than relational)
- if (!DB.jndiJdbcConnAvailable_?) {
- val driver =
- Props.mode match {
- case Props.RunModes.Production | Props.RunModes.Staging | Props.RunModes.Development => APIUtil.getPropsValue("db.driver") openOr "org.h2.Driver"
- case Props.RunModes.Test => APIUtil.getPropsValue("db.driver") openOr "org.h2.Driver"
- case _ => "org.h2.Driver"
- }
- val vendor =
- Props.mode match {
- case Props.RunModes.Production | Props.RunModes.Staging | Props.RunModes.Development =>
- new CustomDBVendor(driver,
- APIUtil.getPropsValue("db.url") openOr "jdbc:h2:lift_proto.db;AUTO_SERVER=TRUE",
- APIUtil.getPropsValue("db.user"), APIUtil.getPropsValue("db.password"))
- case Props.RunModes.Test =>
- new CustomDBVendor(
- driver,
- APIUtil.getPropsValue("db.url") openOr Constant.h2DatabaseDefaultUrlValue,
- APIUtil.getPropsValue("db.user").orElse(Empty),
- APIUtil.getPropsValue("db.password").orElse(Empty)
- )
- case _ =>
- new CustomDBVendor(
- driver,
- h2DatabaseDefaultUrlValue,
- Empty, Empty)
- }
+ implicit val formats = CustomJsonFormats.formats
+ // set up the way to connect to the relational DB we're using (ok if other connector than relational)
+ val driver =
+ Props.mode match {
+ case Props.RunModes.Production | Props.RunModes.Staging | Props.RunModes.Development => APIUtil.getPropsValue("db.driver") openOr "org.h2.Driver"
+ case Props.RunModes.Test => APIUtil.getPropsValue("db.driver") openOr "org.h2.Driver"
+ case _ => "org.h2.Driver"
+ }
+ val vendor =
+ Props.mode match {
+ case Props.RunModes.Production | Props.RunModes.Staging | Props.RunModes.Development =>
+ new CustomDBVendor(driver,
+ APIUtil.getPropsValue("db.url") openOr "jdbc:h2:lift_proto.db;AUTO_SERVER=TRUE",
+ APIUtil.getPropsValue("db.user"), APIUtil.getPropsValue("db.password"))
+ case Props.RunModes.Test =>
+ new CustomDBVendor(
+ driver,
+ APIUtil.getPropsValue("db.url") openOr Constant.h2DatabaseDefaultUrlValue,
+ APIUtil.getPropsValue("db.user").orElse(Empty),
+ APIUtil.getPropsValue("db.password").orElse(Empty)
+ )
+ case _ =>
+ new CustomDBVendor(
+ driver,
+ h2DatabaseDefaultUrlValue,
+ Empty, Empty)
+ }
+
+ logger.debug("Using database driver: " + driver)
+// LiftRules.unloadHooks.append(vendor.closeAllConnections_! _)
- logger.debug("Using database driver: " + driver)
- LiftRules.unloadHooks.append(vendor.closeAllConnections_! _)
+ DB.defineConnectionManager(net.liftweb.util.DefaultConnectionIdentifier, vendor)
- DB.defineConnectionManager(net.liftweb.util.DefaultConnectionIdentifier, vendor)
- DatabaseConnectionPoolScheduler.start(vendor, 10)// 10 seconds
-// logger.debug("ThreadPoolConnectionsScheduler.start(vendor, 10)")
- }
+ /**
+ * Function that determines if foreign key constraints are
+ * created by Schemifier for the specified connection.
+ *
+ * Note: The chosen driver must also support foreign keys for
+ * creation to happen
+ *
+ * In case of PostgreSQL it works
+ */
+ MapperRules.createForeignKeys_? = (_) => APIUtil.getPropsAsBoolValue("mapper_rules.create_foreign_keys", false)
+ schemifyAll()
+
+// logger.info("Mapper database info: " + Migration.DbFunction.mapperDatabaseInfo())
+
+ // DbFunction.tableExists(ResourceUser, (DB.use(DefaultConnectionIdentifier){ conn => conn})) match {
+ // case true => // DB already exist
+ // // Migration Scripts are used to update the model of OBP-API DB to a latest version.
+ // // Please note that migration scripts are executed before Lift Mapper Schemifier
+ //// Migration.database.executeScripts(startedBeforeSchemifier = true)
+  //      logger.info("The Mapper database already exists. The scripts are executed BEFORE Lift Mapper Schemifier.")
+ // case false => // DB is still not created. The scripts will be executed after Lift Mapper Schemifier
+ // logger.info("The Mapper database is still not created. The scripts are going to be executed AFTER Lift Mapper Schemifier.")
+ // }
+
+  // Migration Scripts are used to update the model of OBP-API DB to the latest version.
+
+ // Please note that migration scripts are executed after Lift Mapper Schemifier
+ //Migration.database.executeScripts(startedBeforeSchemifier = false)
+
+ if (APIUtil.getPropsAsBoolValue("create_system_views_at_boot", true)) {
+ // Create system views
+ val owner = Views.views.vend.getOrCreateSystemView(SYSTEM_OWNER_VIEW_ID).isDefined
+ val auditor = Views.views.vend.getOrCreateSystemView(SYSTEM_AUDITOR_VIEW_ID).isDefined
+ val accountant = Views.views.vend.getOrCreateSystemView(SYSTEM_ACCOUNTANT_VIEW_ID).isDefined
+ val standard = Views.views.vend.getOrCreateSystemView(SYSTEM_STANDARD_VIEW_ID).isDefined
+ val stageOne = Views.views.vend.getOrCreateSystemView(SYSTEM_STAGE_ONE_VIEW_ID).isDefined
+ val manageCustomViews = Views.views.vend.getOrCreateSystemView(SYSTEM_MANAGE_CUSTOM_VIEWS_VIEW_ID).isDefined
+ // Only create Firehose view if they are enabled at instance.
+ val accountFirehose = if (ApiPropsWithAlias.allowAccountFirehose)
+ Views.views.vend.getOrCreateSystemView(SYSTEM_FIREHOSE_VIEW_ID).isDefined
+ else Empty.isDefined
+
+ val comment: String =
+ s"""
+ |System view ${SYSTEM_OWNER_VIEW_ID} exists/created at the instance: ${owner}
+ |System view ${SYSTEM_AUDITOR_VIEW_ID} exists/created at the instance: ${auditor}
+ |System view ${SYSTEM_ACCOUNTANT_VIEW_ID} exists/created at the instance: ${accountant}
+ |System view ${SYSTEM_FIREHOSE_VIEW_ID} exists/created at the instance: ${accountFirehose}
+ |System view ${SYSTEM_STANDARD_VIEW_ID} exists/created at the instance: ${standard}
+ |System view ${SYSTEM_STAGE_ONE_VIEW_ID} exists/created at the instance: ${stageOne}
+ |System view ${SYSTEM_MANAGE_CUSTOM_VIEWS_VIEW_ID} exists/created at the instance: ${manageCustomViews}
+ |""".stripMargin
+ logger.info(comment)
+
+ APIUtil.getPropsValue("additional_system_views") match {
+ case Full(value) =>
+ val viewSetUKOpenBanking = value.split(",").map(_.trim).toList
+ val viewsUKOpenBanking = List(
+ SYSTEM_READ_ACCOUNTS_BASIC_VIEW_ID, SYSTEM_READ_ACCOUNTS_DETAIL_VIEW_ID,
+ SYSTEM_READ_BALANCES_VIEW_ID, SYSTEM_READ_TRANSACTIONS_BASIC_VIEW_ID,
+ SYSTEM_READ_TRANSACTIONS_DEBITS_VIEW_ID, SYSTEM_READ_TRANSACTIONS_DETAIL_VIEW_ID,
+ SYSTEM_READ_ACCOUNTS_BERLIN_GROUP_VIEW_ID,
+ SYSTEM_READ_BALANCES_BERLIN_GROUP_VIEW_ID,
+ SYSTEM_READ_TRANSACTIONS_BERLIN_GROUP_VIEW_ID
+ )
+ for {
+ systemView <- viewSetUKOpenBanking
+ if viewsUKOpenBanking.exists(_ == systemView)
+ } {
+ Views.views.vend.getOrCreateSystemView(systemView)
+ val comment = s"System view ${systemView} exists/created at the instance"
+ logger.info(comment)
+ }
+ case _ => // Do nothing
+ }
+
+ }
+
+ ApiWarnings.logWarningsRegardingProperties()
+ ApiWarnings.customViewNamesCheck()
+ ApiWarnings.systemViewNamesCheck()
+
+ //see the notes for this method:
+ createDefaultBankAndDefaultAccountsIfNotExisting()
+
+ //launch the scheduler to clean the database from the expired tokens and nonces
+ Schedule.schedule(() => OAuthAuthorisation.dataBaseCleaner, 2 minutes)
+
+// if (Props.devMode || Props.testMode) {
+// StoredProceduresMockedData.createOrDropMockedPostgresStoredProcedures()
+// }
+
if (APIUtil.getPropsAsBoolValue("logging.database.queries.enable", false)) {
DB.addLogFunc
{
@@ -299,8 +389,6 @@ class Boot extends MdcLoggable {
}
-
- implicit val formats = CustomJsonFormats.formats
LiftRules.statelessDispatch.prepend {
case _ if tryo(DB.use(DefaultConnectionIdentifier){ conn => conn}.isClosed).isEmpty =>
Props.mode match {
@@ -314,8 +402,6 @@ class Boot extends MdcLoggable {
)
}
}
-
- logger.info("Mapper database info: " + Migration.DbFunction.mapperDatabaseInfo())
//If use_custom_webapp=true, this will copy all the files from `OBP-API/obp-api/src/main/webapp` to `OBP-API/obp-api/src/main/resources/custom_webapp`
if (APIUtil.getPropsAsBoolValue("use_custom_webapp", false)){
@@ -342,21 +428,9 @@ class Boot extends MdcLoggable {
}
}
- DbFunction.tableExists(ResourceUser, (DB.use(DefaultConnectionIdentifier){ conn => conn})) match {
- case true => // DB already exist
- // Migration Scripts are used to update the model of OBP-API DB to a latest version.
- // Please note that migration scripts are executed before Lift Mapper Schemifier
- Migration.database.executeScripts(startedBeforeSchemifier = true)
- logger.info("The Mapper database already exits. The scripts are executed BEFORE Lift Mapper Schemifier.")
- case false => // DB is still not created. The scripts will be executed after Lift Mapper Schemifier
- logger.info("The Mapper database is still not created. The scripts are going to be executed AFTER Lift Mapper Schemifier.")
- }
-
// ensure our relational database's tables are created/fit the schema
val connector = APIUtil.getPropsValue("connector").openOrThrowException("no connector set")
- schemifyAll()
-
-
+
val runningMode = Props.mode match {
case Props.RunModes.Production => "Production mode"
case Props.RunModes.Staging => "Staging mode"
@@ -379,11 +453,7 @@ class Boot extends MdcLoggable {
ObpActorSystem.startNorthSideAkkaConnectorActorSystem()
case _ => // Do nothing
}
-
- if (Props.devMode || Props.testMode) {
- StoredProceduresMockedData.createOrDropMockedPostgresStoredProcedures()
- }
-
+
// where to search snippets
LiftRules.addToPackages("code")
@@ -398,16 +468,6 @@ class Boot extends MdcLoggable {
) => false})
}
- /**
- * Function that determines if foreign key constraints are
- * created by Schemifier for the specified connection.
- *
- * Note: The chosen driver must also support foreign keys for
- * creation to happen
- *
- * In case of PostgreSQL it works
- */
- MapperRules.createForeignKeys_? = (_) => APIUtil.getPropsAsBoolValue("mapper_rules.create_foreign_keys", false)
@@ -492,11 +552,6 @@ class Boot extends MdcLoggable {
// LiftRules.statelessDispatch.append(Metrics) TODO: see metric menu entry below
-
-
- //launch the scheduler to clean the database from the expired tokens and nonces
- Schedule.schedule(()=> OAuthAuthorisation.dataBaseCleaner, 2 minutes)
-
val accountCreation = {
if(APIUtil.getPropsAsBoolValue("allow_sandbox_account_creation", false)){
//user must be logged in, as a created account needs an owner
@@ -750,11 +805,6 @@ class Boot extends MdcLoggable {
// Sanity check for incompatible Props values for Scopes.
sanityCheckOPropertiesRegardingScopes()
-
- // Migration Scripts are used to update the model of OBP-API DB to a latest version.
- // Please note that migration scripts are executed after Lift Mapper Schemifier
- Migration.database.executeScripts(startedBeforeSchemifier = false)
-
// export one Connector's methods as endpoints, it is just for develop
APIUtil.getPropsValue("connector.name.export.as.endpoints").foreach { connectorName =>
// validate whether "connector.name.export.as.endpoints" have set a correct value
@@ -778,63 +828,6 @@ class Boot extends MdcLoggable {
ConnectorEndpoints.registerConnectorEndpoints
}
-
- if (APIUtil.getPropsAsBoolValue("create_system_views_at_boot", true)){
- // Create system views
- val owner = Views.views.vend.getOrCreateSystemView(SYSTEM_OWNER_VIEW_ID).isDefined
- val auditor = Views.views.vend.getOrCreateSystemView(SYSTEM_AUDITOR_VIEW_ID).isDefined
- val accountant = Views.views.vend.getOrCreateSystemView(SYSTEM_ACCOUNTANT_VIEW_ID).isDefined
- val standard = Views.views.vend.getOrCreateSystemView(SYSTEM_STANDARD_VIEW_ID).isDefined
- val stageOne = Views.views.vend.getOrCreateSystemView(SYSTEM_STAGE_ONE_VIEW_ID).isDefined
- val manageCustomViews = Views.views.vend.getOrCreateSystemView(SYSTEM_MANAGE_CUSTOM_VIEWS_VIEW_ID).isDefined
- // Only create Firehose view if they are enabled at instance.
- val accountFirehose = if (ApiPropsWithAlias.allowAccountFirehose)
- Views.views.vend.getOrCreateSystemView(SYSTEM_FIREHOSE_VIEW_ID).isDefined
- else Empty.isDefined
-
- val comment: String =
- s"""
- |System view ${SYSTEM_OWNER_VIEW_ID} exists/created at the instance: ${owner}
- |System view ${SYSTEM_AUDITOR_VIEW_ID} exists/created at the instance: ${auditor}
- |System view ${SYSTEM_ACCOUNTANT_VIEW_ID} exists/created at the instance: ${accountant}
- |System view ${SYSTEM_FIREHOSE_VIEW_ID} exists/created at the instance: ${accountFirehose}
- |System view ${SYSTEM_STANDARD_VIEW_ID} exists/created at the instance: ${standard}
- |System view ${SYSTEM_STAGE_ONE_VIEW_ID} exists/created at the instance: ${stageOne}
- |System view ${SYSTEM_MANAGE_CUSTOM_VIEWS_VIEW_ID} exists/created at the instance: ${manageCustomViews}
- |""".stripMargin
- logger.info(comment)
-
- APIUtil.getPropsValue("additional_system_views") match {
- case Full(value) =>
- val viewSetUKOpenBanking = value.split(",").map(_.trim).toList
- val viewsUKOpenBanking = List(
- SYSTEM_READ_ACCOUNTS_BASIC_VIEW_ID, SYSTEM_READ_ACCOUNTS_DETAIL_VIEW_ID,
- SYSTEM_READ_BALANCES_VIEW_ID, SYSTEM_READ_TRANSACTIONS_BASIC_VIEW_ID,
- SYSTEM_READ_TRANSACTIONS_DEBITS_VIEW_ID, SYSTEM_READ_TRANSACTIONS_DETAIL_VIEW_ID,
- SYSTEM_READ_ACCOUNTS_BERLIN_GROUP_VIEW_ID,
- SYSTEM_READ_BALANCES_BERLIN_GROUP_VIEW_ID,
- SYSTEM_READ_TRANSACTIONS_BERLIN_GROUP_VIEW_ID
- )
- for {
- systemView <- viewSetUKOpenBanking
- if viewsUKOpenBanking.exists(_ == systemView)
- } {
- Views.views.vend.getOrCreateSystemView(systemView)
- val comment = s"System view ${systemView} exists/created at the instance"
- logger.info(comment)
- }
- case _ => // Do nothing
- }
-
- }
-
- ApiWarnings.logWarningsRegardingProperties()
- ApiWarnings.customViewNamesCheck()
- ApiWarnings.systemViewNamesCheck()
-
- //see the notes for this method:
- createDefaultBankAndDefaultAccountsIfNotExisting()
-
if(HydraUtil.mirrorConsumerInHydra) {
createHydraClients()
}
diff --git a/obp-api/src/main/scala/bootstrap/liftweb/CustomDBVendor.scala b/obp-api/src/main/scala/bootstrap/liftweb/CustomDBVendor.scala
index 2bea78080..fe01cbea8 100644
--- a/obp-api/src/main/scala/bootstrap/liftweb/CustomDBVendor.scala
+++ b/obp-api/src/main/scala/bootstrap/liftweb/CustomDBVendor.scala
@@ -1,8 +1,10 @@
package bootstrap.liftweb
import code.api.util.APIUtil
-import java.sql.{Connection, DriverManager}
+import com.zaxxer.hikari.pool.ProxyConnection
+import com.zaxxer.hikari.{HikariConfig, HikariDataSource}
+import java.sql.{Connection}
import net.liftweb.common.{Box, Failure, Full, Logger}
import net.liftweb.db.ConnectionManager
import net.liftweb.util.ConnectionIdentifier
@@ -23,170 +25,49 @@ class CustomDBVendor(driverName: String,
private val logger = Logger(classOf[CustomDBVendor])
- def createOne: Box[Connection] = {
- tryo{t:Throwable => logger.error("Cannot load database driver: %s".format(driverName), t)}{Class.forName(driverName);()}
+ object HikariDatasource {
+ val config = new HikariConfig()
(dbUser, dbPassword) match {
case (Full(user), Full(pwd)) =>
- tryo{t:Throwable => logger.error("Unable to get database connection. url=%s, user=%s".format(dbUrl, user),t)}(DriverManager.getConnection(dbUrl, user, pwd))
+ config.setJdbcUrl(dbUrl)
+ config.setUsername(user)
+ config.setPassword(pwd)
case _ =>
- tryo{t:Throwable => logger.error("Unable to get database connection. url=%s".format(dbUrl),t)}(DriverManager.getConnection(dbUrl))
+ config.setJdbcUrl(dbUrl)
}
+
+// val dbMaxPoolSize = APIUtil.getPropsAsIntValue("db.maxPoolSize", config.getMaximumPoolSize)
+// config.setMaximumPoolSize(dbMaxPoolSize)
+ config.addDataSourceProperty("cachePrepStmts", "true")
+ config.addDataSourceProperty("prepStmtCacheSize", "250")
+ config.addDataSourceProperty("prepStmtCacheSqlLimit", "2048")
+
+ val ds: HikariDataSource = new HikariDataSource(config)
}
+
+ def createOne: Box[Connection] = {
+ tryo{t:Throwable => logger.error("Cannot load database driver: %s".format(driverName), t)}{Class.forName(driverName);()}
+ tryo{t:Throwable => logger.error("Unable to get database connection. url=%s".format(dbUrl),t)}(HikariDatasource.ds.getConnection())
+ }
+
+ def closeAllConnections_!(): Unit = HikariDatasource.ds.close()
}
trait CustomProtoDBVendor extends ConnectionManager {
private val logger = Logger(classOf[CustomProtoDBVendor])
- private var freePool: List[Connection] = Nil // no process use the connections, they are available for use
- private var usedPool: List[Connection] = Nil // connections are already used, not available for use
-// private var totalConnectionsCount = 0
-// private var tempMaxSize = maxPoolSize
-
- /**
- * Override and set to false if the maximum freePool size can temporarily be expanded to avoid freePool starvation
- */
- protected def allowTemporaryPoolExpansion = false
-
- /**
- * Override this method if you want something other than 10 connections in the freePool and usedPool
- * freePool.size + usedPool.size <=10
- */
- val dbMaxPoolSize = APIUtil.getPropsAsIntValue("db.maxPoolSize",30)
- protected def maxPoolSize = dbMaxPoolSize
-
- /**
- * The absolute maximum that this freePool can extend to
- * The default is 40. Override this method to change.
- */
- protected def doNotExpandBeyond = 40
-
- /**
- * The logic for whether we can expand the freePool beyond the current size. By
- * default, the logic tests allowTemporaryPoolExpansion && totalConnectionsCount <= doNotExpandBeyond
- */
-// protected def canExpand_? : Boolean = allowTemporaryPoolExpansion && totalConnectionsCount <= doNotExpandBeyond
/**
* How is a connection created?
*/
def createOne: Box[Connection]
- /**
- * Test the connection. By default, setAutoCommit(false),
- * but you can do a real query on your RDBMS to see if the connection is alive
- */
- protected def testConnection(conn: Connection) {
- conn.setAutoCommit(false)
- }
-
// Tail Recursive function in order to avoid Stack Overflow
// PLEASE NOTE: Changing this function you can break the above named feature
def newConnection(name: ConnectionIdentifier): Box[Connection] = {
- val (connection: Box[Connection], needRecursiveAgain: Boolean) = commonPart(name)
- needRecursiveAgain match {
- case true => newConnection(name)
- case false => connection
- }
+ createOne
}
+ def releaseConnection(conn: Connection): Unit = {conn.asInstanceOf[ProxyConnection].close()}
- def commonPart(name: ConnectionIdentifier): (Box[Connection], Boolean) =
- synchronized {
- freePool match {
- case Nil if (freePool.size + usedPool.size) < maxPoolSize =>{
- val ret = createOne // get oneConnection from JDBC, not in the freePool yet, we add ot the Pool when we release it .
- try {
- ret.head.setAutoCommit(false) // we test the connection status, if it is success, we return it back.
- usedPool = ret.head :: usedPool
- logger.trace(s"Created connection is good, detail is $ret ")
- } catch {
- case e: Exception =>
- logger.trace(s"Created connection is bad, detail is $e")
- }
-
- //Note: we may return the invalid connection
- (ret, false)
- }
-
- case Nil => //freePool is empty and we are at maxPoolSize limit
- wait(50L)
- logger.error(s"The (freePool.size + usedPool.size) is at the limit ($maxPoolSize) and there are no free connections.")
- (
- Failure(s"The (freePool.size + usedPool.size) is at the limit ($maxPoolSize) and there are no free connections."),
- false
- )
-
- case freeHead :: freeTail =>//if freePool is not empty, we just get connection from freePool, no need to create new connection from JDBC.
- logger.trace("Found connection in freePool, name=%s freePool size =%s".format(name, freePool.size))
-
- freePool = freeTail // remove the head from freePool
- //TODO check if we need add head or tail
- usedPool = freeHead :: usedPool // we added connection to usedPool
-
- try {
- this.testConnection(freeHead) // we test the connection status, if it is success, we return it back.
- (Full(freeHead),false)
- } catch {
- case e: Exception => try {
- logger.error(s"testConnection failed, try to close it and call newConnection(name), detail is $e")
- tryo(freeHead.close) // call JDBC to close this connection
- (
- Failure(s"testConnection failed, try to close it and call newConnection(name), detail is $e"),
- true
- )
- } catch {
- case e: Exception =>{
- logger.error(s"could not close connection and call newConnection(name), detail is $e")
- (
- Failure(s"could not close connection and call newConnection(name), detail is $e"),
- true
- )
- }
- }
- }
- }
- }
-
- def releaseConnection(conn: Connection): Unit = synchronized {
- usedPool = usedPool.filterNot(_ ==conn)
- logger.trace(s"Released connection. removed connection from usedPool size is ${usedPool.size}")
- //TODO check if we need add head or tail
- freePool = conn :: freePool
- logger.trace(s"Released connection. added connection to freePool size is ${freePool.size}")
- notifyAll
- }
-
- def closeAllConnections_!(): Unit = _closeAllConnections_!(0)
-
-
- private def _closeAllConnections_!(cnt: Int): Unit = synchronized {
- logger.trace(s"Closing all connections, try the $cnt time")
- if (cnt > 10) ()//we only try this 10 times,
- else {
- freePool.foreach {c => tryo(c.close);}
- usedPool.foreach {c => tryo(c.close);}
- freePool = Nil
- usedPool = Nil
-
- if (usedPool.length > 0 || freePool.length > 0) wait(250)
-
- _closeAllConnections_!(cnt + 1)
- }
- }
-
-
- //This is only for debugging
- def logAllConnectionsStatus = {
- logger.trace(s"Hello from logAllConnectionsStatus: usedPool.size is ${usedPool.length}, freePool.size is ${freePool.length}")
- for {
- usedConnection <- usedPool
- } yield {
- logger.trace(s"usedConnection (${usedConnection.toString}): isClosed-${usedConnection.isClosed}, getWarnings-${usedConnection.getWarnings}")
- }
- for {
- freeConnection <- freePool
- } yield {
- logger.trace(s"freeConnection (${freeConnection.toString}): isClosed-${freeConnection.isClosed}, getWarnings-${freeConnection.getWarnings}")
- }
- }
-}
+}
\ No newline at end of file
diff --git a/obp-api/src/main/scala/code/scheduler/DatabaseConnectionPoolScheduler.scala b/obp-api/src/main/scala/code/scheduler/DatabaseConnectionPoolScheduler.scala
deleted file mode 100644
index c825659f7..000000000
--- a/obp-api/src/main/scala/code/scheduler/DatabaseConnectionPoolScheduler.scala
+++ /dev/null
@@ -1,49 +0,0 @@
-package code.scheduler
-
-import bootstrap.liftweb.CustomProtoDBVendor
-import code.actorsystem.ObpLookupSystem
-import code.util.Helper.MdcLoggable
-import java.util.concurrent.TimeUnit
-import scala.concurrent.duration._
-
-
-object DatabaseConnectionPoolScheduler extends MdcLoggable {
-
- private lazy val actorSystem = ObpLookupSystem.obpLookupSystem
- implicit lazy val executor = actorSystem.dispatcher
- private lazy val scheduler = actorSystem.scheduler
-
- def start(vendor: CustomProtoDBVendor, interval: Long): Unit = {
- scheduler.schedule(
- initialDelay = Duration(interval, TimeUnit.SECONDS),
- interval = Duration(interval, TimeUnit.SECONDS),
- runnable = new Runnable {
- def run(): Unit = {
- clearAllConnections(vendor)
-// vendor.logAllConnectionsStatus //This is only to be used for debugging .
- }
- }
- )
- }
-
- def clearAllConnections(vendor: CustomProtoDBVendor) = {
- val connectionBox = vendor.createOne
- try {
- if (connectionBox.isEmpty) {
- vendor.closeAllConnections_!()
- logger.debug("ThreadPoolConnectionsScheduler.clearAllConnections")
- }
- } catch {
- case e => logger.debug(s"ThreadPoolConnectionsScheduler.clearAllConnections() method throwed exception, details is $e")
- }finally {
- try {
- if (connectionBox.isDefined)
- connectionBox.head.close()
- } catch {
- case e =>logger.debug(s"ThreadPoolConnectionsScheduler.clearAllConnections.close method throwed exception, details is $e")
- }
- }
- }
-
-
-}
diff --git a/release_notes.md b/release_notes.md
index ad1f4cec6..ed8d397c7 100644
--- a/release_notes.md
+++ b/release_notes.md
@@ -3,7 +3,7 @@
### Most recent changes at top of file
```
Date Commit Action
-22/09/2023 752ff04b Added props db.maxPoolSize, default is 30.
+22/09/2023 752ff04b Added props db.maxPoolSize, default is 10.
24/08/2023 bcb8fcfd Added props expectedOpenFuturesPerService, default is 100.
16/08/2023 4d8dfa66 Added props short_endpoint_timeout, default is 1.
Added props medium_endpoint_timeout, default is 7.