---
layout: post
title: "Connection Pools"
subtitle: "Connection pool technology"
date: 2018-11-05 06:00:00
author: "青乡"
header-img: "img/post-bg-2015.jpg"
catalog: true
tags:
    - connection pool
---
## What is a connection pool?

A connection pool framework is an implementation of the pooling technique defined by the JDK's SQL API (java.sql / javax.sql).

1. The JDK SQL API has two parts: 1) the basic-functionality interfaces and 2) the connection pool interfaces.
2. The database vendor's JDBC jar implements the basics of accessing the database, i.e. the basic JDK SQL interfaces.
3. Open-source connection pool frameworks implement the pooling technique, i.e. the JDK SQL connection pool interfaces (see the sketch below).
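A minimal, hedged sketch of that split (the URLs, credentials and the Oracle/Druid pairing are placeholders, not from the post): the vendor driver supplies the basic `java.sql` implementation, while the pool framework exposes itself through the JDK interface `javax.sql.DataSource`.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import javax.sql.DataSource;
import com.alibaba.druid.pool.DruidDataSource;

public class JdkSqlLayers {
    public static void main(String[] args) throws Exception {
        // 1) Basic functionality: the vendor jar (here the Oracle driver) creates a real connection.
        Connection raw = DriverManager.getConnection(
                "jdbc:oracle:thin:@localhost:1521:orcl", "user", "secret"); // placeholder URL/credentials
        raw.close(); // physically closed; nothing is reused

        // 2) Pooling: an open-source pool implements javax.sql.DataSource on top of the same driver.
        DruidDataSource pool = new DruidDataSource();
        pool.setUrl("jdbc:oracle:thin:@localhost:1521:orcl");
        pool.setUsername("user");
        pool.setPassword("secret");

        DataSource ds = pool;                      // application code depends only on the JDK interface
        try (Connection conn = ds.getConnection()) {
            // use the connection; close() returns it to the pool instead of closing it
        }
        pool.close();
    }
}
```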
## Purpose

A connection pool is a kind of object pool: database connections are expensive to create, so they are created once, kept in the pool, lent out to callers, and returned for reuse.
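A minimal sketch of the object-pool idea, using a hypothetical `SimpleObjectPool` class (not from the post) built on a `BlockingQueue`:

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.function.Supplier;

public class SimpleObjectPool<T> {
    private final BlockingQueue<T> idle;

    public SimpleObjectPool(int size, Supplier<T> factory) {
        idle = new ArrayBlockingQueue<>(size);
        for (int i = 0; i < size; i++) {
            idle.add(factory.get());  // create all objects up front, like initialSize in a connection pool
        }
    }

    public T borrow() throws InterruptedException {
        return idle.take();           // blocks when the pool is exhausted, like maxWait in a connection pool
    }

    public void release(T obj) {
        idle.offer(obj);              // hand the object back for the next caller instead of destroying it
    }
}
```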
## How is it used? How is it wrapped? What are the flow and steps?

### Custom classes

This layer is the application side of the persistence layer, i.e. the DAO layer.

The persistence layer is a persistence framework such as iBATIS or Hibernate, which implements and wraps the "Java + vendor JDBC" functionality.

The DAO layer is the application of that persistence framework. Concretely, it calls the persistence framework's API, which already fully implements database access, so the DAO layer only has to do two things: 1. call the persistence API (wrapping it lightly so it is more convenient for the upper layer, i.e. the service/business layer, to use); 2. configure it (specify which persistence framework to use, iBATIS or Hibernate).
```java
// gzh-dao/orm.jar - GenericDaoIbatisImpl
package com.gzh.orm.dao.ibatis;

import com.ibatis.sqlmap.client.SqlMapClient;
import com.ibatis.sqlmap.client.SqlMapExecutor;
import com.ibatis.sqlmap.engine.execution.SqlExecutor;
import com.ibatis.sqlmap.engine.impl.SqlMapClientImpl;
import com.ibatis.sqlmap.engine.impl.SqlMapExecutorDelegate;
import com.gzh.commons.Reflect.ReflectUtils;
import com.gzh.orm.dao.IGenericDao;
import com.gzh.orm.page.Page;
import java.io.Serializable;
import java.sql.SQLException;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.springframework.orm.ibatis.SqlMapClientCallback;
import org.springframework.orm.ibatis.SqlMapClientTemplate; // Spring integrates iBATIS
import org.springframework.orm.ibatis.support.SqlMapClientDaoSupport;
import org.springframework.util.ClassUtils;

// The custom class extends a class from spring-ibatis, so it only talks to Spring: it just extends
// SqlMapClientDaoSupport, and its configuration only involves Spring / spring-ibatis settings.
public class GenericDaoIbatisImpl extends SqlMapClientDaoSupport implements IGenericDao {

    private SqlExecutor sqlExecutor;

    public void setSqlExecutor(SqlExecutor sqlExecutor) {
        this.sqlExecutor = sqlExecutor;
    }

    public void initialize() throws Exception {
        if (this.sqlExecutor != null) {
            SqlMapClient sqlMapClient = getSqlMapClientTemplate().getSqlMapClient();
            SqlMapClientImpl client = (SqlMapClientImpl) sqlMapClient;
            SqlMapExecutorDelegate delgate = client.getDelegate();
            delgate.getSqlExecutor();
            ReflectUtils.setFieldValue(((SqlMapClientImpl) sqlMapClient).getDelegate(),
                    "sqlExecutor", SqlExecutor.class, this.sqlExecutor);
        }
    }

    public Object save(Object entity) {
        String className = ClassUtils.getShortName(entity.getClass());
        return getSqlMapClientTemplate().insert("add" + className, entity);
    }

    public Object save(String statementName, Object entity) {
        if (StringUtils.isEmpty(statementName)) {
            return save(entity);
        }
        return getSqlMapClientTemplate().insert(statementName, entity);
    }

    public void saveBatch(final String statementName, final List entityList) {
        if ((entityList != null) && (entityList.size() > 0)) {
            getSqlMapClientTemplate().execute(new SqlMapClientCallback() {
                public Object doInSqlMapClient(SqlMapExecutor executor) throws SQLException {
                    executor.startBatch();
                    int batch = 0;
                    for (int i = 0; i < entityList.size(); i++) {
                        executor.insert(statementName, entityList.get(i));
                        batch++;
                        if (batch == 500) {
                            executor.executeBatch();
                            batch = 0;
                            executor.startBatch();
                        }
                    }
                    executor.executeBatch();
                    return null;
                }
            });
        }
    }

    public int update(Object parameterObject) {
        String className = ClassUtils.getShortName(parameterObject.getClass());
        return getSqlMapClientTemplate().update("update" + className, parameterObject);
    }

    public int update(String statementName, Object parameterObject) {
        if (StringUtils.isEmpty(statementName)) {
            return update(parameterObject);
        }
        return getSqlMapClientTemplate().update(statementName, parameterObject);
    }

    public void updateBatch(final String statementName, final List paramsList) {
        if ((paramsList != null) && (paramsList.size() > 0)) {
            getSqlMapClientTemplate().execute(new SqlMapClientCallback() {
                public Object doInSqlMapClient(SqlMapExecutor executor) throws SQLException {
                    executor.startBatch();
                    for (int i = 0; i < paramsList.size(); i++) {
                        executor.update(statementName, paramsList.get(i));
                    }
                    executor.executeBatch();
                    return null;
                }
            });
        }
    }

    public void updateBatchs(final String statementName, final List paramsList) {
        if ((paramsList != null) && (paramsList.size() > 0)) {
            getSqlMapClientTemplate().execute(new SqlMapClientCallback() {
                public Object doInSqlMapClient(SqlMapExecutor executor) throws SQLException {
                    executor.startBatch();
                    int batch = 0;
                    for (int i = 0; i < paramsList.size(); i++) {
                        executor.update(statementName, paramsList.get(i));
                        batch++;
                        if (batch % 500 == 0) {
                            executor.executeBatch();
                            executor.startBatch();
                        }
                    }
                    executor.executeBatch();
                    return null;
                }
            });
        }
    }

    public int delete(Object parameterObject) {
        String className = ClassUtils.getShortName(parameterObject.getClass());
        return getSqlMapClientTemplate().delete("delete" + className, parameterObject);
    }

    public int delete(String statementName) {
        return getSqlMapClientTemplate().delete(statementName);
    }

    public int delete(String statementName, Object parameterObject) {
        if (StringUtils.isEmpty(statementName)) {
            return delete(parameterObject);
        }
        return getSqlMapClientTemplate().delete(statementName, parameterObject);
    }

    public Object get(String statementName, Serializable pkId) {
        return getSqlMapClientTemplate().queryForObject(statementName, pkId);
    }

    public Object getObject(String statementName) {
        return getSqlMapClientTemplate().queryForObject(statementName);
    }

    public Object getObject(String statementName, Object parameterObject) {
        return getSqlMapClientTemplate().queryForObject(statementName, parameterObject);
    }

    public Object findObject(String statementName, Object parameterObject) {
        List resultList = null;
        if (parameterObject == null) {
            resultList = find(statementName);
        } else {
            resultList = find(statementName, parameterObject);
        }
        if ((resultList != null) && (resultList.size() > 0)) {
            return resultList.get(0);
        }
        return null;
    }

    public Object findObject(String statementName) {
        List resultList = find(statementName);
        if ((resultList != null) && (resultList.size() > 0)) {
            return resultList.get(0);
        }
        return null;
    }

    public List find(String statementName) {
        return getSqlMapClientTemplate().queryForList(statementName);
    }

    public List find(String statementName, Object parameterObject) {
        return getSqlMapClientTemplate().queryForList(statementName, parameterObject);
    }

    public List findList(String statementName, Object parameterObject, int start, int count) {
        if (parameterObject == null) {
            return getSqlMapClientTemplate().queryForList(statementName, start, count);
        }
        return getSqlMapClientTemplate().queryForList(statementName, parameterObject, start, count);
    }

    public Page find(String statementName, String countStatementName, int start, int count) {
        Page page = new Page();
        int totalCount = getRowCount(countStatementName, null);
        page.setTotalRecord(totalCount);
        List dataList = getSqlMapClientTemplate().queryForList(statementName, start, count);
        page.setList(dataList);
        return page;
    }

    public Page find(String statementName, String countStatementName, Object parameterObject, int start, int count) {
        Page page = new Page();
        int totalCount = getRowCount(countStatementName, parameterObject);
        page.setTotalRecord(totalCount);
        List dataList = getSqlMapClientTemplate().queryForList(statementName, parameterObject, start, count);
        page.setList(dataList);
        return page;
    }

    public int getRowCount(String statementName, Object parameterObject) {
        if (parameterObject != null) {
            return ((Integer) getSqlMapClientTemplate().queryForObject(statementName, parameterObject)).intValue();
        }
        return ((Integer) getSqlMapClientTemplate().queryForObject(statementName)).intValue();
    }
}
```
### spring-ibatis.jar
Spring is a business-layer framework that integrates multiple persistence frameworks, iBATIS among them.

It wraps iBATIS, but its main role is to integrate several persistence frameworks behind one uniform facade for database access (an application of the facade pattern). Programmers do not have to deal with each persistence framework individually; Spring wraps the underlying details, and whichever framework ends up being used, the Spring facade exposes the same API. Switching to a different persistence framework underneath is just a configuration change.
### ibatis / hibernate
A persistence framework has two major functions:

1. Wrapping each vendor's JDBC driver, which provides the actual database access.
2. Connection pooling, in one of two ways: 1) the persistence framework ships its own simple implementation, or 2) it integrates an open-source pool framework; for example, iBATIS integrates DBCP and Hibernate integrates c3p0.
### Open-source connection pool frameworks
1. c3p0: Hibernate integrates c3p0, i.e. it uses c3p0 by default for its pooling.
2. DBCP: iBATIS integrates DBCP.
3. Alibaba Druid: note that Druid sits at the same level as the other open-source pool frameworks; they all solve the same connection-pooling problem. None of them contains or integrates another; they are alternatives.
### Why use Alibaba's Druid pool when the persistence framework already integrates an open-source pool?

Why? Because the persistence framework offers several pooling options: its own built-in pool, an integrated open-source pool, or a pool the programmer supplies.

Alibaba's pool framework is simply one written by Alibaba's engineers, so choosing Druid is just as valid. How do you choose? In the configuration file, when configuring the data source, pick the pool/DataSource implementation class you want to use.
### How to use it
The factory bean in applicationContext-mem-common.xml configures two things: 1. which connection pool / DataSource implementation class to use (choosing the implementation class is what chooses the pool) and 2. the iBATIS SQL map config file, classpath:mem_SqlMapConfig.xml. The DataSource bean itself is fed the ${db.jdbc.mem.*} properties (password and so on) plus pool settings, such as wait/eviction times of 10000 and 60000 ms and the maximum pool size.
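A hedged reconstruction of what such a configuration typically looks like: the bean ids and the Druid property names (maxWait, timeBetweenEvictionRunsMillis, minEvictableIdleTimeMillis) are assumptions, while the ${db.jdbc.mem.*} placeholders, the 10000/60000 values and classpath:mem_SqlMapConfig.xml come from the original snippet.

```xml
<!-- assumed bean ids and property names; placeholders and values from the original config -->
<bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource"
      init-method="init" destroy-method="close">
    <!-- 1. choosing this implementation class is what chooses the connection pool -->
    <property name="driverClassName" value="${db.jdbc.mem.driverClass}"/>
    <property name="url" value="${db.jdbc.mem.jdbcUrl}"/>
    <property name="username" value="${db.jdbc.mem.username}"/>
    <property name="password" value="${db.jdbc.mem.password}"/>
    <property name="maxWait" value="10000"/>
    <property name="timeBetweenEvictionRunsMillis" value="60000"/>
    <property name="minEvictableIdleTimeMillis" value="60000"/>
    <property name="maxActive" value="${db.jdbc.mem.pool.maxActive}"/> <!-- maximum pool size -->
</bean>

<bean id="sqlMapClient" class="org.springframework.orm.ibatis.SqlMapClientFactoryBean">
    <!-- 2. the iBATIS SQL map config file -->
    <property name="configLocation" value="classpath:mem_SqlMapConfig.xml"/>
    <property name="dataSource" ref="dataSource"/>
</bean>
```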
```properties
# db_mem.properties
db.jdbc.mem.driverClass=oracle.jdbc.OracleDriver
db.jdbc.mem.jdbcUrl=jdbc:oracle:thin:@(DESCRIPTION =(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.49.140)(PORT = 1521))(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.49.140)(PORT = 1521))(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.49.140)(PORT = 1521))(LOAD_BALANCE = yes)(FAILOVER = ON)(CONNECT_DATA =(SERVER = DEDICATED)(SERVICE_NAME = dbdata)(FAILOVER_MODE=(TYPE = SELECT)(METHOD = BASIC)(RETIRES = 20)(DELAY = 15))))
db.jdbc.mem.username=ms_member
db.jdbc.mem.password=XXX
db.jdbc.mem.pool.initialSize=5
db.jdbc.mem.pool.minIdle=5
db.jdbc.mem.pool.maxActive=20
db.jdbc.mem.pool.testWhileIdle=true
db.jdbc.mem.pool.testOnBorrow=false
db.jdbc.mem.pool.maxPoolPreparedStatement=60
```
### How big should the pool be?

1. The default is 8.
2. At work: 20 in the test environment; production: ?
3. The official examples use 10 to 20.
4. Practices reported from real projects online.
## Source code analysis (recommended / key focus / detailed steps and flow)

The steps:

### 1. Service layer

Controller layer → service layer.
```java
// Service layer
public String addMerchant(Merchant merchant) throws BaseRuntimeException {
    try {
        String isSucc = (String) iGenericDao.save("addMerchant", merchant);
        return isSucc;
    } catch (BaseRuntimeException e) {
        throw new BaseRuntimeException(e);
    }
}
```
### 2. DAO layer

Service layer → DAO layer.

The real entry point is here!

This is where the connection pool / DataSource implementation class gets chosen.
```java
// DAO layer
// The DAO extends org.springframework.orm.ibatis.support.SqlMapClientDaoSupport: spring-orm.jar
// integrates the persistence frameworks, and extending the iBATIS support class is what selects
// iBATIS as the persistence framework. The DAO's configuration must then also be iBATIS-specific,
// i.e. use the classes in the org.springframework.orm.ibatis package of spring-orm.jar.
public class GenericDaoIbatisImpl extends org.springframework.orm.ibatis.support.SqlMapClientDaoSupport
        implements IGenericDao {

    private SqlExecutor sqlExecutor;

    public void setSqlExecutor(SqlExecutor sqlExecutor) {
        this.sqlExecutor = sqlExecutor;
    }

    public void initialize() throws Exception {
        if (sqlExecutor != null) {
            SqlMapClient sqlMapClient = getSqlMapClientTemplate().getSqlMapClient();
            SqlMapClientImpl client = (SqlMapClientImpl) sqlMapClient;
            SqlMapExecutorDelegate delgate = client.getDelegate();
            delgate.getSqlExecutor();
            ReflectUtils.setFieldValue(((SqlMapClientImpl) sqlMapClient).getDelegate(),
                    "sqlExecutor", SqlExecutor.class, sqlExecutor);
        }
    }

    public Object save(Object entity) {
        String className = ClassUtils.getShortName(entity.getClass());
        return getSqlMapClientTemplate().insert("add" + className, entity);
    }

    public Object save(String statementName, Object entity) {
        if (StringUtils.isEmpty(statementName)) {
            return save(entity);
        }
        return getSqlMapClientTemplate().insert(statementName, entity);
    }
}
```
Now look at the configuration that corresponds to the DAO object. The configuration is based on the class the DAO extends.
The same applicationContext-mem-common.xml also defines: the executor bean; the sqlMapClient bean, which can only be the iBATIS one (more precisely, the iBATIS support in spring-orm.jar, i.e. the classes in the org.springframework.orm.ibatis package), pointing at classpath:mem_SqlMapConfig.xml and at the data source; and the DAO implementation bean, which is wired with two pieces of data: 1. which data source to use and 2. the executor.
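A hedged sketch of that wiring: the bean ids, the init-method wiring and the executor class name are assumptions; the DAO class, the spring-orm iBATIS factory bean and the config location come from the text above.

```xml
<!-- assumed bean ids; LimitSqlExecutor is a hypothetical custom executor class -->
<bean id="sqlExecutor" class="com.gzh.orm.executor.LimitSqlExecutor"/>

<!-- the sqlMapClient: the iBATIS support from spring-orm.jar (org.springframework.orm.ibatis) -->
<bean id="sqlMapClient" class="org.springframework.orm.ibatis.SqlMapClientFactoryBean">
    <property name="configLocation" value="classpath:mem_SqlMapConfig.xml"/>
    <property name="dataSource" ref="dataSource"/>
</bean>

<!-- the DAO implementation, wired with 1. the data source (via the sqlMapClient) and 2. the executor -->
<bean id="genericDao" class="com.gzh.orm.dao.ibatis.GenericDaoIbatisImpl" init-method="initialize">
    <property name="sqlMapClient" ref="sqlMapClient"/>
    <property name="sqlExecutor" ref="sqlExecutor"/>
</bean>
```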
### 3. spring-orm.jar

DAO layer → spring-orm.jar.

The classes used here are the ones in the org.springframework.orm.ibatis package.
```java
// SqlMapClientTemplate
package org.springframework.orm.ibatis;

public class SqlMapClientTemplate extends JdbcAccessor implements SqlMapClientOperations {

    private SqlMapClient sqlMapClient;

    public Object insert(final String statementName, final Object parameterObject) throws DataAccessException {
        // delegate the insert to the iBATIS executor inside a Spring-managed callback
        return execute(new SqlMapClientCallback() {
            public Object doInSqlMapClient(SqlMapExecutor executor) throws SQLException {
                return executor.insert(statementName, parameterObject);
            }
        });
    }
}
```
### 4. ibatis.jar

The persistence framework itself.
```java
// SqlMapSessionImpl
package com.ibatis.sqlmap.engine.impl;

public class SqlMapSessionImpl implements SqlMapSession {

    public Connection getCurrentConnection() throws SQLException {
        try {
            Connection conn = null;
            Transaction trans = delegate.getTransaction(sessionScope);
            if (trans != null) {
                conn = trans.getConnection(); // obtain the connection object from the current transaction
            }
            return conn;
        } catch (TransactionException e) {
            throw new NestedSQLException("Error getting Connection from Transaction. Cause: " + e, e);
        }
    }

    public DataSource getDataSource() {
        return delegate.getDataSource();
    }
}
```
```java
// JdbcTransaction
package com.ibatis.sqlmap.engine.transaction.jdbc;

public class JdbcTransaction implements Transaction {

    public Connection getConnection() throws SQLException, TransactionException {
        if (connection == null) {
            init(); // lazy initialization
        }
        return connection;
    }

    private void init() throws SQLException, TransactionException {
        // take a connection object from the DataSource: the config file decides which
        // DataSource / connection pool implementation this is
        connection = dataSource.getConnection();
        if (connection == null) {
            throw new TransactionException("JdbcTransaction could not start transaction. Cause: The DataSource returned a null connection.");
        }

        isolationLevel.applyIsolationLevel(connection);

        if (connection.getAutoCommit()) {
            connection.setAutoCommit(false);
        }

        if (connectionLog.isDebugEnabled()) {
            connection = ConnectionLogProxy.newInstance(connection);
        }
    }
}
```
### 5. DataSource / connection pool framework: druid.jar

Persistence framework → connection pool framework.
```java
// DruidDataSource
package com.alibaba.druid.pool;

/**
 * @author ljw
 * @author wenshao
 */
public class DruidDataSource extends DruidAbstractDataSource implements DruidDataSourceMBean,
        ManagedDataSource, Referenceable, Closeable, Cloneable, ConnectionPoolDataSource {

    public DruidPooledConnection getConnection(long maxWaitMillis) throws SQLException {
        init(); // 1. initialize the connection pool

        if (filters.size() > 0) {
            FilterChainImpl filterChain = new FilterChainImpl(this);
            return filterChain.dataSource_connect(this, maxWaitMillis);
        } else {
            return getConnectionDirect(maxWaitMillis); // 2. take a connection object from the pool
        }
    }

    // initialize the connection pool, i.e. the array of connection holders
    public void init() throws SQLException {
        if (inited) {
            return;
        }

        try {
            lock.lockInterruptibly();
        } catch (InterruptedException e) {
            throw new SQLException("interrupt", e);
        }

        boolean init = false;
        try {
            if (inited) {
                return;
            }

            init = true;

            initStackTrace = IOUtils.toString(Thread.currentThread().getStackTrace());

            this.id = DruidDriver.createDataSourceId();

            loadFilterFromSystemProperty();

            if (this.dbType == null || this.dbType.length() == 0) {
                this.dbType = JdbcUtils.getDbType(jdbcUrl, null);
            }

            for (Filter filter : filters) {
                filter.init(this);
            }

            if (maxActive <= 0) {
                throw new IllegalArgumentException("illegal maxActive " + maxActive);
            }

            if (maxActive < minIdle) {
                throw new IllegalArgumentException("illegal maxActive " + maxActive);
            }

            if (getInitialSize() > maxActive) {
                throw new IllegalArgumentException("illegal initialSize " + this.initialSize + ", maxActieve " + maxActive);
            }

            if (this.driverClass != null) {
                this.driverClass = driverClass.trim();
            }

            validationQueryCheck();

            if (this.jdbcUrl != null) {
                this.jdbcUrl = this.jdbcUrl.trim();
                initFromWrapDriverUrl();
            }

            initFromSPIServiceLoader();

            if (this.driver == null) {
                if (this.driverClass == null || this.driverClass.isEmpty()) {
                    this.driverClass = JdbcUtils.getDriverClassName(this.jdbcUrl);
                }

                if (MockDriver.class.getName().equals(driverClass)) {
                    driver = MockDriver.instance;
                } else {
                    driver = JdbcUtils.createDriver(driverClassLoader, driverClass);
                }
            } else {
                if (this.driverClass == null) {
                    this.driverClass = driver.getClass().getName();
                }
            }

            if (this.dbType == null || this.dbType.length() == 0) {
                this.dbType = JdbcUtils.getDbType(jdbcUrl, driverClass.getClass().getName());
            }

            initCheck();

            initExceptionSorter();
            initValidConnectionChecker();

            if (driver.getClass().getName().equals("com.mysql.jdbc.Driver")) {
                if (this.isPoolPreparedStatements()) {
                    LOG.error("mysql should not use 'PoolPreparedStatements'");
                }
            }

            dataSourceStat = new JdbcDataSourceStat(this.name, this.jdbcUrl, this.dbType);

            connections = new DruidConnectionHolder[maxActive]; // the pool is really just an array of connection holders

            SQLException connectError = null;

            try {
                // create the initial connections and fill them into the pool
                for (int i = 0, size = getInitialSize(); i < size; ++i) {
                    Connection conn = createPhysicalConnection();
                    DruidConnectionHolder holder = new DruidConnectionHolder(this, conn);
                    connections[poolingCount++] = holder;
                }

                if (poolingCount > 0) {
                    poolingPeak = poolingCount;
                    poolingPeakTime = System.currentTimeMillis();
                }
            } catch (SQLException ex) {
                LOG.error("init datasource error", ex);
                connectError = ex;
            }

            createAndStartCreatorThread();
            createAndStartDestroyThread();

            initedLatch.await();

            initedTime = new Date();
            ObjectName objectName = DruidDataSourceStatManager.addDataSource(this, this.name);
            this.setObjectName(objectName);

            if (connectError != null && poolingCount == 0) {
                throw connectError;
            }
        } catch (SQLException e) {
            LOG.error("dataSource init error", e);
            throw e;
        } catch (InterruptedException e) {
            throw new SQLException(e.getMessage(), e);
        } finally {
            inited = true;
            lock.unlock();

            if (init && LOG.isInfoEnabled()) {
                LOG.info("{dataSource-" + this.getID() + "} inited");
            }
        }
    }

    // take one connection object out of the array
    private DruidPooledConnection getConnectionInternal(long maxWait) throws SQLException {
        if (closed) {
            connectErrorCount.incrementAndGet();
            throw new DataSourceClosedException("dataSource already closed at " + new Date(closeTimeMillis));
        }

        if (!enable) {
            connectErrorCount.incrementAndGet();
            throw new DataSourceDisableException();
        }

        final long nanos = TimeUnit.MILLISECONDS.toNanos(maxWait);
        final int maxWaitThreadCount = getMaxWaitThreadCount();

        DruidConnectionHolder holder;
        try {
            lock.lockInterruptibly();
        } catch (InterruptedException e) {
            connectErrorCount.incrementAndGet();
            throw new SQLException("interrupt", e);
        }

        try {
            if (maxWaitThreadCount > 0) {
                if (notEmptyWaitThreadCount >= maxWaitThreadCount) {
                    connectErrorCount.incrementAndGet();
                    throw new SQLException("maxWaitThreadCount " + maxWaitThreadCount + ", current wait Thread count "
                            + lock.getQueueLength());
                }
            }

            connectCount++;

            if (maxWait > 0) {
                holder = pollLast(nanos);
            } else {
                holder = takeLast();
            }

            activeCount++;
            if (activeCount > activePeak) {
                activePeak = activeCount;
                activePeakTime = System.currentTimeMillis();
            }
        } catch (InterruptedException e) {
            connectErrorCount.incrementAndGet();
            throw new SQLException(e.getMessage(), e);
        } catch (SQLException e) {
            connectErrorCount.incrementAndGet();
            throw e;
        } finally {
            lock.unlock();
        }

        holder.incrementUseCount();

        DruidPooledConnection poolalbeConnection = new DruidPooledConnection(holder);
        return poolalbeConnection;
    }
}
```
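To close the loop, a hedged usage sketch (the H2 in-memory URL, credentials and sizes are assumptions so the example can run without the Oracle setup above): closing a DruidPooledConnection hands its holder back to the connections array instead of closing the physical connection, which is why the pool can serve repeated calls.

```java
import java.sql.Connection;
import com.alibaba.druid.pool.DruidDataSource;

public class PoolReuseDemo {
    public static void main(String[] args) throws Exception {
        DruidDataSource ds = new DruidDataSource();
        ds.setUrl("jdbc:h2:mem:demo");   // assumed URL; needs the H2 driver on the classpath
        ds.setUsername("sa");
        ds.setPassword("");
        ds.setInitialSize(2);            // pre-created connections, see init() above
        ds.setMaxActive(5);              // size of the DruidConnectionHolder[] array
        ds.setMaxWait(10000);            // how long getConnection(maxWaitMillis) may block

        for (int i = 0; i < 3; i++) {
            try (Connection conn = ds.getConnection()) {   // getConnectionInternal() takes a holder
                System.out.println("active=" + ds.getActiveCount());
            }                                              // close() returns the holder to the pool
        }
        ds.close();
    }
}
```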