master
miaoqingshuai 2025-12-30 17:03:08 +08:00
commit 7115a81084
2959 changed files with 275029 additions and 0 deletions

BIN
game_web/.DS_Store vendored Normal file

Binary file not shown.

BIN
game_web/event_mgr/.DS_Store vendored Normal file

Binary file not shown.

View File

@ -0,0 +1,96 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- taurus service-core configuration: database (HikariCP-style pool / MySQL) and redis plugins.
     NOTE(review): the root element name "serivce-core" is misspelled; the framework presumably
     parses this exact name - confirm before renaming.
     SECURITY NOTE(review): database and redis credentials are stored here in plaintext. -->
<serivce-core>
	<log4jPath>log4j.properties</log4jPath>
	<plugin>
		<id>database</id>
		<class>com.taurus.core.plugin.database.DataBasePlugin</class>
		<poolConfig>
			<!-- Maximum pool size (default 10) -->
			<maxPool>100</maxPool>
			<!-- Minimum number of idle connections (default 0) -->
			<minIdle>10</minIdle>
			<!-- Maximum connection lifetime in milliseconds (default 180000) -->
			<maxLifetime>180000</maxLifetime>
			<!-- Validation query by database vendor:
			     hsqldb - "select 1 from INFORMATION_SCHEMA.SYSTEM_USERS"
			     Oracle - "select 1 from dual"
			     DB2    - "select 1 from sysibm.sysdummy1"
			     mysql  - "select 1" -->
			<validationQuery>select 1</validationQuery>
			<!-- Connection timeout in milliseconds (default 30000) -->
			<connectionTimeout>10000</connectionTimeout>
			<!-- Idle timeout in milliseconds (default 60000) -->
			<idleTimeout>60000</idleTimeout>
			<!-- JDBC driver properties -->
			<props>
				<useSSL>false</useSSL>
				<useUnicode>true</useUnicode>
				<characterEncoding>utf-8</characterEncoding>
				<!-- Server time zone -->
				<serverTimezone>UTC</serverTimezone>
				<!-- Enable prepared-statement caching -->
				<cachePrepStmts>true</cachePrepStmts>
				<!-- Prepared-statement cache size -->
				<prepStmtCacheSize>250</prepStmtCacheSize>
				<!-- Maximum SQL length eligible for the statement cache -->
				<prepStmtCacheSqlLimit>2048</prepStmtCacheSqlLimit>
			</props>
		</poolConfig>
		<databases>
			<db>
				<name>db1</name>
				<driverName>com.mysql.cj.jdbc.Driver</driverName>
				<jdbcUrl>jdbc:mysql://192.168.0.11:6060/wb_game</jdbcUrl>
				<userName>proto_ff</userName>
				<password>37du_game</password>
			</db>
		</databases>
	</plugin>
	<plugin>
		<id>redis</id>
		<class>com.taurus.core.plugin.redis.RedisPlugin</class>
		<poolConfig>
			<!-- Maximum number of connections (default 8) -->
			<maxTotal>80</maxTotal>
			<!-- Maximum number of idle connections (default 8) -->
			<maxIdle>20</maxIdle>
			<!-- Minimum number of idle connections (default 0) -->
			<minIdle>5</minIdle>
			<!-- Maximum wait in milliseconds when borrowing (when blockWhenExhausted);
			     throws on timeout; negative means block indefinitely (default -1) -->
			<maxWaitMillis>-1</maxWaitMillis>
			<!-- Validate a jedis instance on borrow (default false) -->
			<testOnBorrow>true</testOnBorrow>
			<!-- Validate on return to the pool (default false) -->
			<testOnReturn>true</testOnReturn>
			<!-- Idle-object evictor thread validates idle objects; failures are dropped
			     from the pool. Only meaningful when timeBetweenEvictionRunsMillis > 0
			     (default true) -->
			<testWhileIdle>true</testWhileIdle>
			<!-- Maximum number of objects examined per evictor run (default -1) -->
			<numTestsPerEvictionRun>100</numTestsPerEvictionRun>
			<!-- Minimum time an object must sit idle before it is eligible for eviction.
			     Only meaningful when timeBetweenEvictionRunsMillis > 0 (default 60000) -->
			<minEvictableIdleTimeMillis>60000</minEvictableIdleTimeMillis>
			<!-- Interval between evictor runs in milliseconds; negative disables the
			     evictor thread (default 30000) -->
			<timeBetweenEvictionRunsMillis>30000</timeBetweenEvictionRunsMillis>
			<!-- Like minEvictableIdleTimeMillis but keeps at least minIdle objects in
			     the pool. -1 disables idle-time eviction; ignored when
			     minEvictableIdleTimeMillis > 0; only meaningful when
			     timeBetweenEvictionRunsMillis > 0 (default 1800000) -->
			<softMinEvictableIdleTimeMillis>1800000</softMinEvictableIdleTimeMillis>
			<!-- Whether to block when the pool is exhausted: false throws immediately,
			     true blocks until timeout (default true) -->
			<blockWhenExhausted>true</blockWhenExhausted>
		</poolConfig>
		<infos>
			<info name="group1_db0" host="127.0.0.1" password="123456" port="6379" database="0" timeout="5000"/>
			<info name="group1_db1" host="127.0.0.1" password="123456" port="6379" database="1" timeout="5000"/>
			<info name="group1_db5" host="127.0.0.1" password="123456" port="6379" database="5" timeout="5000"/>
			<info name="group1_db8" host="127.0.0.1" password="123456" port="6379" database="8" timeout="5000"/>
			<info name="group1_db10" host="127.0.0.1" password="123456" port="6379" database="10" timeout="5000"/>
		</infos>
	</plugin>
</serivce-core>

View File

@ -0,0 +1,20 @@
# Root logger: INFO level, routed to the console and a daily rolling file.
log4j.rootLogger = INFO,consoleAppender,fileAppender

# ConsoleAppender
log4j.appender.consoleAppender=org.apache.log4j.ConsoleAppender
log4j.appender.consoleAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.consoleAppender.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p [%t] %c{2} %3x - %m%n

# DailyRollingFileAppender - the file is rolled over every day
log4j.appender.fileAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.fileAppender.File=${WORKDIR}/logs/web_main.log
log4j.appender.fileAppender.layout.ConversionPattern=%d{dd MMM yyyy | HH:mm:ss,SSS} | %-5p | %t | %c{3} | %3x | %m%n
log4j.appender.fileAppender.Encoding=UTF-8
log4j.appender.fileAppender.DatePattern='.'yyyy-MM-dd
# Fixed: was "log4j.appender.dailyFile.Append" - "dailyFile" is not a configured
# appender, so the Append setting was a silent no-op. Also removed the duplicate
# DatePattern line that repeated the value with a stray space before '='.
log4j.appender.fileAppender.Append=true

View File

@ -0,0 +1,100 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- taurus service-core configuration: database (HikariCP-style pool / MySQL) and redis plugins.
     NOTE(review): the root element name "serivce-core" is misspelled; the framework presumably
     parses this exact name - confirm before renaming.
     SECURITY NOTE(review): database and redis credentials are stored here in plaintext. -->
<serivce-core>
	<log4jPath>log4j.properties</log4jPath>
	<plugin>
		<id>database</id>
		<class>com.taurus.core.plugin.database.DataBasePlugin</class>
		<poolConfig>
			<!-- Maximum pool size (default 10) -->
			<maxPool>100</maxPool>
			<!-- Minimum number of idle connections (default 0) -->
			<minIdle>5</minIdle>
			<!-- Maximum connection lifetime in milliseconds (default 180000) -->
			<maxLifetime>180000</maxLifetime>
			<!-- Validation query by database vendor:
			     hsqldb - "select 1 from INFORMATION_SCHEMA.SYSTEM_USERS"
			     Oracle - "select 1 from dual"
			     DB2    - "select 1 from sysibm.sysdummy1"
			     mysql  - "select 1" -->
			<validationQuery>select 1</validationQuery>
			<!-- Connection timeout in milliseconds (default 30000) -->
			<connectionTimeout>10000</connectionTimeout>
			<!-- Idle timeout in milliseconds (default 60000) -->
			<idleTimeout>60000</idleTimeout>
			<!-- JDBC driver properties -->
			<props>
				<useSSL>false</useSSL>
				<useUnicode>true</useUnicode>
				<characterEncoding>utf-8</characterEncoding>
				<!-- Server time zone -->
				<serverTimezone>UTC</serverTimezone>
				<!-- Enable prepared-statement caching -->
				<cachePrepStmts>true</cachePrepStmts>
				<!-- Prepared-statement cache size -->
				<prepStmtCacheSize>250</prepStmtCacheSize>
				<!-- Maximum SQL length eligible for the statement cache -->
				<prepStmtCacheSqlLimit>2048</prepStmtCacheSqlLimit>
			</props>
		</poolConfig>
		<databases>
			<db>
				<name>db1</name>
				<driverName>com.mysql.cj.jdbc.Driver</driverName>
				<jdbcUrl>jdbc:mysql://192.168.0.11:6060/wb_game</jdbcUrl>
				<userName>proto_ff</userName>
				<password>37du_game</password>
			</db>
		</databases>
	</plugin>
	<plugin>
		<id>redis</id>
		<class>com.taurus.core.plugin.redis.RedisPlugin</class>
		<poolConfig>
			<!-- Maximum number of connections (default 8) -->
			<maxTotal>80</maxTotal>
			<!-- Maximum number of idle connections (default 8) -->
			<maxIdle>8</maxIdle>
			<!-- Minimum number of idle connections (default 0) -->
			<minIdle>2</minIdle>
			<!-- Maximum wait in milliseconds when borrowing (when blockWhenExhausted);
			     throws on timeout; negative means block indefinitely (default -1) -->
			<maxWaitMillis>-1</maxWaitMillis>
			<!-- Validate a jedis instance on borrow (default false) -->
			<testOnBorrow>true</testOnBorrow>
			<!-- Validate on return to the pool (default false) -->
			<testOnReturn>true</testOnReturn>
			<!-- Idle-object evictor thread validates idle objects; failures are dropped
			     from the pool. Only meaningful when timeBetweenEvictionRunsMillis > 0
			     (default true) -->
			<testWhileIdle>true</testWhileIdle>
			<!-- Maximum number of objects examined per evictor run (default -1) -->
			<numTestsPerEvictionRun>100</numTestsPerEvictionRun>
			<!-- Minimum time an object must sit idle before it is eligible for eviction.
			     Only meaningful when timeBetweenEvictionRunsMillis > 0 (default 60000) -->
			<minEvictableIdleTimeMillis>60000</minEvictableIdleTimeMillis>
			<!-- Interval between evictor runs in milliseconds; negative disables the
			     evictor thread (default 30000) -->
			<timeBetweenEvictionRunsMillis>30000</timeBetweenEvictionRunsMillis>
			<!-- Like minEvictableIdleTimeMillis but keeps at least minIdle objects in
			     the pool. -1 disables idle-time eviction; ignored when
			     minEvictableIdleTimeMillis > 0; only meaningful when
			     timeBetweenEvictionRunsMillis > 0 (default 1800000) -->
			<softMinEvictableIdleTimeMillis>1800000</softMinEvictableIdleTimeMillis>
			<!-- Whether to block when the pool is exhausted: false throws immediately,
			     true blocks until timeout (default true) -->
			<blockWhenExhausted>true</blockWhenExhausted>
		</poolConfig>
		<infos>
			<info name="group1_db0" host="127.0.0.1" password="123456" port="6379" database="0" timeout="5000"/>
			<info name="group1_db1" host="127.0.0.1" password="123456" port="6379" database="1" timeout="5000"/>
			<info name="group1_db2" host="127.0.0.1" password="123456" port="6379" database="2" timeout="5000"/>
			<info name="group1_db5" host="127.0.0.1" password="123456" port="6379" database="5" timeout="5000"/>
			<info name="group1_db8" host="127.0.0.1" password="123456" port="6379" database="8" timeout="5000"/>
			<info name="group1_db9" host="127.0.0.1" password="123456" port="6379" database="9" timeout="5000"/>
			<info name="group1_db10" host="127.0.0.1" password="123456" port="6379" database="10" timeout="5000"/>
			<info name="group1_db11" host="127.0.0.1" password="123456" port="6379" database="11" timeout="5000"/>
			<info name="group1_db15" host="127.0.0.1" password="123456" port="6379" database="15" timeout="5000"/>
		</infos>
	</plugin>
</serivce-core>

View File

@ -0,0 +1,20 @@
# Root logger: INFO level, routed to the console and a daily rolling file.
log4j.rootLogger = INFO,consoleAppender,fileAppender

# ConsoleAppender
log4j.appender.consoleAppender=org.apache.log4j.ConsoleAppender
log4j.appender.consoleAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.consoleAppender.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p [%t] %c{2} %3x - %m%n

# DailyRollingFileAppender - the file is rolled over every day
log4j.appender.fileAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.fileAppender.File=${WORKDIR}/logs/web_main.log
log4j.appender.fileAppender.layout.ConversionPattern=%d{dd MMM yyyy | HH:mm:ss,SSS} | %-5p | %t | %c{3} | %3x | %m%n
log4j.appender.fileAppender.Encoding=UTF-8
log4j.appender.fileAppender.DatePattern='.'yyyy-MM-dd
# Fixed: was "log4j.appender.dailyFile.Append" - "dailyFile" is not a configured
# appender, so the Append setting was a silent no-op. Also removed the duplicate
# DatePattern line that repeated the value with a stray space before '='.
log4j.appender.fileAppender.Append=true

View File

@ -0,0 +1,99 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- taurus service-core configuration: database (HikariCP-style pool / MySQL) and redis plugins.
     NOTE(review): the root element name "serivce-core" is misspelled; the framework presumably
     parses this exact name - confirm before renaming.
     SECURITY NOTE(review): database and redis credentials are stored here in plaintext. -->
<serivce-core>
	<log4jPath>log4j.properties</log4jPath>
	<plugin>
		<id>database</id>
		<class>com.taurus.core.plugin.database.DataBasePlugin</class>
		<poolConfig>
			<!-- Maximum pool size (default 10) -->
			<maxPool>100</maxPool>
			<!-- Minimum number of idle connections (default 0) -->
			<minIdle>1</minIdle>
			<!-- Maximum connection lifetime in milliseconds (default 180000) -->
			<maxLifetime>180000</maxLifetime>
			<!-- Validation query by database vendor:
			     hsqldb - "select 1 from INFORMATION_SCHEMA.SYSTEM_USERS"
			     Oracle - "select 1 from dual"
			     DB2    - "select 1 from sysibm.sysdummy1"
			     mysql  - "select 1" -->
			<validationQuery>select 1</validationQuery>
			<!-- Connection timeout in milliseconds (default 30000) -->
			<connectionTimeout>10000</connectionTimeout>
			<!-- Idle timeout in milliseconds (default 60000) -->
			<idleTimeout>60000</idleTimeout>
			<!-- JDBC driver properties -->
			<props>
				<useSSL>false</useSSL>
				<useUnicode>true</useUnicode>
				<characterEncoding>utf-8</characterEncoding>
				<!-- Server time zone -->
				<serverTimezone>UTC</serverTimezone>
				<!-- Enable prepared-statement caching -->
				<cachePrepStmts>true</cachePrepStmts>
				<!-- Prepared-statement cache size -->
				<prepStmtCacheSize>250</prepStmtCacheSize>
				<!-- Maximum SQL length eligible for the statement cache -->
				<prepStmtCacheSqlLimit>2048</prepStmtCacheSqlLimit>
			</props>
		</poolConfig>
		<databases>
			<db>
				<name>db1</name>
				<driverName>com.mysql.cj.jdbc.Driver</driverName>
				<jdbcUrl>jdbc:mysql://192.168.0.11:6060/wb_game</jdbcUrl>
				<userName>proto_ff</userName>
				<password>37du_game</password>
			</db>
		</databases>
	</plugin>
	<plugin>
		<id>redis</id>
		<class>com.taurus.core.plugin.redis.RedisPlugin</class>
		<poolConfig>
			<!-- Maximum number of connections (default 8) -->
			<maxTotal>80</maxTotal>
			<!-- Maximum number of idle connections (default 8) -->
			<maxIdle>8</maxIdle>
			<!-- Minimum number of idle connections (default 0) -->
			<minIdle>2</minIdle>
			<!-- Maximum wait in milliseconds when borrowing (when blockWhenExhausted);
			     throws on timeout; negative means block indefinitely (default -1) -->
			<maxWaitMillis>-1</maxWaitMillis>
			<!-- Validate a jedis instance on borrow (default false) -->
			<testOnBorrow>true</testOnBorrow>
			<!-- Validate on return to the pool (default false) -->
			<testOnReturn>true</testOnReturn>
			<!-- Idle-object evictor thread validates idle objects; failures are dropped
			     from the pool. Only meaningful when timeBetweenEvictionRunsMillis > 0
			     (default true) -->
			<testWhileIdle>true</testWhileIdle>
			<!-- Maximum number of objects examined per evictor run (default -1) -->
			<numTestsPerEvictionRun>100</numTestsPerEvictionRun>
			<!-- Minimum time an object must sit idle before it is eligible for eviction.
			     Only meaningful when timeBetweenEvictionRunsMillis > 0 (default 60000) -->
			<minEvictableIdleTimeMillis>60000</minEvictableIdleTimeMillis>
			<!-- Interval between evictor runs in milliseconds; negative disables the
			     evictor thread (default 30000) -->
			<timeBetweenEvictionRunsMillis>30000</timeBetweenEvictionRunsMillis>
			<!-- Like minEvictableIdleTimeMillis but keeps at least minIdle objects in
			     the pool. -1 disables idle-time eviction; ignored when
			     minEvictableIdleTimeMillis > 0; only meaningful when
			     timeBetweenEvictionRunsMillis > 0 (default 1800000) -->
			<softMinEvictableIdleTimeMillis>1800000</softMinEvictableIdleTimeMillis>
			<!-- Whether to block when the pool is exhausted: false throws immediately,
			     true blocks until timeout (default true) -->
			<blockWhenExhausted>true</blockWhenExhausted>
		</poolConfig>
		<infos>
			<info name="group1_db0" host="127.0.0.1" password="123456" port="6379" database="0" timeout="5000"/>
			<info name="group1_db1" host="127.0.0.1" password="123456" port="6379" database="1" timeout="5000"/>
			<info name="group1_db2" host="127.0.0.1" password="123456" port="6379" database="2" timeout="5000"/>
			<info name="group1_db5" host="127.0.0.1" password="123456" port="6379" database="5" timeout="5000"/>
			<info name="group1_db8" host="127.0.0.1" password="123456" port="6379" database="8" timeout="5000"/>
			<info name="group1_db9" host="127.0.0.1" password="123456" port="6379" database="9" timeout="5000"/>
			<info name="group1_db10" host="127.0.0.1" password="123456" port="6379" database="10" timeout="5000"/>
			<info name="group1_db11" host="127.0.0.1" password="123456" port="6379" database="11" timeout="5000"/>
		</infos>
	</plugin>
</serivce-core>

View File

@ -0,0 +1,54 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- IntelliJ IDEA module descriptor for the event_mgr web module (Maven-managed, JDK 1.8).
     This file is maintained by the IDE and may be rewritten on project import;
     avoid hand-editing beyond what the IDE round-trips. -->
<module org.jetbrains.idea.maven.project.MavenProjectsManager.isMavenModule="true" type="JAVA_MODULE" version="4">
  <component name="FacetManager">
    <facet type="web" name="Web">
      <configuration>
        <descriptors>
          <deploymentDescriptor name="web.xml" url="file://$MODULE_DIR$/src/main/webapp/WEB-INF/web.xml" />
        </descriptors>
        <webroots>
          <root url="file://$MODULE_DIR$/build/pro" relative="config/" />
          <root url="file://$MODULE_DIR$/src/main/webapp" relative="/" />
        </webroots>
        <sourceRoots>
          <root url="file://$MODULE_DIR$/src/main/java" />
        </sourceRoots>
      </configuration>
    </facet>
  </component>
  <component name="NewModuleRootManager" LANGUAGE_LEVEL="JDK_1_8">
    <output url="file://$MODULE_DIR$/target/classes" />
    <output-test url="file://$MODULE_DIR$/target/test-classes" />
    <content url="file://$MODULE_DIR$">
      <sourceFolder url="file://$MODULE_DIR$/src/main/java" isTestSource="false" />
      <sourceFolder url="file://$MODULE_DIR$/src/test/java" isTestSource="true" />
      <excludeFolder url="file://$MODULE_DIR$/target" />
    </content>
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
    <orderEntry type="library" scope="TEST" name="Maven: junit:junit:3.8.1" level="project" />
    <orderEntry type="module" module-name="data_cache" />
    <orderEntry type="module" module-name="taurus-core" />
    <orderEntry type="module" module-name="taurus-web" />
    <orderEntry type="library" name="Maven: redis.clients:jedis:2.9.0" level="project" />
    <orderEntry type="library" name="Maven: org.apache.commons:commons-pool2:2.4.2" level="project" />
    <orderEntry type="library" name="Maven: com.zaxxer:HikariCP:3.3.1" level="project" />
    <orderEntry type="library" name="Maven: org.slf4j:slf4j-api:1.7.25" level="project" />
    <orderEntry type="library" name="Maven: mysql:mysql-connector-java:8.0.16" level="project" />
    <orderEntry type="library" name="Maven: com.google.protobuf:protobuf-java:3.6.1" level="project" />
    <orderEntry type="library" name="Maven: jdom:jdom:1.0" level="project" />
    <orderEntry type="library" name="Maven: log4j:log4j:1.2.17" level="project" />
    <orderEntry type="library" name="Maven: org.quartz-scheduler:quartz:2.2.3" level="project" />
    <orderEntry type="library" name="Maven: c3p0:c3p0:0.9.1.1" level="project" />
    <orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-webapp:8.2.0.v20160908" level="project" />
    <orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-xml:8.2.0.v20160908" level="project" />
    <orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-util:8.2.0.v20160908" level="project" />
    <orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-servlet:8.2.0.v20160908" level="project" />
    <orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-security:8.2.0.v20160908" level="project" />
    <orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-server:8.2.0.v20160908" level="project" />
    <orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty.orbit:javax.servlet:3.0.0.v201112011016" level="project" />
    <orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-continuation:8.2.0.v20160908" level="project" />
    <orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-http:8.2.0.v20160908" level="project" />
    <orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-io:8.2.0.v20160908" level="project" />
  </component>
</module>

117
game_web/event_mgr/pom.xml Normal file
View File

@ -0,0 +1,117 @@
<!-- Maven build for the event_mgr web application (WAR packaging, Java 8). -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
	xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
	<modelVersion>4.0.0</modelVersion>
	<groupId>com.evt</groupId>
	<artifactId>event_mgr</artifactId>
	<packaging>war</packaging>
	<version>1.0.0</version>
	<properties>
		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
		<maven.compiler.source>1.8</maven.compiler.source>
		<maven.compiler.target>1.8</maven.compiler.target>
		<!-- Selects which config directory under build/ is bundled (see webResources below). -->
		<build.type>pro</build.type>
	</properties>
	<dependencies>
		<dependency>
			<groupId>junit</groupId>
			<artifactId>junit</artifactId>
			<version>3.8.1</version>
			<scope>test</scope>
		</dependency>
		<!-- Dependency: data_cache -->
		<dependency>
			<groupId>com.data</groupId>
			<artifactId>data_cache</artifactId>
			<version>1.0.1</version>
		</dependency>
		<!-- Dependency: taurus-core -->
		<dependency>
			<groupId>com.taurus</groupId>
			<artifactId>taurus-core</artifactId>
			<version>1.0.1</version>
		</dependency>
		<!-- Dependency: taurus-web -->
		<dependency>
			<groupId>com.taurus</groupId>
			<artifactId>taurus-web</artifactId>
			<version>1.0.1</version>
		</dependency>
		<!-- Import when redis is needed -->
		<dependency>
			<groupId>redis.clients</groupId>
			<artifactId>jedis</artifactId>
			<version>2.9.0</version>
		</dependency>
		<!-- Import when HikariCP is needed; add it in your own project -->
		<dependency>
			<groupId>com.zaxxer</groupId>
			<artifactId>HikariCP</artifactId>
			<version>3.3.1</version>
		</dependency>
		<!-- Import when mysql is needed; add it in your own project -->
		<dependency>
			<groupId>mysql</groupId>
			<artifactId>mysql-connector-java</artifactId>
			<version>8.0.16</version>
		</dependency>
		<!-- https://mvnrepository.com/artifact/jdom/jdom -->
		<dependency>
			<groupId>jdom</groupId>
			<artifactId>jdom</artifactId>
			<version>1.0</version>
		</dependency>
		<!-- https://mvnrepository.com/artifact/log4j/log4j -->
		<dependency>
			<groupId>log4j</groupId>
			<artifactId>log4j</artifactId>
			<version>1.2.17</version>
		</dependency>
		<dependency>
			<groupId>org.quartz-scheduler</groupId>
			<artifactId>quartz</artifactId>
			<version>2.2.3</version>
		</dependency>
		<!-- https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-webapp -->
		<dependency>
			<groupId>org.eclipse.jetty</groupId>
			<artifactId>jetty-webapp</artifactId>
			<version>8.2.0.v20160908</version>
			<scope>provided</scope>
		</dependency>
	</dependencies>
	<build>
		<finalName>ROOT</finalName>
		<plugins>
			<plugin>
				<groupId>org.apache.maven.plugins</groupId>
				<artifactId>maven-war-plugin</artifactId>
				<configuration>
					<!-- NOTE(review): <source>/<target>/<encoding> are compiler-plugin settings;
					     maven-war-plugin ignores them. Compilation level is already driven by the
					     maven.compiler.* properties above - confirm before removing. -->
					<source>1.8</source>
					<target>1.8</target>
					<encoding>UTF-8</encoding>
					<warSourceExcludes>logs/**,config/**</warSourceExcludes>
					<webResources>
						<resource>
							<!-- Copies build/${build.type}/ into the WAR under config/ -->
							<targetPath>config/</targetPath>
							<directory>${project.basedir}/build/${build.type}/</directory>
						</resource>
					</webResources>
				</configuration>
			</plugin>
		</plugins>
	</build>
</project>

View File

@ -0,0 +1,149 @@
package com.evt.mgr;
import java.sql.SQLException;
import com.data.util.CountUtil;
import com.taurus.core.entity.ITArray;
import com.taurus.core.entity.ITObject;
import com.taurus.core.entity.TArray;
import com.taurus.core.entity.TObject;
import com.taurus.core.plugin.database.DataBase;
import com.taurus.core.plugin.redis.Redis;
import com.taurus.core.util.StringUtil;
import redis.clients.jedis.Jedis;
public class EventController {
private static final String EVENT_TYPE = "E";
private static final String EVENT_GID = "gid";
private static final String EVENT_UID = "uid";
public static final int execEvt(String key,boolean isGroupEvt) {
int count = 0;
do {
String jsonStr = Redis.use(EventReceiver.CHACHE_KEY).rpop(key);
if(StringUtil.isEmpty(jsonStr))
{
break;
}
long startTime = System.currentTimeMillis();
ITObject data = null;
try {
data = TObject.newFromJsonData(jsonStr);
} catch (Exception e) {
EventServer.log.error(jsonStr + ":event json error!", e);
}
int type = 0;
int uid = 0;
int groupId =0;
int reulst = 0;
try {
type = data.getInt(EVENT_TYPE);
if(isGroupEvt) {
groupId = data.getInt(EVENT_GID);
}else {
uid = data.getInt(EVENT_UID);
}
IHandler handler = EventServer.eventReceiver.getHandler(type);
if(handler!=null) {
if(isGroupEvt) {
reulst = handler.processGroup(groupId, data);
EventServer.log.info(jsonStr + " use time:" + (System.currentTimeMillis() - startTime) + " ms");
}else {
reulst = handler.process(uid, data);
EventServer.log.info(jsonStr + " use time:" + (System.currentTimeMillis() - startTime) + " ms");
}
if(reulst!=0) {
if(reulst == -1) {
jsonStr = data.toJson();
}
Redis.use(EventReceiver.CHACHE_KEY).lpush(key, jsonStr);
}
}
}catch (Exception e) {
e.printStackTrace();
Redis.use("group1_db15").lpush(key, jsonStr);
break;
}
if(reulst == 0) {
count++;
}
} while(true);
return count;
}
/**
*
* @param uid
* @param pay
* @param cur_diamo
* @param reason
* @param gameId
* @param groupId
* @param pid
* @throws SQLException
*/
public static final void payDiamo(int uid,int pay,int cur_diamo,int reason,int gameId,int groupId,int pid) throws SQLException{
ITArray data = TArray.newInstance();
data.addInt(uid);
data.addInt(pay);
data.addInt(reason);
data.addInt(gameId);
data.addInt(groupId);
data.addInt(cur_diamo);
DataBase.use().prepareCallNonResult("sp_transfer_diamo", data);
if(groupId>0) {
Jedis jedis9 = Redis.use("group1_db9").getJedis();
try {
CountUtil.countLog(String.format("g%s:diamo_cost",groupId), pay, jedis9);
if(pid > 0) {
CountUtil.countLog(String.format("g%s:diamo_cost:p%s",groupId,pid), pay, jedis9,false,true);
}
}finally {
jedis9.close();
}
}
}
public static final int execSql(String key) {
int count = 0;
do {
String str = Redis.use(EventReceiver.CHACHE_KEY).rpop(key);
//EventServer.log.info( " execsql:"+str);
if(StringUtil.isEmpty(str))
{
break;
}
int reulst = 0;
long startTime = System.currentTimeMillis();
try {
int type = Integer.parseInt(str.substring(0,1));
String sql = str.substring(1);
if(type==1) {
DataBase.use().executeUpdate(sql);
EventServer.log.info(sql + " use time:" + (System.currentTimeMillis() - startTime) + " ms");
}else {
DataBase.use().executeCall(sql, false);
EventServer.log.info(sql + " use time:" + (System.currentTimeMillis() - startTime) + " ms");
}
}catch (Exception e) {
e.printStackTrace();
Redis.use("group1_db15").lpush(key, str);
break;
}
if(reulst == 0) {
count++;
}
} while(true);
return count;
}
}

View File

@ -0,0 +1,101 @@
package com.evt.mgr;
import java.util.HashMap;
import java.util.Map;
import com.taurus.core.util.Logger;
/**
 * Owns the pool of polling worker threads and the event-type to handler mapping.
 * Spawns ten workers for each of the three queue families (group events,
 * per-user events, queued SQL).
 */
public class EventReceiver {
	private final static String GROUP_EVT_KEY = "evt_group_";
	private final static String EVT_KEY = "event_";
	private final static String EVT_DB_KEY = "evt_db_";
	/** Redis instance name holding all event queues. */
	public final static String CHACHE_KEY = "group1_db8";

	// Populated by EventServer.initHandler(); read by getHandler().
	Map<Integer, IHandler> handlerMap;
	// Worker loops keep running while this flag is true.
	volatile boolean run;

	public EventReceiver() {
		handlerMap = new HashMap<>();
	}

	/** Starts ten polling workers per queue family (types 1..3). */
	void start() {
		run = true;
		for (int kind = 1; kind <= 3; ++kind) {
			for (int idx = 0; idx < 10; ++idx) {
				new GroupRunnable(idx, this, kind);
			}
		}
	}

	/** Signals all workers to stop after their current iteration. */
	void destroy() {
		run = false;
	}

	/** Returns the handler registered for the given event type, or null. */
	IHandler getHandler(int type) {
		return handlerMap.get(type);
	}

	/**
	 * One polling worker. Repeatedly drains its assigned queue shard and sleeps
	 * between passes, backing off longer when the queue was empty.
	 */
	private static final class GroupRunnable implements Runnable {
		private Logger log = Logger.getLogger(GroupRunnable.class);
		EventReceiver receiver;
		int type;   // 1 = group events, 2 = user events, 3 = queued SQL
		int id;     // shard index 0..9, appended to the queue key
		Thread thread;

		public GroupRunnable(int id, EventReceiver receiver, int type) {
			this.receiver = receiver;
			this.type = type;
			this.id = id;
			this.thread = new Thread(this, id + "");
			this.thread.start();
		}

		@Override
		public void run() {
			while (receiver.run) {
				long begin = System.currentTimeMillis();
				int handled = 0;
				try {
					if (type == 1) {
						handled = EventController.execEvt(GROUP_EVT_KEY + this.id, true);
					} else if (type == 2) {
						handled = EventController.execEvt(EVT_KEY + id, false);
					} else if (type == 3) {
						handled = EventController.execSql(EVT_DB_KEY + id);
					}
					long elapsed = System.currentTimeMillis() - begin;
					if (handled > 0) {
						EventServer.log.info("handle event:" + handled + " use time:" + elapsed + " ms");
					}
					// Adaptive back-off: busy pass -> yield briefly, light pass -> short
					// nap, empty pass -> long nap.
					long nap;
					if (elapsed >= 30 || handled >= 30) {
						nap = 1;
					} else if (handled > 0) {
						nap = 5;
					} else {
						nap = 20;
					}
					Thread.sleep(nap);
				} catch (Exception e) {
					log.error(e);
				}
			}
		}
	}
}

View File

@ -0,0 +1,105 @@
package com.evt.mgr;
import static org.quartz.CronScheduleBuilder.cronSchedule;
import static org.quartz.JobBuilder.newJob;
import static org.quartz.TriggerBuilder.newTrigger;
import com.evt.mgr.job.CleanGroupLogJob;
import org.quartz.CronTrigger;
import org.quartz.JobDetail;
import org.quartz.Scheduler;
import org.quartz.SchedulerFactory;
import org.quartz.impl.StdSchedulerFactory;
import com.data.util.ConsumeCode;
import com.data.util.EventType;
import com.evt.mgr.handler.HandlerGroupMemberRound;
import com.evt.mgr.handler.HandlerGroupRound;
import com.evt.mgr.handler.HandlerHpConsume;
import com.evt.mgr.handler.HandlerLose;
import com.evt.mgr.handler.HandlerOver;
import com.evt.mgr.handler.HandlerPay;
import com.evt.mgr.handler.HandlerWin;
import com.evt.mgr.job.CleanTimeOutRoomJob;
import com.taurus.core.routes.Extension;
import com.taurus.core.routes.Routes;
import com.taurus.core.util.Logger;
/**
 * Extension entry point for the event manager: wires event handlers, schedules
 * the Quartz cleanup jobs, and starts the {@link EventReceiver} worker threads.
 */
public class EventServer extends Extension {
	/** Shared receiver; read by EventController to look up handlers. */
	static EventReceiver eventReceiver;
	/** Shared logger; used by EventController and the worker threads. */
	static Logger log;

	@Override
	public void onStart() {
		log = Logger.getLogger(EventServer.class);
		eventReceiver = new EventReceiver();
		initHandler();
		initJob();
		eventReceiver.start();
	}

	@Override
	public void configRoute(Routes me) {
		// No HTTP routes: this extension only runs background workers and jobs.
	}

	/**
	 * Registers the Quartz cron jobs: two room-cleanup runs in the early morning
	 * and three hourly log-cleanup runs.
	 */
	public void initJob() {
		try {
			log.info("clean invalid room, no diamond");
			SchedulerFactory sf = new StdSchedulerFactory();
			Scheduler sched = sf.getScheduler();

			// Daily room cleanup at 05:00 and 05:30.
			// NOTE(review): both identities use CleanTimeOutRoomJob - confirm
			// "clean_invalid_room" is not meant to run a different job class.
			JobDetail job = newJob(CleanTimeOutRoomJob.class).withIdentity("clean_group_room", "group").build();
			CronTrigger trigger = newTrigger().withIdentity("clean_group_room_trigger", "group").withSchedule(cronSchedule("0 0 5 * * ? ")).build();
			sched.scheduleJob(job, trigger);
			job = newJob(CleanTimeOutRoomJob.class).withIdentity("clean_invalid_room", "group").build();
			trigger = newTrigger().withIdentity("clean_invalid_room_trigger", "group").withSchedule(cronSchedule("0 30 5 * * ? ")).build();
			sched.scheduleJob(job, trigger);

			// Hourly log cleanups.
			JobDetail log_job = newJob(CleanGroupLogJob.class).withIdentity("clean_group_member_log", "group").build();
			CronTrigger log_trigger = newTrigger().withIdentity("clean_group_member_log_trigger", "group").withSchedule(cronSchedule("0 0 0/1 * * ? ")).build();
			sched.scheduleJob(log_job, log_trigger);
			log_job = newJob(CleanGroupLogJob.class).withIdentity("clean_group_hp_log", "group").build();
			log_trigger = newTrigger().withIdentity("clean_group_hp_log_trigger", "group").withSchedule(cronSchedule("0 0 0/1 * * ? ")).build();
			sched.scheduleJob(log_job, log_trigger);
			log_job = newJob(CleanGroupLogJob.class).withIdentity("clean_rec_room_log", "group").build();
			log_trigger = newTrigger().withIdentity("clean_rec_room_log_trigger", "group").withSchedule(cronSchedule("0 0 0/1 * * ? ")).build();
			sched.scheduleJob(log_job, log_trigger);
			sched.start();
		} catch (Exception e) {
			// Fixed: scheduler failures were silently swallowed by an empty catch block.
			log.error("failed to initialize quartz jobs", e);
		}
	}

	/** Maps each Redis event type to its handler implementation. */
	private void initHandler() {
		eventReceiver.handlerMap.put(EventType.REDIS_EVENT_OVER, new HandlerOver());
		eventReceiver.handlerMap.put(EventType.REDIS_EVENT_LOSE, new HandlerLose());
		eventReceiver.handlerMap.put(EventType.REDIS_EVENT_WIN, new HandlerWin());
		eventReceiver.handlerMap.put(EventType.REDIS_EVENT_PAY, new HandlerPay(ConsumeCode.DIAMO_JOIN_ROOM, true));
		eventReceiver.handlerMap.put(EventType.REDIS_EVENT_BACK_PAY, new HandlerPay(ConsumeCode.DIAMO_REFUND, false));
		eventReceiver.handlerMap.put(EventType.REDIS_EVENT_GROUP_ROOM, new HandlerPay(ConsumeCode.DIAMO_AGENT_ROOM, true));
		eventReceiver.handlerMap.put(EventType.REDIS_EVENT_CREATE_ROOM, new HandlerPay(ConsumeCode.DIAMO_CREAT_ROOM, true));
		eventReceiver.handlerMap.put(EventType.REDIS_EVENT_MAIL, new HandlerPay(ConsumeCode.DIAMO_MAIL, false));
		eventReceiver.handlerMap.put(EventType.REDIS_EVENT_WHEEL, new HandlerPay(ConsumeCode.DIAMO_WHEEL, false));
		eventReceiver.handlerMap.put(EventType.REDIS_EVENT_GROUP_ROUND, new HandlerGroupRound());
		eventReceiver.handlerMap.put(EventType.REDIS_EVENT_GROUP_MEMBER_ROUND, new HandlerGroupMemberRound());
		eventReceiver.handlerMap.put(EventType.REDIS_EVENT_GROUP_HP, new HandlerHpConsume());
		eventReceiver.handlerMap.put(EventType.REDIS_EVENT_ADDPAY, new HandlerPay(ConsumeCode.DIAMO_ADD, false));
	}

	@Override
	public void onStop() {
		eventReceiver.destroy();
	}
}

View File

@ -0,0 +1,69 @@
package com.evt.mgr;
import com.data.util.EventType;
import com.taurus.core.entity.ITObject;
import com.taurus.core.entity.TObject;
import com.taurus.core.plugin.redis.Redis;
/**
 * Publishes group-management notifications on the "mgr_group" Redis channel
 * (redis instance "group1_db11").
 */
public class GroupPublisherService {
	public static final String CHANNEL_NAME = "mgr_group";
	private static final String CMD_UPDATE_MEMBER = "update_member";
	private static final String CMD_DEL_ROOM = "del_room";
	private final static String EVT_TYPE = "E";
	private final static String EVT_UID = "uid";
	private final static String EVT_KEY = "event_";

	/** Serializes the payload and publishes it on the group-management channel. */
	private static void broadcast(ITObject payload) {
		Redis.use("group1_db11").publish(CHANNEL_NAME, payload.toJson());
	}

	/**
	 * Publishes a member-update notification.
	 *
	 * @param groupId group id
	 * @param uid     member user id
	 * @param type    attribute being updated (meaning defined by subscribers)
	 * @param value   new attribute value
	 */
	public static void updateMemberEvt(int groupId, int uid, int type, int value) {
		ITObject msg = TObject.newInstance();
		msg.putInt("gid", groupId);
		msg.putInt("uid", uid);
		msg.putInt("type", type);
		msg.putInt("value", value);
		msg.putString("cmd", CMD_UPDATE_MEMBER);
		broadcast(msg);
	}

	/** Publishes a room-deletion notification for the given group/room. */
	public static void delRoomEvt(int groupId, String roomid) {
		ITObject msg = TObject.newInstance();
		msg.putInt("gid", groupId);
		msg.putString("roomid", roomid);
		msg.putString("cmd", CMD_DEL_ROOM);
		broadcast(msg);
	}

	/**
	 * Builds a refund payload for the given user.
	 * NOTE(review): the actual send is commented out below, so this is currently
	 * a no-op apart from building the payload - confirm whether that is intended.
	 *
	 * @param uid     user to refund
	 * @param pay     amount to refund (sent negated)
	 * @param groupId group id; only included when &gt; 0
	 * @param gameId  game id
	 */
	public static void refundDiamo(int uid, int pay, int groupId, int gameId) {
		ITObject msg = TObject.newInstance();
		msg.putInt("pay", -pay);
		msg.putInt("game", gameId);
		if (groupId > 0) {
			msg.putInt("group", groupId);
		}
		//sendEvt(EventType.REDIS_EVENT_BACK_PAY, uid, msg);
	}

	/** Pushes an event onto the user's queue shard (uid % 10) in the event store. */
	private static void sendEvt(int type, int uid, ITObject data) {
		int shard = uid % 10;
		data.putInt(EVT_UID, uid);
		data.putInt(EVT_TYPE, type);
		Redis.use(EventReceiver.CHACHE_KEY).lpush(EVT_KEY + shard, data.toJson());
	}
}

View File

@ -0,0 +1,10 @@
package com.evt.mgr;
import com.taurus.core.entity.ITObject;
/**
 * Contract for event handlers dispatched by EventController.
 * Return value convention (as used by EventController.execEvt): 0 = handled,
 * any non-zero value requests a re-queue, -1 additionally re-serializes the
 * (possibly mutated) event before re-queueing.
 */
public interface IHandler {
	/** Handles a per-user event for the given user id. */
	public int process(int uid,ITObject param) throws Exception;
	/** Handles a group-scoped event for the given group id. */
	public int processGroup(int groupId,ITObject param) throws Exception;
}

View File

@ -0,0 +1,211 @@
package com.evt.mgr;
import java.util.List;
import com.data.cache.GroupCache;
import com.data.util.CountUtil;
import com.data.util.Utility;
import com.taurus.core.entity.ITArray;
import com.taurus.core.plugin.redis.Redis;
import com.taurus.core.util.DateUtils;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
/**
 * Helper routines shared by the event handlers: they fold per-round
 * statistics (valid rounds, totals, wins, hp consumption) into Redis
 * counters for a member and for the parents returned by
 * Utility.getMemberParents for that member.
 */
public class Utils {
	/**
	 * Accumulates one finished round for a single member and for each entry
	 * of the member's parent chain in group {@code groupId}.
	 *
	 * @param jedis10     connection to group1_db10 (used to resolve parents)
	 * @param pipeline9   pipeline on group1_db9 that receives the counter writes
	 * @param uid         member whose round is being counted
	 * @param groupId     group the round was played in
	 * @param pid         play (game mode) id
	 * @param valid_count valid-round count to credit (skipped when <= 0)
	 * @param valid_diamo valid diamond amount to credit (skipped when <= 0)
	 * @param all_count   total round count to credit
	 */
	public static final void countValidAndTotal(Jedis jedis10, Pipeline pipeline9,int uid,int groupId,int pid, int valid_count, int valid_diamo, int all_count) {
		// The "_self" keys receive only the member's own contribution; the bare
		// keys also accumulate contributions from descendants (parent loops below).
		if(valid_count > 0) {
			String gmv = String.format("g{%s}:m%s:valid_round", groupId,uid);
			CountUtil.countLogByDay30(gmv, valid_count, pipeline9);
			CountUtil.countLogByDay30(gmv + "_self", valid_count, pipeline9);
		}
		if (valid_diamo > 0)
		{
			String gmv = String.format("g{%s}:m%s:valid_diamo", groupId,uid);
			CountUtil.countLogByDay30(gmv, valid_diamo, pipeline9);
			CountUtil.countLogByDay30(gmv + "_self", valid_diamo, pipeline9);
		}
		String gmr = String.format("g{%s}:m%s:round_log", groupId, uid);
		CountUtil.countLogByDay30(gmr, 1, pipeline9,true);
		CountUtil.countLogByDay30(gmr + "_self", 1, pipeline9,true);
		// Parent chain may include uid itself; null/empty means nothing to roll up.
		List<Integer> temp = Utility.getMemberParents(jedis10, groupId, uid, true);
		if(temp != null && temp.size() > 0) {
			if(valid_count > 0) {
				for(int i=0;i<temp.size();++i) {
					int par = temp.get(i);
					if(par != uid) {
						String gmv = String.format("g{%s}:m%s:valid_round", groupId,par);
						CountUtil.countLogByDay30(gmv, valid_count, pipeline9);
					}
					String gmvp = String.format("g{%s}:m%s:valid_round:p%s", groupId,par,pid);
					CountUtil.countLogByDay30(gmvp, valid_count, pipeline9);
					String gmvp2 = String.format("g{%s}:m%s:valid_round2:p%s", groupId,par,pid);
					CountUtil.countLogByDay30(gmvp2, valid_count, pipeline9);
					// "d_" (direct) counters only for uid itself or the first chain entry.
					boolean add_uid = (uid == par || i == 0);
					if(add_uid == true) {
						String gmv = String.format("g{%s}:m%s:d_valid_round", groupId,par);
						CountUtil.countLogByDay30(gmv, valid_count, pipeline9);
					}
				}
			}
			for(int i=0;i<temp.size();++i) {
				int par = temp.get(i);
				if(par != uid) {
					gmr = String.format("g{%s}:m%s:round_log", groupId,par);
					CountUtil.countLogByDay30(gmr, 1, pipeline9,true);
				}
				gmr = String.format("g{%s}:m%s:valid_diamo:p%s", groupId,par,pid);
				CountUtil.countLogByDay30(gmr, valid_diamo, pipeline9);
				gmr = String.format("g{%s}:m%s:all_count:p%s", groupId,par,pid);
				CountUtil.countLogByDay30(gmr, all_count, pipeline9,true);
				boolean add_uid = (uid == par || i == 0);
				if(add_uid == true) {
					gmr = String.format("g{%s}:m%s:d_round_log", groupId,par);
					CountUtil.countLogByDay30(gmr, 1, pipeline9,true);
				}
			}
		}
	}
	/**
	 * Convenience overload: applies the per-member overload to every uid in
	 * {@code playerList}, borrowing (and always closing) one group1_db10
	 * connection for the whole batch.
	 */
	public static final void countValidAndTotal(Pipeline pipeline9,ITArray playerList, int groupId,int pid, int valid_count, int valid_diamo, int all_count) {
		Jedis jedis10 = Redis.use("group1_db10").getJedis();
		try {
			for(int i = 0; i < playerList.size(); i++) {
				int uid = playerList.getInt(i);
				countValidAndTotal(jedis10, pipeline9, uid, groupId, pid, valid_count, valid_diamo, all_count);
			}
		}
		finally {
			jedis10.close();
		}
	}
	/**
	 * Adds {@code score} to the member's win totals and to every parent's
	 * totals, and refreshes each parent's member-list set in db10.
	 *
	 * @param pipeline9 pipeline on group1_db9 receiving the counter writes
	 * @param groupId   group the round was played in
	 * @param pid       play (game mode) id (not referenced in this body)
	 * @param uid       member being credited
	 * @param win       win flag (not referenced in this body)
	 * @param score     signed score delta for this round
	 */
	public static final void countRoundWin(Pipeline pipeline9,int groupId,int pid, int uid,boolean win,int score) {
		List<Integer> par_list = null;
		Jedis jedis10 = Redis.use("group1_db10").getJedis();
		try {
			par_list = Utility.getMemberParents(jedis10, groupId, uid, true);
			Pipeline pipeline10 = jedis10.pipelined();
			// No parent chain -> nothing to count; jedis10 is still closed by finally.
			if(par_list==null)return;
			String key = String.format("g{%s}:m%s:total_win",groupId,uid);
			CountUtil.countLogByDay30(key, score, pipeline9);
			CountUtil.countLogByDay30(key + "_self", score, pipeline9);
			for(int i=0;i<par_list.size();++i) {
				int par = par_list.get(i);
				boolean add_uid = (uid == par || i == 0);
				if(add_uid == true) {
					key = String.format("g{%s}:m%s:d_total_win",groupId,par);
					CountUtil.countLogByDay30(key, score, pipeline9);
				}
				// Keep the parent's member-list set in db10 up to date.
				String mlk = GroupCache.genMemberListKey(groupId, par);
				pipeline10.sadd(mlk, uid+"");
				if(par != uid) {
					key = String.format("g{%s}:m%s:total_win",groupId,par);
					CountUtil.countLogByDay30(key, score, pipeline9);
				}
			}
			pipeline10.sync();
		}finally {
			jedis10.close();
		}
	}
	/**
	 * Records hp consumption ({@code hp}) and pump/rake ({@code pump}) for a
	 * member and aggregates both up the parent chain. The per-day hash keys
	 * written here expire after two days (3600*24*2 seconds).
	 *
	 * @param pipeline9 pipeline on group1_db9 receiving the writes
	 * @param groupId   group id
	 * @param uid       member consuming hp
	 * @param pump      rake amount (counted only when > 0)
	 * @param hp        hp delta (counted only when != 0)
	 */
	public static final void countHpconsume(Pipeline pipeline9,int groupId,int uid,int pump,int hp) {
		List<Integer> par_list = null;
		Jedis jedis10 = Redis.use("group1_db10").getJedis();
		try {
			par_list = Utility.getMemberParents(jedis10, groupId, uid, true);
		}finally {
			jedis10.close();
		}
		if(par_list==null)return;
		String key;
		if(hp!=0) {
			key = String.format("g{%s}:m%s:hp_consume_log",groupId,uid);
			CountUtil.countLogByDay(key, hp, pipeline9);
		}
		int day = DateUtils.getBeginDay();
		// self: first chain entry is the member itself.
		boolean self = par_list.get(0) == uid;
		for(int i=0;i<par_list.size();++i) {
			int par = par_list.get(i);
			// Write the per-uid hash field only for the first entry, or for the
			// second entry when the first one is the member itself.
			boolean add_uid = i==0||(i<=1&&self);
			if(pump>0) {
				key = String.format("g%s:hp_cost:par%s_%s",groupId,par,day);
				pipeline9.hincrBy(key, "total", pump);
				if(add_uid) {
					pipeline9.hincrBy(key, uid+"", pump);
				}
				pipeline9.expire(key, 3600*24*2);
			}
			if(hp!=0) {
				if(uid != par) {
					key = String.format("g{%s}:m%s:hp_consume_log",groupId,par);
					CountUtil.countLogByDay(key, hp, pipeline9);
				}
				key = String.format("g%s:hp_consume:par%s_%s",groupId,par,day);
				pipeline9.hincrBy(key, "total", hp);
				if(add_uid) {
					pipeline9.hincrBy(key, uid+"", hp);
				}
				pipeline9.expire(key, 3600*24*2);
			}
		}
	}
}

View File

@ -0,0 +1,97 @@
package com.evt.mgr.handler;
import com.data.util.ConsumeCode;
import com.data.util.CountUtil;
import com.data.util.Utility;
import com.evt.mgr.IHandler;
import com.evt.mgr.Utils;
import com.taurus.core.entity.ITArray;
import com.taurus.core.entity.ITObject;
import com.taurus.core.entity.TArray;
import com.taurus.core.plugin.database.DataBase;
import com.taurus.core.plugin.redis.Redis;
import com.taurus.core.util.DateUtils;
import com.taurus.core.util.Logger;
import com.taurus.core.util.StringUtil;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
import java.sql.SQLException;
/**
 * Handles the per-member end-of-round event for a group: persists the member
 * log and an hp-log row, then updates the member's round/win counters in
 * Redis (group1_db9).
 */
public class HandlerGroupMemberRound implements IHandler{
	// FIX: the logger was created with HandlerHpConsume.class, so every log
	// line from this handler was attributed to the wrong class.
	static Logger log = Logger.getLogger(HandlerGroupMemberRound.class);

	/** Per-user events carry nothing for this handler. */
	@Override
	public int process(int uid, ITObject param) throws Exception {
		return 0;
	}

	/**
	 * Writes one member-round row through the sp_add_member_log stored
	 * procedure (score is already net of pump/xipai/anchou at the call site).
	 *
	 * @return always false; no caller checks the result
	 * @throws SQLException propagated from the db layer
	 */
	private static boolean addMemberLog(int gid, int pid,int uid, int score, int win, int time, int perfectRound,int valid_count) throws SQLException {
		String sql = String.format("{call sp_add_member_log(%s,%s,%s,%s,%s,%s,%s,%s)}",
				gid,pid,uid,score,win,time,perfectRound,valid_count);
		Utility.evtdbLog(gid, 2, sql);
		return false;
	}

	@Override
	public int processGroup(int groupId, ITObject param) throws Exception {
		int pid = param.getInt("pid");
		int score = param.getInt("score");
		int uid = param.getInt("uid");
		int win = param.getInt("win");
		int time = DateUtils.getBeginDay();
		int perfectRound = param.getInt("perfect_round");
		int valid_count = param.getInt("valid_count");
		int pump = param.getInt("pump");
		int cur_hp = param.getInt("cur_hp");
		int xipai_total = param.getInt("xi_pai_total");
		int anchou_total = param.getInt("an_chou_total");
		String roomid = param.getString("room");
		// Prefer the event's own timestamp when present.
		int cur_time = (int)(System.currentTimeMillis() / 1000);
		if (param.containsKey("time"))
		{
			cur_time = param.getInt("time");
		}
		log.info("uid:"+uid+"anchou_total:"+ anchou_total+"xipai_total:"+xipai_total);
		//ITArray param1 = TArray.newInstance();
		//param1.addInt(groupId);
		//param1.addInt(pid);
		//param1.addInt(uid);
		//param1.addInt(score - pump - xipai_total);
		//param1.addInt(win);
		//param1.addInt(time);
		//param1.addInt(perfectRound);
		//param1.addInt(valid_count);
		//DataBase.use().prepareCallNonResult("sp_add_member_log", param1);
		addMemberLog(groupId, pid, uid, score - pump - xipai_total-anchou_total, win, time, perfectRound, valid_count);
		// NOTE(review): roomid is interpolated unquoted into the VALUES list;
		// this only parses if room ids are numeric — confirm, or quote it.
		String sql = String.format("INSERT INTO group_hp_log(gid,uid,reason,hp,cur_hp,pid,roomid,time) "
				+ "VALUES(%s,%s,%s,%s,%s,%s,%s,%s)",
				groupId,uid,ConsumeCode.HP_PUMP_TOTAL,score - pump - xipai_total-anchou_total,
				cur_hp,pid,roomid,(int) (cur_time));
		Utility.evtdb(groupId, 1, sql);
		//DataBase.use().executeUpdate(sql);
		String gmrp_key = String.format("g%s:m%s:round:p%s",groupId,uid,pid);
		String gmr_key = String.format("g%s:m%s:round",groupId,uid);
		Jedis jedis9 = Redis.use("group1_db9").getJedis();
		try {
			CountUtil.countLog(gmrp_key, 1, jedis9);
			CountUtil.countLog(gmr_key, 1, jedis9);
			Pipeline pipeline9 = jedis9.pipelined();
			// Win totals use score net of pump only; xipai/anchou are excluded here.
			Utils.countRoundWin(pipeline9, groupId, pid, uid, win==1,score - pump);
			pipeline9.sync();
		}finally {
			jedis9.close();
		}
		return 0;
	}
}

View File

@ -0,0 +1,72 @@
package com.evt.mgr.handler;
import com.data.util.CountUtil;
import com.evt.mgr.IHandler;
import com.evt.mgr.Utils;
import com.taurus.core.entity.ITArray;
import com.taurus.core.entity.ITObject;
import com.taurus.core.plugin.redis.Redis;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
/**
 * Handles the end-of-room event for a group: bumps total/per-play round
 * counters, valid vs invalid room counters, and per-player validity/total
 * counters via Utils.countValidAndTotal.
 */
public class HandlerGroupRound implements IHandler{
	/** Per-user round events are not handled here. */
	@Override
	public int process(int uid, ITObject param) throws Exception {
		return 0;
	}

	@Override
	public int processGroup(int groupId, ITObject param) throws Exception {
		int playId = param.getInt("pid");
		int validFlag = param.getInt("valid");
		ITArray players = param.getTArray("player_list");
		int validCount = param.getInt("valid_count");
		// Optional fields default to 0 when absent from the payload.
		int validDiamo = param.containsKey("valid_diamo") ? param.getInt("valid_diamo") : 0;
		int allCount = param.containsKey("all_count") ? param.getInt("all_count") : 0;

		Jedis jedis9 = Redis.use("group1_db9").getJedis();
		try {
			Pipeline pipeline9 = jedis9.pipelined();
			// Overall round counters: one per play type, one group-wide.
			CountUtil.countLog(String.format("g%s:round:p%s",groupId,playId), 1, pipeline9,false,true);
			CountUtil.countLog(String.format("g%s:round",groupId), 1, pipeline9,true);
			// Valid/invalid room counters (per-day per-play, plus a running total).
			if(validFlag == 1) {
				CountUtil.countLogByDay(String.format("g%s:valid_room:p%s",groupId,playId), 1, pipeline9,true);
				CountUtil.countLog(String.format("g%s:valid_room",groupId), 1, pipeline9);
			}else {
				CountUtil.countLogByDay(String.format("g%s:no_valid_room:p%s",groupId,playId), 1, pipeline9,true);
				CountUtil.countLog(String.format("g%s:no_valid_room",groupId), 1, pipeline9);
			}
			// Per-player validity/total roll-ups for everyone at the table.
			Utils.countValidAndTotal(pipeline9, players, groupId, playId, validCount, validDiamo, allCount);
			pipeline9.sync();
		}finally {
			jedis9.close();
		}
		return 0;
	}
}

View File

@ -0,0 +1,695 @@
package com.evt.mgr.handler;
import java.sql.SQLException;
import com.data.bean.GroupBean;
import com.data.cache.GroupCache;
import com.data.cache.GroupMemberCache;
import com.data.util.ConsumeCode;
import com.data.util.CountUtil;
import com.data.util.Utility;
import com.evt.mgr.IHandler;
import com.taurus.core.entity.ITArray;
import com.taurus.core.entity.ITObject;
import com.taurus.core.entity.TArray;
import com.taurus.core.plugin.database.DataBase;
import com.taurus.core.plugin.redis.Redis;
import com.taurus.core.util.Logger;
import com.taurus.core.util.StringUtil;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
public class HandlerHpConsume implements IHandler{
	static Logger log = Logger.getLogger(HandlerHpConsume.class);
	// Redis db holding group-member state (reward_hp counters, member hashes).
	final static String DB = "group1_db10";
	/**
	 * Applies one hp change through the sp_update_hp stored procedure.
	 *
	 * @param gid    group id
	 * @param uid    member id
	 * @param pid    play (game mode) id
	 * @param hp     signed hp delta
	 * @param cur_hp member's hp after the change
	 * @param reason ConsumeCode reason constant
	 * @param roomid room the change happened in
	 * @param time   unix timestamp (seconds)
	 * @param desc   free-text description stored with the row
	 * @return always false; no caller checks the result
	 * @throws SQLException propagated from the db layer
	 */
	private static boolean hpConsume(int gid, int uid,int pid, int hp,int cur_hp,int reason, String roomid, int time,String desc) throws SQLException {
		// NOTE(review): roomid/desc are interpolated into the call string
		// unescaped; confirm they can never contain quotes, or parameterize.
		String sql = String.format("{call sp_update_hp(%s,%s,%s,%s,%s,%s,'%s',%s,'%s')}",
				gid,uid,hp,cur_hp,reason,pid,roomid,time,desc);
		Utility.evtdb(gid, 2, sql);
		return false;
	}
	/** Per-user hp events are not handled; all work happens in processGroup. */
	@Override
	public int process(int uid, ITObject param) throws Exception {
		return 0;
	}
	/**
	 * Pays out the table pump (rake) reward for one round along the member's
	 * agent chain and gives the remainder to the group owner.
	 *
	 * {@code prs} is a JSON array of objects with "p" (parent uid) and "r"
	 * (cumulative reward percent); each level earns the difference between
	 * its "r" and the level below it.
	 *
	 * @param pipeline   group1_db9 pipeline receiving the daily counters
	 * @param uid        member the pump was collected from
	 * @param rewardType 2 = divide flat rewards by max_player (flat mode only)
	 * @param valueType  1 = "r" values are percentages of {@code pump}
	 * @param real_pump  amount actually collected; the owner receives
	 *                   real_pump minus what the agents received
	 * @param pump       base amount percentage rewards are computed from
	 */
	private void to_reward(Pipeline pipeline, int uid, String prs,int rewardType, int valueType, int real_pump, int pump,int groupId,int pid,String roomid,int time,int max_player) throws Exception {
		int useValue = 0;
		if(StringUtil.isNotEmpty(prs)) {
			int round = 100 / max_player;
			ITArray list = TArray.newFromJsonData(prs);
			// Walk from the last (outermost) entry inward.
			for(int i=list.size()-1;i>=0;--i) {
				ITObject obj1 = list.getTObject(i);
				int parentId = obj1.getInt("p");
				int rewardPercent = 0;
				if (obj1.containsKey("r"))
				{
					rewardPercent = obj1.getInt("r");
				}
				// rv = this level's slice: its cumulative percent minus the
				// cumulative percent of the level beneath it.
				int rv = 0;
				if(i > 0) {
					ITObject obj2 = list.getTObject(i - 1);
					if (!obj2.containsKey("r"))
					{
						rv =rewardPercent - 0;
					}
					else {
						rv =rewardPercent -obj2.getInt("r");
					}
				}else {
					rv=rewardPercent;
				}
				int reward_value = rv;
				if(valueType == 1) {
					reward_value = Math.round(pump * (rv / 100f));
				}
				else {
					if(rewardType == 2) {
						reward_value = reward_value / max_player;
					}
				}
				if (reward_value < 0)
				{
					continue;
				}
				String key = String.format("g{%s}:m%s:reward_hp", groupId,parentId);
				Redis.use(DB).incrBy(key, reward_value);
				String rewardsql = String.format("update group_member set reward_hp = %s where uid = %s AND groupId = %s", Redis.use(DB).get(key), parentId,groupId);
				Utility.evtdb(groupId, 1, rewardsql);
				//String all_key = String.format("g{%s}:m%s:all_reward_hp", groupId,parentId);
				//Redis.use(DB).incrBy(all_key, reward_value);
				useValue += reward_value;
				Jedis jedis9 = Redis.use("group1_db9").getJedis();
				try {
					String desc = "" + uid;
					long cur_hp = CountUtil.countLogByDay3(String.format("g%s:hp_reward:m%s",groupId,parentId), reward_value, jedis9);
					// NOTE(review): desc is interpolated unquoted; it is numeric
					// ("" + uid) here so the SQL parses — confirm it stays so.
					String sql = String.format("INSERT INTO group_hp_log(gid,uid,reason,hp,cur_hp,pid,roomid,time,round, info) "
							+ "VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", groupId,parentId,ConsumeCode.HP_PARTNER_REWARD,reward_value,cur_hp,pid,roomid,time,round, desc);
					DataBase.use().executeUpdate(sql);
				}finally {
					jedis9.close();
				}
				// Record the agent's promotion reward per day.
				CountUtil.countLogByDay30(key, reward_value, pipeline);
				key = String.format("g{%s}:m%s:reward_log", groupId,parentId);
				CountUtil.countLogByDay30(key, reward_value, pipeline);
				key = String.format("g{%s}:m%s:p%s:reward_log", groupId,parentId,pid);
				CountUtil.countLogByDay30(key, reward_value, pipeline);
				// Record how much promotion reward each member produced for each superior.
				key = String.format("g{%s}:m%s:reward_log_to:par%s", groupId,uid, parentId);
				CountUtil.countLogByDay30(key, reward_value, pipeline);
				boolean add_uid = (uid == parentId || i == 0);
				if(add_uid == true) {
					key = String.format("g{%s}:m%s:d_reward", groupId,parentId);
					CountUtil.countLogByDay30(key, reward_value, pipeline);
				}
				if(i>0) {
					for(int j = i - 1; j >= 0; j--) {
						ITObject obj2 = list.getTObject(j);
						int temp = obj2.getInt("p");
						if(temp != uid) {
							key = String.format("g{%s}:m%s:reward_log_to:par%s", groupId,temp, parentId);
							CountUtil.countLogByDay30(key, reward_value, pipeline);
						}
					}
				}
				// Total pump value for this play type.
				CountUtil.countLogByDay30(String.format("g%s:hp_cost:m%s:p%s",groupId,parentId,pid),pump,pipeline);
				CountUtil.countLogByDay30(String.format("g%s:hp_reward",groupId), reward_value, pipeline);
			}
		}
		// Whatever was not paid to agents (real_pump - useValue) goes to the owner.
		GroupBean gb = GroupCache.getGroup(groupId);
		if (gb != null) {
			int round = 100 / max_player;
			int leftValue = real_pump - useValue;
			String key = String.format("g{%s}:m%s:reward_hp", groupId,gb.owner);
			Redis.use(DB).incrBy(key, leftValue);
			String rewardsql = String.format("update group_member set reward_hp = %s where uid = %s AND groupId = %s", Redis.use(DB).get(key), gb.owner,groupId);
			Utility.evtdb(groupId, 1, rewardsql);
			//String all_key = String.format("g{%s}:m%s:all_reward_hp", groupId,gb.owner);
			//Redis.use(DB).incrBy(all_key, leftValue);
			// Record the owner's daily promotion reward.
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			key = String.format("g{%s}:m%s:reward_log", groupId,gb.owner);
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			key = String.format("g{%s}:m%s:p%s:reward_log", groupId,gb.owner,pid);
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			// Record how much the member produced for the owner.
			key = String.format("g{%s}:m%s:reward_log_to:par%s", groupId,uid, gb.owner);
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			boolean add_uid = (uid == gb.owner);
			if(add_uid == true) {
				key = String.format("g{%s}:m%s:d_reward", groupId,gb.owner);
				CountUtil.countLogByDay30(key, leftValue, pipeline);
			}
			Jedis jedis9 = Redis.use("group1_db9").getJedis();
			try {
				String desc = "" + uid;
				long cur_hp = CountUtil.countLogByDay3(String.format("g%s:hp_reward:m%s",groupId,gb.owner), leftValue, jedis9);
				String sql = String.format("INSERT INTO group_hp_log(gid,uid,reason,hp,cur_hp,pid,roomid,time,round,info) "
						+ "VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", groupId,gb.owner,ConsumeCode.HP_PARTNER_REWARD,leftValue,cur_hp,pid,roomid,time,round,desc);
				DataBase.use().executeUpdate(sql);
			}finally {
				jedis9.close();
			}
		}
	}
	/**
	 * Same payout scheme as to_reward but for the "anchou" fee: the agent
	 * chain's cumulative percentages come from the "a" field of each prs
	 * entry, counters use the anchou_* key family, and hp-log rows use
	 * ConsumeCode.HP_PARTNER_ANCHOU_REWARD. The remainder goes to the owner.
	 */
	private void anchou_to_reward(Pipeline pipeline, int uid, String prs,int rewardType, int valueType, int real_pump, int pump,int groupId,int pid,String roomid,int time,int max_player) throws Exception {
		log.info("uid:"+uid+"prs:"+prs+"valueType:"+valueType+"real_pump:"+real_pump+"pump:"+pump+"groupId:"+groupId+"pid:"+pid+"time:"+time+"max_player:"+max_player);
		int useValue = 0;
		log.info("uid:"+uid+"prs:"+StringUtil.isNotEmpty(prs)+"time:"+time);
		if(StringUtil.isNotEmpty(prs)) {
			int round = 100 / max_player;
			ITArray list = TArray.newFromJsonData(prs);
			log.info("uid:"+uid+"size:"+list.size()+"time:"+time);
			// Walk from the last (outermost) entry inward.
			for(int i=list.size()-1;i>=0;--i) {
				ITObject obj1 = list.getTObject(i);
				int parentId = obj1.getInt("p");
				int rewardPercent = 0;
				if (obj1.containsKey("a"))
				{
					rewardPercent = obj1.getInt("a");
				}
				// rv = this level's slice of the cumulative "a" percentages.
				int rv = 0;
				if(i > 0) {
					ITObject obj2 = list.getTObject(i - 1);
					if (!obj2.containsKey("a"))
					{
						rv = rewardPercent - 0;
					}
					else {
						rv = rewardPercent -obj2.getInt("a");
					}
				}else {
					rv=rewardPercent;
				}
				int reward_value = rv;
				if(valueType == 1) {
					reward_value = Math.round(pump * (rv / 100f));
				}
				log.info("uid:"+uid+"reward_value:"+reward_value+"time:"+time);
				if (reward_value < 0)
				{
					continue;
				}
				String key = String.format("g{%s}:m%s:reward_hp", groupId,parentId);
				Redis.use(DB).incrBy(key, reward_value);
				String rewardsql = String.format("update group_member set reward_hp = %s where uid = %s AND groupId = %s", Redis.use(DB).get(key), parentId,groupId);
				Utility.evtdb(groupId, 1, rewardsql);
				//String all_key = String.format("g{%s}:m%s:all_reward_hp", groupId,parentId);
				//Redis.use(DB).incrBy(all_key, reward_value);
				useValue += reward_value;
				Jedis jedis9 = Redis.use("group1_db9").getJedis();
				try {
					String desc = "" + uid;
					long cur_hp = CountUtil.countLogByDay3(String.format("g%s:hp_reward:m%s",groupId,parentId), reward_value, jedis9);
					String sql = String.format("INSERT INTO group_hp_log(gid,uid,reason,hp,cur_hp,pid,roomid,time,round,info) "
							+ "VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", groupId,parentId,ConsumeCode.HP_PARTNER_ANCHOU_REWARD,reward_value,cur_hp,pid,roomid,time,round,desc);
					log.info("uid:"+uid+"sql:"+sql+"time:"+time);
					DataBase.use().executeUpdate(sql);
				}finally {
					jedis9.close();
				}
				// Record the agent's daily anchou reward.
				CountUtil.countLogByDay30(key, reward_value, pipeline);
				key = String.format("g{%s}:m%s:anchou_reward_log", groupId,parentId);
				CountUtil.countLogByDay30(key, reward_value, pipeline);
				key = String.format("g{%s}:m%s:p%s:anchou_reward_log", groupId,parentId,pid);
				CountUtil.countLogByDay30(key, reward_value, pipeline);
				// Record how much anchou reward each member produced for each superior.
				key = String.format("g{%s}:m%s:anchou_reward_log_to:par%s", groupId,uid, parentId);
				CountUtil.countLogByDay30(key, reward_value, pipeline);
				boolean add_uid = (uid == parentId || i == 0);
				if(add_uid == true) {
					key = String.format("g{%s}:m%s:d_anchou_reward", groupId,parentId);
					CountUtil.countLogByDay30(key, reward_value, pipeline);
				}
				if(i>0) {
					for(int j = i - 1; j >= 0; j--) {
						ITObject obj2 = list.getTObject(j);
						int temp = obj2.getInt("p");
						if(temp != uid) {
							key = String.format("g{%s}:m%s:anchou_reward_log_to:par%s", groupId,temp, parentId);
							CountUtil.countLogByDay30(key, reward_value, pipeline);
						}
					}
				}
				// Total anchou fee for this play type.
				CountUtil.countLogByDay30(String.format("g%s:anchou_hp_cost:m%s:p%s",groupId,parentId,pid),pump,pipeline);
				CountUtil.countLogByDay30(String.format("g%s:anchou_hp_reward",groupId), reward_value, pipeline);
			}
		}
		// Remainder (real_pump - useValue) goes to the group owner.
		GroupBean gb = GroupCache.getGroup(groupId);
		if (gb != null) {
			int round = 100 / max_player;
			int leftValue = real_pump - useValue;
			String key = String.format("g{%s}:m%s:reward_hp", groupId,gb.owner);
			Redis.use(DB).incrBy(key, leftValue);
			String rewardsql = String.format("update group_member set reward_hp = %s where uid = %s AND groupId = %s", Redis.use(DB).get(key), gb.owner,groupId);
			Utility.evtdb(groupId, 1, rewardsql);
			//String all_key = String.format("g{%s}:m%s:all_reward_hp", groupId,gb.owner);
			//Redis.use(DB).incrBy(all_key, leftValue);
			// Record the owner's daily anchou reward.
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			key = String.format("g{%s}:m%s:anchou_reward_log", groupId,gb.owner);
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			key = String.format("g{%s}:m%s:p%s:anchou_reward_log", groupId,gb.owner,pid);
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			// Record how much the member produced for the owner.
			key = String.format("g{%s}:m%s:anchou_reward_log_to:par%s", groupId,uid, gb.owner);
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			boolean add_uid = (uid == gb.owner);
			if(add_uid == true) {
				key = String.format("g{%s}:m%s:d_anchou_reward", groupId,gb.owner);
				CountUtil.countLogByDay30(key, leftValue, pipeline);
			}
			Jedis jedis9 = Redis.use("group1_db9").getJedis();
			try {
				String desc = "" + uid;
				long cur_hp = CountUtil.countLogByDay3(String.format("g%s:hp_reward:m%s",groupId,gb.owner), leftValue, jedis9);
				String sql = String.format("INSERT INTO group_hp_log(gid,uid,reason,hp,cur_hp,pid,roomid,time,round,info) "
						+ "VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", groupId,gb.owner,ConsumeCode.HP_PARTNER_ANCHOU_REWARD,leftValue,cur_hp,pid,roomid,time,round,desc);
				DataBase.use().executeUpdate(sql);
			}finally {
				jedis9.close();
			}
		}
	}
	/**
	 * Same payout scheme as to_reward but for the "xipai" (reshuffle) fee:
	 * percentages come from the "x" field of each prs entry, counters use the
	 * xipai_* key family, and hp-log rows use
	 * ConsumeCode.HP_PARTNER_XIPAI_REWARD. The remainder goes to the owner.
	 */
	private void xipai_to_reward(Pipeline pipeline, int uid, String prs,int rewardType, int valueType, int real_pump, int pump,int groupId,int pid,String roomid,int time,int max_player) throws Exception {
		log.info("xipai uid:"+uid+"prs:"+prs+"time:"+time);
		int useValue = 0;
		log.info("xipaiuid:"+uid+"prs:"+StringUtil.isNotEmpty(prs)+"time:"+time);
		if(StringUtil.isNotEmpty(prs)) {
			log.info("xipaiok uid:"+uid+"prs:"+prs+"time:"+time);
			int round = 100 / max_player;
			ITArray list = TArray.newFromJsonData(prs);
			// Walk from the last (outermost) entry inward.
			for(int i=list.size()-1;i>=0;--i) {
				ITObject obj1 = list.getTObject(i);
				int parentId = obj1.getInt("p");
				int rewardPercent = 0;
				if (obj1.containsKey("x"))
				{
					rewardPercent = obj1.getInt("x");
				}
				// rv = this level's slice of the cumulative "x" percentages.
				int rv = 0;
				if(i > 0) {
					ITObject obj2 = list.getTObject(i - 1);
					if (!obj2.containsKey("x"))
					{
						rv = rewardPercent - 0;
					}
					else {
						rv = rewardPercent -obj2.getInt("x");
					}
				}else {
					rv=rewardPercent;
				}
				int reward_value = rv;
				if(valueType == 1) {
					reward_value = Math.round(pump * (rv / 100f));
				}
				if (reward_value < 0)
				{
					continue;
				}
				String key = String.format("g{%s}:m%s:reward_hp", groupId,parentId);
				Redis.use(DB).incrBy(key, reward_value);
				String rewardsql = String.format("update group_member set reward_hp = %s where uid = %s AND groupId = %s", Redis.use(DB).get(key), parentId,groupId);
				Utility.evtdb(groupId, 1, rewardsql);
				//String all_key = String.format("g{%s}:m%s:all_reward_hp", groupId,parentId);
				//Redis.use(DB).incrBy(all_key, reward_value);
				useValue += reward_value;
				Jedis jedis9 = Redis.use("group1_db9").getJedis();
				try {
					String desc = "" + uid;
					long cur_hp = CountUtil.countLogByDay3(String.format("g%s:hp_reward:m%s",groupId,parentId), reward_value, jedis9);
					String sql = String.format("INSERT INTO group_hp_log(gid,uid,reason,hp,cur_hp,pid,roomid,time,round,info) "
							+ "VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", groupId,parentId,ConsumeCode.HP_PARTNER_XIPAI_REWARD,reward_value,cur_hp,pid,roomid,time,round,desc);
					DataBase.use().executeUpdate(sql);
				}finally {
					jedis9.close();
				}
				// Record the agent's daily xipai reward.
				CountUtil.countLogByDay30(key, reward_value, pipeline);
				key = String.format("g{%s}:m%s:xipai_reward_log", groupId,parentId);
				CountUtil.countLogByDay30(key, reward_value, pipeline);
				key = String.format("g{%s}:m%s:p%s:xipai_reward_log", groupId,parentId,pid);
				CountUtil.countLogByDay30(key, reward_value, pipeline);
				// Record how much xipai reward each member produced for each superior.
				key = String.format("g{%s}:m%s:xipai_reward_log_to:par%s", groupId,uid, parentId);
				CountUtil.countLogByDay30(key, reward_value, pipeline);
				boolean add_uid = (uid == parentId || i == 0);
				if(add_uid == true) {
					key = String.format("g{%s}:m%s:d_xipai_reward", groupId,parentId);
					CountUtil.countLogByDay30(key, reward_value, pipeline);
				}
				if(i>0) {
					for(int j = i - 1; j >= 0; j--) {
						ITObject obj2 = list.getTObject(j);
						int temp = obj2.getInt("p");
						if(temp != uid) {
							key = String.format("g{%s}:m%s:xipai_reward_log_to:par%s", groupId,temp, parentId);
							CountUtil.countLogByDay30(key, reward_value, pipeline);
						}
					}
				}
				// Total xipai fee for this play type.
				CountUtil.countLogByDay30(String.format("g%s:xipai_hp_cost:m%s:p%s",groupId,parentId,pid),pump,pipeline);
				CountUtil.countLogByDay30(String.format("g%s:xipai_hp_reward",groupId), reward_value, pipeline);
			}
		}
		// Remainder (real_pump - useValue) goes to the group owner.
		GroupBean gb = GroupCache.getGroup(groupId);
		if (gb != null) {
			int round = 100 / max_player;
			int leftValue = real_pump - useValue;
			String key = String.format("g{%s}:m%s:reward_hp", groupId,gb.owner);
			Redis.use(DB).incrBy(key, leftValue);
			String rewardsql = String.format("update group_member set reward_hp = %s where uid = %s AND groupId = %s", Redis.use(DB).get(key), gb.owner,groupId);
			Utility.evtdb(groupId, 1, rewardsql);
			//String all_key = String.format("g{%s}:m%s:all_reward_hp", groupId,gb.owner);
			//Redis.use(DB).incrBy(all_key, leftValue);
			// Record the owner's daily xipai reward.
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			key = String.format("g{%s}:m%s:xipai_reward_log", groupId,gb.owner);
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			key = String.format("g{%s}:m%s:p%s:xipai_reward_log", groupId,gb.owner,pid);
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			// Record how much the member produced for the owner.
			key = String.format("g{%s}:m%s:xipai_reward_log_to:par%s", groupId,uid, gb.owner);
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			boolean add_uid = (uid == gb.owner);
			if(add_uid == true) {
				key = String.format("g{%s}:m%s:d_xipai_reward", groupId,gb.owner);
				CountUtil.countLogByDay30(key, leftValue, pipeline);
			}
			Jedis jedis9 = Redis.use("group1_db9").getJedis();
			try {
				String desc = "" + uid;
				long cur_hp = CountUtil.countLogByDay3(String.format("g%s:hp_reward:m%s",groupId,gb.owner), leftValue, jedis9);
				String sql = String.format("INSERT INTO group_hp_log(gid,uid,reason,hp,cur_hp,pid,roomid,time,round,info) "
						+ "VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", groupId,gb.owner,ConsumeCode.HP_PARTNER_XIPAI_REWARD,leftValue,cur_hp,pid,roomid,time,round,desc);
				DataBase.use().executeUpdate(sql);
			}finally {
				jedis9.close();
			}
		}
	}
	/**
	 * Gives the entire {@code real_pump} to the group owner ("mengzhu"),
	 * writing the same counter/log rows as to_reward's owner branch.
	 * NOTE(review): in this file it is referenced only from commented-out
	 * code paths in processGroup — confirm whether it is still needed.
	 */
	private void to_mengzhu_reward(Pipeline pipeline, int uid, String prs,int rewardType, int valueType, int real_pump, int pump,int groupId,int pid,String roomid,int time,int max_player) throws Exception {
		GroupBean gb = GroupCache.getGroup(groupId);
		if (gb != null) {
			int round = 100 / max_player;
			int leftValue = real_pump;
			String key = String.format("g{%s}:m%s:reward_hp", groupId,gb.owner);
			Redis.use(DB).incrBy(key, leftValue);
			String rewardsql = String.format("update group_member set reward_hp = %s where uid = %s AND groupId = %s", Redis.use(DB).get(key), gb.owner,groupId);
			Utility.evtdb(groupId, 1, rewardsql);
			//String all_key = String.format("g{%s}:m%s:all_reward_hp", groupId,gb.owner);
			//Redis.use(DB).incrBy(all_key, leftValue);
			// Record the owner's daily promotion reward.
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			key = String.format("g{%s}:m%s:reward_log", groupId,gb.owner);
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			key = String.format("g{%s}:m%s:p%s:reward_log", groupId,gb.owner,pid);
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			// Record how much the member produced for the owner.
			key = String.format("g{%s}:m%s:reward_log_to:par%s", groupId,uid, gb.owner);
			CountUtil.countLogByDay30(key, leftValue, pipeline);
			boolean add_uid = (uid == gb.owner);
			if(add_uid == true) {
				key = String.format("g{%s}:m%s:d_reward", groupId,gb.owner);
				CountUtil.countLogByDay30(key, leftValue, pipeline);
			}
			Jedis jedis9 = Redis.use("group1_db9").getJedis();
			try {
				String desc = "" + uid;
				long cur_hp = CountUtil.countLogByDay3(String.format("g%s:hp_reward:m%s",groupId,gb.owner), leftValue, jedis9);
				String sql = String.format("INSERT INTO group_hp_log(gid,uid,reason,hp,cur_hp,pid,roomid,time,round,info) "
						+ "VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", groupId,gb.owner,ConsumeCode.HP_PARTNER_REWARD,leftValue,cur_hp,pid,roomid,time,round,desc);
				DataBase.use().executeUpdate(sql);
			}finally {
				jedis9.close();
			}
		}
	}
	/**
	 * Handles one hp-consume event for a group member: logs hp clearing and
	 * the pump/xipai/anchou fees via sp_update_hp, updates daily consumption
	 * counters in group1_db9, and pays out the corresponding agent rewards.
	 * All exceptions are swallowed (printed) so one bad event cannot stall
	 * the queue; the method always returns 0.
	 */
	@Override
	public int processGroup(int groupId, ITObject param) throws Exception {
		log.info("processGroup",param);
		int uid = param.getInt("uid");
		try {
			int hp = param.getInt("hp");
			int cur_hp = param.getInt("cur_hp");
			boolean is_pump = param.containsKey("pump");
			int pump = 0;
			String roomid = param.getString("room");
			int pid = param.getInt("pid");
			int time = param.getInt("time");
			int reward_value = 0;
			int xipai_reward_value = 0;
			int max_player = 0;
			int rewardValueType = 0;
			int xipai_rewardValueType = 0;
			int rewardType = 1;
			int xipai_rewardType = 1;
			int anchou_rewardType = 1;
			int anchou_reward_value = 0;
			int anchou_rewardValueType = 0;
			boolean is_xipai = param.containsKey("xipai");
			int xipai = 0;
			String prs =null;
			// Optional pump (rake) settings.
			if(is_pump) {
				pump = param.getInt("pump");
				max_player = param.getInt("max_player");
				if(param.containsKey("reward_value")) {
					reward_value = param.getInt("reward_value");
					prs = param.getString("prs");
				}
				if(param.containsKey("reward_type")) {
					rewardType = param.getInt("reward_type");
				}
				if(param.containsKey("rewardValueType")) {
					rewardValueType = param.getInt("rewardValueType");
				}
			}
			// Optional xipai (reshuffle fee) settings.
			if (is_xipai)
			{
				xipai = param.getInt("xipai");
				max_player = param.getInt("max_player");
				if(param.containsKey("xipai_reward_value")) {
					xipai_reward_value = param.getInt("xipai_reward_value");
					prs = param.getString("prs");
				}
				if(param.containsKey("xipai_reward_type")) {
					xipai_rewardType = param.getInt("xipai_reward_type");
				}
				if(param.containsKey("xipai_rewardValueType")) {
					xipai_rewardValueType = param.getInt("xipai_rewardValueType");
				}
			}
			// Optional anchou fee settings.
			int anchou = 0;
			if(param.containsKey("anchou")) {
				anchou = param.getInt("anchou");
			}
			if(param.containsKey("anchou_reward_value")) {
				max_player = param.getInt("max_player");
				anchou_reward_value = param.getInt("anchou_reward_value");
				prs = param.getString("prs");
			}
			if(param.containsKey("anchou_reward_type")) {
				anchou_rewardType = param.getInt("anchou_reward_type");
			}
			if(param.containsKey("anchou_rewardValueType")) {
				anchou_rewardValueType = param.getInt("anchou_rewardValueType");
			}
			log.info("jefe anchou_reward_value:"+anchou_reward_value);
			// Overflow above the member's hp cap is moved to their "bank".
			if (param.containsKey("hp_than_max_value"))
			{
				int more_hp = param.getInt("hp_than_max_value");
				String gm_key = GroupMemberCache.genKey(groupId, uid);
				String bank_hp = Redis.use("group1_db10").hget(gm_key, "bank_hp");
				String sql = String.format("{call sp_bank_hp(%s,%s,%s,%s)}", groupId, uid, more_hp, cur_hp);
				Utility.evtdb(groupId, 2, sql);
				String sql2 = String.format("update group_member set bank_hp = %s where uid = %s AND groupId = %s", bank_hp, uid, groupId);
				Utility.evtdb(groupId, 1, sql2);
			}
			String desc = param.getString("desc");
			desc = StringUtil.isNotEmpty(desc) ? desc : StringUtil.Empty;
			if(hp!=0) {
				hpConsume(groupId, uid,pid, hp,cur_hp + pump,ConsumeCode.HP_CLEARING, roomid,time, desc);
			}
			Jedis jedis9 = Redis.use("group1_db9").getJedis();
			try {
				String key = String.format("g%s:m%s:consume_hp", groupId,uid);
				// 691200 s = 8-day TTL on the per-day consumption counter.
				if(hp!=0) {
					CountUtil.countLogByDay(key, hp, jedis9, 691200);
				}
				// Each fee bumps time by 1 so its hp-log row gets a distinct timestamp.
				if(pump>0) {
					CountUtil.countLogByDay(key, -pump, jedis9, 691200);
					time += 1;
					hpConsume(groupId, uid,pid, -pump,cur_hp, ConsumeCode.HP_PUMP, roomid,time, desc);
					CountUtil.countLog(String.format("g%s:hp_cost",groupId), pump, jedis9);
					CountUtil.countLogByDay(String.format("g%s:hp_cost:p%s",groupId,pid),pump,jedis9);
					CountUtil.countLogByDay(String.format("g%s:hp_cost:m%s",groupId,uid),pump,jedis9);
				}
				if (xipai > 0)
				{
					CountUtil.countLogByDay(key, -xipai, jedis9, 691200);
					time += 1;
					hpConsume(groupId, uid,pid, -xipai,cur_hp, ConsumeCode.HP_XIPAI_PUMP, roomid,time, desc);
					CountUtil.countLog(String.format("g%s:hp_cost",groupId), xipai, jedis9);
					CountUtil.countLogByDay(String.format("g%s:hp_cost:p%s",groupId,pid),xipai,jedis9);
					CountUtil.countLogByDay(String.format("g%s:hp_cost:m%s",groupId,uid),xipai,jedis9);
				}
				if (anchou > 0)
				{
					CountUtil.countLogByDay(key, -anchou, jedis9, 691200);
					time += 1;
					hpConsume(groupId, uid,pid, -anchou,cur_hp, ConsumeCode.HP_ANCHOU_PUMP, roomid,time, desc);
					CountUtil.countLog(String.format("g%s:hp_cost",groupId), anchou, jedis9);
					CountUtil.countLogByDay(String.format("g%s:hp_cost:p%s",groupId,pid),anchou,jedis9);
					CountUtil.countLogByDay(String.format("g%s:hp_cost:m%s",groupId,uid),anchou,jedis9);
				}
				// Reward payouts: run when a flat reward is configured, or when
				// the value type is percentage-based (type 2) and the fee exists.
				Pipeline pipeline = jedis9.pipelined();
				if(reward_value > 0 || (rewardValueType == 2 && is_pump)) {
					time += 1;
					to_reward(pipeline,uid,prs,rewardType,rewardValueType, pump, reward_value ,groupId,pid,roomid,time,max_player);
				}
				/*
				else {
					if (is_pump && pump > 0)
					{
						log.error("pump to_mengzhu_reward:"+pump+" uid:" + uid + " groupId:" + groupId);
						to_mengzhu_reward(pipeline,uid,prs,rewardType,rewardValueType, pump, reward_value ,groupId,pid,roomid,time,max_player);
					}
				}
				*/
				if(xipai_reward_value > 0 || (xipai_rewardValueType == 2 && is_xipai)) {
					time += 1;
					xipai_to_reward(pipeline,uid,prs,xipai_rewardType,xipai_rewardValueType, xipai, xipai_reward_value ,groupId,pid,roomid,time,max_player);
				}
				if (anchou_reward_value>0){
					time += 1;
					anchou_to_reward(pipeline,uid,prs,anchou_rewardType,anchou_rewardValueType, anchou, anchou_reward_value ,groupId,pid,roomid,time,max_player);
				}
				/*
				else {
					if (is_xipai && xipai > 0)
					{
						log.error("xipai to_mengzhu_reward:"+pump+" uid:" + uid + " groupId:" + groupId);
						to_mengzhu_reward(pipeline,uid,prs,xipai_rewardType,xipai_rewardValueType, xipai, xipai_reward_value ,groupId,pid,roomid,time,max_player);
					}
				}
				*/
				pipeline.sync();
			}finally {
				jedis9.close();
			}
		}catch (Exception e) {
			e.printStackTrace();
		}
		return 0;
	}
}

View File

@ -0,0 +1,18 @@
package com.evt.mgr.handler;
import com.evt.mgr.IHandler;
import com.taurus.core.entity.ITObject;
public class HandlerLose implements IHandler{
	/**
	 * Per-player "lose" event. Intentionally a no-op: losing currently
	 * requires no server-side bookkeeping.
	 *
	 * @param uid   player id the event was raised for
	 * @param param raw event payload (unused)
	 * @return 0 (success)
	 */
	@Override
	public int process(int uid, ITObject param) throws Exception {
		return 0;
	}
	/**
	 * Group-level "lose" event. Intentionally a no-op.
	 *
	 * @param groupId group the event was raised for
	 * @param param   raw event payload (unused)
	 * @return 0 (success)
	 */
	@Override
	public int processGroup(int groupId, ITObject param) throws Exception {
		return 0;
	}
}

View File

@ -0,0 +1,67 @@
package com.evt.mgr.handler;
import com.data.cache.GroupMemberCache;
import com.evt.mgr.IHandler;
import com.taurus.core.entity.ITArray;
import com.taurus.core.entity.ITObject;
import com.taurus.core.plugin.database.DataBase;
import com.taurus.core.plugin.redis.Redis;
import com.taurus.core.util.StringUtil;
public class HandlerOver implements IHandler {
	// Number of finished games required before a pending share is marked successful.
	public static final int _SHARE_NUM = 20;
	/**
	 * Advances the share-progress counter for one player and marks the share
	 * complete once the player has finished _SHARE_NUM games. Players not in
	 * the redis pending set "shares" (db1) are ignored; completed rows are
	 * removed from the pending set so they are not counted again.
	 *
	 * @param id player uid as a string (callers pass a numeric uid)
	 */
	private static void share(String id) throws Exception {
		// Only players currently in the pending "shares" set are tracked.
		if (!Redis.use("group1_db1").sismember("shares", id)) {
			return;
		}
		// NOTE(review): SQL built by concatenation. On this call path id is a
		// numeric uid so injection is not possible, but parameterized SQL would
		// be safer if other callers are ever added.
		String sql = "SELECT succ,playtimes FROM shares WHERE uid = " + id;
		ITArray resultArray = DataBase.use().executeQueryByTArray(sql);
		if (resultArray.size() == 0) {
			return;
		}
		ITObject dbData = resultArray.getTObject(0);
		// Already completed: drop the stale pending-set entry and stop counting.
		if (dbData.getInt("succ") != 0) {
			Redis.use("group1_db1").srem("shares", id);
			return;
		}
		int playTimes = dbData.getInt("playtimes") + 1;
		dbData.putInt("playtimes", playTimes);
		if (playTimes >= _SHARE_NUM) {
			dbData.putInt("succ", 1);
			Redis.use("group1_db1").srem("shares", id);
		}
		DataBase.use().update("shares", dbData, "uid=" + id);
	}
	/**
	 * Handles a game-over event for one player: when the event is flagged as a
	 * recommended room (is_rec == 1) a room_rec_log row is written (resolving
	 * the member's parent id from the group-member cache when a group id is
	 * present), then the player's share progress is advanced.
	 *
	 * @param uid   player the event belongs to
	 * @param param event payload: is_rec, roomid, rec_key, optional gid, time
	 * @return 0 (success)
	 */
	@Override
	public int process(int uid,ITObject param) throws Exception {
		int is_rec = param.getInt("is_rec");
		if(is_rec == 1) {
			String roomid =param.getString("roomid");
			String rec_key =param.getString("rec_key");
			int gid =param.containsKey("gid")?param.getInt("gid"):0;
			int time =param.getInt("time");
			int par = 0;
			if(gid>0) {
				// Resolve the member's parent (agent) id from the group-member cache in db10.
				String gm_key = GroupMemberCache.genKey(gid, uid);
				String parentId = Redis.use("group1_db10").hget(gm_key, "parentId");
				par = StringUtil.isEmpty(parentId)?0:Integer.parseInt(parentId);
			}
			// NOTE(review): roomid and rec_key are interpolated directly into the
			// SQL string; if these event fields can carry untrusted text this is
			// injectable — confirm their origin or switch to parameterized SQL.
			String sql =String.format("insert into room_rec_log(roomid,gid,uid,rec_key,time,parentId) values('%s',%s,%s,'%s',%s,%s)",
					roomid,gid,uid,rec_key,time,par);
			DataBase.use().executeUpdate(sql);
		}
		share(uid+"");
		return 0;
	}
	@Override
	public int processGroup(int groupId, ITObject param) throws Exception {
		// Game-over events carry no group-level work.
		return 0;
	}
}

View File

@ -0,0 +1,82 @@
package com.evt.mgr.handler;
import java.util.ArrayList;
import com.data.cache.AccountCache;
import com.data.util.ConsumeCode;
import com.data.util.Utility;
import com.evt.mgr.EventController;
import com.evt.mgr.IHandler;
import com.taurus.core.entity.ITObject;
import com.taurus.core.plugin.redis.Redis;
import redis.clients.jedis.Jedis;
public class HandlerPay implements IHandler{
	// Consume/refund reason code recorded with every diamond movement.
	int reason;
	// true: the balance was already charged upstream and param "diamo" holds the
	// resulting balance; false: this handler performs the charge itself via redis.
	boolean isPay;
	public HandlerPay(int reason,boolean isPay) {
		this.reason = reason;
		this.isPay = isPay;
	}
	/**
	 * Applies (or records) one diamond payment for a player. The sign of the
	 * amount is normalized from the reason code: room joins always deduct,
	 * refunds/grants always credit. When isPay is false the charge is executed
	 * against the player's redis session before being logged.
	 *
	 * @param uid   player to charge/credit
	 * @param param event payload: pay, game, optional group/pid, diamo (isPay mode)
	 * @return 0 on success, 1 when the charge could not be applied
	 */
	@Override
	public int process(int uid, ITObject param) throws Exception {
		int pay = param.getInt("pay");
		if(pay==0) {
			return 0;
		}
		int result = 1;
		int gameId =param.getInt("game");
		int groupId = 0;
		int pid = 0;
		if(param.containsKey("group")) {
			groupId = param.getInt("group");
		}
		if(param.containsKey("pid")) {
			pid = param.getInt("pid");
		}
		// Normalize sign by reason: joining a room is always a deduction (positive),
		// refunds and grants are always credits (negative).
		if(reason == ConsumeCode.DIAMO_JOIN_ROOM) {
			pay = Math.abs(pay);
		}
		if(reason == ConsumeCode.DIAMO_REFUND) {
			pay = -Math.abs(pay);
		}
		if(reason == ConsumeCode.DIAMO_ADD) {
			pay = -pay;
		}
		if(isPay) {
			// Balance already changed upstream; just record the movement.
			int cur_diamo = param.getInt("diamo");
			EventController.payDiamo(uid, pay, cur_diamo, reason, gameId, groupId, pid);
			result = 0;
		}else {
			String session = AccountCache.genKey(uid);
			Jedis jedis0 = Redis.use("group1_db0").getJedis();
			try {
				// result_list: [0] = status (0 == charged), [1] = balance after charge.
				ArrayList<Long> result_list = Utility.payDiamo(jedis0, session,pay);
				if(result_list!=null) {
					result = result_list.get(0).intValue();
					if(result ==0) {
						long cur_diamo = result_list.get(1);
						EventController.payDiamo(uid, pay, (int)cur_diamo, reason, gameId, groupId, pid);
					}
					// NOTE(review): this unconditionally reports success even when the
					// charge above returned non-zero — confirm whether a failed charge
					// should instead propagate result_list.get(0) to the caller.
					result = 0;
				}
			}finally {
				jedis0.close();
			}
		}
		return result;
	}
	@Override
	public int processGroup(int groupId, ITObject param) throws Exception {
		// Pay events are per-player only; nothing to do at group level.
		return 0;
	}
}

View File

@ -0,0 +1,18 @@
package com.evt.mgr.handler;
import com.evt.mgr.IHandler;
import com.taurus.core.entity.ITObject;
public class HandlerWin implements IHandler{
	/**
	 * Per-player "win" event. Intentionally a no-op: winning currently
	 * requires no server-side bookkeeping.
	 *
	 * @param uid   player id the event was raised for
	 * @param param raw event payload (unused)
	 * @return 0 (success)
	 */
	@Override
	public int process(int uid, ITObject param) throws Exception {
		return 0;
	}
	/**
	 * Group-level "win" event. Intentionally a no-op.
	 *
	 * @param groupId group the event was raised for
	 * @param param   raw event payload (unused)
	 * @return 0 (success)
	 */
	@Override
	public int processGroup(int groupId, ITObject param) throws Exception {
		return 0;
	}
}

View File

@ -0,0 +1,94 @@
package com.evt.mgr.job;
import java.util.List;
import java.util.Set;
import com.taurus.core.plugin.database.DataBase;
import com.taurus.core.util.DateUtils;
import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobKey;
import com.data.util.EventType;
import com.data.util.Utility;
import com.evt.mgr.GroupPublisherService;
import com.taurus.core.entity.ITArray;
import com.taurus.core.entity.TArray;
import com.taurus.core.plugin.redis.Redis;
import com.taurus.core.plugin.redis.RedisLock;
import com.taurus.core.util.Logger;
import com.taurus.core.util.StringUtil;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
/**
 * Quartz job that prunes aged rows from the game's log tables. The job key
 * name selects which table is cleaned, so one class serves three schedules.
 */
public class CleanGroupLogJob implements Job {
	private Logger logger = Logger.getLogger(CleanGroupLogJob.class);

	public CleanGroupLogJob() {
	}

	/**
	 * Executes one bulk DELETE and logs its duration. Any SQL failure is
	 * logged and swallowed so the scheduler keeps running.
	 *
	 * @param tag       label reproduced in the timing log line
	 * @param deleteSql the DELETE statement to execute
	 */
	private void runDelete(String tag, String deleteSql) {
		try {
			long startTime = System.currentTimeMillis();
			DataBase.use().executeUpdate(deleteSql);
			logger.info(tag + " use time:" + (System.currentTimeMillis() - startTime) + " " + deleteSql);
		}
		catch (Exception e)
		{
			logger.error(e);
		}
	}

	/** Deletes group_member_log rows older than 5 days (no LIMIT: unbounded single pass). */
	private void cleanGroupMemberLog() {
		int time = DateUtils.getBeginDay() - 5 * 24 * 3600;
		runDelete("cleanGroupMemberLog", String.format("delete from group_member_log where time < %s", time));
	}

	/** Deletes group_hp_log rows older than 20 days, capped at 1M rows per run to bound lock time. */
	private void cleanGroupHpLog() {
		int time = DateUtils.getBeginDay() - 20 * 24 * 3600;
		runDelete("CleanGroupHpLog", String.format("delete from group_hp_log where time < %s limit 1000000", time));
	}

	/** Deletes room_rec_log rows older than 5 days, capped at 1M rows per run to bound lock time. */
	private void cleanRecRoomLog() {
		int time = DateUtils.getBeginDay() - 5 * 24 * 3600;
		runDelete("CleanRecRoomLog", String.format("delete from room_rec_log where time < %s limit 1000000", time));
	}

	/**
	 * Dispatches on the triggering job key's name; unknown names do nothing.
	 */
	@Override
	public void execute(JobExecutionContext context) {
		JobKey jobKey = context.getJobDetail().getKey();
		if(jobKey.getName().equals("clean_group_member_log")) {
			logger.info("执行成功" + jobKey.getName());
			cleanGroupMemberLog();
		}
		if(jobKey.getName().equals("clean_group_hp_log")) {
			logger.info("执行成功" + jobKey.getName());
			cleanGroupHpLog();
		}
		if(jobKey.getName().equals("clean_rec_room_log")) {
			logger.info("执行成功" + jobKey.getName());
			cleanRecRoomLog();
		}
	}
}

View File

@ -0,0 +1,322 @@
package com.evt.mgr.job;
import java.util.List;
import java.util.Set;
import com.taurus.core.util.DateUtils;
import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobKey;
import com.data.util.EventType;
import com.data.util.Utility;
import com.evt.mgr.GroupPublisherService;
import com.taurus.core.entity.ITArray;
import com.taurus.core.entity.TArray;
import com.taurus.core.plugin.redis.Redis;
import com.taurus.core.plugin.redis.RedisLock;
import com.taurus.core.util.Logger;
import com.taurus.core.util.StringUtil;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
/**
 * Quartz job that reaps stale game rooms: rooms tracked in redis db0 that
 * outlived their 4-hour window, and group room indexes in db11 that point at
 * rooms which no longer exist. Dispatch is by job key name in execute().
 */
public class CleanTimeOutRoomJob implements Job{
	private Logger logger = Logger.getLogger(CleanTimeOutRoomJob.class);

	public CleanTimeOutRoomJob() {
	}

	/**
	 * Finalizes one "room:*" hash in redis db0 under its per-room lock.
	 * Rooms not yet flagged for deletion are torn down server-side once they
	 * exceed 4h (+10min grace, because the game server also deletes); rooms
	 * already flagged are marked closed (status=3) and left to expire.
	 *
	 * @param tag_key full redis key of the room hash
	 * @param jedis0  connection to db0, owned by the caller
	 */
	private void deleteRoomFromRedis(String tag_key,Jedis jedis0) {
		RedisLock room_lock = new RedisLock(tag_key, jedis0);
		try {
			//                                             0     1        2      3       4      5               6        7             8    9
			List<String> paramList = jedis0.hmget(tag_key, "AA", "payer", "pay", "group","game","delete_status","status","create_time","id","players");
			// No game id: malformed/partial room record — just let it expire shortly.
			if(StringUtil.isEmpty(paramList.get(4))) {
				Redis.use().expire(tag_key, 20);
				return;
			}
			String delete_status = paramList.get(5);
			if(StringUtil.isEmpty(delete_status)) {
				String status = paramList.get(6);
				int _status = Integer.parseInt(status);
				if(_status == 2 || _status == 3) {
					logger.info(tag_key + "房间的状态不对,此时房间的状态["+status + "]");
					Redis.use().expire(tag_key, 20);
					return;
				}
				// Older than 4h + 10min grace: ask the server side to delete it first.
				String create_time = paramList.get(7);
				long now = System.currentTimeMillis() / 1000;
				if((now - Long.parseLong(create_time)) > 14400 + 600) {
					deleteRoomFromServer(tag_key,jedis0,false);
				}
				return;
			}
			boolean pay_AA = Integer.parseInt(paramList.get(0)) == 1;
			int payer = Integer.parseInt(paramList.get(1));
			int pay = Integer.parseInt(paramList.get(2));
			int gameId = Integer.parseInt(paramList.get(4));
			String group = paramList.get(3);
			int _gid = 0;
			if(StringUtil.isNotEmpty(group)) {
				_gid = Integer.parseInt(group);
			}
			int _delete_status = Integer.parseInt(delete_status);
			if(_delete_status == 0) {
				// Room never started: diamond refunds would happen here (currently disabled).
				if(pay > 0) {
					if (pay_AA) {
						String players_json = paramList.get(9);
						if(StringUtil.isEmpty(players_json) == false) {
							ITArray players = TArray.newFromJsonData(players_json);
							for (int i = 0; i < players.size(); i++) {
								//GroupPublisherService.refundDiamo(players.getInt(i), pay, _gid, gameId);
							}
						}
					}else{
						if(_gid == 0) {
							//GroupPublisherService.refundDiamo(payer, pay, _gid, gameId);
						}
					}
				}
			}
			// Mark closed, bump the cache version so readers refetch, then expire the key.
			Redis.use().hset(tag_key, "status", 3 + "");
			Redis.use().hincrBy(tag_key, "cache_ver", 1);
			Redis.use().expire(tag_key, 20);
		}
		catch(Exception e) {
			logger.info(tag_key + "删除房间发生异常["+e.getMessage() + "]");
		}
		finally {
			room_lock.unlock(false);
		}
	}

	/**
	 * Server-side teardown of a room: if the room is older than 4h (+10min
	 * grace) and not already closing, notifies the group service, performs any
	 * (currently disabled) diamond rollback, and flags the redis hash with
	 * status=2 / delete_status so deleteRoomFromRedis can finish the cleanup.
	 *
	 * @param roomid full redis key of the room hash
	 * @param jedis0 connection to db0, owned by the caller
	 * @param lock   whether to take the per-room lock (false when the caller holds it)
	 * @return false only when the room key no longer exists in redis
	 */
	private boolean deleteRoomFromServer(String roomid,Jedis jedis0,boolean lock) {
		String tag_key = roomid;
		RedisLock room_lock = null;
		if(lock) {
			room_lock = new RedisLock(tag_key, jedis0);
		}
		try {
			if( jedis0.exists(tag_key) == false) {
				return false;
			}
			//                                             0     1        2      3       4      5        6             7
			List<String> paramList = jedis0.hmget(tag_key, "AA", "payer", "pay", "group","game","status","create_time","id");
			String status = paramList.get(5);
			int _status = Integer.parseInt(status);
			if(_status == 2 || _status == 3) {
				logger.info("删除房间" + roomid + "失败,原因状态不对,此时的状态是[" + _status +"]");
				return true;
			}
			String group = paramList.get(3);
			int _gid = 0;
			if(StringUtil.isNotEmpty(group)) {
				_gid = Integer.parseInt(group);
			}
			// Rooms younger than 4h + 10min grace are left alone (the game
			// server may still be using or deleting them).
			String create_time = paramList.get(6);
			long now = System.currentTimeMillis() / 1000;
			if((now - Long.parseLong(create_time)) < 14400 + 600) {
				logger.info("删除房间" + roomid + "失败房间距离创建没有超过4个小时");
				return true;
			}
			if(_gid != 0) {
				// _status == 0: game never started. Non-AA rooms would refund the
				// creator here (disabled); AA refunds are left to the game server
				// after status=2 is set. _status == 1: game started, no refund —
				// the next pass simply removes the redis record.
				if(_status==0) {
					int gameId = Integer.parseInt(paramList.get(4));
					boolean pay_AA = Integer.parseInt(paramList.get(0)) == 1;
					if (!pay_AA) {
						int payer = Integer.parseInt(paramList.get(1));
						int pay = Integer.parseInt(paramList.get(2));
						//Utility.payDiamo(EventType.REDIS_EVENT_BACK_PAY, payer, gameId, pay, _gid,0);
					}
				}
				String strRoomID = "";
				if(StringUtil.isNotEmpty(paramList.get(7))) {
					strRoomID =paramList.get(7);
				}
				GroupPublisherService.delRoomEvt(_gid, strRoomID);
			}
			jedis0.hset(tag_key, "status", "2");
			jedis0.hset(tag_key, "delete_status", "" + status);
			jedis0.hincrBy(tag_key, "cache_ver", 1);
			logger.info("删除房间" + roomid + "成功");
		}
		catch(Exception e){
			// Fixed: previously swallowed silently, hiding teardown failures.
			logger.error(e);
		}
		finally {
			if(room_lock != null) {
				room_lock.unlock(false);
			}
		}
		return true;
	}

	/**
	 * Scans every "room:*" key in db0 and runs deleteRoomFromRedis on each.
	 */
	private void cleanInvalidRoom() {
		try {
			Jedis jedis0 = Redis.use("group1_db0").getJedis();
			try {
				String cursor = ScanParams.SCAN_POINTER_START;
				String key = "room:*";
				ScanParams scanParams = new ScanParams();
				scanParams.match(key);
				scanParams.count(1000);
				// SCAN until the cursor wraps back to "0".
				while (true){
					ScanResult<String> scanResult = jedis0.scan(cursor, scanParams);
					cursor = scanResult.getStringCursor();
					List<String> list = scanResult.getResult();
					for(int m = 0; m < list.size(); m++){
						String mapentry = list.get(m);
						logger.info("正在检查房间 " + mapentry);
						deleteRoomFromRedis(mapentry,jedis0);
					}
					if ("0".equals(cursor)){
						break;
					}
				}
			}
			finally {
				jedis0.close();
			}
		}
		catch(Exception e) {
			// Fixed: previously swallowed silently.
			logger.error(e);
		}
	}

	/**
	 * Scans the group room indexes ("*:rooms*" in db11) under a global
	 * "room_clean" lock, tears down any listed room that is overdue, and
	 * drops index entries whose room hash no longer exists in db0.
	 */
	private void cleanGroupRoom() {
		try {
			Jedis jedis11 = Redis.use("group1_db11").getJedis();
			try {
				Jedis jedis0 = Redis.use("group1_db0").getJedis();
				try {
					RedisLock lock = new RedisLock("room_clean", jedis11);
					try {
						String cursor = ScanParams.SCAN_POINTER_START;
						String key = "*:rooms*";
						ScanParams scanParams = new ScanParams();
						scanParams.match(key);
						scanParams.count(1000);
						// SCAN the group room indexes until the cursor wraps back to "0".
						while (true){
							ScanResult<String> scanResult = jedis11.scan(cursor, scanParams);
							cursor = scanResult.getStringCursor();
							List<String> list = scanResult.getResult();
							for(int m = 0; m < list.size(); m++){
								String mapentry = list.get(m);
								logger.info("正在查询群" +mapentry + "的无效房间");
								Set<String> rooms = jedis11.zrangeByScore(mapentry, 100000, 1000000);
								for (String roomId : rooms) {
									logger.info("正在查询群" +mapentry + "的无效房间" + roomId);
									boolean existed = deleteRoomFromServer(roomId,jedis0,true);
									if(!existed) {
										jedis11.zrem(mapentry, roomId);
										logger.info("删除群" +mapentry + "的无效房间" + roomId + "因为房间不存在");
									}
								}
							}
							if ("0".equals(cursor)){
								break;
							}
						}
					}
					finally {
						lock.unlock();
					}
				}
				finally {
					jedis0.close();
				}
			}
			finally {
				// Fixed: jedis11 was never closed, leaking one pool connection per run.
				jedis11.close();
			}
		}
		catch(Exception e) {
			// Fixed: previously swallowed silently.
			logger.error(e);
		}
	}

	/**
	 * Prunes aged rows from the log tables via the event-db log channel.
	 */
	private void cleanGroupMemberLog() {
		int time = DateUtils.getBeginDay() - 5 * 24 * 3600;
		String deleteSql = String.format("delete from group_member_log where time < %s", time);
		Utility.evtdbLog(1, 1, deleteSql);
		String deleteSql2 = String.format("delete from room_rec_log where time < %s and time > %s", time, time - 24 * 3600);
		Utility.evtdbLog(1, 1, deleteSql2);
		String deleteSql3 = String.format("delete from group_hp_log where time < %s and time > %s", time, time - 24 * 3600);
		Utility.evtdbLog(1, 1, deleteSql3);
	}

	/**
	 * Dispatches on the triggering job key's name; unknown names do nothing.
	 */
	@Override
	public void execute(JobExecutionContext context) {
		JobKey jobKey = context.getJobDetail().getKey();
		if(jobKey.getName().equals("clean_group_room") || jobKey.getName().equals("clean_group_room1")) {
			logger.info("执行成功" + jobKey.getName());
			cleanGroupRoom();
			logger.info("执行成功cleanGroupMemberLog");
			cleanGroupMemberLog();
		}
		else if(jobKey.getName().equals("clean_invalid_room")|| jobKey.getName().equals("clean_invalid_room1")) {
			logger.info("执行成功" + jobKey.getName());
			cleanInvalidRoom();
		}
	}
}

View File

@ -0,0 +1,19 @@
<!DOCTYPE web-app PUBLIC
"-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN"
"http://java.sun.com/dtd/web-app_2_3.dtd" >
<web-app>
<filter>
<filter-name>taurus-web</filter-name>
<filter-class>com.taurus.web.WebFilter</filter-class>
<init-param>
<param-name>main</param-name>
<param-value>com.evt.mgr.EventServer</param-value>
</init-param>
</filter>
<filter-mapping>
<filter-name>taurus-web</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
</web-app>

View File

@ -0,0 +1,20 @@
log4j.rootLogger = INFO,consoleAppender,fileAppender
# ConsoleAppender
log4j.appender.consoleAppender=org.apache.log4j.ConsoleAppender
log4j.appender.consoleAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.consoleAppender.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p [%t] %c{2} %3x - %m%n
# Regular FileAppender
log4j.appender.fileAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.fileAppender.File=${WORKDIR}/logs/web_main.log
log4j.appender.fileAppender.layout.ConversionPattern=%d{dd MMM yyyy | HH:mm:ss,SSS} | %-5p | %t | %c{3} | %3x | %m%n
log4j.appender.fileAppender.Encoding=UTF-8
log4j.appender.fileAppender.DatePattern='.'yyyy-MM-dd
log4j.appender.fileAppender.Append=true
# The file is rolled over every day
log4j.appender.fileAppender.DatePattern='.'yyyy-MM-dd

View File

@ -0,0 +1,98 @@
<?xml version="1.0" encoding="UTF-8"?>
<serivce-core>
<log4jPath>log4j.properties</log4jPath>
<plugin>
<id>database</id>
<class>com.taurus.core.plugin.database.DataBasePlugin</class>
<poolConfig>
<!-- 最大连接数, 默认10个 -->
<maxPool>100</maxPool>
<!-- 最小空闲连接数, 默认0个 -->
<minIdle>10</minIdle>
<!-- 配置获取连接等待超时的时间,单位是毫秒, 默认180000 -->
<maxLifetime>180000</maxLifetime>
<!--hsqldb - "select 1 from INFORMATION_SCHEMA.SYSTEM_USERS"
Oracle - "select 1 from dual"
DB2 - "select 1 from sysibm.sysdummy1"
mysql - "select 1" -->
<validationQuery>select 1</validationQuery>
<!-- 连接超时时间,默认30000-->
<connectionTimeout>10000</connectionTimeout>
<!-- 待机超时时间,单位是毫秒, 默认60000 -->
<idleTimeout>60000</idleTimeout>
<!-- jdbc 属性 -->
<props>
<useSSL>false</useSSL>
<useUnicode>true</useUnicode>
<characterEncoding>utf-8</characterEncoding>
<!-- 服务器时区 -->
<serverTimezone>UTC</serverTimezone>
<!-- 预编译缓存 -->
<cachePrepStmts>true</cachePrepStmts>
<!-- 预编译缓存大小 -->
<prepStmtCacheSize>250</prepStmtCacheSize>
<!-- 控制长度多大的sql可以被缓存 -->
<prepStmtCacheSqlLimit>2048</prepStmtCacheSqlLimit>
</props>
</poolConfig>
<databases>
<db>
<name>db1</name>
<driverName>com.mysql.cj.jdbc.Driver</driverName>
<jdbcUrl>jdbc:mysql://8.134.123.86:8060/wb_game</jdbcUrl>
<userName>root</userName>
<password>cssq@2020</password>
</db>
</databases>
</plugin>
<plugin>
<id>redis</id>
<class>com.taurus.core.plugin.redis.RedisPlugin</class>
<poolConfig>
<!-- 最大连接数, 默认8个 -->
<maxTotal>80</maxTotal>
<!-- 最大空闲连接数, 默认8个 -->
<maxIdle>20</maxIdle>
<!-- 最小空闲连接数, 默认0个 -->
<minIdle>5</minIdle>
<!-- 获取连接时的最大等待毫秒数(如果设置为阻塞时BlockWhenExhausted),如果超时就抛异常, 小于零:阻塞不确定的时间, 默认-1 -->
<maxWaitMillis>-1</maxWaitMillis>
		<!-- 在borrow一个jedis实例时是否提前进行validate操作, 默认false -->
<testOnBorrow>true</testOnBorrow>
<!-- 在return给pool时是否提前进行validate操作, 默认false -->
<testOnReturn>true</testOnReturn>
<!-- 表示有一个idle object evitor线程对idle object进行扫描如果validate失败
此object会被从pool中drop掉这一项只有在timeBetweenEvictionRunsMillis大于0时才有意义, 默认true -->
<testWhileIdle>true</testWhileIdle>
<!-- 表示idle object evitor每次扫描的最多的对象数, 默认-1 -->
<numTestsPerEvictionRun>100</numTestsPerEvictionRun>
<!-- 表示一个对象至少停留在idle状态的最短时间然后才能被idle object evitor扫描并驱逐
这一项只有在timeBetweenEvictionRunsMillis大于0时才有意义 , 默认60000-->
<minEvictableIdleTimeMillis>60000</minEvictableIdleTimeMillis>
<!-- 逐出扫描的时间间隔(毫秒) 如果为负数,则不运行逐出线程, 默认30000 -->
<timeBetweenEvictionRunsMillis>30000</timeBetweenEvictionRunsMillis>
<!-- 在minEvictableIdleTimeMillis基础上加入了至少minIdle个对象已经在pool里面了。
如果为-1evicted不会根据idle time驱逐任何对象。如果minEvictableIdleTimeMillisd大于0
则此项设置无意义且只有在timeBetweenEvictionRunsMillis大于0时才有意义默认1800000 -->
<softMinEvictableIdleTimeMillis>1800000</softMinEvictableIdleTimeMillis>
<!-- 连接耗尽时是否阻塞, false报异常,ture阻塞直到超时, 默认true -->
<blockWhenExhausted>true</blockWhenExhausted>
</poolConfig>
<infos>
<info name="group1_db0" host="127.0.0.1" password="cssq@2020" port="6379" database="0" timeout="5000"/>
<info name="group1_db1" host="127.0.0.1" password="cssq@2020" port="6379" database="1" timeout="5000"/>
<info name="group1_db5" host="127.0.0.1" password="cssq@2020" port="6379" database="5" timeout="5000"/>
<info name="group1_db8" host="127.0.0.1" password="cssq@2020" port="6379" database="8" timeout="5000"/>
<info name="group1_db9" host="127.0.0.1" password="cssq@2020" port="6379" database="9" timeout="5000"/>
<info name="group1_db10" host="127.0.0.1" password="cssq@2020" port="6379" database="10" timeout="5000"/>
<info name="group1_db11" host="127.0.0.1" password="cssq@2020" port="6379" database="11" timeout="5000"/>
</infos>
</plugin>
</serivce-core>

View File

@ -0,0 +1,12 @@
import com.taurus.web.JettyServer;
/**
 * Local development entry point: serves the webapp directly from source on
 * port 8083 at the root context ("/") via the embedded Jetty wrapper.
 */
public class Main {
	public static void main(String[] args) {
		new JettyServer("src/main/webapp",8083,"/").start();
	}
}

View File

@ -0,0 +1,20 @@
log4j.rootLogger = INFO,consoleAppender,fileAppender
# ConsoleAppender
log4j.appender.consoleAppender=org.apache.log4j.ConsoleAppender
log4j.appender.consoleAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.consoleAppender.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p [%t] %c{2} %3x - %m%n
# Regular FileAppender
log4j.appender.fileAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.fileAppender.File=${WORKDIR}/logs/web_main.log
log4j.appender.fileAppender.layout.ConversionPattern=%d{dd MMM yyyy | HH:mm:ss,SSS} | %-5p | %t | %c{3} | %3x | %m%n
log4j.appender.fileAppender.Encoding=UTF-8
log4j.appender.fileAppender.DatePattern='.'yyyy-MM-dd
log4j.appender.fileAppender.Append=true
# The file is rolled over every day
log4j.appender.fileAppender.DatePattern='.'yyyy-MM-dd

View File

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<mgr-config>
<host>124.220.176.78:4013</host>
<mgrId>1000</mgrId>
<loggerDebug>true</loggerDebug>
</mgr-config>

View File

@ -0,0 +1,98 @@
<?xml version="1.0" encoding="UTF-8"?>
<serivce-core>
<log4jPath>log4j.properties</log4jPath>
<plugin>
<id>database</id>
<class>com.taurus.core.plugin.database.DataBasePlugin</class>
<poolConfig>
<!-- 最大连接数, 默认10个 -->
<maxPool>100</maxPool>
<!-- 最小空闲连接数, 默认0个 -->
<minIdle>10</minIdle>
<!-- 配置获取连接等待超时的时间,单位是毫秒, 默认180000 -->
<maxLifetime>180000</maxLifetime>
<!--hsqldb - "select 1 from INFORMATION_SCHEMA.SYSTEM_USERS"
Oracle - "select 1 from dual"
DB2 - "select 1 from sysibm.sysdummy1"
mysql - "select 1" -->
<validationQuery>select 1</validationQuery>
<!-- 连接超时时间,默认30000-->
<connectionTimeout>10000</connectionTimeout>
<!-- 待机超时时间,单位是毫秒, 默认60000 -->
<idleTimeout>60000</idleTimeout>
<!-- jdbc 属性 -->
<props>
<useSSL>false</useSSL>
<useUnicode>true</useUnicode>
<characterEncoding>utf-8</characterEncoding>
<!-- 服务器时区 -->
<serverTimezone>UTC</serverTimezone>
<!-- 预编译缓存 -->
<cachePrepStmts>true</cachePrepStmts>
<!-- 预编译缓存大小 -->
<prepStmtCacheSize>250</prepStmtCacheSize>
<!-- 控制长度多大的sql可以被缓存 -->
<prepStmtCacheSqlLimit>2048</prepStmtCacheSqlLimit>
</props>
</poolConfig>
<databases>
<db>
<name>db1</name>
<driverName>com.mysql.cj.jdbc.Driver</driverName>
<jdbcUrl>jdbc:mysql://192.168.0.11:6060/wb_game</jdbcUrl>
<userName>proto_ff</userName>
<password>37du_game</password>
</db>
</databases>
</plugin>
<plugin>
<id>redis</id>
<class>com.taurus.core.plugin.redis.RedisPlugin</class>
<poolConfig>
<!-- 最大连接数, 默认8个 -->
<maxTotal>80</maxTotal>
<!-- 最大空闲连接数, 默认8个 -->
<maxIdle>20</maxIdle>
<!-- 最小空闲连接数, 默认0个 -->
<minIdle>5</minIdle>
<!-- 获取连接时的最大等待毫秒数(如果设置为阻塞时BlockWhenExhausted),如果超时就抛异常, 小于零:阻塞不确定的时间, 默认-1 -->
<maxWaitMillis>-1</maxWaitMillis>
		<!-- 在borrow一个jedis实例时是否提前进行validate操作, 默认false -->
<testOnBorrow>true</testOnBorrow>
<!-- 在return给pool时是否提前进行validate操作, 默认false -->
<testOnReturn>true</testOnReturn>
<!-- 表示有一个idle object evitor线程对idle object进行扫描如果validate失败
此object会被从pool中drop掉这一项只有在timeBetweenEvictionRunsMillis大于0时才有意义, 默认true -->
<testWhileIdle>true</testWhileIdle>
<!-- 表示idle object evitor每次扫描的最多的对象数, 默认-1 -->
<numTestsPerEvictionRun>100</numTestsPerEvictionRun>
<!-- 表示一个对象至少停留在idle状态的最短时间然后才能被idle object evitor扫描并驱逐
这一项只有在timeBetweenEvictionRunsMillis大于0时才有意义 , 默认60000-->
<minEvictableIdleTimeMillis>60000</minEvictableIdleTimeMillis>
<!-- 逐出扫描的时间间隔(毫秒) 如果为负数,则不运行逐出线程, 默认30000 -->
<timeBetweenEvictionRunsMillis>30000</timeBetweenEvictionRunsMillis>
<!-- 在minEvictableIdleTimeMillis基础上加入了至少minIdle个对象已经在pool里面了。
如果为-1evicted不会根据idle time驱逐任何对象。如果minEvictableIdleTimeMillisd大于0
则此项设置无意义且只有在timeBetweenEvictionRunsMillis大于0时才有意义默认1800000 -->
<softMinEvictableIdleTimeMillis>1800000</softMinEvictableIdleTimeMillis>
<!-- 连接耗尽时是否阻塞, false报异常,ture阻塞直到超时, 默认true -->
<blockWhenExhausted>true</blockWhenExhausted>
</poolConfig>
<infos>
<info name="group1_db0" host="127.0.0.1" password="123456" port="6379" database="0" timeout="5000"/>
<info name="group1_db1" host="127.0.0.1" password="123456" port="6379" database="1" timeout="5000"/>
<info name="group1_db5" host="127.0.0.1" password="123456" port="6379" database="5" timeout="5000"/>
<info name="group1_db8" host="127.0.0.1" password="123456" port="6379" database="8" timeout="5000"/>
<info name="group1_db9" host="127.0.0.1" password="123456" port="6379" database="9" timeout="5000"/>
<info name="group1_db10" host="127.0.0.1" password="123456" port="6379" database="10" timeout="5000"/>
<info name="group1_db11" host="127.0.0.1" password="123456" port="6379" database="11" timeout="5000"/>
</infos>
</plugin>
</serivce-core>

View File

@ -0,0 +1,75 @@
<server>
<!-- 计时器线程池大小-->
<timerThreadPoolSize>1</timerThreadPoolSize>
<!-- 协议包压缩门槛 (单位字节),当协议包大于设定的值则会进行压缩 -->
<protocolCompression>128</protocolCompression>
<!-- 读缓冲区类型 nio Direct Buffer 或者 Heap Buffer-->
<readBufferType>Heap</readBufferType>
<!-- 写入冲区类型 nio Direct Buffer 或者 Heap Buffer-->
<writeBufferType>Heap</writeBufferType>
<!-- 最大的数据包大小 -->
<maxPacketSize>524288</maxPacketSize>
<!-- 最大读取缓存大小 -->
<maxReadBufferSize>1024</maxReadBufferSize>
<!-- 最大写入缓存大小 -->
<maxWriteBufferSize>32768</maxWriteBufferSize>
<!-- 会话队列的大小-->
<sessionPacketQueueSize>160</sessionPacketQueueSize>
<!-- Thread Pool Size of the 3 main stages of the Bitswarm Engine -->
<socketAcceptorThreadPoolSize>1</socketAcceptorThreadPoolSize>
<socketReaderThreadPoolSize>3</socketReaderThreadPoolSize>
<socketWriterThreadPoolSize>3</socketWriterThreadPoolSize>
<!-- Enable disable Nagle algorithm on sockets, true == disable -->
<tcpNoDelay>true</tcpNoDelay>
<!-- 会话超时时间(单位秒)-->
<sessionTimeout>15</sessionTimeout>
<!-- Bind socket addresses -->
<socketAddresses>
<socket address="0.0.0.0" port="11050" type="TCP" />
</socketAddresses>
<!-- Ip addresses filter-->
<ipFilter>
<addressBlackList>
<string>1.2.3.4</string>
</addressBlackList>
<addressWhiteList>
<string>127.0.0.1</string>
</addressWhiteList>
<maxConnectionsPerAddress>10000</maxConnectionsPerAddress>
</ipFilter>
<webSocket>
<isActive>false</isActive>
<address>0.0.0.0</address>
<port>8080</port>
</webSocket>
<!-- Main extension class -->
<extensionConfig>
<name>extension - group_mgr </name>
<className>com.mgr.group.MainServer</className>
</extensionConfig>
<!-- The system thread pool config -->
<systemThreadPoolConfig>
<name>Sys</name>
<corePoolSize>2</corePoolSize>
<maxPoolSize>8</maxPoolSize>
<keepAliveTime>60000</keepAliveTime>
<maxQueueSize>20000</maxQueueSize>
</systemThreadPoolConfig>
<!-- The extension thread pool config -->
<extensionThreadPoolConfig>
<name>Ext</name>
<corePoolSize>2</corePoolSize>
<maxPoolSize>8</maxPoolSize>
<keepAliveTime>60000</keepAliveTime>
<maxQueueSize>20000</maxQueueSize>
</extensionThreadPoolConfig>
</server>

View File

@ -0,0 +1,20 @@
log4j.rootLogger = INFO,consoleAppender,fileAppender
# ConsoleAppender
log4j.appender.consoleAppender=org.apache.log4j.ConsoleAppender
log4j.appender.consoleAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.consoleAppender.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p [%t] %c{2} %3x - %m%n
# Regular FileAppender
log4j.appender.fileAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.fileAppender.File=${WORKDIR}/logs/web_main.log
log4j.appender.fileAppender.layout.ConversionPattern=%d{dd MMM yyyy | HH:mm:ss,SSS} | %-5p | %t | %c{3} | %3x | %m%n
log4j.appender.fileAppender.Encoding=UTF-8
log4j.appender.fileAppender.DatePattern='.'yyyy-MM-dd
log4j.appender.fileAppender.Append=true
# The file is rolled over every day
log4j.appender.fileAppender.DatePattern='.'yyyy-MM-dd

View File

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<mgr-config>
<host>124.220.176.78:4013</host>
<mgrId>1000</mgrId>
<loggerDebug>true</loggerDebug>
</mgr-config>

View File

@ -0,0 +1,99 @@
<?xml version="1.0" encoding="UTF-8"?>
<serivce-core>
<log4jPath>log4j.properties</log4jPath>
<plugin>
<id>database</id>
<class>com.taurus.core.plugin.database.DataBasePlugin</class>
<poolConfig>
<!-- 最大连接数, 默认10个 -->
<maxPool>160</maxPool>
<!-- 最小空闲连接数, 默认0个 -->
<minIdle>5</minIdle>
<!-- 配置获取连接等待超时的时间,单位是毫秒, 默认180000 -->
<maxLifetime>180000</maxLifetime>
<!--hsqldb - "select 1 from INFORMATION_SCHEMA.SYSTEM_USERS"
Oracle - "select 1 from dual"
DB2 - "select 1 from sysibm.sysdummy1"
mysql - "select 1" -->
<validationQuery>select 1</validationQuery>
<!-- 连接超时时间,默认30000-->
<connectionTimeout>10000</connectionTimeout>
<!-- 待机超时时间,单位是毫秒, 默认60000 -->
<idleTimeout>60000</idleTimeout>
<!-- jdbc 属性 -->
<props>
<useSSL>false</useSSL>
<useUnicode>true</useUnicode>
<characterEncoding>utf-8</characterEncoding>
<!-- 服务器时区 -->
<serverTimezone>UTC</serverTimezone>
<!-- 预编译缓存 -->
<cachePrepStmts>true</cachePrepStmts>
<!-- 预编译缓存大小 -->
<prepStmtCacheSize>250</prepStmtCacheSize>
<!-- 控制长度多大的sql可以被缓存 -->
<prepStmtCacheSqlLimit>2048</prepStmtCacheSqlLimit>
</props>
</poolConfig>
<databases>
<db>
<name>db1</name>
<driverName>com.mysql.cj.jdbc.Driver</driverName>
<jdbcUrl>jdbc:mysql://192.168.0.11:6060/wb_game</jdbcUrl>
<userName>proto_ff</userName>
<password>37du_game</password>
</db>
</databases>
</plugin>
<plugin>
<id>redis</id>
<class>com.taurus.core.plugin.redis.RedisPlugin</class>
<poolConfig>
<!-- 最大连接数, 默认8个 -->
<maxTotal>160</maxTotal>
<!-- 最大空闲连接数, 默认8个 -->
<maxIdle>16</maxIdle>
<!-- 最小空闲连接数, 默认0个 -->
<minIdle>10</minIdle>
<!-- 获取连接时的最大等待毫秒数(如果设置为阻塞时BlockWhenExhausted),如果超时就抛异常, 小于零:阻塞不确定的时间, 默认-1 -->
<maxWaitMillis>-1</maxWaitMillis>
		<!-- 在borrow一个jedis实例时是否提前进行validate操作, 默认false -->
<testOnBorrow>true</testOnBorrow>
<!-- 在return给pool时是否提前进行validate操作, 默认false -->
<testOnReturn>true</testOnReturn>
<!-- 表示有一个idle object evitor线程对idle object进行扫描如果validate失败
此object会被从pool中drop掉这一项只有在timeBetweenEvictionRunsMillis大于0时才有意义, 默认true -->
<testWhileIdle>true</testWhileIdle>
<!-- 表示idle object evitor每次扫描的最多的对象数, 默认-1 -->
<numTestsPerEvictionRun>100</numTestsPerEvictionRun>
<!-- 表示一个对象至少停留在idle状态的最短时间然后才能被idle object evitor扫描并驱逐
这一项只有在timeBetweenEvictionRunsMillis大于0时才有意义 , 默认60000-->
<minEvictableIdleTimeMillis>60000</minEvictableIdleTimeMillis>
<!-- 逐出扫描的时间间隔(毫秒) 如果为负数,则不运行逐出线程, 默认30000 -->
<timeBetweenEvictionRunsMillis>30000</timeBetweenEvictionRunsMillis>
<!-- 在minEvictableIdleTimeMillis基础上加入了至少minIdle个对象已经在pool里面了。
如果为-1evicted不会根据idle time驱逐任何对象。如果minEvictableIdleTimeMillisd大于0
则此项设置无意义且只有在timeBetweenEvictionRunsMillis大于0时才有意义默认1800000 -->
<softMinEvictableIdleTimeMillis>1800000</softMinEvictableIdleTimeMillis>
<!-- 连接耗尽时是否阻塞, false报异常,ture阻塞直到超时, 默认true -->
<blockWhenExhausted>true</blockWhenExhausted>
</poolConfig>
<infos>
<info name="group1_db0" host="127.0.0.1" password="123456" port="6379" database="0" timeout="5000"/>
<info name="group1_db1" host="127.0.0.1" password="123456" port="6379" database="1" timeout="5000"/>
<info name="group1_db2" host="127.0.0.1" password="123456" port="6379" database="2" timeout="5000"/>
<info name="group1_db5" host="127.0.0.1" password="123456" port="6379" database="5" timeout="5000"/>
<info name="group1_db8" host="127.0.0.1" password="123456" port="6379" database="8" timeout="5000"/>
<info name="group1_db9" host="127.0.0.1" password="123456" port="6379" database="9" timeout="5000"/>
<info name="group1_db10" host="127.0.0.1" password="123456" port="6379" database="10" timeout="5000"/>
<info name="group1_db11" host="127.0.0.1" password="123456" port="6379" database="11" timeout="5000"/>
</infos>
</plugin>
</serivce-core>

View File

@ -0,0 +1,75 @@
<server>
<!-- 计时器线程池大小-->
<timerThreadPoolSize>1</timerThreadPoolSize>
<!-- 协议包压缩门槛 (单位字节),当协议包大于设定的值则会进行压缩 -->
<protocolCompression>128</protocolCompression>
<!-- 读缓冲区类型 nio Direct Buffer 或者 Heap Buffer-->
<readBufferType>Heap</readBufferType>
<!-- 写入冲区类型 nio Direct Buffer 或者 Heap Buffer-->
<writeBufferType>Heap</writeBufferType>
<!-- 最大的数据包大小 -->
<maxPacketSize>524288</maxPacketSize>
<!-- 最大读取缓存大小 -->
<maxReadBufferSize>1024</maxReadBufferSize>
<!-- 最大写入缓存大小 -->
<maxWriteBufferSize>32768</maxWriteBufferSize>
<!-- 会话队列的大小-->
<sessionPacketQueueSize>160</sessionPacketQueueSize>
<!-- Thread Pool Size of the 3 main stages of the Bitswarm Engine -->
<socketAcceptorThreadPoolSize>1</socketAcceptorThreadPoolSize>
<socketReaderThreadPoolSize>3</socketReaderThreadPoolSize>
<socketWriterThreadPoolSize>20</socketWriterThreadPoolSize>
<!-- Enable disable Nagle algorithm on sockets, true == disable -->
<tcpNoDelay>true</tcpNoDelay>
<!-- 会话超时时间(单位秒)-->
<sessionTimeout>15</sessionTimeout>
<!-- Bind socket addresses -->
<socketAddresses>
<socket address="0.0.0.0" port="11050" type="TCP" />
</socketAddresses>
<!-- Ip addresses filter-->
<ipFilter>
<addressBlackList>
<string>1.2.3.4</string>
</addressBlackList>
<addressWhiteList>
<string>127.0.0.1</string>
</addressWhiteList>
<maxConnectionsPerAddress>10000</maxConnectionsPerAddress>
</ipFilter>
<webSocket>
<isActive>false</isActive>
<address>0.0.0.0</address>
<port>8080</port>
</webSocket>
<!-- Main extension class -->
<extensionConfig>
<name>extension - group_mgr </name>
<className>com.mgr.group.MainServer</className>
</extensionConfig>
<!-- The system thread pool config -->
<systemThreadPoolConfig>
<name>Sys</name>
<corePoolSize>4</corePoolSize>
<maxPoolSize>16</maxPoolSize>
<keepAliveTime>60000</keepAliveTime>
<maxQueueSize>20000</maxQueueSize>
</systemThreadPoolConfig>
<!-- The extension thread pool config -->
<extensionThreadPoolConfig>
<name>Ext</name>
<corePoolSize>4</corePoolSize>
<maxPoolSize>16</maxPoolSize>
<keepAliveTime>60000</keepAliveTime>
<maxQueueSize>20000</maxQueueSize>
</extensionThreadPoolConfig>
</server>

View File

@ -0,0 +1,20 @@
log4j.rootLogger = INFO,consoleAppender,fileAppender

# ConsoleAppender
log4j.appender.consoleAppender=org.apache.log4j.ConsoleAppender
log4j.appender.consoleAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.consoleAppender.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p [%t] %c{2} %3x - %m%n

# Regular FileAppender (rolled over every day via DatePattern)
log4j.appender.fileAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.fileAppender.File=${WORKDIR}/logs/web_main.log
log4j.appender.fileAppender.layout.ConversionPattern=%d{dd MMM yyyy | HH:mm:ss,SSS} | %-5p | %t | %c{3} | %3x | %m%n
log4j.appender.fileAppender.Encoding=UTF-8
log4j.appender.fileAppender.DatePattern='.'yyyy-MM-dd
# fixed: was "log4j.appender.dailyFile.Append=true" — no appender named "dailyFile" is declared,
# so the setting was silently ignored; it belongs to fileAppender
log4j.appender.fileAppender.Append=true
# removed duplicate "DatePattern =" line: the space before '=' made log4j treat
# "fileAppender.DatePattern " (trailing space) as a distinct, unused key

View File

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<mgr-config>
<host>39.101.179.66:11050</host>
<mgrId>1000</mgrId>
<loggerDebug>true</loggerDebug>
</mgr-config>

View File

@ -0,0 +1,99 @@
<?xml version="1.0" encoding="UTF-8"?>
<serivce-core>
<log4jPath>log4j.properties</log4jPath>
<plugin>
<id>database</id>
<class>com.taurus.core.plugin.database.DataBasePlugin</class>
<poolConfig>
<!-- 最大连接数, 默认10个 -->
<maxPool>100</maxPool>
<!-- 最小空闲连接数, 默认0个 -->
<minIdle>1</minIdle>
<!-- 连接最大存活时间,单位是毫秒, 默认180000 -->
<maxLifetime>180000</maxLifetime>
<!--hsqldb - "select 1 from INFORMATION_SCHEMA.SYSTEM_USERS"
Oracle - "select 1 from dual"
DB2 - "select 1 from sysibm.sysdummy1"
mysql - "select 1" -->
<validationQuery>select 1</validationQuery>
<!-- 连接超时时间,默认30000-->
<connectionTimeout>10000</connectionTimeout>
<!-- 待机超时时间,单位是毫秒, 默认60000 -->
<idleTimeout>60000</idleTimeout>
<!-- jdbc 属性 -->
<props>
<useSSL>false</useSSL>
<useUnicode>true</useUnicode>
<characterEncoding>utf-8</characterEncoding>
<!-- 服务器时区 -->
<serverTimezone>UTC</serverTimezone>
<!-- 预编译缓存 -->
<cachePrepStmts>true</cachePrepStmts>
<!-- 预编译缓存大小 -->
<prepStmtCacheSize>250</prepStmtCacheSize>
<!-- 控制长度多大的sql可以被缓存 -->
<prepStmtCacheSqlLimit>2048</prepStmtCacheSqlLimit>
</props>
</poolConfig>
<databases>
<db>
<name>db1</name>
<driverName>com.mysql.cj.jdbc.Driver</driverName>
<jdbcUrl>jdbc:mysql://192.168.0.11:6060/wb_game</jdbcUrl>
<userName>proto_ff</userName>
<password>37du_game</password>
</db>
</databases>
</plugin>
<plugin>
<id>redis</id>
<class>com.taurus.core.plugin.redis.RedisPlugin</class>
<poolConfig>
<!-- 最大连接数, 默认8个 -->
<maxTotal>80</maxTotal>
<!-- 最大空闲连接数, 默认8个 -->
<maxIdle>8</maxIdle>
<!-- 最小空闲连接数, 默认0个 -->
<minIdle>2</minIdle>
<!-- 获取连接时的最大等待毫秒数(如果设置为阻塞时BlockWhenExhausted),如果超时就抛异常, 小于零:阻塞不确定的时间, 默认-1 -->
<maxWaitMillis>-1</maxWaitMillis>
<!-- 在borrow一个jedis实例时是否提前进行validate操作, 默认false -->
<testOnBorrow>true</testOnBorrow>
<!-- 在return给pool时是否提前进行validate操作, 默认false -->
<testOnReturn>true</testOnReturn>
<!-- 表示有一个idle object evitor线程对idle object进行扫描如果validate失败
此object会被从pool中drop掉这一项只有在timeBetweenEvictionRunsMillis大于0时才有意义, 默认true -->
<testWhileIdle>true</testWhileIdle>
<!-- 表示idle object evitor每次扫描的最多的对象数, 默认-1 -->
<numTestsPerEvictionRun>100</numTestsPerEvictionRun>
<!-- 表示一个对象至少停留在idle状态的最短时间然后才能被idle object evitor扫描并驱逐
这一项只有在timeBetweenEvictionRunsMillis大于0时才有意义 , 默认60000-->
<minEvictableIdleTimeMillis>60000</minEvictableIdleTimeMillis>
<!-- 逐出扫描的时间间隔(毫秒) 如果为负数,则不运行逐出线程, 默认30000 -->
<timeBetweenEvictionRunsMillis>30000</timeBetweenEvictionRunsMillis>
<!-- 在minEvictableIdleTimeMillis基础上加入了至少minIdle个对象已经在pool里面了。
如果为-1evicted不会根据idle time驱逐任何对象。如果minEvictableIdleTimeMillisd大于0
则此项设置无意义且只有在timeBetweenEvictionRunsMillis大于0时才有意义默认1800000 -->
<softMinEvictableIdleTimeMillis>1800000</softMinEvictableIdleTimeMillis>
<!-- 连接耗尽时是否阻塞, false报异常,true阻塞直到超时, 默认true -->
<blockWhenExhausted>true</blockWhenExhausted>
</poolConfig>
<infos>
<info name="group1_db0" host="127.0.0.1" password="123456" port="6379" database="0" timeout="5000"/>
<info name="group1_db1" host="127.0.0.1" password="123456" port="6379" database="1" timeout="5000"/>
<info name="group1_db2" host="127.0.0.1" password="123456" port="6379" database="2" timeout="5000"/>
<info name="group1_db5" host="127.0.0.1" password="123456" port="6379" database="5" timeout="5000"/>
<info name="group1_db8" host="127.0.0.1" password="123456" port="6379" database="8" timeout="5000"/>
<info name="group1_db9" host="127.0.0.1" password="123456" port="6379" database="9" timeout="5000"/>
<info name="group1_db10" host="127.0.0.1" password="123456" port="6379" database="10" timeout="5000"/>
<info name="group1_db11" host="127.0.0.1" password="123456" port="6379" database="11" timeout="5000"/>
</infos>
</plugin>
</serivce-core>

View File

@ -0,0 +1,75 @@
<server>
<!-- 计时器线程池大小-->
<timerThreadPoolSize>1</timerThreadPoolSize>
<!-- 协议包压缩门槛 (单位字节),当协议包大于设定的值则会进行压缩 -->
<protocolCompression>128</protocolCompression>
<!-- 读缓冲区类型 nio Direct Buffer 或者 Heap Buffer-->
<readBufferType>Heap</readBufferType>
<!-- 写入冲区类型 nio Direct Buffer 或者 Heap Buffer-->
<writeBufferType>Heap</writeBufferType>
<!-- 最大的数据包大小 -->
<maxPacketSize>524288</maxPacketSize>
<!-- 最大读取缓存大小 -->
<maxReadBufferSize>1024</maxReadBufferSize>
<!-- 最大写入缓存大小 -->
<maxWriteBufferSize>32768</maxWriteBufferSize>
<!-- 会话队列的大小-->
<sessionPacketQueueSize>160</sessionPacketQueueSize>
<!-- Thread Pool Size of the 3 main stages of the Bitswarm Engine -->
<socketAcceptorThreadPoolSize>1</socketAcceptorThreadPoolSize>
<socketReaderThreadPoolSize>3</socketReaderThreadPoolSize>
<socketWriterThreadPoolSize>3</socketWriterThreadPoolSize>
<!-- Enable disable Nagle algorithm on sockets, true == disable -->
<tcpNoDelay>true</tcpNoDelay>
<!-- 会话超时时间(单位秒)-->
<sessionTimeout>15</sessionTimeout>
<!-- Bind socket addresses -->
<socketAddresses>
<socket address="0.0.0.0" port="11050" type="TCP" />
</socketAddresses>
<!-- Ip addresses filter-->
<ipFilter>
<addressBlackList>
<string>1.2.3.4</string>
</addressBlackList>
<addressWhiteList>
<string>127.0.0.1</string>
</addressWhiteList>
<maxConnectionsPerAddress>10000</maxConnectionsPerAddress>
</ipFilter>
<webSocket>
<isActive>false</isActive>
<address>0.0.0.0</address>
<port>8080</port>
</webSocket>
<!-- Main extension class -->
<extensionConfig>
<name>extension - group_mgr </name>
<className>com.mgr.group.MainServer</className>
</extensionConfig>
<!-- The system thread pool config -->
<systemThreadPoolConfig>
<name>Sys</name>
<corePoolSize>2</corePoolSize>
<maxPoolSize>8</maxPoolSize>
<keepAliveTime>60000</keepAliveTime>
<maxQueueSize>20000</maxQueueSize>
</systemThreadPoolConfig>
<!-- The extension thread pool config -->
<extensionThreadPoolConfig>
<name>Ext</name>
<corePoolSize>2</corePoolSize>
<maxPoolSize>8</maxPoolSize>
<keepAliveTime>60000</keepAliveTime>
<maxQueueSize>20000</maxQueueSize>
</extensionThreadPoolConfig>
</server>

View File

@ -0,0 +1,58 @@
<?xml version="1.0" encoding="UTF-8"?>
<module org.jetbrains.idea.maven.project.MavenProjectsManager.isMavenModule="true" type="JAVA_MODULE" version="4">
<component name="FacetManager">
<facet type="web" name="Web">
<configuration>
<descriptors>
<deploymentDescriptor name="web.xml" url="file://$MODULE_DIR$/src/main/webapp/WEB-INF/web.xml" />
</descriptors>
<webroots>
<root url="file://$MODULE_DIR$/build/pro" relative="config/" />
<root url="file://$MODULE_DIR$/src/main/webapp" relative="/" />
</webroots>
<sourceRoots>
<root url="file://$MODULE_DIR$/src/main/java" />
</sourceRoots>
</configuration>
</facet>
</component>
<component name="NewModuleRootManager" LANGUAGE_LEVEL="JDK_1_8">
<output url="file://$MODULE_DIR$/target/classes" />
<output-test url="file://$MODULE_DIR$/target/test-classes" />
<content url="file://$MODULE_DIR$">
<sourceFolder url="file://$MODULE_DIR$/src/main/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test/java" isTestSource="true" />
<excludeFolder url="file://$MODULE_DIR$/target" />
</content>
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="Maven: junit:junit:3.8.1" level="project" />
<orderEntry type="module" module-name="data_cache" />
<orderEntry type="module" module-name="taurus-core" />
<orderEntry type="module" module-name="taurus-web" />
<orderEntry type="module" module-name="taurus-permanent" />
<orderEntry type="library" name="Maven: io.undertow:undertow-core:2.0.16.Final" level="project" />
<orderEntry type="library" name="Maven: org.jboss.logging:jboss-logging:3.3.2.Final" level="project" />
<orderEntry type="library" name="Maven: org.jboss.xnio:xnio-api:3.3.8.Final" level="project" />
<orderEntry type="library" scope="RUNTIME" name="Maven: org.jboss.xnio:xnio-nio:3.3.8.Final" level="project" />
<orderEntry type="library" name="Maven: redis.clients:jedis:2.9.0" level="project" />
<orderEntry type="library" name="Maven: org.apache.commons:commons-pool2:2.4.2" level="project" />
<orderEntry type="library" name="Maven: com.zaxxer:HikariCP:3.3.1" level="project" />
<orderEntry type="library" name="Maven: org.slf4j:slf4j-api:1.7.25" level="project" />
<orderEntry type="library" name="Maven: mysql:mysql-connector-java:5.1.48" level="project" />
<orderEntry type="library" name="Maven: jdom:jdom:1.0" level="project" />
<orderEntry type="library" name="Maven: log4j:log4j:1.2.17" level="project" />
<orderEntry type="library" name="Maven: org.quartz-scheduler:quartz:2.2.3" level="project" />
<orderEntry type="library" name="Maven: c3p0:c3p0:0.9.1.1" level="project" />
<orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-webapp:8.2.0.v20160908" level="project" />
<orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-xml:8.2.0.v20160908" level="project" />
<orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-util:8.2.0.v20160908" level="project" />
<orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-servlet:8.2.0.v20160908" level="project" />
<orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-security:8.2.0.v20160908" level="project" />
<orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-server:8.2.0.v20160908" level="project" />
<orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty.orbit:javax.servlet:3.0.0.v201112011016" level="project" />
<orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-continuation:8.2.0.v20160908" level="project" />
<orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-http:8.2.0.v20160908" level="project" />
<orderEntry type="library" scope="PROVIDED" name="Maven: org.eclipse.jetty:jetty-io:8.2.0.v20160908" level="project" />
</component>
</module>

View File

@ -0,0 +1,126 @@
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.mgr.group</groupId>
<artifactId>group_mgr</artifactId>
<packaging>war</packaging>
<version>1.0.0</version>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
<build.type>pro</build.type>
</properties>
<dependencies>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>3.8.1</version>
<scope>test</scope>
</dependency>
<!--依赖 data_cache -->
<dependency>
<groupId>com.data</groupId>
<artifactId>data_cache</artifactId>
<version>1.0.1</version>
</dependency>
<!--依赖 taurus-core -->
<dependency>
<groupId>com.taurus</groupId>
<artifactId>taurus-core</artifactId>
<version>1.0.1</version>
</dependency>
<!--依赖 taurus-web -->
<dependency>
<groupId>com.taurus</groupId>
<artifactId>taurus-web</artifactId>
<version>1.0.1</version>
</dependency>
<!--依赖 taurus-permanent -->
<dependency>
<groupId>com.taurus</groupId>
<artifactId>taurus-permanent</artifactId>
<version>1.0.1</version>
</dependency>
<!-- 需要用redis时导入 -->
<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
<version>2.9.0</version>
</dependency>
<!-- 需要用HikariCP时导入 ,自己在项目中添加 -->
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>3.3.1</version>
</dependency>
<!-- 需要用mysql时导入 ,自己在项目中添加 -->
<!-- https://mvnrepository.com/artifact/mysql/mysql-connector-java -->
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.48</version>
</dependency>
<!-- https://mvnrepository.com/artifact/jdom/jdom -->
<dependency>
<groupId>jdom</groupId>
<artifactId>jdom</artifactId>
<version>1.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/log4j/log4j -->
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<version>1.2.17</version>
</dependency>
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz</artifactId>
<version>2.2.3</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-webapp -->
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-webapp</artifactId>
<version>8.2.0.v20160908</version>
<scope>provided</scope>
</dependency>
</dependencies>
<build>
<finalName>ROOT</finalName>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-war-plugin</artifactId>
<configuration>
<source>1.8</source>
<target>1.8</target>
<encoding>UTF-8</encoding>
<warSourceExcludes>logs/**,config/**</warSourceExcludes>
<webResources>
<resource>
<targetPath>config/</targetPath>
<directory>${project.basedir}/build/${build.type}/</directory>
</resource>
</webResources>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,9 @@
package com.mgr.group;
/**
 * Runtime configuration for this group-mgr node, populated from
 * config/mgr-config.xml by MainServer.loadConfig().
 */
public class Config {
	// unique id of this mgr instance (from <mgrId>); used to build the "svr_mgr_<id>" redis key
	public int mgrId;
	// "host:port" address this server registers in redis (from <host>)
	public String host;
	// NOTE(review): webHost/webPort are never assigned by MainServer.loadConfig — confirm
	// whether they are set elsewhere or are dead fields
	public String webHost;
	public int webPort;
	// verbose-logging switch (from <loggerDebug>), copied to Global.loggerDebug on startup
	public boolean loggerDebug;
}

View File

@ -0,0 +1,28 @@
package com.mgr.group;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import com.mgr.group.data.Group;
import com.taurus.core.util.Logger;
/**
 * Process-wide shared state for the group-mgr server.
 * All fields are assigned during startup (MainServer.onStart / configRoute).
 */
public class Global {
	/**
	 * Verbose-logging switch, copied from Config.loggerDebug in MainServer.onStart().
	 */
	public static boolean loggerDebug = false;

	// shared logger; assigned in MainServer.onStart() before first use
	public static Logger logger;
	// session bookkeeping; created by init()
	public static SessionManager sessionMgr;
	// action controller; created by init() and re-created in MainServer.configRoute()
	public static GroupController groupCtr;
	// all live groups keyed by group id; guarded by synchronized(groupMap) for put-if-absent
	public static ConcurrentMap<Integer, Group> groupMap;

	/** Creates the shared containers/singletons. Called once from MainServer.onStart(). */
	public static void init() {
		groupMap = new ConcurrentHashMap<>();
		sessionMgr = new SessionManager();
		groupCtr = new GroupController();
	}
}

View File

@ -0,0 +1,306 @@
package com.mgr.group;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import com.data.bean.AccountBean;
import com.data.bean.GroupMemberBean;
import com.data.bean.GroupPlayBean;
import com.data.cache.AccountCache;
import com.data.cache.GroupCache;
import com.data.cache.GroupMemberCache;
import com.data.util.ErrorCode;
import com.mgr.group.data.CommandData;
import com.mgr.group.data.Group;
import com.mgr.group.data.Room;
import com.mgr.group.data.User;
import com.taurus.core.entity.ITObject;
import com.taurus.core.entity.TObject;
import com.taurus.core.plugin.redis.Redis;
import com.taurus.core.routes.ActionKey;
import com.taurus.core.routes.IController;
import com.taurus.core.util.StringUtil;
import com.taurus.core.util.Utils;
import com.taurus.permanent.TPServer;
import com.taurus.permanent.data.Session;
import com.taurus.web.WebException;
import redis.clients.jedis.Jedis;
/**
*
*
*/
/**
 * Controller for group membership and group-wide broadcasts.
 * Client-facing entry point is {@link #RouterJoinGroup}; the remaining methods are
 * invoked from {@link GroupSubscriber} on redis pub/sub commands and always run
 * inside the target group's task queue (enqueueRunnable).
 */
public class GroupController implements IController {

	/**
	 * Client request to enter a group (router key FGMGR_ENTER_GROUP).
	 * Expects params["session"] in the form "&lt;sessionId&gt;,&lt;token&gt;"; the token is
	 * validated against the "user" field stored in redis db0 under the token key.
	 * On any validation failure responds with ErrorCode._NO_SESSION.
	 *
	 * @param sender the client session
	 * @param params request payload ("session", "groupId")
	 * @param gid    request id echoed back in the response
	 */
	@ActionKey(Router.FGMGR_ENTER_GROUP)
	public void RouterJoinGroup(Session sender, ITObject params, int gid) {
		String session_id = params.getUtfString("session");
		String token = null;
		if (StringUtil.isEmpty(session_id)) {
			Global.logger.info("---------------参数session为null参数params{}", params);
			session_id = "";
		} else {
			String[] sourceStrArray = session_id.split(",");
			if (sourceStrArray.length == 2) {
				session_id = sourceStrArray[0];
				token = sourceStrArray[1];
			}
			if (StringUtil.isNotEmpty(token) && StringUtil.isNotEmpty(session_id)) {
				String token_session = Redis.use("group1_db0").hget(token, "user");
				// reject when the token is unknown or bound to a different session
				if (StringUtil.isEmpty(token_session) || !token_session.equals(session_id)) {
					TPServer.me().getController().sendResponse(gid, ErrorCode._NO_SESSION, null, sender);
					return;
				}
			} else {
				TPServer.me().getController().sendResponse(gid, ErrorCode._NO_SESSION, null, sender);
				return;
			}
		}
		sender.setHashId(session_id);
		int groupId = params.getInt("groupId");
		Global.groupCtr.joinGroup(sender, groupId, gid);
	}

	/**
	 * Attaches a session to a group, creating the Group lazily, then finishes the
	 * join asynchronously on the group's own task queue: resolves the account,
	 * creates/refreshes the User, loads member state from redis db10 and responds
	 * with the group snapshot.
	 *
	 * @param sender  the joining session (its hashId must resolve via AccountCache)
	 * @param groupId target group id
	 * @param gid     request id echoed back in the response
	 */
	public void joinGroup(Session sender, int groupId, int gid) {
		Group group = null;
		// put-if-absent under the map lock so concurrent joins share one Group instance
		synchronized (Global.groupMap) {
			group = Global.groupMap.get(groupId);
			if (group == null) {
				group = new Group(groupId);
				Global.groupMap.put(groupId, group);
			}
			group.lastTime = System.currentTimeMillis();
		}
		group.start();
		if (group.isDestroy) {
			TPServer.me().getController().sendResponse(gid, ErrorCode._FAILED, null, sender);
			return;
		}
		group.enqueueRunnable(new Runnable() {
			@Override
			public void run() {
				Group group = Global.groupMap.get(groupId);
				User user = null;
				AccountBean acc = null;
				if (sender != null && StringUtil.isNotEmpty(sender.getHashId())) {
					acc = AccountCache.getAccount(sender.getHashId());
				} else if (sender != null) {
					// fixed: the original tested isNotEmpty here, so this diagnostic
					// could never fire — this branch is only reached when hashId is empty
					Global.logger.info("---------------session.id:" + sender.getId() + " hashId null");
				} else {
					Global.logger.info("---------------session为null");
				}
				if (acc == null) {
					TPServer.me().getController().sendResponse(gid, ErrorCode._NO_SESSION, null, sender);
					return;
				}
				int uid = acc.id;
				user = group.userMap.get(uid);
				if (user == null) {
					user = new User();
					user.uid = uid;
					user.session_key = AccountCache.genKey(uid);
					user.group = group;
					group.userMap.put(uid, user);
				}
				user.setSender(sender);
				if (user.gm_key == null) {
					user.gm_key = GroupMemberCache.genKey(groupId, uid);
				}
				String gm_key = user.gm_key;
				sender.setHashId(gm_key);
				GroupMemberBean gmb = GroupCache.getMember(groupId, uid);
				if (gmb == null) {
					user.response(null, gid, ErrorCode._FAILED);
					return;
				}
				int partnerLev = gmb.partnerLev;
				int lev = gmb.lev;
				long hp = 0;
				int permission = gmb.permission;
				int ban = gmb.ban;
				int mail_tip = 0;
				int queueid = 0;
				int seeid = gmb.seeid;
				Jedis jedis10 = Redis.use("group1_db10").getJedis();
				try {
					// fetch each field once (was two hget round-trips per field) and
					// default missing fields to 0 (hp previously NPE'd when absent)
					String hpStr = jedis10.hget(gm_key, "hp");
					hp = hpStr == null ? 0L : Long.parseLong(hpStr);
					String queueStr = jedis10.hget(gm_key, "queueid");
					queueid = Integer.parseInt(queueStr == null ? "0" : queueStr);
					String seeStr = jedis10.hget(gm_key, "seeid");
					seeid = Integer.parseInt(seeStr == null ? "0" : seeStr);
					long last_time = System.currentTimeMillis() / 1000;
					jedis10.hset(gm_key, "last_time", last_time + "");
					jedis10.hset(gm_key, "on_line", 1 + "");
					String mail_tip_key = GroupCache.genMailTipKey(groupId);
					Double tem = jedis10.zscore(mail_tip_key, uid + "");
					if (tem != null && tem > 0) {
						mail_tip = 1;
					}
				} finally {
					jedis10.close();
				}
				Global.logger.info("jefeprint:" + uid + "");
				// members listed in db1 "alllook" are forced to lev 1
				String fp = Redis.use("group1_db1").hget("alllook", uid + "");
				Global.logger.info("jefeprint fp:" + fp + "");
				user.partnerLev = partnerLev;
				if (fp != null) {
					user.lev = 1;
				} else {
					user.lev = lev;
				}
				user.hp = hp;
				user.ban = ban;
				ITObject info = group.getInfo(lev, uid);
				info.putInt("lev", fp != null ? 1 : lev);
				info.putInt("partnerLev", partnerLev);
				info.putInt("permission", permission);
				info.putLong("hp", hp);
				info.putInt("mail_tip", mail_tip);
				info.putInt("queueid", queueid);
				info.putInt("seeid", seeid);
				user.response(info, gid, 0);
			}
		});
	}

	/** Creates the room in the group and broadcasts an ADD_ROOM command. */
	public void addRoom(Group group, String roomid) {
		Room room = group.addRoom(roomid);
		if (room == null) return;
		ITObject param = room.data;
		group.addRoomCommand(roomid, CommandData.ADD_ROOM, param);
	}

	/** Removes the room from the group and broadcasts a DEL_ROOM command. */
	public void delRoom(Group group, String roomid) {
		group.delRoom(roomid);
		ITObject param = TObject.newInstance();
		param.putUtfString("roomid", roomid);
		group.addRoomCommand(roomid, CommandData.DEL_ROOM, param);
	}

	/**
	 * Refreshes a room; a priorityValue of 0 means the room dropped out of the
	 * listing, so a DEL_ROOM is broadcast instead of an UPDATE_ROOM.
	 */
	public void updateRoom(Group group, String roomid) {
		Room room = group.updateRoom(roomid);
		if (room == null)
			return;
		if (room.priorityValue == 0) {
			ITObject param = TObject.newInstance();
			param.putUtfString("roomid", roomid);
			group.addRoomCommand(roomid, CommandData.DEL_ROOM, param);
		} else {
			ITObject param = room.data;
			group.addRoomCommand(roomid, CommandData.UPDATE_ROOM, param);
		}
	}

	/** Removes a play entry and notifies all clients in the group. */
	public void delPlay(Group group, int pid) {
		group.delPlay(pid);
		ITObject param = TObject.newInstance();
		param.putInt("pid", pid);
		group.broadCastToClient(Router.FGMGR_EVT_DEL_PLAY, param);
	}

	/** Adds a play entry and notifies all clients in the group. */
	public void addPlay(Group group, int pid) {
		GroupPlayBean gp = group.addPlay(pid);
		if (gp != null) {
			group.broadCastToClient(Router.FGMGR_EVT_ADD_PLAY, gp.data);
			Global.logger.info("addPlay: -------------> " + gp.data);
		}
	}

	/**
	 * Refreshes a play entry and notifies clients.
	 * NOTE(review): this calls group.addPlay(pid), same as addPlay — confirm
	 * whether Group exposes a dedicated update method that should be used here.
	 */
	public void updatePlay(Group group, int pid) {
		GroupPlayBean gp = group.addPlay(pid);
		if (gp != null) {
			group.broadCastToClient(Router.FGMGR_EVT_UPDATE_PLAY, gp.data);
			Global.logger.info("updatePlay: -------------> " + gp.data);
		}
	}

	/** Broadcasts updated group metadata (name/ban/notice/option/show_num) to all clients. */
	public void updateGroup(Group group, String name, int ban, String notice, int option, int showNum) {
		ITObject param = TObject.newInstance();
		param.putUtfString("name", name);
		param.putInt("ban", ban);
		param.putUtfString("notice", notice);
		param.putInt("option", option);
		param.putInt("show_num", showNum);
		group.broadCastToClient(Router.FGMGR_EVT_UPDATE_GROUP, param);
	}

	/** Pushes the current join count to the group's manager sessions (no-op for joins <= 0). */
	public void updateJoins(Group group, int joins) {
		if (joins <= 0) return;
		List<Session> list = group.getSessionListByMgr();
		if (list.size() == 0) return;
		ITObject param = TObject.newInstance();
		param.putInt("joins", joins);
		TPServer.me().getController().sendEvent(Router.FGMGR_EVT_UPDATE_JOINS, param, list);
	}

	/** Intentionally disabled; kept so the pub/sub dispatch table still resolves. */
	public void updateMember(Group group, int uid, int type, int value) {
		// User user = group.userMap.get(uid);
		// if(user!=null) {
		// ITObject param = TObject.newInstance();
		// param.putInt("type", type);
		// param.putInt("value", value);
		// MainServer.instance.sendEvent(Router.FGMGR_EVT_UPDATE_MEMBER, param, user.sender);
		// }
	}

	/** Sends an (empty-payload) mail-tip event to the member's session, if online. */
	public void updateMailTip(Group group, int uid) {
		User user = group.userMap.get(uid);
		if (user != null) {
			ITObject param = TObject.newInstance();
			TPServer.me().getController().sendEvent(Router.FGMGR_EVT_UPDATE_MAILTIP, param, user.sender);
		}
	}
}

View File

@ -0,0 +1,161 @@
package com.mgr.group;
import com.mgr.group.data.Group;
import com.taurus.core.entity.ITObject;
import com.taurus.core.entity.TObject;
import com.taurus.core.util.Logger;
import redis.clients.jedis.JedisPubSub;
/**
 * Redis pub/sub listener for the "mgr_group" channel. Each message is a JSON
 * object carrying at least "gid" (target group id) and "cmd" (one of the CMD_*
 * constants); the command is dispatched on the target group's own task queue,
 * so handlers run single-threaded per group. Messages for groups not present
 * in Global.groupMap are silently ignored.
 */
public class GroupSubscriber extends JedisPubSub {
	public static final String CHANNEL_NAME = "mgr_group";

	// command names carried in the "cmd" field of each published message
	private static final String CMD_UPDATE_ROOM="update_room";
	private static final String CMD_DEL_ROOM="del_room";
	private static final String CMD_ADD_ROOM="add_room";
	private static final String CMD_DEL_PLAY="del_play";
	private static final String CMD_ADD_PLAY="add_play";
	private static final String CMD_UPDATE_PLAY="update_play";
	private static final String CMD_DEL_GROUP="del_group";
	private static final String CMD_UPDATE_GROUP="update_group";
	private static final String CMD_UPDATE_JOINS="update_joins";
	private static final String CMD_UPDATE_MEMBER="update_member";
	private static final String CMD_UPDATE_MAIL_TIP = "update_mail_tip";

	private Logger log;

	public GroupSubscriber() {
		log = Logger.getLogger(this.getClass());
	}

	// each handler below extracts its arguments from the message and delegates to GroupController

	private void updateRoomEvt(Group group,ITObject param) {
		String roomid = param.getUtfString("roomid");
		Global.groupCtr.updateRoom(group,roomid);
	}

	private void addRoomEvt(Group group,ITObject param) {
		String roomid = param.getUtfString("roomid");
		Global.groupCtr.addRoom(group, roomid);
	}

	private void delRoomEvt(Group group,ITObject param) {
		String roomid = param.getUtfString("roomid");
		Global.groupCtr.delRoom(group, roomid);
	}

	private void addPlay(Group group,ITObject param) {
		int pid = param.getInt("pid");
		Global.groupCtr.addPlay(group, pid);
	}

	private void updatePlay(Group group,ITObject param) {
		int pid = param.getInt("pid");
		Global.groupCtr.updatePlay(group, pid);
	}

	private void delPlay(Group group,ITObject param) {
		int pid = param.getInt("pid");
		Global.groupCtr.delPlay(group, pid);
	}

	private void updateGroup(Group group,ITObject param) {
		String name = param.getUtfString("name");
		// "ban" arrives as a boolean in the message but is forwarded as 0/1
		int ban = param.getBoolean("ban")?1:0;
		String notice = param.getUtfString("notice");
		int option = param.getInt("option");
		int showNum = param.getInt("show_num");
		Global.groupCtr.updateGroup(group, name, ban,notice,option,showNum);
	}

	private void updateJoins(Group group,ITObject param) {
		int joins = param.getInt("joins");
		Global.groupCtr.updateJoins(group, joins);
	}

	private void updateMailTip(Group group,ITObject param) {
		int uid = param.getInt("uid");
		Global.groupCtr.updateMailTip(group, uid);
	}

	// private void updateMember(Group group,ITObject param) {
	// int uid = param.getInt("uid");
	// int type = param.getInt("type");
	// int value = param.getInt("value");
	// Global.groupCtr.updateMember(group, uid, type, value);
	// }

	/**
	 * Pub/sub callback: parses the JSON payload and enqueues the matching
	 * handler on the group's task queue. Parse/dispatch errors are logged and
	 * swallowed so one bad message cannot kill the subscriber thread.
	 */
	public void onMessage(String channel, String message) {
		if(channel.equals(CHANNEL_NAME)) {
			if(Global.loggerDebug) {
				log.info(message);
			}
			try {
				ITObject data = TObject.newFromJsonData(message);
				final int groupId = data.getInt("gid");
				Group group = Global.groupMap.get(groupId);
				if(group!=null) {
					group.enqueueRunnable(new Runnable() {
						@Override
						public void run() {
							String cmd = data.getUtfString("cmd");
							// re-fetch: the group may have been replaced/destroyed before this task ran
							Group group = Global.groupMap.get(groupId);
							switch(cmd) {
								case CMD_UPDATE_ROOM:
									updateRoomEvt(group,data);
									break;
								case CMD_DEL_ROOM:
									delRoomEvt(group,data);
									break;
								case CMD_ADD_ROOM:
									addRoomEvt(group,data);
									break;
								case CMD_DEL_GROUP:
									group.destroy();
									break;
								case CMD_UPDATE_GROUP:
									updateGroup(group,data);
									break;
								case CMD_ADD_PLAY:
									addPlay(group,data);
									break;
								case CMD_UPDATE_PLAY:
									updatePlay(group, data);
									break;
								case CMD_DEL_PLAY:
									delPlay(group,data);
									break;
								case CMD_UPDATE_JOINS:
									updateJoins(group,data);
									break;
								case CMD_UPDATE_MEMBER:
									// member updates are intentionally disabled (see GroupController.updateMember)
									break;
								case CMD_UPDATE_MAIL_TIP:
									updateMailTip(group,data);
									break;
							}
						}
					});
				}
			}catch (Exception e) {
				log.error(e);
			}
		}
	}

	public void onSubscribe(String channel, int subscribedChannels) {
		System.out.println(String.format("subscribe redis channel success, channel %s, subscribedChannels %d",
				channel, subscribedChannels));
	}

	public void onUnsubscribe(String channel, int subscribedChannels) {
		System.out.println(String.format("unsubscribe redis channel, channel %s, subscribedChannels %d",
				channel, subscribedChannels));
	}
}

View File

@ -0,0 +1,161 @@
package com.mgr.group;
import java.io.FileInputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.jdom.Document;
import org.jdom.Element;
import org.jdom.input.SAXBuilder;
import com.mgr.group.data.Group;
import com.taurus.core.events.Event;
import com.taurus.core.events.IEventListener;
import com.taurus.core.plugin.redis.Redis;
import com.taurus.core.routes.Extension;
import com.taurus.core.routes.Routes;
import com.taurus.core.util.Logger;
import com.taurus.permanent.TPServer;
import com.taurus.permanent.core.TPEvents;
import com.taurus.permanent.data.Session;
import com.taurus.web.TWebServer;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.exceptions.JedisConnectionException;
/**
*
*
*/
public class MainServer extends Extension implements IEventListener {
public static MainServer instance;
private GroupSubscriber subscriber;
public Config config;
@Override
public void onStart() {
try {
instance = this;
Global.logger = Logger.getLogger(MainServer.class);
Global.init();
try {
loadConfig();
} catch (Exception e1) {
Global.logger.error(e1);
}
final String svr_key = "svr_mgr_" + config.mgrId;
Global.loggerDebug = config.loggerDebug;
TPServer.me().getEventManager().addEventListener(TPEvents.EVENT_SESSION_DISCONNECT, this);
TPServer.me().getTimerPool().scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
try {
int size = Global.sessionMgr.size();
final Map<String, String> svr_info = new HashMap<>();
svr_info.put("host", config.host);
svr_info.put("conns", size + "");
Jedis jedis11 = Redis.use("group1_db11").getJedis();
try {
jedis11.hmset(svr_key, svr_info);
jedis11.expire(svr_key, 15);
jedis11.zadd(GroupSubscriber.CHANNEL_NAME, size, svr_key);
}finally {
jedis11.close();
}
} catch (Exception e) {
Global.logger.error(e);
}
}
}, 0, 5, TimeUnit.SECONDS);
TPServer.me().getTimerPool().scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
List<Group> list = new ArrayList<Group>(Global.groupMap.values());
for (Group group : list) {
if ((System.currentTimeMillis() - group.lastTime) >= 360000000) {
group.enqueueRunnable(new Runnable() {
@Override
public void run() {
group.destroy();
}
});
}
}
}
}, 10, 10, TimeUnit.SECONDS);
subscriber = new GroupSubscriber();
Thread subscribeThread = new Thread(new Runnable() {
@Override
public void run() {
try {
Redis.use("group1_db11").subscribe(subscriber, GroupSubscriber.CHANNEL_NAME);
} catch (JedisConnectionException e) {
Redis.use("group1_db11").subscribe(subscriber, GroupSubscriber.CHANNEL_NAME);
}
}
});
subscribeThread.start();
Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
@Override
public void run() {
Redis.use("group1_db11").unsubscribe(subscriber);
Redis.use("group1_db11").zrem(GroupSubscriber.CHANNEL_NAME, svr_key);
}
}));
}catch (Exception e){
Global.logger.error(e);
}
}
/**
 * Loads manager settings from config/mgr-config.xml under the web context root
 * and publishes them via {@link #config}.
 *
 * @throws Exception if the file is missing, unreadable, or malformed
 */
protected void loadConfig() throws Exception {
	// try-with-resources: the original leaked the stream when parsing threw.
	try (FileInputStream is = new FileInputStream(
			TWebServer.me().getContextRealPath() + "/config/mgr-config.xml")) {
		SAXBuilder builder = new SAXBuilder();
		Document document = builder.build(is);
		Element root = document.getRootElement();
		Config config = new Config();
		config.host = root.getChildTextTrim("host");
		config.mgrId = Integer.parseInt(root.getChildTextTrim("mgrId"));
		config.loggerDebug = Boolean.parseBoolean(root.getChildTextTrim("loggerDebug"));
		// Assign last so a parse failure leaves the previous config intact.
		this.config = config;
	}
}
/** Extension shutdown hook; no manager-specific cleanup beyond the superclass. */
@Override
public void onStop() {
	super.onStop();
}
/**
 * Server event callback. Only session-disconnect events are acted on: the
 * closed session is handed to the session manager for cleanup.
 */
@Override
public void handleEvent(Event evt) {
	String name = evt.getName();
	if (name.equals(TPEvents.EVENT_SESSION_DISCONNECT)) {
		Session closed = (Session) evt.getParameter(TPEvents.PARAM_SESSION);
		Global.sessionMgr.disconnect(closed);
	}
}
/**
 * Registers request routes: a single {@link GroupController} mounted at the
 * root path and also published globally for other components to reach.
 */
@Override
public void configRoute(Routes me) {
	GroupController controller = new GroupController();
	Global.groupCtr = controller;
	me.add("", controller);
}
}

View File

@ -0,0 +1,87 @@
package com.mgr.group;
import com.taurus.core.entity.ITObject;
import com.taurus.permanent.data.Session;
/**
*
*
*/
public abstract class Router {
	/** Request: enter a group. */
	public static final String FGMGR_ENTER_GROUP = "11001";
	/** Request: fetch the online member list. */
	public static final String FGMGR_GET_ONLINE_LIST = "11002";
	/** Request: send an invitation. */
	public static final String FGMGR_INVITATION = "11003";
	/** Request: respond to an invitation. */
	public static final String FGMGR_INVITATION_RESPONSE = "11004";
	/** Event: room list changed (coalesced add/update/delete). */
	public static final String FGMGR_EVT_UPDATE_ROOM = "12001";
	// /**
	//  * Event: room deleted (superseded by FGMGR_EVT_UPDATE_ROOM)
	//  */
	// public static final String FGMGR_EVT_DEL_ROOM = "12002";
	//
	// /**
	//  * Event: room added (superseded by FGMGR_EVT_UPDATE_ROOM)
	//  */
	// public static final String FGMGR_EVT_ADD_ROOM = "12003";
	/** Event: play mode removed. */
	public static final String FGMGR_EVT_DEL_PLAY = "12004";
	/** Event: play mode added. */
	public static final String FGMGR_EVT_ADD_PLAY = "12005";
	/** Event: play mode updated. */
	public static final String FGMGR_EVT_UPDATE_PLAY = "12006";
	/** Event: join count updated. */
	public static final String FGMGR_EVT_UPDATE_JOINS = "12007";
	/** Event: group info updated. */
	public static final String FGMGR_EVT_UPDATE_GROUP = "12008";
	/** Event: member state updated (e.g. hp push in Group.handleBroadcast). */
	public static final String FGMGR_EVT_UPDATE_MEMBER = "12009";
	/** Event: invitation received. */
	public static final String FGMGR_EVT_INVITATION = "12010";
	/** Event: network/offline state updated. */
	public static final String FGMGR_EVT_UPDATE_NET = "update_net";
	/** Event: mail-tip badge updated. */
	public static final String FGMGR_EVT_UPDATE_MAILTIP = "update_mail_tip";
	/**
	 * Handles one routed request; default implementation is a no-op.
	 * NOTE(review): "handel" is a typo for "handle", but it is part of the
	 * public interface — renaming would break subclasses.
	 *
	 * @param sender the requesting session
	 * @param params request payload
	 * @param gid    request/route id
	 */
	public void handel(Session sender, ITObject params, int gid){
	}
}

View File

@ -0,0 +1,81 @@
package com.mgr.group;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import com.mgr.group.data.User;
import com.taurus.core.util.json.JSONUtils;
import com.taurus.permanent.data.Session;
/**
* session
*
*
*/
/**
 * Tracks the session → user binding for all connected clients.
 */
public class SessionManager {
	/** Maps a live session to the user logged in on it. */
	private ConcurrentMap<Session, User> sessionMap = null;

	public SessionManager() {
		this.sessionMap = new ConcurrentHashMap<Session, User>();
	}

	/**
	 * Looks up the user bound to a session.
	 *
	 * @param sessionid the session
	 * @return the bound user, or null if none
	 */
	public User getUser(Session sessionid) {
		return this.sessionMap.get(sessionid);
	}

	/**
	 * Binds a user to a session.
	 *
	 * @param session the session
	 * @param user    the user to bind
	 */
	public void putUser(Session session, User user) {
		this.sessionMap.put(session, user);
	}

	/**
	 * Removes a session binding and clears its hash id.
	 * Fix: the original logged session.toString() BEFORE the null check,
	 * throwing NPE for a null session instead of returning null.
	 *
	 * @return the user that was bound, or null
	 */
	public User delSession(Session session) {
		if (session == null) return null;
		Global.logger.info("delSession----------------清理session:" + session.toString());
		session.setHashId(null);
		User user = this.sessionMap.remove(session);
		return user;
	}

	/** @return the number of currently bound (online) sessions */
	public int size() {
		return this.sessionMap.size();
	}

	/**
	 * Handles a client disconnect: unbinds the session and, on the user's own
	 * group thread, marks the user offline if no newer session replaced it.
	 */
	public void disconnect(Session sender) {
		User user = this.delSession(sender);
		if (user == null) {
			return;
		}
		user.group.enqueueRunnable(new Runnable() {
			@Override
			public void run() {
				// Only flip the flag if this is still the user's current session.
				if (user.sender == sender) {
					user.isConnect = false;
				}
			}
		});
	}
}

View File

@ -0,0 +1,34 @@
package com.mgr.group;
import com.taurus.core.routes.Extension;
import com.taurus.core.routes.Routes;
import com.taurus.permanent.TPServer;
import com.taurus.permanent.core.DefaultConstants;
import com.taurus.web.TWebServer;
public class WebMain extends Extension {
	/**
	 * Web container entry point: relocates the TPServer config file under the
	 * servlet context root, then boots the socket server.
	 * NOTE(review): the path is prepended on every call — a second onStart()
	 * in the same JVM would double-prefix SERVER_CFG_FILE; confirm the
	 * container invokes this exactly once.
	 */
	@Override
	public void onStart() {
		DefaultConstants.SERVER_CFG_FILE = TWebServer.me().getContextRealPath() + "/" + DefaultConstants.SERVER_CFG_FILE;
		TPServer.me().start();
	}

	/** Shuts the socket server down when the web application stops. */
	@Override
	public void onStop() {
		TPServer.me().shutdown();
	}

	/** No HTTP routes are exposed by this extension. */
	@Override
	public void configRoute(Routes me) {
	}

	/** @return the number of currently connected sessions (load reporting). */
	public int getConcurrentSize() {
		return Global.sessionMgr.size();
	}
}

View File

@ -0,0 +1,36 @@
package com.mgr.group.data;
import com.taurus.core.entity.ITObject;
/**
 * A coalesced, per-room pending command. Commands are ranked: a delete (3)
 * supersedes an update (2), which supersedes an add (1); only the strongest
 * command seen since the last broadcast is kept.
 */
public class CommandData {
	public static final int ADD_ROOM = 1;
	public static final int UPDATE_ROOM = 2;
	public static final int DEL_ROOM = 3;

	/** Payload key under which the command type is embedded. */
	private static final String TYPE_KEY = "$ct";

	/** Payload to broadcast for this command. */
	public ITObject param;
	/** 1 add, 2 update, 3 delete. */
	public int type = ADD_ROOM;

	/**
	 * Records a command, keeping only the highest-ranked one, and stamps the
	 * type into the payload.
	 */
	public void setData(int type, ITObject param) {
		if (type < this.type) {
			return; // a stronger command is already pending
		}
		this.type = type;
		this.param = param;
		this.param.putInt(TYPE_KEY, type);
	}
}

View File

@ -0,0 +1,362 @@
package com.mgr.group.data;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import com.data.bean.GroupBean;
import com.data.bean.GroupPlayBean;
import com.data.cache.AccountCache;
import com.data.cache.GroupCache;
import com.data.cache.GroupPlayCache;
import com.mgr.group.Global;
import com.mgr.group.Router;
import com.taurus.core.entity.ITArray;
import com.taurus.core.entity.ITObject;
import com.taurus.core.entity.TArray;
import com.taurus.core.entity.TObject;
import com.taurus.core.plugin.redis.Redis;
import com.taurus.core.util.StringUtil;
import com.taurus.core.util.Utils;
import com.taurus.permanent.TPServer;
import com.taurus.permanent.data.Session;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
/**
 * One game group ("club"). Each active group owns a dedicated thread that
 * drains a task queue and periodically coalesces room/member updates into
 * broadcasts to connected sessions.
 */
public class Group implements Runnable {
	public int id;
	public String group_key = "";
	/** Account id of the group owner. */
	public int owner;
	/** Redis account key of the owner. */
	public String owner_session;
	/** Owner's diamond balance, read once from redis in start(). */
	public int owner_diamo;
	/** True once start() has launched the update thread. */
	public volatile boolean isActive = false;
	/** True once destroy() has run; the update loop then exits. */
	public volatile boolean isDestroy = false;
	private Thread updateThread;
	/** Last time a queued task was processed; read by the idle reaper. */
	public volatile long lastTime;
	/** Tasks executed serially on this group's thread (guarded by its own monitor). */
	final Queue<Runnable> updateHandleList = new LinkedList<Runnable>();
	public ConcurrentMap<Integer, User> userMap;
	public GroupPlayCache playCache;
	public RoomCache roomCache;
	/** Pending per-room commands, coalesced and flushed at most every 2s. */
	public Map<String,CommandData> cmdMap = new HashMap<>();
	/** Time of the last coalesced broadcast. */
	private long lastSendTime;
	// private long lastUpdateOnline;
	public Group(int id) {
		this.userMap = new ConcurrentHashMap<>();
		this.id = id;
		this.group_key = GroupCache.genKey(id);
		// NOTE(review): parseInt throws if "pay_type" is absent in redis — confirm it is always set.
		String pay_type = Redis.use("group1_db11").hget(group_key, "pay_type");
		this.playCache = new GroupPlayCache(id, Integer.parseInt(pay_type));
		this.roomCache = new RoomCache(id);
	}
	/**
	 * Activates the group: loads owner info from caches/redis and starts the
	 * dedicated update thread. Idempotent; no-op once active or destroyed.
	 */
	public synchronized void start() {
		if (isActive || isDestroy)
			return;
		isActive = true;
		GroupBean gb = GroupCache.getGroup(id);
		this.owner = gb.owner;
		this.owner_session = AccountCache.genKey(owner);
		String _diamo = Redis.use("group1_db0").hget(owner_session, "diamo");
		if(StringUtil.isNotEmpty(_diamo)) {
			owner_diamo = Integer.parseInt(_diamo);
		}
		updateThread = new Thread(this, this.group_key);
		updateThread.start();
		lastTime = System.currentTimeMillis();
	}
	/** Runs one queued task (errors logged, not propagated), then flushes broadcasts. */
	private void handleTask(Runnable tem) {
		if (isDestroy) {
			return;
		}
		try {
			tem.run();
		} catch (Throwable t) {
			Global.logger.error("[" + id + "] exception!", t);
		}
		handleBroadcast();
	}
	/**
	 * At most once every 2s: (1) sends all coalesced room commands to every
	 * connected session, (2) pipelines a redis hp read for each member and
	 * pushes an update to members whose hp changed. Clears cmdMap afterwards.
	 */
	public void handleBroadcast()
	{
		long curTime = System.currentTimeMillis();
		if(curTime - lastSendTime >=2000) {
			try {
				if (cmdMap.size()>0) {
					List<Session> list = this.getSessionList();
					if(list.size()>0) {
						ITArray arr = TArray.newInstance();
						ITObject param = TObject.newInstance();
						param.putTArray("cmds", arr);
						for (Entry<String,CommandData> entry : cmdMap.entrySet()) {
							CommandData cmd = entry.getValue();
							arr.addTObject(cmd.param);
						}
						TPServer.me().getController().sendEvent(Router.FGMGR_EVT_UPDATE_ROOM, 0, param, list);
					}
				}
				List<User> u_list = null;
				synchronized (userMap) {
					u_list = new ArrayList<User>(userMap.values());
				}
				List<Object> r_list = null;
				// Pipeline one hp read per member; results come back in order.
				Jedis jedis = Redis.use("group1_db10").getJedis();
				try {
					Pipeline pip = jedis.pipelined();
					for (User user : u_list) {
						pip.hget(user.gm_key, "hp");
					}
					r_list = pip.syncAndReturnAll();
				}finally {
					jedis.close();
				}
				// r_list[i] corresponds to u_list[i] by pipeline order.
				for(int i=0;i<u_list.size();++i) {
					User user = u_list.get(i);
					Object obj = r_list.get(i);
					if(StringUtil.isNotEmpty((String)obj)) {
						if(!user.isConnect)continue;
						ITObject param = TObject.newInstance();
						param.putInt("type", 1);
						try {
							long cur_hp = Long.parseLong((String)obj);
							if(cur_hp == user.hp) {
								continue; // unchanged — nothing to push
							}
							user.hp = cur_hp;
							param.putLong("value",cur_hp);
						}
						catch(Exception e)
						{
							// Unparseable redis value: log it and resend the cached hp.
							Global.logger.error("user:"+user.sender+" hp:"+obj);
							param.putLong("value",user.hp);
						}
						TPServer.me().getController().sendEvent(Router.FGMGR_EVT_UPDATE_MEMBER, user.uid, param, user.sender);
					}
				}
			}
			catch(Exception e) {
				Global.logger.error("[" + id + "] exception!", e);
			}
			cmdMap.clear();
			lastSendTime = System.currentTimeMillis();
		}
	}
	/**
	 * Group thread loop: drains the task queue (refreshing lastTime while busy),
	 * flushes broadcasts, and sleeps 5 ms per iteration until destroyed.
	 */
	public void run() {
		while (isActive && !isDestroy) {
			try {
				if (updateHandleList.size() > 0) {
					this.lastTime = System.currentTimeMillis();
					Runnable tem = null;
					synchronized (updateHandleList) {
						tem = updateHandleList.poll();
					}
					if (tem != null){
						try {
							handleTask(tem);
						} catch (Throwable t) {
							Global.logger.error("[" + id + "] exception!", t);
						}
					}
				}
				handleBroadcast();
				if (isDestroy)
					continue;
				Thread.sleep(5);
			} catch (InterruptedException e) {
				// Interrupt stops the loop.
				isActive = false;
				Global.logger.error("[" + id + "] thread interrupted!");
			} catch (Throwable t) {
				// Keep the loop alive on any other failure.
				//isActive = false;
				Global.logger.error("[" + id + "] exception!", t);
			}
		}
	}
	/** Queues a task for serial execution on this group's thread. */
	public void enqueueRunnable(Runnable runnable) {
		if (runnable == null)
			return;
		synchronized (updateHandleList) {
			updateHandleList.add(runnable);
		}
	}
	/** Coalesces a room command (add/update/delete) for the next broadcast. */
	public void addRoomCommand(String roomid,int type,ITObject param) {
		CommandData cmd = cmdMap.get(roomid);
		if(cmd==null) {
			cmd = new CommandData();
			cmdMap.put(roomid, cmd);
		}
		cmd.setData(type, param);
	}
	/**
	 * Builds the full group snapshot sent on enter: flags/options, join count,
	 * owner diamonds, room list and play list (filtered by lev/uid).
	 */
	public ITObject getInfo(int lev,int uid) {
		ITObject info = TObject.newInstance();
		GroupBean gb = GroupCache.getGroup(this.group_key);
		info.putBoolean("ban", gb.ban == 1 ? true : false);
		String joins_key = GroupCache.genJoinsKey(id);
		int joins = Redis.use("group1_db11").scard(joins_key).intValue();
		info.putInt("joins", joins);
		info.putInt("diamo", owner_diamo);
		info.putInt("dissolve_opt", gb.dissolve_opt);
		info.putInt("kick_opt", gb.kick_opt);
		info.putBoolean("ban_chat1", gb.ban_chat1);
		info.putBoolean("ban_chat2", gb.ban_chat2);
		info.putInt("ban_apply", gb.ban_apply);
		info.putInt("exit_opt", gb.exit_opt);
		info.putInt("option", gb.option);
		ITArray rooms = this.roomCache.getRoomList();
		info.putTArray("rooms", rooms);
		ITArray arrPlayList = TArray.newInstance();
		Utils.arrayCopy(this.playCache.getPlayList(lev,uid), arrPlayList);
		info.putTArray("play_list", arrPlayList);
		return info;
	}
	/**
	 * Removes a room from the local cache and from the redis sorted set.
	 * @return true if the local cache held the room
	 */
	public boolean delRoom(String roomid) {
		String key = "room:" + roomid;
		boolean result =false;
		if (this.roomCache.delBean(key)) {
			this.roomCache.updateRoom();
			result = true;
		}
		String grooms_key = GroupCache.genRoomsKey(id);
		Redis.use("group1_db11").zrem(grooms_key, key);
		return result;
	}
	/**
	 * Re-reads a room and syncs its priority score in redis
	 * (removed from the sorted set if it no longer exists).
	 */
	public Room updateRoom(String roomid) {
		String key = "room:" + roomid;
		String grooms_key = GroupCache.genRoomsKey(id);
		Room room = this.roomCache.getBean(key);
		this.roomCache.updateRoom();
		if (room == null) {
			Redis.use("group1_db11").zrem(grooms_key, key);
		} else {
			Redis.use("group1_db11").zadd(grooms_key, room.priorityValue, key);
		}
		return room;
	}
	/**
	 * Loads a newly created room and registers its priority score in redis.
	 * NOTE(review): body is identical to updateRoom — possibly intentional.
	 */
	public Room addRoom(String roomid) {
		String key = "room:" + roomid;
		String grooms_key = GroupCache.genRoomsKey(id);
		Room room = this.roomCache.getBean(key);
		this.roomCache.updateRoom();
		if (room == null) {
			Redis.use("group1_db11").zrem(grooms_key, key);
		} else {
			Redis.use("group1_db11").zadd(grooms_key, room.priorityValue, key);
		}
		return room;
	}
	/** Loads a play mode into the cache and refreshes the play list. */
	public GroupPlayBean addPlay(int pid) {
		GroupPlayBean gp = this.playCache.getBean(pid);
		this.playCache.updatePlay();
		return gp;
	}
	/** Removes a play mode from the cache and refreshes the play list. */
	public boolean delPlay(int pid) {
		boolean del = this.playCache.delBean(pid);
		this.playCache.updatePlay();
		return del;
	}
	/**
	 * Broadcasts an event to every connected member of this group.
	 *
	 * @param cmd event route id
	 * @param param event payload
	 */
	public void broadCastToClient(String cmd, ITObject param) {
		if (!isActive)
			return;
		List<Session> list = getSessionList();
		if (list == null || list.size() == 0)
			return;
		TPServer.me().getController().sendEvent(cmd, param, list);
	}
	/** @return sessions of all currently connected members */
	public List<Session> getSessionList() {
		List<Session> list = new ArrayList<Session>();
		List<User> tem = null;
		synchronized (userMap) {
			tem = new ArrayList<User>(userMap.values());
		}
		for (User user : tem) {
			if (user.isConnect) {
				list.add(user.sender);
			}
		}
		return list;
	}
	/** @return sessions of connected members with manager rank (lev &lt; 3) */
	public List<Session> getSessionListByMgr() {
		List<Session> list = new ArrayList<Session>();
		List<User> tem = null;
		synchronized (userMap) {
			tem = new ArrayList<User>(userMap.values());
		}
		for (User user : tem) {
			if (user.isConnect && user.lev < 3) {
				list.add(user.sender);
			}
		}
		return list;
	}
	/**
	 * Tears the group down: unregisters it globally and disconnects every
	 * connected member. Idempotent.
	 */
	public void destroy() {
		if (this.isDestroy)
			return;
		this.isDestroy = true;
		Global.groupMap.remove(id);
		Collection<User> tem = userMap.values();
		for (User user : tem) {
			if (user.isConnect) {
				TPServer.me().getController().disconnect(user.sender);
			}
		}
	}
}

View File

@ -0,0 +1,45 @@
package com.mgr.group.data;
import com.data.bean.AccountBean;
import com.taurus.core.entity.ITObject;
import com.taurus.core.entity.TObject;
/**
*
*
*
*/
/**
 * A player as serialized into a room's member list (extends the cached
 * account data with in-room state).
 */
public class Player extends AccountBean{
	/** Seat index within the room. */
	public int seat = 0;
	/** Offline time in seconds (derived from the room's "net_&lt;id&gt;" ms value). */
	public int off_time = 0;
	public long hp =0;
	public int queueid = 0;
	public int seeid = 0;
	// Reused payload — getInfo() mutates and returns this same instance.
	private ITObject playerData = new TObject();
	/**
	 * Serializes this player for the room payload.
	 *
	 * @return the shared playerData object (not a copy — do not retain)
	 */
	public ITObject getInfo() {
		playerData.putInt("aid", this.id);
		playerData.putUtfString("nick", this.nick);
		playerData.putUtfString("portrait", this.portrait);
		playerData.putInt("seat", this.seat);
		playerData.putInt("off_time", off_time);
		playerData.putLong("hp", hp);
		playerData.putInt("queueid",queueid);
		playerData.putInt("seeid",seeid);
		return playerData;
	}
	public String toString() {
		return redis_key;
	}
}

View File

@ -0,0 +1,25 @@
package com.mgr.group.data;
import com.data.bean.BaseBean;
import com.data.cache.AccountCache;
/**
 * Account cache specialized to produce {@link Player} beans, exposed through
 * a lazily-created shared instance.
 */
public class PlayerCache extends AccountCache{
	protected BaseBean newBean() {
		return new Player();
	}

	/** Shared cache instance; access only through instance(). */
	static PlayerCache inst;

	/**
	 * Returns the shared cache, creating it on first use.
	 * Fix: the original's unsynchronized null-check could construct two
	 * instances when called concurrently from different group threads.
	 */
	private static synchronized PlayerCache instance() {
		if (inst == null) {
			inst = new PlayerCache();
		}
		return inst;
	}

	/** Looks a player up by numeric account id. */
	public static Player getPlayer(int id) {
		return instance().getBean(id);
	}

	/** Looks a player up by redis session key. */
	public static Player getPlayer(String session) {
		return instance().getBean(session);
	}
}

View File

@ -0,0 +1,179 @@
package com.mgr.group.data;
import java.util.Map;
import com.data.bean.BaseBean;
import com.data.cache.GroupMemberCache;
import com.taurus.core.entity.ITArray;
import com.taurus.core.entity.ITObject;
import com.taurus.core.entity.TArray;
import com.taurus.core.entity.TObject;
import com.taurus.core.plugin.redis.Redis;
import com.taurus.core.util.StringUtil;
import com.taurus.core.util.Utils;
/**
*
*
*
*/
/**
 * A game room snapshot hydrated from a redis hash; also computes the
 * sorted-set priority used to rank rooms in the group's room list.
 */
public class Room extends BaseBean {
	/** Room id as a string (same value as the numeric id field). */
	public String roomid = "";
	/** Serialized member entries (Player.getInfo() objects). */
	public final ITArray players = TArray.newInstance();
	/** Seat capacity. */
	public int maxPlayers;
	public int limitInRoom;
	/** Id of the play mode this room runs. */
	public int groupPlayId;
	/** Current round. */
	public int round = 0;
	/** Total rounds. */
	public int maxRound = 0;
	/** 0 waiting, 1 playing, 2/3 finished (treated as deleted). */
	public int status = 0;
	/** 1 open, 0 closed. */
	public int open = 1;
	/**
	 * Sorted-set score ranking this room: play id, openness, status and free
	 * seats combined; see updatePriority().
	 */
	public int priorityValue = 1;
	/* Fake (bot/demo) room — hp is read from "fake_<id>" keys instead. */
	public boolean fake = false;
	/** Last activity time (unused in the visible code — TODO confirm). */
	public long lastTime = 0;
	/** Serialized form pushed to clients; rebuilt by setInfo(). */
	public ITObject data = TObject.newInstance();
	// Rebuilds the client-facing payload from the current fields.
	private void setInfo() {
		data.putUtfString("id", roomid);
		data.putInt("round", round);
		data.putInt("times", maxRound);
		data.putInt("status", status);
		data.putInt("maxPlayers", maxPlayers);
		data.putInt("pid", groupPlayId);
		ITArray arr = TArray.newInstance();
		Utils.arrayCopy(players, arr);
		data.putTArray("plist", arr);
		data.putInt("limitInRoom", limitInRoom);
	}
	/**
	 * Hydrates this room from its redis hash. Marks the bean deleted when the
	 * hash has no id or the room has finished (status 2/3).
	 */
	public void fillData(Map<String, String> redis_map) {
		String _id = redis_map.get("id");
		String _status = redis_map.get("status");
		if (StringUtil.isEmpty(_id)) {
			this.del = true;
			return;
		}
		if(StringUtil.isNotEmpty(_status)) {
			this.status = Integer.parseInt(_status);
		}
		if (this.status == 2 || this.status == 3) {
			this.del = true;
			return;
		}
		this.roomid = _id;
		this.id = Integer.parseInt(_id);
		this.groupPlayId = Integer.parseInt(redis_map.get("gpid"));
		this.round = Integer.parseInt(redis_map.get("round"));
		this.maxRound = Integer.parseInt(redis_map.get("times"));
		this.maxPlayers = Integer.parseInt(redis_map.get("maxPlayers"));
		this.limitInRoom = Integer.parseInt(redis_map.get("limitInRoom"));
		this.open = Integer.parseInt(redis_map.get("open"));
		// Any non-empty "fake" value marks the room fake.
		if (StringUtil.isEmpty(redis_map.get("fake")))
		{
			this.fake = false;
		}
		else {
			this.fake = true;
		}
		this.loadRedisPlayer(redis_map.get("players"), redis_map.get("seats"),redis_map);
		setInfo();
		updatePriority();
	}
	/**
	 * Rebuilds the member list from the hash's parallel "players"/"seats"
	 * JSON arrays, enriching each player with offline time, hp (from fake or
	 * member key depending on this.fake), queueid and seeid.
	 */
	private void loadRedisPlayer(String players_json, String seats_json,Map<String, String> redis_map) {
		players.clear();
		if (StringUtil.isEmpty(players_json))
			return;
		ITArray players = TArray.newFromJsonData(players_json);
		ITArray seats = TArray.newFromJsonData(seats_json);
		for (int i = 0; i < players.size(); i++) {
			int player_id = players.getInt(i);
			Player player = PlayerCache.getPlayer(player_id);
			player.seat = seats.getInt(i);
			// "net_<id>" stores offline milliseconds; expose as whole seconds.
			Object tem = redis_map.get("net_"+player_id);
			player.off_time =tem ==null?0:(int)(Long.parseLong((String)tem)/1000);
			String strGroup = redis_map.get("group");
			int groupId = strGroup ==null?0:(Integer.parseInt((String)strGroup));
			String gm_key1 = GroupMemberCache.genKey(groupId, player.id);
			if (this.fake)
			{
				String strHp = Redis.use("group1_db10").hget("fake_"+player.id, "fake_hp");
				player.hp = strHp ==null?0:(Long.parseLong((String)strHp));
			}
			else {
				String strHp = Redis.use("group1_db10").hget(gm_key1, "hp");
				player.hp = strHp ==null?0:(Long.parseLong((String)strHp));
			}
			String strQuid = Redis.use("group1_db10").hget(gm_key1, "queueid");
			player.queueid = strQuid ==null?0:(Integer.parseInt((String)strQuid));
			String strseeid = Redis.use("group1_db10").hget(gm_key1, "seeid");
			player.seeid = strseeid ==null?0:(Integer.parseInt((String)strseeid));
			this.players.addTObject(player.getInfo());
		}
	}
	/**
	 * Recomputes the ranking score: finished rooms score 0; otherwise
	 * pid*10000 + open*1000 + (waiting?1:0)*100 + freeSeatTerm, where the
	 * seat term is 0 for a full room.
	 */
	public void updatePriority() {
		if (this.status == 2 || this.status == 3) {
			this.priorityValue = 0;
			return;
		}
		int t_status = this.status == 0 ? 1 : 0;
		int t_mc = this.players.size() + 1;
		t_mc = this.players.size() == this.maxPlayers ? 0 : t_mc;
		this.priorityValue = this.groupPlayId * 10000 + this.open * 1000 + t_status * 100 + t_mc;
	}
}

View File

@ -0,0 +1,87 @@
package com.mgr.group.data;
import java.util.Map.Entry;
import java.util.Set;
import com.data.bean.BaseBean;
import com.data.cache.BaseCache;
import com.data.cache.GroupCache;
import com.taurus.core.entity.ITArray;
import com.taurus.core.entity.ITObject;
import com.taurus.core.entity.TArray;
import com.taurus.core.entity.TObject;
import com.taurus.core.plugin.redis.Redis;
import com.taurus.core.util.Utils;
import redis.clients.jedis.Jedis;
/**
 * Per-group room cache backed by the "room:" hashes in redis, with a room
 * list snapshot rebuilt at most every 10 seconds.
 */
public class RoomCache extends BaseCache{
	private int groupId;
	/** Timestamp of the last full rebuild of {@link #rooms}. */
	private long last_time;
	/** Cached serialized room list. */
	private ITArray rooms;

	protected RoomCache(int groupId) {
		super("room:","group1_db0");
		this.groupId = groupId;
		this.fillSize = 6;
		this.readTime = 1;
		rooms = TArray.newInstance();
	}

	@Override
	protected BaseBean newBean() {
		return new Room();
	}

	/** Rebuilds the snapshot from cached beans, evicting deleted rooms. */
	public final void updateRoom() {
		rooms.clear();
		Set<Entry<Integer, BaseBean>> set = this.mapById.entrySet();
		for(Entry<Integer, BaseBean> entry : set) {
			Room gp =(Room)entry.getValue();
			if(!gp.del) {
				rooms.addTObject(gp.data);
			}else {
				// NOTE(review): removing while iterating is only safe if mapById
				// is a concurrent map — confirm in BaseCache.
				this.mapById.remove(gp.id);
				this.mapByKey.remove(gp.redis_key);
			}
		}
	}

	/**
	 * Returns a copy of the room list, refreshing it from the redis sorted
	 * set when the cached snapshot is older than 10 seconds.
	 */
	public final ITArray getRoomList(){
		if(System.currentTimeMillis() - last_time < 10000) {
			ITArray arr = TArray.newInstance();
			Utils.arrayCopy(rooms, arr);
			return arr;
		}
		rooms.clear();
		last_time = System.currentTimeMillis();
		String grooms_key = GroupCache.genRoomsKey(groupId);
		Jedis jedis11 = Redis.use("group1_db11").getJedis();
		try {
			// Fix: the original issued zrangeByScore before entering try/finally,
			// leaking the connection if that call threw. (Also renamed the local
			// away from "rooms", which shadowed the field.)
			Set<String> keys = jedis11.zrangeByScore(grooms_key, 100000, 2000000);
			for (String tem : keys) {
				Room room = this.getBean(tem);
				if(room==null) {
					// Stale entry: room hash no longer exists.
					jedis11.zrem(grooms_key, tem);
					continue;
				}
				room.del = false;
				jedis11.zadd(grooms_key, room.priorityValue, tem);
				this.rooms.addTObject(room.data);
			}
			// Drop rooms whose priority collapsed to 0 (finished).
			jedis11.zremrangeByScore(grooms_key, 0, 0);
		}finally {
			jedis11.close();
		}
		ITArray arr = TArray.newInstance();
		Utils.arrayCopy(this.rooms, arr);
		return arr;
	}
}

View File

@ -0,0 +1,135 @@
package com.mgr.group.data;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import com.data.bean.AccountBean;
import com.data.cache.AccountCache;
import com.mgr.group.Global;
import com.taurus.core.entity.ITArray;
import com.taurus.core.entity.ITObject;
import com.taurus.core.entity.TArray;
import com.taurus.core.entity.TObject;
import com.taurus.core.plugin.redis.Redis;
import com.taurus.core.util.StringUtil;
import com.taurus.permanent.TPServer;
import com.taurus.permanent.data.Session;
/**
*
*
*/
/**
 * A group member's connection-side state: current session, cached hp and
 * flags, invitation throttling, and a cached online-member list.
 */
public class User {
	public int uid;
	/** Redis account key for this user. */
	public String session_key;
	/** Partner (agent) level. */
	public int partnerLev;
	/** Group rank; values below 3 are managers (see Group.getSessionListByMgr). */
	public int lev;
	public volatile Group group = null;
	/** True while a live session is bound. */
	public volatile boolean isConnect = false;
	/** The bound network session, or null. */
	public volatile Session sender = null;
	public long hp =0;
	public int mail_tip = 0;
	public int ban =0;
	public int queueid = 0;
	public int seeid = 0;
	/** Redis group-member key read by Group.handleBroadcast for hp. */
	public String gm_key;
	/** Time of the last invitation sent. */
	public long last_invitation_time;
	/** Time the last invitation was refused. */
	public long invitation_refuse_time;
	public int last_invitation_id;
	/** Last time the online list was rebuilt (2s cache). */
	public long last_refresh_time;
	public boolean updateOffline;
	// Cached candidates for getOnlineList(); rebuilt at most every 2s.
	private List<User> onlineList = new ArrayList<>();
	/**
	 * Returns up to 9 random online, non-banned, not-in-room members of this
	 * user's group (excluding the user). Candidates are cached for 2 seconds
	 * and reshuffled on every call.
	 */
	public ITArray getOnlineList(){
		if((System.currentTimeMillis() - last_refresh_time)>=2000) {
			last_refresh_time = System.currentTimeMillis();
			onlineList.clear();
			Collection<User> tem = group.userMap.values();
			for (User user : tem) {
				if (user!=this&&user.isConnect&&user.ban==0) {
					// Empty "room" field means the user is not seated anywhere.
					String room = Redis.use("group1_db0").hget(user.session_key, "room");
					if(StringUtil.isEmpty(room)) {
						onlineList.add(user);
					}
				}
			}
		}
		Collections.shuffle(onlineList);
		int size = Math.min(onlineList.size(), 9);
		ITArray arr = TArray.newInstance();
		for(int i=0;i<size;i++) {
			User u = onlineList.get(i);
			AccountBean acc = AccountCache.getAccount(u.session_key);
			ITObject obj = TObject.newInstance();
			obj.putInt("uid", acc.id);
			obj.putUtfString("nick", acc.nick);
			obj.putUtfString("portrait", acc.portrait);
			arr.addTObject(obj);
		}
		return arr;
	}
	/**
	 * Binds a (new) session to this user, kicking any previous live session.
	 * A null sender only marks the user disconnected.
	 *
	 * @param sender the new session, or null after a server restart
	 */
	public void setSender(Session sender) {
		// 服务器从崩溃中重启
		if (sender == null) {
			this.isConnect = false;
			return;
		}
		// Replacing a different live session: unbind and disconnect the old one.
		if (this.isConnect && this.sender != null&&this.sender!=sender) {
			Global.sessionMgr.delSession(this.sender);
			TPServer.me().getController().disconnect(this.sender);
		}
		// 已经连接
		this.sender = sender;
		Global.sessionMgr.putUser(sender, this);
		this.isConnect = true;
		this.updateOffline = false;
	}
	/**
	 * Sends a response for a routed request; the payload is only included on
	 * success (error == 0). No-op while disconnected.
	 *
	 * @param params response payload
	 * @param gid request/route id
	 * @param error 0 for success, otherwise an error code
	 */
	public void response(ITObject params, int gid, int error) {
		if (!this.isConnect)
			return;
		if (error == 0) {
			TPServer.me().getController().sendResponse(gid, error, uid, params, this.sender);
		} else {
			TPServer.me().getController().sendResponse(gid, error, uid, null, this.sender);
		}
	}
}

View File

@ -0,0 +1,19 @@
<!DOCTYPE web-app PUBLIC
"-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN"
"http://java.sun.com/dtd/web-app_2_3.dtd" >
<web-app>
<filter>
<filter-name>taurus-web</filter-name>
<filter-class>com.taurus.web.WebFilter</filter-class>
<init-param>
<param-name>main</param-name>
<param-value>com.mgr.group.WebMain</param-value>
</init-param>
</filter>
<filter-mapping>
<filter-name>taurus-web</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
</web-app>

View File

@ -0,0 +1,20 @@
log4j.rootLogger = INFO,consoleAppender,fileAppender
# ConsoleAppender
log4j.appender.consoleAppender=org.apache.log4j.ConsoleAppender
log4j.appender.consoleAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.consoleAppender.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p [%t] %c{2} %3x - %m%n
# Regular FileAppender
log4j.appender.fileAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.fileAppender.File=${WORKDIR}/logs/web_main.log
log4j.appender.fileAppender.layout.ConversionPattern=%d{dd MMM yyyy | HH:mm:ss,SSS} | %-5p | %t | %c{3} | %3x | %m%n
log4j.appender.fileAppender.Encoding=UTF-8
log4j.appender.fileAppender.DatePattern='.'yyyy-MM-dd
log4j.appender.fileAppender.Append=true
# The file is rolled over every day
log4j.appender.fileAppender.DatePattern='.'yyyy-MM-dd

View File

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<mgr-config>
<host>192.168.14.1:8050</host>
<mgrId>1000</mgrId>
<loggerDebug>true</loggerDebug>
</mgr-config>

View File

@ -0,0 +1,98 @@
<?xml version="1.0" encoding="UTF-8"?>
<serivce-core>
<log4jPath>log4j.properties</log4jPath>
<plugin>
<id>database</id>
<class>com.taurus.core.plugin.database.DataBasePlugin</class>
<poolConfig>
<!-- 最大连接数, 默认10个 -->
<maxPool>100</maxPool>
<!-- 最小空闲连接数, 默认0个 -->
<minIdle>10</minIdle>
<!-- 配置获取连接等待超时的时间,单位是毫秒, 默认180000 -->
<maxLifetime>180000</maxLifetime>
<!--hsqldb - "select 1 from INFORMATION_SCHEMA.SYSTEM_USERS"
Oracle - "select 1 from dual"
DB2 - "select 1 from sysibm.sysdummy1"
mysql - "select 1" -->
<validationQuery>select 1</validationQuery>
<!-- 连接超时时间,默认30000-->
<connectionTimeout>10000</connectionTimeout>
<!-- 待机超时时间,单位是毫秒, 默认60000 -->
<idleTimeout>60000</idleTimeout>
<!-- jdbc 属性 -->
<props>
<useSSL>false</useSSL>
<useUnicode>true</useUnicode>
<characterEncoding>utf-8</characterEncoding>
<!-- 服务器时区 -->
<serverTimezone>UTC</serverTimezone>
<!-- 预编译缓存 -->
<cachePrepStmts>true</cachePrepStmts>
<!-- 预编译缓存大小 -->
<prepStmtCacheSize>250</prepStmtCacheSize>
<!-- 控制长度多大的sql可以被缓存 -->
<prepStmtCacheSqlLimit>2048</prepStmtCacheSqlLimit>
</props>
</poolConfig>
<databases>
<db>
<name>db1</name>
<driverName>com.mysql.cj.jdbc.Driver</driverName>
<jdbcUrl>jdbc:mysql://8.134.123.86:8060/wb_game</jdbcUrl>
<userName>root</userName>
<password>cssq@2020</password>
</db>
</databases>
</plugin>
<plugin>
<id>redis</id>
<class>com.taurus.core.plugin.redis.RedisPlugin</class>
<poolConfig>
<!-- 最大连接数, 默认8个 -->
<maxTotal>80</maxTotal>
<!-- 最大空闲连接数, 默认8个 -->
<maxIdle>20</maxIdle>
<!-- 最小空闲连接数, 默认0个 -->
<minIdle>5</minIdle>
<!-- 获取连接时的最大等待毫秒数(如果设置为阻塞时BlockWhenExhausted),如果超时就抛异常, 小于零:阻塞不确定的时间, 默认-1 -->
<maxWaitMillis>-1</maxWaitMillis>
        <!-- 在borrow一个jedis实例时是否提前进行validate操作, 默认false -->
<testOnBorrow>true</testOnBorrow>
<!-- 在return给pool时是否提前进行validate操作, 默认false -->
<testOnReturn>true</testOnReturn>
<!-- 表示有一个idle object evitor线程对idle object进行扫描如果validate失败
此object会被从pool中drop掉这一项只有在timeBetweenEvictionRunsMillis大于0时才有意义, 默认true -->
<testWhileIdle>true</testWhileIdle>
<!-- 表示idle object evitor每次扫描的最多的对象数, 默认-1 -->
<numTestsPerEvictionRun>100</numTestsPerEvictionRun>
<!-- 表示一个对象至少停留在idle状态的最短时间然后才能被idle object evitor扫描并驱逐
这一项只有在timeBetweenEvictionRunsMillis大于0时才有意义 , 默认60000-->
<minEvictableIdleTimeMillis>60000</minEvictableIdleTimeMillis>
<!-- 逐出扫描的时间间隔(毫秒) 如果为负数,则不运行逐出线程, 默认30000 -->
<timeBetweenEvictionRunsMillis>30000</timeBetweenEvictionRunsMillis>
<!-- 在minEvictableIdleTimeMillis基础上加入了至少minIdle个对象已经在pool里面了。
如果为-1evicted不会根据idle time驱逐任何对象。如果minEvictableIdleTimeMillisd大于0
则此项设置无意义且只有在timeBetweenEvictionRunsMillis大于0时才有意义默认1800000 -->
<softMinEvictableIdleTimeMillis>1800000</softMinEvictableIdleTimeMillis>
<!-- 连接耗尽时是否阻塞, false报异常,ture阻塞直到超时, 默认true -->
<blockWhenExhausted>true</blockWhenExhausted>
</poolConfig>
<infos>
<info name="group1_db0" host="127.0.0.1" password="cssq@2020" port="6379" database="0" timeout="5000"/>
<info name="group1_db1" host="127.0.0.1" password="cssq@2020" port="6379" database="1" timeout="5000"/>
<info name="group1_db5" host="127.0.0.1" password="cssq@2020" port="6379" database="5" timeout="5000"/>
<info name="group1_db8" host="127.0.0.1" password="cssq@2020" port="6379" database="8" timeout="5000"/>
<info name="group1_db9" host="127.0.0.1" password="cssq@2020" port="6379" database="9" timeout="5000"/>
<info name="group1_db10" host="127.0.0.1" password="cssq@2020" port="6379" database="10" timeout="5000"/>
<info name="group1_db11" host="127.0.0.1" password="cssq@2020" port="6379" database="11" timeout="5000"/>
</infos>
</plugin>
</serivce-core>

View File

@ -0,0 +1,75 @@
<server>
<!-- 计时器线程池大小-->
<timerThreadPoolSize>1</timerThreadPoolSize>
<!-- 协议包压缩门槛 (单位字节),当协议包大于设定的值则会进行压缩 -->
<protocolCompression>512</protocolCompression>
<!-- 读缓冲区类型 nio Direct Buffer 或者 Heap Buffer-->
<readBufferType>Heap</readBufferType>
<!-- 写入冲区类型 nio Direct Buffer 或者 Heap Buffer-->
<writeBufferType>Heap</writeBufferType>
<!-- 最大的数据包大小 -->
<maxPacketSize>524288</maxPacketSize>
<!-- 最大读取缓存大小 -->
<maxReadBufferSize>1024</maxReadBufferSize>
<!-- 最大写入缓存大小 -->
<maxWriteBufferSize>32768</maxWriteBufferSize>
<!-- 会话队列的大小-->
<sessionPacketQueueSize>160</sessionPacketQueueSize>
<!-- Thread Pool Size of the 3 main stages of the Bitswarm Engine -->
<socketAcceptorThreadPoolSize>1</socketAcceptorThreadPoolSize>
<socketReaderThreadPoolSize>3</socketReaderThreadPoolSize>
<socketWriterThreadPoolSize>3</socketWriterThreadPoolSize>
<!-- Enable disable Nagle algorithm on sockets, true == disable -->
<tcpNoDelay>true</tcpNoDelay>
<!-- 会话超时时间(单位秒)-->
<sessionTimeout>15</sessionTimeout>
<!-- Bind socket addresses -->
<socketAddresses>
<socket address="0.0.0.0" port="8050" type="TCP" />
</socketAddresses>
<!-- Ip addresses filter-->
<ipFilter>
<addressBlackList>
<string>1.2.3.4</string>
</addressBlackList>
<addressWhiteList>
<string>127.0.0.1</string>
</addressWhiteList>
<maxConnectionsPerAddress>10000</maxConnectionsPerAddress>
</ipFilter>
<webSocket>
<isActive>false</isActive>
<address>0.0.0.0</address>
<port>80</port>
</webSocket>
<!-- Main extension class -->
<extensionConfig>
<name>extension - group_mgr </name>
<className>com.mgr.group.MainServer</className>
</extensionConfig>
<!-- The system thread pool config -->
<systemThreadPoolConfig>
<name>Sys</name>
<corePoolSize>2</corePoolSize>
<maxPoolSize>8</maxPoolSize>
<keepAliveTime>60000</keepAliveTime>
<maxQueueSize>20000</maxQueueSize>
</systemThreadPoolConfig>
<!-- The extension thread pool config -->
<extensionThreadPoolConfig>
<name>Ext</name>
<corePoolSize>2</corePoolSize>
<maxPoolSize>8</maxPoolSize>
<keepAliveTime>60000</keepAliveTime>
<maxQueueSize>20000</maxQueueSize>
</extensionThreadPoolConfig>
</server>

View File

@ -0,0 +1,12 @@
package group_room_mgr;
import com.taurus.web.JettyServer;
public class Main {
	/** Local development launcher: serves src/main/webapp on port 8082 at "/". */
	public static void main(String[] args) {
		final String webRoot = "src/main/webapp";
		final int httpPort = 8082;
		JettyServer server = new JettyServer(webRoot, httpPort, "/");
		server.start();
	}
}

Binary file not shown.

Some files were not shown because too many files have changed in this diff Show More