Parsing Druid-generated sqlList logs

Sky飞羽 2022-03-12

Recently I needed to gather our project team's SQL to analyze its performance and conformance to our SQL conventions, so I wrote a small utility. I'm recording it here and will keep improving it.

In Druid's web monitoring console, click the option that records the statistics to the log.

I won't walk through the page operations with screenshots here.
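If the stats aren't landing in the log file yet, Druid can also be told to emit them periodically. A minimal sketch, assuming the DruidDataSource is created in code (the 5-minute interval is an arbitrary example):

import com.alibaba.druid.pool.DruidDataSource;

DruidDataSource dataSource = new DruidDataSource();
// write pool and SQL statistics (including sqlList) to the log every 5 minutes
dataSource.setTimeBetweenLogStatsMillis(5 * 60 * 1000);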

Find the sqlList entries in the log files

In the .log files, cut out the JSON content that follows sqlList with sed, and write it to xx.txt:

find ./* -name '*.log'|xargs grep 'sqlList'|sed -r 's/.*"sqlList":(.*)\}\r?$/\1/' >xx.txt
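For reference, a stat-log line looks roughly like the following (the exact fields vary by Druid version; sql, executeCount, and executeMillisTotal are the keys the parser below relies on). After the sed step, xx.txt holds just the JSON array:

{"url":"jdbc:mysql://...","dbType":"mysql","sqlList":[{"sql":"SELECT * FROM T_USER WHERE ID = ?","executeCount":12,"executeMillisTotal":340}]}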

Parse the JSON with Java code

Parse the JSON files and write out several result files as needed.
The input path handling isn't flexible: under INPUT_DIR there is one more directory level (e.g., one directory per module), and the xx.txt files sit inside those.
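A hypothetical layout (the module names are placeholders):

sqlFile/
    moduleA/
        xx.txt
    moduleB/
        xx.txt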

package com.demo;

import com.alibaba.fastjson.JSON;
import com.demo.util.ExportExcel;
import org.thymeleaf.util.StringUtils;

import java.io.*;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.util.*;
import java.util.stream.Collectors;

public class ParseDruidJson {

    /**
     * Raw parse results
     * key - inputfile name
     */
    private static Map<File, List<DruidLogBean>> beansMap = new HashMap<>();
    /**
     * De-duplicated SQL
     * key - inputfile name
     */
    private static Map<File, Set<String>> distinctSqlMap = new HashMap<>();

    /**
     * Directory of the input files; each output file is named after its input file with a suffix appended
     */
    private static final String INPUT_DIR = "C:\\xx\\TEMP\\xx\\sqlFile";

    /**
     * Field separator within one output line
     */
    private static final String SPLIT_STR = "#";


    public static void main(String[] args) throws Exception {
        File inputDirFile = new File(INPUT_DIR);
        if(inputDirFile.isFile()){
            return;
        }
        // walk every file under the directory and parse it
        for(File inputFileSec  : inputDirFile.listFiles()){
            if(inputFileSec.isFile()){
                continue;
            }
            for(File inputFile: inputFileSec.listFiles()){
                // raw input file names must not contain "_" (generated output files do)
                if(inputFile.getName().contains("_")){
                    continue;
                }

                List<DruidLogBean> beans = new ArrayList<>();
                Set<String> distinctSql = new HashSet<>();
                beansMap.put(inputFile, beans);
                distinctSqlMap.put(inputFile, distinctSql);

                // try-with-resources so the reader is closed even if parsing fails
                try (BufferedReader reader = new BufferedReader(new FileReader(inputFile))) {
                    String sqlListLine = reader.readLine();
                    while (!StringUtils.isEmpty(sqlListLine)) {
                        beans.addAll(JSON.parseArray(sqlListLine, DruidLogBean.class));
                        sqlListLine = reader.readLine();
                    }
                }

                // iterate over every parsed SQL entry
                for (DruidLogBean bean : beans) {
                    // hard-coded literal values inside the SQL are not handled yet; upper-case before de-duplicating
                    distinctSql.add(bean.getSql().toUpperCase(Locale.ROOT));
                }

                System.out.println(inputFile.getAbsolutePath() + " SQL entries: " + beans.size());
            }
        }

        printAllSql();
        printSqlDistinct();
        printSqlSortByAvgTimeDesc();
        printSqlWithoutWhere();
    }


    /**
     * Get the output file.
     * The output file name is the input file name with _$outSubfix appended; if it already exists, delete and recreate it.
     * @param inputFile
     * @param outSubfix
     * @param expandedName file extension, e.g. ".xls"
     * @return
     * @throws IOException
     */
    private static File getOutputFile(File inputFile, String outSubfix, String expandedName) throws IOException {
        String fileName = inputFile.getAbsolutePath();
        int pointIndex = fileName.lastIndexOf(".");

        String outputFilename;
        if (StringUtils.isEmpty(expandedName)) {
            outputFilename = fileName.substring(0, pointIndex) + "_" + outSubfix + fileName.substring(pointIndex);
        } else {
            outputFilename = fileName.substring(0, pointIndex) + "_" + outSubfix + expandedName;
        }
        File outputFile = new File(outputFilename);
        if(outputFile.exists()){
            outputFile.delete();
            outputFile.createNewFile();
        }

        return outputFile;
    }

    /**
     * Print all SQL
     * Line format: sql#ExecuteMillisTotal#ExecuteCount#avgExecuteMillis
     * @throws IOException
     */
    private static void printAllSql() throws Exception {
        System.out.println("---------printAllSql------------");

        for(Map.Entry<File, List<DruidLogBean>> entity : beansMap.entrySet()){

            Map<String, List<String>> classifyMap = new HashMap<>();

            File outputFile = getOutputFile(entity.getKey(), "all", ".xls");

            for(DruidLogBean bean : entity.getValue()){
                String printSql = bean.getSql() + SPLIT_STR + bean.getExecuteMillisTotal() + SPLIT_STR + bean.getExecuteCount() + SPLIT_STR + getExecuteAVGTime(bean.getExecuteMillisTotal(), bean.getExecuteCount());
                classfiyByDMLType(classifyMap, printSql);
            }

            List<String> printSql = new ArrayList<>();
            for(Map.Entry<String, List<String>> entrySql: classifyMap.entrySet()){
                for(String sql : entrySql.getValue()){
                    printSql.add(sql);
                }
            }
            String[] rowName = new String[]{"SQL", "ExecuteMillisTotal(ms)", "ExecuteCount", "ExecuteAVGTime(ms)"};
            printToExcel(outputFile.getAbsolutePath(), null, rowName, printSql);
//            printToTxt(outputFile.getAbsolutePath(), printSql);
            System.out.println("输出文件:" + outputFile.getAbsolutePath());
        }
    }


    /**
     * Export to Excel
     * @param fileName
     * @param tableName
     * @param rowName
     * @param sqlList
     * @throws Exception
     */
    private static void printToExcel(String fileName, String tableName, String[] rowName,List<String> sqlList) throws Exception {
        if(fileName.endsWith(".txt")){
            // String.replace returns a new string; assign the result back
            fileName = fileName.replace(".txt", ".xls");
        }
        List<Object[]> rows = new ArrayList<>();
        for(String sql : sqlList){
            rows.add(sql.split(SPLIT_STR));
        }

        if(StringUtils.isEmpty(tableName)){
            tableName = "result";
        }

        try (FileOutputStream exportOutputStream = new FileOutputStream(fileName)) {
            if (!rows.isEmpty()) {
                ExportExcel exportExcel = new ExportExcel(tableName, rowName, rows);
                exportExcel.export(exportOutputStream);
            }
            exportOutputStream.flush();
        }
    }


    /**
     * Export to txt
     * @param fileName
     * @param sqlList
     * @throws IOException
     */
    private static void printToTxt(String fileName, List<String> sqlList) throws IOException {
        try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(fileName, true)))) {
            for (String sql : sqlList) {
                out.write(sql);
                out.newLine();
            }
            out.flush();
        }
    }

    /**
     * Bucket SQL statements by DML type (the statement's first keyword)
     * @param classfiedMap
     * @param sql
     */
    private static void classfiyByDMLType(Map<String, List<String>> classfiedMap, String sql){
        String dMLType = sql.substring(0, sql.indexOf(" ")).toLowerCase(Locale.ROOT);
        classfiedMap.computeIfAbsent(dMLType, k -> new ArrayList<>()).add(sql);
    }


    /**
     * Sort by average execution time, descending
     */
    private static void printSqlSortByAvgTimeDesc() throws IOException{
        System.out.println("---------printSqlSortByAvgTimeDesc------------");
        for(Map.Entry<File, List<DruidLogBean>> entity : beansMap.entrySet()){
            File outputFile = getOutputFile(entity.getKey(), "sort", null);

            List<Map<String, String>> sortList = new ArrayList<>();
            for (DruidLogBean bean : entity.getValue()) {
                Map<String, String> beanMap = new HashMap<>();
                beanMap.put("avgTime", getExecuteAVGTime(bean.getExecuteMillisTotal(), bean.getExecuteCount()));
                beanMap.put("sql", bean.getSql());
                sortList.add(beanMap);
            }

            // descending by average execution time
            List<Map<String, String>> sortListNew = sortList.stream()
                    .sorted(Comparator.comparing((Map<String, String> m) -> new BigDecimal(m.get("avgTime"))).reversed())
                    .collect(Collectors.toList());

            try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(outputFile, true)))) {
                for (Map<String, String> beanMap : sortListNew) {
                    out.write(beanMap.get("sql") + SPLIT_STR + beanMap.get("avgTime"));
                    out.newLine();
                }
                out.flush();
            }
            System.out.println("Output file: " + outputFile.getAbsolutePath());
        }
    }

    /**
     * Compute the average execution time, rounded to 2 decimal places
     * @param totalTime
     * @param count
     * @return
     */
    private static String getExecuteAVGTime(String totalTime, String count){

        BigDecimal avgTime = (new BigDecimal(totalTime)).divide(new BigDecimal(count), 2, RoundingMode.HALF_UP);
        return avgTime.toString();
    }


    /**
     * Output SELECT statements that have no WHERE clause
     * @throws IOException
     */
    private static void printSqlWithoutWhere() throws IOException {
        System.out.println("---------printSqlWithoutWhere------------");

        for(Map.Entry<File, Set<String>> entity : distinctSqlMap.entrySet()){
            File outputFile = getOutputFile(entity.getKey(), "withoutWhere", null);
            try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(outputFile, true)))) {
                Set<String> sqlSet = entity.getValue();
                for (String sql : sqlSet) {
                    if (isWithoutWhere(sql)) {
                        out.write(sql);
                        out.newLine();
                    }
                }
                out.flush();
            }
            System.out.println("Output file: " + outputFile.getAbsolutePath());
        }
    }

    /**
     * Check whether a SELECT lacks a WHERE clause (naive substring check)
     * @param sql
     * @return
     */
    private static Boolean isWithoutWhere(String sql){
        sql = sql.toLowerCase(Locale.ROOT);
        if(!sql.startsWith("select")){
            return false;
        }

        if(sql.contains("where")){
            return false;
        }

        return true;
    }

    /**
     * Output the de-duplicated SQL
     */
    private static void printSqlDistinct() throws Exception {
        System.out.println("---------printSqlDistinct------------");

        for(Map.Entry<File, Set<String>> entity : distinctSqlMap.entrySet()){
            File outputFile = getOutputFile(entity.getKey(), "distinct", ".xls");
            Set<String> sqlSet = entity.getValue();
            Map<String, List<String>> classifyMap = new HashMap<>();

            for(String sql : sqlSet){
                classfiyByDMLType(classifyMap, sql);
            }

            List<String> printSql = new ArrayList<>();
            for(Map.Entry<String, List<String>> entrySql: classifyMap.entrySet()){
                for(String sql : entrySql.getValue()){
                    printSql.add(sql);
                }
            }

            String[] rowName = new String[]{"SQL"};
            printToExcel(outputFile.getAbsolutePath(), null, rowName, printSql);
//            printToTxt(outputFile.getAbsolutePath(), printSql);

            System.out.println("输出文件:" + outputFile.getAbsolutePath());
        }
    }

}
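
The DruidLogBean class isn't shown in the original post. A minimal sketch matching the getters used above (the field names are inferred from the keys in Druid's stat-log JSON, and everything is kept as String because getExecuteAVGTime takes Strings):

package com.demo;

/**
 * Minimal POJO for one entry of Druid's sqlList JSON.
 * Only the fields this parser reads are declared; fastjson ignores the rest.
 */
public class DruidLogBean {

    private String sql;
    private String executeCount;
    private String executeMillisTotal;

    public String getSql() { return sql; }
    public void setSql(String sql) { this.sql = sql; }

    public String getExecuteCount() { return executeCount; }
    public void setExecuteCount(String executeCount) { this.executeCount = executeCount; }

    public String getExecuteMillisTotal() { return executeMillisTotal; }
    public void setExecuteMillisTotal(String executeMillisTotal) { this.executeMillisTotal = executeMillisTotal; }
}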

Output to both txt and Excel is supported.
The Excel export class (ExportExcel) is a snippet pasted from the internet, so it isn't reproduced here.
printAllSql outputs all SQL
printSqlSortByAvgTimeDesc outputs SQL sorted by average execution time, descending
printSqlWithoutWhere outputs SELECT statements that have no WHERE clause
printSqlDistinct outputs the de-duplicated SQL
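For example, one line of the _sort output file would look like this (the SQL is hypothetical; the format is the statement, the # separator, then the average time in ms):

SELECT * FROM T_USER WHERE ID = ?#12.34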

The pom.xml dependencies, pasted over in one go:

<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>fastjson</artifactId>
    <version>1.2.47</version>
</dependency>
<dependency>
    <groupId>org.apache.poi</groupId>
    <artifactId>poi</artifactId>
    <version>3.11</version>
</dependency>
<dependency>
    <groupId>commons-io</groupId>
    <artifactId>commons-io</artifactId>
    <version>2.6</version>
</dependency>
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-thymeleaf</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.session</groupId>
    <artifactId>spring-session-core</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter</artifactId>
</dependency>
