/*
 * aoserv-master - Master server for the AOServ Platform.
 * Copyright (C) 2003-2013, 2015, 2017, 2018, 2019, 2020  AO Industries, Inc.
 *     support@aoindustries.com
 *     7262 Bull Pen Cir
 *     Mobile, AL 36695
 *
 * This file is part of aoserv-master.
 *
 * aoserv-master is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * aoserv-master is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with aoserv-master.  If not, see <https://www.gnu.org/licenses/>.
 */
package com.aoindustries.aoserv.master;

import com.aoindustries.aoserv.client.backup.BackupReport;
import com.aoindustries.aoserv.client.schema.Table;
import com.aoindustries.cron.CronDaemon;
import com.aoindustries.cron.CronJob;
import com.aoindustries.cron.Schedule;
import com.aoindustries.dbc.DatabaseConnection;
import com.aoindustries.util.logging.ProcessTimer;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Automatically generates various reports on a regular basis.  These reports are then
 * used by the accounting system to charge the appropriate amount.  A scheduled report
 * may be missed, and missed reports are not generated retroactively.  Anything that
 * depends on these reports should work from whichever reports are available, without
 * requiring that every report be present.  It is an acceptable error condition if not
 * a single report is generated in a month.
 *
 * @author  AO Industries, Inc.
 */
final public class ReportGenerator implements CronJob {

	private static final Logger logger = Logger.getLogger(ReportGenerator.class.getName());

	/**
	 * The maximum time allowed for generating a backup report.
	 */
	private static final long BACKUP_REPORT_MAX_TIME=2L*60*60*1000;

	/**
	 * The interval at which administrators will be reminded.
	 */
	private static final long TIMER_REMINDER_INTERVAL=12L*60*60*1000;

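	/**
	 * Per-host, per-package accumulator for one day's backup statistics,
	 * batch-inserted into backup."BackupReport" once generated.
	 */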
	static class TempBackupReport {
		int host;
		int packageNum;
		int fileCount;
		long diskSize;
	}

	private static boolean started=false;

	public static void start() {
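		// Synchronizing on System.out both guards the started flag and keeps the
		// startup message from interleaving with output from other services.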
		synchronized(System.out) {
			if(!started) {
				System.out.print("Starting " + ReportGenerator.class.getSimpleName() + ": ");
				CronDaemon.addCronJob(new ReportGenerator(), logger);
				started=true;
				System.out.println("Done");
			}
		}
	}

	private ReportGenerator() {
	}

	/**
	 * Runs at {@link BackupReport#BACKUP_REPORT_HOUR}:{@link BackupReport#BACKUP_REPORT_MINUTE} am daily.
	 */
	private static final Schedule schedule = (minute, hour, dayOfMonth, month, dayOfWeek, year) ->
		minute==BackupReport.BACKUP_REPORT_MINUTE
		&& hour==BackupReport.BACKUP_REPORT_HOUR;
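
	// This matcher selects exactly one minute per day: the dayOfMonth, month,
	// dayOfWeek, and year fields are ignored, so the job fires daily at
	// BACKUP_REPORT_HOUR:BACKUP_REPORT_MINUTE.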

	@Override
	public Schedule getSchedule() {
		return schedule;
	}

	@Override
	public int getThreadPriority() {
		return Thread.NORM_PRIORITY-2;
	}

	@Override
	public void run(int minute, int hour, int dayOfMonth, int month, int dayOfWeek, int year) {
		try {
			try (
				ProcessTimer timer=new ProcessTimer(
					logger,
					ReportGenerator.class.getName(),
					"runCronJob",
					"Backup Report Generator",
					"Generating contents for backup.BackupReport",
					BACKUP_REPORT_MAX_TIME,
					TIMER_REMINDER_INTERVAL
				)
			) {
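				// Run the timer in the background: it logs a warning if generation
				// exceeds BACKUP_REPORT_MAX_TIME, then repeats the reminder every
				// TIMER_REMINDER_INTERVAL until the try-with-resources closes it.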
				MasterServer.executorService.submit(timer);

				// Start the transaction
				InvalidateList invalidateList=new InvalidateList();
				DatabaseConnection conn=MasterDatabase.getDatabase().createDatabaseConnection();
				try {
					boolean connRolledBack=false;
					try {
						// Do not run the report twice in one day
						if(
							conn.executeBooleanQuery(
								"select\n"
								+ "  not exists (\n"
								+ "    select\n"
								+ "      *\n"
								+ "    from\n"
								+ "      backup.\"BackupReport\"\n"
								+ "    where\n"
								+ "      CURRENT_DATE = date\n"
								+ "  )"
							)
						) {
							// HashMap keyed on host, containing HashMaps keyed on package, containing TempBackupReport objects
							Map<Integer,Map<Integer,TempBackupReport>> stats=new HashMap<>();
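							// e.g. stats.get(hostId).get(packageId).fileCount would be the number
							// of files backed up for that package on that host (a hypothetical read)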

							/* TODO: Implement as calls to the aoserv daemons to get the quota reports
							String sqlString=null;
							Statement stmt=conn.getConnection(Connection.TRANSACTION_READ_COMMITTED, true).createStatement();
							try {
								// First, count up the total number of files per host and per package
								conn.incrementQueryCount();
								ResultSet results=stmt.executeQuery(sqlString="select server, package, count(*) from file_backups group by server, package");
								try {
									while(results.next()) {
										int host=results.getInt(1);
										int packageNum=results.getInt(2);
										int fileCount=results.getInt(3);

										TempBackupReport tbr=new TempBackupReport();
										tbr.host=host;
										tbr.packageNum=packageNum;
										tbr.fileCount=fileCount;

										Integer hostInteger=Integer.valueOf(host);
										Map<Integer,TempBackupReport> packages=stats.get(hostInteger);
										if(packages==null) stats.put(hostInteger, packages=new HashMap<>());
										packages.put(Integer.valueOf(packageNum), tbr);
									}
								} finally {
									results.close();
								}

								// Count up the data sizes by host and package
								conn.incrementQueryCount();
								results=stmt.executeQuery(
									sqlString=
									  "select\n"
									+ "  fb.server,\n"
									+ "  fb.package,\n"
									+ "  sum(bd.data_size),\n"
									+ "  sum(coalesce(bd.compressed_size, bd.data_size)),\n"
									+ "  sum(\n"
									+ "    case when\n"
									+ "      (coalesce(bd.compressed_size, bd.data_size)%(4096::int8))=0\n"
									+ "    then\n"
									+ "      coalesce(bd.compressed_size, bd.data_size)\n"
									+ "    else\n"
									+ "      ((coalesce(bd.compressed_size, bd.data_size)/4096)+1)*4096\n"
									+ "    end\n"
									+ "  )\n"
									+ "from\n"
									+ "  (\n"
									+ "    select\n"
									+ "      server,\n"
									+ "      package,\n"
									+ "      backup_data\n"
									+ "    from\n"
									+ "      file_backups\n"
									+ "    where\n"
									+ "      backup_data is not null\n"
									+ "    union select\n"
									+ "      ao_server,\n"
									+ "      package,\n"
									+ "      backup_data\n"
									+ "    from\n"
									+ "      interbase_backups\n"
									+ "    union select\n"
									+ "      ms.ao_server,\n"
									+ "      mb.package,\n"
									+ "      mb.backup_data\n"
									+ "    from\n"
									+ "      mysql_backups mb,\n"
									+ "      mysql.\"Server\" ms\n"
									+ "    where\n"
									+ "      mb.mysql_server=ms.bind\n"
									+ "    union select\n"
									+ "      ps.ao_server,\n"
									+ "      pb.package,\n"
									+ "      pb.backup_data\n"
									+ "    from\n"
									+ "      postgres_backups pb\n"
									+ "      INNER JOIN postgresql.\"Server\" ps ON pb.postgres_server = ps.bind\n"
									+ "  ) as fb,\n"
									+ "  backup_data bd\n"
									+ "where\n"
									+ "  fb.backup_data=bd.id\n"
									+ "  and bd.is_stored\n"
									+ "group by\n"
									+ "  fb.server,\n"
									+ "  fb.package"
								);
								try {
									while(results.next()) {
										int host=results.getInt(1);
										int packageNum=results.getInt(2);
										long uncompressedSize=results.getLong(3);
										long compressedSize=results.getLong(4);
										long diskSize=results.getLong(5);

										Integer hostInteger=Integer.valueOf(host);
										Map<Integer,TempBackupReport> packages=stats.get(hostInteger);
										if(packages==null) stats.put(hostInteger, packages=new HashMap<>());
										Integer packageInteger=Integer.valueOf(packageNum);
										TempBackupReport tbr=(TempBackupReport)packages.get(packageInteger);
										if(tbr==null) {
											tbr=new TempBackupReport();
											tbr.host=host;
											tbr.packageNum=packageNum;
											packages.put(packageInteger, tbr);
										}
										tbr.uncompressedSize=uncompressedSize;
										tbr.compressedSize=compressedSize;
										tbr.diskSize=diskSize;
									}
								} finally {
									results.close();
								}
							} finally {
								stmt.close();
							}*/

							// Add these stats to the table
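							// In the INSERT, "default" lets the database assign the report id,
							// CURRENT_DATE stamps the report date, and ?::int8 casts the
							// disk size parameter explicitly to bigint.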
							try (PreparedStatement pstmt = conn.getConnection(Connection.TRANSACTION_READ_COMMITTED, false).prepareStatement("INSERT INTO backup.\"BackupReport\" VALUES (default,?,?,CURRENT_DATE,?,?::int8);")) {
								try {
									Iterator<Integer> hostKeys = stats.keySet().iterator();
									while(hostKeys.hasNext()) {
										Map<Integer,TempBackupReport> packages = stats.get(hostKeys.next());
										Iterator<Integer> packageKeys=packages.keySet().iterator();
										while(packageKeys.hasNext()) {
											TempBackupReport tbr=packages.get(packageKeys.next());
											pstmt.setInt(1, tbr.host);
											pstmt.setInt(2, tbr.packageNum);
											pstmt.setInt(3, tbr.fileCount);
											pstmt.setLong(4, tbr.diskSize);
											pstmt.addBatch();
										}
									}
									pstmt.executeBatch();
								} catch(SQLException err) {
									System.err.println("Error from update: "+pstmt.toString());
									throw err;
								}
							}

							// Invalidate the table
							invalidateList.addTable(conn, Table.TableID.BACKUP_REPORTS, InvalidateList.allAccounts, InvalidateList.allHosts, false);
						}
					} catch(RuntimeException | IOException err) {
						if(conn.rollback()) {
							connRolledBack=true;
							invalidateList=null;
						}
						throw err;
					} catch(SQLException err) {
						if(conn.rollbackAndClose()) {
							connRolledBack=true;
							invalidateList=null;
						}
						throw err;
					} finally {
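						// Commit only if nothing was rolled back; rollbackAndClose() above
						// also closes the connection, which the isClosed() check accounts for.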
						if(!connRolledBack && !conn.isClosed()) conn.commit();
					}
				} finally {
					conn.releaseConnection();
				}
				MasterServer.invalidateTables(invalidateList, null);
			}
		} catch(ThreadDeath TD) {
			throw TD;
		} catch(Throwable T) {
			logger.log(Level.SEVERE, null, T);
		}
	}
}