#!/usr/bin/python26 ### ### backmon.commands.db.update ### import sys import os import os.path import glob import re from optparse import OptionParser from guppy import hpy from ....lib import * from backup_monitoring.debug import * from backup_monitoring.math import * from backup_monitoring.dal.builtins import * from backup_monitoring.parsing.parsers import ontap_df from backup_monitoring.parsing.parsers import sis_status_l from backup_monitoring.parsing.parsers import reallocate_status_v from backup_monitoring.parsing.parsers import bpimagelist from backup_monitoring.parsing.parsers import bpplclients from backup_monitoring.parsing.parsers import bppllist from backup_monitoring.parsing.parsers import bpstulist from backup_monitoring.parsing.parsers import bpdbjobs from backup_monitoring.parsing.parsers import du METRIC_LIST = [ 'filers', 'filer_volume_tasks', ] METRIC_HELP = { 'filers' : 'netapp filer metrics', 'filer_volume_tasks' : 'netapp filer volume tasks (sis / reallocation)', } ## ## OPTIONS ## usage = """Usage: %prog [options] -e [ENVIRONMENT] db update [metrics]' available metrics: """ + '\n'.join([' %-8s %s' % (cmd, METRIC_HELP[cmd]) for cmd in METRIC_LIST]) parser = OptionParser(usage=usage) def run(args, kwargs): # # add kwargs to local namespace # for key in kwargs.keys(): if re.compile('^[A-Z][A-Z_]+$').match(key): exec(key + ' = kwargs[\'' + key + '\']') (options, args) = parser.parse_args(args) # # Database # conn = CONN curs = conn.cursor() # # Metrics # metrics = args INFO('Metrics to update: %s' % (', '.join(metrics))) for metric in metrics: DEBUG('metric = %s' % (metric)) # # policies # if metric in ['policies', 'daily_statistics']: master_feeds = ['bppllist'] media_feeds = [] environment_ids = lookup_table(curs, "SELECT name, environment_id FROM environments") max_environment_id = one_value(curs, "SELECT MAX(environment_id) FROM environments") for environment in ENVIRONMENTS: environment_name = environment.name INFO('environment = %s' % (environment_name)) if environment_name not in environment_ids: WARN('Environment %s does not exist in the database!' % (environment_name)) continue environment_id = environment_ids[environment_name] environment.load_feeds(master=master_feeds, media=media_feeds) environment.parse_policies() active_policy_ids = lookup_table(curs, "SELECT name, policy_id FROM policies WHERE environment_id = %d AND active = 'Y'" % (environment_id)) max_policy_id = one_value(curs, "SELECT MAX(policy_id) FROM policies") policy_count = 0 for policy in environment.policies: policy_count += 1 policy_name = policy.policy_name policy_type = policy.policy_type if policy_name not in active_policy_ids: max_policy_id += 1 policy_id = max_policy_id source = 'bppllist' active = 'Y' sql = """ INSERT INTO policies (policy_id, environment_id, name, type, description, comment, source, commissioned, active, added, updated, seen) VALUES (%d, %d, '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s') """ % (policy_id, environment_id, policy_name, policy_type, '', '', source, TIMESTAMP_ISO, active, TIMESTAMP_ISO, TIMESTAMP_ISO, TIMESTAMP_ISO) print sql curs.execute(sql) conn.commit() active_policy_ids[policy_name] = policy_id INFO('policy_count = %d' % (policy_count)) # # clients # if metric in ['clients', 'daily_statistics']: master_feeds = ['bpplclients'] media_feeds = [] environment_ids = lookup_table(curs, "SELECT name, environment_id FROM environments") max_environment_id = one_value(curs, "SELECT MAX(environment_id) FROM environments") for environment in ENVIRONMENTS: environment_name = environment.name INFO('environment = %s' % (environment_name)) if environment_name not in environment_ids: WARN('Environment %s does not exist in the database!' % (environment_name)) continue environment_id = environment_ids[environment_name] environment.load_feeds(master=master_feeds, media=media_feeds) environment.parse_clients() client_ids = lookup_table(curs, "SELECT name, client_id FROM clients WHERE environment_id = %d" % (environment_id)) max_client_id = one_value(curs, "SELECT MAX(client_id) FROM clients") client_count = 0 for client in environment.clients: client_count += 1 client_name = client.name hardware = client.hw os = client.os priority = client.pri if client_name not in client_ids: max_client_id += 1 client_id = max_client_id source = 'bpplclients' active = 'Y' sql = """ INSERT INTO clients (client_id, environment_id, name, description, comment, source, hardware, os, priority, commissioned, active, added, updated, seen) VALUES (%d, %d, '%s', '%s', '%s', '%s', '%s', '%s', %d, '%s', '%s', '%s', '%s', '%s') """ % (client_id, environment_id, client_name, '', '', source, hardware, os, priority, TIMESTAMP_ISO, active, TIMESTAMP_ISO, TIMESTAMP_ISO, TIMESTAMP_ISO) print sql curs.execute(sql) conn.commit() client_ids[client_name] = client_id INFO('client_count = %d' % (client_count)) # # stunits # if metric == 'stunits': master_feeds = ['bpstulist'] media_feeds = [] environment_ids = lookup_table(curs, "SELECT name, environment_id FROM environments") max_environment_id = one_value(curs, "SELECT MAX(environment_id) FROM environments") for environment in ENVIRONMENTS: environment_name = environment.name INFO('environment = %s' % (environment_name)) if environment_name not in environment_ids: WARN('Environment %s does not exist in the database!' % (environment_name)) continue environment_id = environment_ids[environment_name] environment.load_feeds(master=master_feeds, media=media_feeds) environment.parse_stunits() stunit_ids = lookup_table(curs, "SELECT name, stunit_id FROM stunits WHERE environment_id = %d" % (environment_id)) max_stunit_id = one_value(curs, "SELECT MAX(stunit_id) FROM stunits") stunit_count = 0 for stunit in environment.stunits: stunit_count += 1 stunit_name = stunit.label if stunit_name not in stunit_ids: max_stunit_id += 1 stunit_id = max_stunit_id sql = """ INSERT INTO stunits (stunit_id, environment_id, name, description, commissioned, active, added, updated, seen) VALUES (%d, %d, '%s', '%s', '%s', '%s', '%s', '%s', '%s') """ % (stunit_id, environment_id, stunit_name, '', TIMESTAMP_ISO, 'Y', TIMESTAMP_ISO, TIMESTAMP_ISO, TIMESTAMP_ISO) print sql curs.execute(sql) conn.commit() stunit_ids[stunit_name] = stunit_id INFO('stunit_count = %d' % (stunit_count)) # # retention levels # if metric == 'retlevels': master_feeds = ['bpretlevel'] media_feeds = [] environment_ids = lookup_table(curs, "SELECT name, environment_id FROM environments") max_environment_id = one_value(curs, "SELECT MAX(environment_id) FROM environments") for environment in ENVIRONMENTS: environment_name = environment.name INFO('environment = %s' % (environment_name)) if environment_name not in environment_ids: WARN('Environment %s does not exist in the database!' % (environment_name)) continue environment_id = environment_ids[environment_name] environment.load_feeds(master=master_feeds, media=media_feeds) environment.parse_retlevels() retlevel_ids = lookup_table(curs, "SELECT retention_level, retlevel_id FROM retlevels WHERE environment_id = %d" % (environment_id)) max_retlevel_id = one_value(curs, "SELECT MAX(retlevel_id) FROM retlevels") retlevel_count = 0 for retlevel in environment.retlevels: retlevel_count += 1 retention_level = retlevel.retention_level equivalent_days = retlevel.equivalent_days retention_seconds = retlevel.retention_seconds retention_period = retlevel.retention_period if retention_level not in retlevel_ids: max_retlevel_id += 1 retlevel_id = max_retlevel_id sql = """ INSERT INTO retlevels (retlevel_id, environment_id, retention_level, equivalent_days, retention_seconds, retention_period, added, updated) VALUES (%d, %d, %d, %d, %d, '%s', '%s', '%s') """ % (retlevel_id, environment_id, retention_level, equivalent_days, retention_seconds, retention_period, TIMESTAMP_ISO, TIMESTAMP_ISO) print sql curs.execute(sql) conn.commit() retlevel_ids[retention_level] = retlevel_id INFO('retlevel_count = %d' % (retlevel_count)) # # NetApp Filers # if metric == 'filers': INFO('Updating filers..') filers = CONFIG['NETAPP']['filers'] host_ids = lookup_table(curs, "SELECT name, host_id FROM hosts") max_host_id = one_value(curs, "SELECT MAX(host_id) FROM hosts") INFO('max_host_id = %d' % (max_host_id)) filer_ids = lookup_table(curs, "SELECT h.name, f.filer_id FROM filers AS f INNER JOIN hosts AS h ON h.host_id = f.host_id") max_filer_id = one_value(curs, "SELECT MAX(filer_id) FROM filers") INFO('max_filer_id = %d' % (max_filer_id)) for filer in filers: if filer not in host_ids: max_host_id += 1 host_id = max_host_id sql = """ INSERT INTO hosts (host_id, name, alias, address, description, active, added, updated) VALUES (%d, '%s', '%s', '%s', '%s', '%s', '%s', '%s') """ % (host_id, filer, filer, filer, '', 'Y', UTCTIMESTAMP_ISO, UTCTIMESTAMP_ISO) print sql curs.execute(sql) host_ids[filer] = host_id max_filer_id += 1 filer_id = max_filer_id sql = """ INSERT INTO filers (filer_id, host_id) VALUES (%d, %d) """ % (filer_id, host_id) print sql curs.execute(sql) filer_ids[filer] = filer_id conn.commit() volumes = ExtendedDict() if filer in SERVER_PATHS: INFO('+ %s' % (filer)) filer_id = filer_ids[filer] volume_ids = lookup_table(curs, "SELECT name, volume_id FROM filer_volumes WHERE filer_id = %d" % (filer_id)) max_volume_id = one_value(curs, "SELECT MAX(volume_id) FROM filer_volumes") updates = Updates(LOGGER, SERVER_PATHS[filer]) for record in ontap_df.stream(updates.feed('df_s'), format='df -s'): obj = ontap_df.parse(record) if obj is not None: filesystem = obj.filesystem used = obj.used saved = obj.saved pct_saved = obj.pct_saved volume_name = filesystem.split('/')[-1] volume_path = filesystem if volume_name not in volume_ids: max_volume_id += 1 volume_id = max_volume_id sql = """ INSERT INTO filer_volumes (volume_id, filer_id, name, path, description, active, added, updated) VALUES (%d, %d, '%s', '%s', '%s', '%s', '%s', '%s') """ % (volume_id, filer_id, volume_name, volume_path, '', 'Y', UTCTIMESTAMP_ISO, UTCTIMESTAMP_ISO) print sql curs.execute(sql) volume_ids[volume_name] = volume_id INFO(' df %s' % (filesystem)) volume_id = volume_ids[volume_name] if volume_name not in volumes: volumes[volume_name] = ExtendedDict() volumes[volume_name]['volume_id'] = volume_id volumes[volume_name]['name'] = volume_name volumes[volume_name]['path'] = volume_path volumes[volume_name]['kb_used'] = used volumes[volume_name]['kb_saved'] = saved volumes[volume_name]['pct_saved'] = pct_saved volumes[volume_name]['sis_schedule'] = '' volumes[volume_name]['sis_status'] = '' volumes[volume_name]['reallocation_schedule'] = '' volumes[volume_name]['reallocation_status'] = '' for record in sis_status_l.stream(updates.feed('sis_status_l'), format='sis status -l'): obj = sis_status_l.parse(record) if obj is not None: path = obj.path status = obj.status state = obj.state schedule = obj.schedule volume_name = path.split('/')[-1] volumes[volume_name]['sis_schedule'] = schedule volumes[volume_name]['sis_status'] = status INFO(' sis %s' % (path)) for record in reallocate_status_v.stream(updates.feed('reallocate_status_v'), format='reallocate status -v'): obj = reallocate_status_v.parse(record) if obj is not None: volume = obj.volume state = obj.state schedule = obj.schedule volume_name = volume.split('/')[-1] if volume_name in volumes: volumes[volume_name]['reallocation_schedule'] = schedule volumes[volume_name]['reallocation_status'] = status INFO(' reallocate %s' % (volume)) for volume in volumes: volume_id = volume.volume_id sis_status = volume.sis_status reallocation_status = volume.reallocation_status kb_used = volume.kb_used kb_saved = volume.kb_saved pct_saved = volume.pct_saved sql = """ INSERT INTO filer_volume_status (timestamp, volume_id, sis_status, reallocation_status, kb_used, kb_saved, pct_saved) VALUES ('%s', %d, '%s', '%s', %d, %d, %d) """ % (UTCTIMESTAMP_ISO, volume_id, sis_status, reallocation_status, kb_used, kb_saved, pct_saved) print sql curs.execute(sql) conn.commit() else: INFO('- %s' % (filer)) INFO('Done updating filers.') # # NetApp Filers Volume Tasks # if metric == 'filer_volume_tasks': INFO('Updating filer_volume_tasks..') volume_set = ExtendedDict() tasks = [] sis_tasks = ExtendedDict() reallocation_tasks = ExtendedDict() i = 0 curs.execute("SELECT * FROM filer_volume_status WHERE timestamp >= now() - INTERVAL 7 DAY") rows = curs.fetchmany() while rows: for timestamp, volume_id, sis_status, reallocation_status, kb_used, kb_saved, pct_saved in rows: i += 1 if i % 1000 == 0: print i if (volume_id not in volume_set) and (sis_status != 'Active') and (reallocation_status != 'Active'): volume_set[volume_id] = volume_id # # Start SIS Task # if (sis_status == 'Active') and (volume_id in volume_set) and (volume_id not in sis_tasks): sis_tasks[volume_id] = { 'start' : timestamp, 'end' : None } # # Start Reallocation Task # if (reallocation_status == 'Active') and (volume_id in volume_set) and (volume_id not in reallocation_tasks): reallocation_tasks[volume_id] = { 'start' : timestamp, 'end' : None } # # End SIS Task # if (sis_status != 'Active') and (volume_id in sis_tasks): sis_tasks[volume_id]['end'] = timestamp tasks.append( {'volume_id' : volume_id, 'start' : sis_tasks[volume_id]['start'], 'end' : sis_tasks[volume_id]['end'], 'type' : 'SIS' }) del sis_tasks[volume_id] # # End Reallocation Task # if (reallocation_status != 'Active') and (volume_id in reallocation_tasks): reallocation_tasks[volume_id]['end'] = timestamp tasks.append( {'volume_id' : volume_id, 'start' : reallocation_tasks[volume_id]['start'], 'end' : reallocation_tasks[volume_id]['end'], 'type' : 'REALLOC' }) del reallocation_tasks[volume_id] rows = curs.fetchmany() for task in tasks: volume_id = task['volume_id'] start = task['start'] end = task['end'] Type = task['type'] sql = "INSERT INTO filer_volume_tasks (volume_id, start, end, type) VALUES (%d, '%s', '%s', '%s')" % (volume_id, start.strftime('%Y-%m-%d %H:%M:%S'), end.strftime('%Y-%m-%d %H:%M:%S'), Type) print sql try: curs.execute(sql) conn.commit() except conn.IntegrityError: pass INFO('Done updating filers_volume_tasks.') # # daily statistics # if metric == 'daily_statistics': INFO('BACKUP_DAY = %s' % (PREV_BACKUP_DAY_ISO)) master_feeds = ['bpplclients', 'bppllist', 'bpstulist', 'bpdbjobs_most_columns', ] media_feeds = [] environment_ids = lookup_table(curs, "SELECT name, environment_id FROM environments") for environment in ENVIRONMENTS: environment_name = environment.name INFO('') INFO('environment........: %s' % (environment_name)) if environment_name not in environment_ids: WARN('Environment %s does not exist in the database!' % (environment_name)) continue environment_id = environment_ids[environment_name] environment.load_feeds(master=master_feeds, media=media_feeds) environment.parse_clients() environment.parse_policies() environment.parse_stunits() environment.parse_jobs() windows = re.compile('^.*Windows.*$') linux = re.compile('^.*Linux.*$|^.*RedHat.*$') solaris = re.compile('^.*Solaris.*$') aix = re.compile('^.*AIX.*$') osf = re.compile('^.*OSF1.*$') bsd = re.compile('^.*BSD.*$') openvms = re.compile('^.*OpenVMS.*$') added_policies = one_column(curs, "SELECT name FROM policies WHERE environment_id = %d" % (environment_id)) added_clients = one_column(curs, "SELECT name FROM clients WHERE environment_id = %d" % (environment_id)) max_policy_id = one_value(curs, "SELECT MAX(policy_id) FROM policies") max_client_id = one_value(curs, "SELECT MAX(client_id) FROM clients") active_policy_ids = lookup_table(curs, "SELECT name, policy_id FROM policies WHERE environment_id = %d AND active = 'Y'" % (environment_id)) global_policy_ids = lookup_table(curs, "SELECT CONCAT(name, LOWER(type)), policy_id FROM policies WHERE environment_id = %d" % (environment_id)) client_ids = lookup_table(curs, "SELECT name, client_id FROM clients WHERE environment_id = %d" % (environment_id)) retlevel_ids = lookup_table(curs, "SELECT retention_level, retlevel_id FROM retlevels WHERE environment_id = %d" % (environment_id)) image_statistics = ExtendedDict() client_count = 0 windows_count = 0 linux_count = 0 solaris_count = 0 aix_count = 0 osf_count = 0 bsd_count = 0 ndmp_count = 0 openvms_count = 0 other_count = 0 seen_hw = ExtendedDict() seen_os = ExtendedDict() other_hw = ExtendedDict() other_os = ExtendedDict() bpplclients_stream = bpplclients.stream(environment.updates[environment.master].fopen('bpplclients_daily', version=1)) for record in bpplclients_stream: client = bpplclients.parse(record) client_count += 1 seen_hw[client.hw] = True seen_os[client.os] = True if windows.match(client.os): windows_count += 1 elif linux.match(client.os) or linux.match(client.hw): linux_count += 1 elif solaris.match(client.os) or solaris.match(client.hw): solaris_count += 1 elif client.hw == 'NDMP' or client.os == 'NDMP': ndmp_count += 1 elif openvms.match(client.os) or openvms.match(client.hw): openvms_count += 1 elif aix.match(client.os): aix_count += 1 elif osf.match(client.os): osf_count += 1 elif bsd.match(client.os): bsd_count += 1 else: other_count += 1 other_hw[client.hw] = True other_os[client.os] = True INFO('client_count.......: %d' % (client_count)) INFO('windows_count......: %d' % (windows_count)) INFO('linux_count........: %d' % (linux_count)) INFO('solaris_count......: %d' % (solaris_count)) INFO('aix_count..........: %d' % (aix_count)) INFO('osf_count..........: %d' % (osf_count)) INFO('bsd_count..........: %d' % (bsd_count)) INFO('ndmp_count.........: %d' % (ndmp_count)) INFO('openvms_count......: %d' % (openvms_count)) INFO('other_count........: %d' % (other_count)) #INFO('seen_hw............: %s' % (', '.join(seen_hw.keys()))) #INFO('seen_os............: %s' % (', '.join(seen_os.keys()))) INFO('other_hw...........: %s' % (', '.join(other_hw.keys()))) INFO('other_os...........: %s' % (', '.join(other_os.keys()))) policy_count = 0 bppllist_stream = bppllist.stream(environment.updates[environment.master].fopen('bppllist_daily', version=1)) for record in bppllist_stream: policy = bppllist.parse(record) policy_count += 1 policy_name = policy.policy_name INFO('policy_count.......: %d' % (policy_count)) stunit_ids = lookup_table(curs, "SELECT name, stunit_id FROM stunits WHERE environment_id = %d" % (environment_id)) stunit_count = 0 bpstulist_stream = bpstulist.stream(environment.updates[environment.master].fopen('bpstulist', version=1)) for record in bpstulist_stream: stunit = bpstulist.parse(record) stunit_count += 1 INFO('stunit_count.......: %d' % (stunit_count)) # # process db size # netbackup_db_du_k_stream = du.stream(environment.updates[environment.master].fopen('netbackup_db_du_k', version=1)) db_size = -1 for record in netbackup_db_du_k_stream: dir = du.parse(record, tz=environment.tz) if dir.path == '/opt/openv/netbackup/db': db_size = dir.kbytes INFO('db_size............: %s' % (pp_kbytes(db_size, format='%.2f %s'))) # # process log size # netbackup_logs_du_k_stream = du.stream(environment.updates[environment.master].fopen('netbackup_logs_du_k', version=1)) log_size = -1 for record in netbackup_logs_du_k_stream: dir = du.parse(record, tz=environment.tz) if dir.path == '/opt/openv/netbackup/logs': log_size = dir.kbytes INFO('log_size...........: %s' % (pp_kbytes(log_size, format='%.2f %s'))) # # process jobs # job_count = 0 bpdbjobs_stream = bpdbjobs.stream(environment.updates[environment.master].fopen('bpdbjobs_most_columns_daily', version=1)) for job in environment.jobs: job_count += 1 INFO('job_count..........: %d' % (job_count)) # # process image list # bpimagelist_stream = bpimagelist.stream(environment.updates[environment.master].fopen('bpimagelist', version=1)) image_count = 0 copy_count = 0 fragment_count = 0 file_count = 0 image_data = 0 copy_data = 0 fragment_data = 0 full_data = 0 incr_data = 0 cinc_data = 0 ubak_data = 0 uarc_data = 0 compressed_data = 0 tir_data = 0 for record in bpimagelist_stream: image_count += 1 image = bpimagelist.parse(record, tz=environment.tz) if image_count % 1000 == 0: DEBUG('%d images processed..' % (image_count)) retlevel = image.retention_level_id policy = image.policy policy_name = image.policy policy_type = image.policy_type policy_key = '%s%s' % (policy_name, policy_type.lower().replace('_','-')) if policy_key not in global_policy_ids: max_policy_id += 1 policy_id = max_policy_id source = 'bpimagelist' active = 'N' sql = """ INSERT INTO policies (policy_id, environment_id, name, type, description, comment, source, commissioned, active, added, updated, seen) VALUES (%d, %d, '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s') """ % (policy_id, environment_id, policy_name, policy_type, '', '', source, BOT_ISO, active, TIMESTAMP_ISO, TIMESTAMP_ISO, TIMESTAMP_ISO) print sql curs.execute(sql) conn.commit() global_policy_ids[policy_key] = policy_id client = image.client client_name = image.client if client not in client_ids: max_client_id += 1 client_id = max_client_id source = 'bpimagelist' active = 'N' sql = """ INSERT INTO clients (client_id, environment_id, name, description, comment, source, commissioned, active, added, updated, seen) VALUES (%d, %d, '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s') """ % (client_id, environment_id, client_name, '', '', source, BOT_ISO, active, TIMESTAMP_ISO, TIMESTAMP_ISO, TIMESTAMP_ISO) print sql curs.execute(sql) conn.commit() client_ids[client] = client_id if policy_key not in image_statistics: image_statistics[policy_key] = ExtendedDict() if client not in image_statistics[policy_key]: image_statistics[policy_key][client] = ExtendedDict() retlevel = image.retention_level_id if retlevel not in image_statistics[policy_key][client]: image_statistics[policy_key][client][retlevel] = ExtendedDict() image_statistics[policy_key][client][retlevel]['image_count'] = 0 image_statistics[policy_key][client][retlevel]['copy_count'] = 0 image_statistics[policy_key][client][retlevel]['fragment_count'] = 0 image_statistics[policy_key][client][retlevel]['file_count'] = 0 image_statistics[policy_key][client][retlevel]['image_data'] = 0 image_statistics[policy_key][client][retlevel]['copy_data'] = 0 image_statistics[policy_key][client][retlevel]['fragment_data'] = 0 image_statistics[policy_key][client][retlevel]['full_data'] = 0 image_statistics[policy_key][client][retlevel]['incr_data'] = 0 image_statistics[policy_key][client][retlevel]['cinc_data'] = 0 image_statistics[policy_key][client][retlevel]['ubak_data'] = 0 image_statistics[policy_key][client][retlevel]['uarc_data'] = 0 image_statistics[policy_key][client][retlevel]['compressed_data'] = 0 schedule_type = image.schedule_type number_of_copies = image.number_of_copies number_of_fragments = image.number_of_fragments number_of_files = image.number_of_files kilobytes = image.kilobytes copy_count += number_of_copies fragment_count += number_of_fragments file_count += number_of_files image_data += kilobytes copy_data += kilobytes * number_of_copies image_statistics[policy_key][client][retlevel]['image_count'] += 1 image_statistics[policy_key][client][retlevel]['copy_count'] += number_of_copies image_statistics[policy_key][client][retlevel]['fragment_count'] += number_of_fragments image_statistics[policy_key][client][retlevel]['copy_count'] += number_of_copies image_statistics[policy_key][client][retlevel]['file_count'] += number_of_files image_statistics[policy_key][client][retlevel]['image_data'] += kilobytes image_statistics[policy_key][client][retlevel]['copy_data'] += kilobytes * number_of_copies copy_retlevels = ExtendedDict() for copy in image.copies: copy_number = copy.copy_number for fragment in copy.fragments: fragment_data += fragment.kilobytes if fragment.tir: tir_data += fragment.kilobytes continue if hasattr(fragment, 'retention_lvl_id'): copy_retlevels[copy_number] = fragment.retention_lvl_id if hasattr(fragment, 'expiration_time'): expiration_time = fragment.expiration_time if type(expiration_time) is datetime.datetime: pass copy_retlevel = copy_retlevels[copy_number] if copy_retlevel not in image_statistics[policy_key][client]: image_statistics[policy_key][client][copy_retlevel] = ExtendedDict() image_statistics[policy_key][client][copy_retlevel]['image_count'] = 0 image_statistics[policy_key][client][copy_retlevel]['copy_count'] = 0 image_statistics[policy_key][client][copy_retlevel]['fragment_count'] = 0 image_statistics[policy_key][client][copy_retlevel]['file_count'] = 0 image_statistics[policy_key][client][copy_retlevel]['image_data'] = 0 image_statistics[policy_key][client][copy_retlevel]['copy_data'] = 0 image_statistics[policy_key][client][copy_retlevel]['fragment_data'] = 0 image_statistics[policy_key][client][copy_retlevel]['full_data'] = 0 image_statistics[policy_key][client][copy_retlevel]['incr_data'] = 0 image_statistics[policy_key][client][copy_retlevel]['cinc_data'] = 0 image_statistics[policy_key][client][copy_retlevel]['ubak_data'] = 0 image_statistics[policy_key][client][copy_retlevel]['uarc_data'] = 0 image_statistics[policy_key][client][copy_retlevel]['compressed_data'] = 0 image_statistics[policy_key][client][copy_retlevel]['fragment_data'] += fragment.kilobytes if schedule_type == 'FULL': image_statistics[policy_key][client][copy_retlevel]['full_data'] += fragment.kilobytes elif schedule_type == 'INCR': image_statistics[policy_key][client][copy_retlevel]['incr_data'] += fragment.kilobytes elif schedule_type == 'CINC': image_statistics[policy_key][client][copy_retlevel]['cinc_data'] += fragment.kilobytes elif schedule_type == 'UBAK': image_statistics[policy_key][client][copy_retlevel]['ubak_data'] += fragment.kilobytes elif schedule_type == 'UARC': image_statistics[policy_key][client][copy_retlevel]['uarc_data'] += fragment.kilobytes if image.compressed == 'yes': image_statistics[policy_key][client][copy_retlevel]['compressed_data'] += fragment.kilobytes if schedule_type == 'FULL': full_data += kilobytes * number_of_copies elif schedule_type == 'INCR': incr_data += kilobytes * number_of_copies elif schedule_type == 'CINC': cinc_data += kilobytes * number_of_copies elif schedule_type == 'UBAK': ubak_data += kilobytes * number_of_copies elif schedule_type == 'UARC': uarc_data += kilobytes * number_of_copies if image.compressed == 'yes': compressed_data += kilobytes * number_of_copies environment.updates[environment.master].fclose('bpimagelist') INFO('image_count........: %d' % (image_count)) INFO('copy_count.........: %d' % (copy_count)) INFO('fragment_count.....: %d' % (fragment_count)) INFO('file_count.........: %d' % (file_count)) INFO('image_data.........: %s' % (pp_kbytes(image_data, format='%.2f %s'))) INFO('copy_data..........: %s' % (pp_kbytes(copy_data, format='%.2f %s'))) INFO('fragment_data......: %s' % (pp_kbytes(fragment_data, format='%.2f %s'))) INFO('full_data..........: %s' % (pp_kbytes(full_data, format='%.2f %s'))) INFO('incr_data..........: %s' % (pp_kbytes(incr_data, format='%.2f %s'))) INFO('cinc_data..........: %s' % (pp_kbytes(cinc_data, format='%.2f %s'))) INFO('ubak_data..........: %s' % (pp_kbytes(ubak_data, format='%.2f %s'))) INFO('uarc_data..........: %s' % (pp_kbytes(uarc_data, format='%.2f %s'))) INFO('compressed_data....: %s' % (pp_kbytes(compressed_data, format='%.2f %s'))) INFO('tir_data...........: %s' % (pp_kbytes(tir_data, format='%.2f %s'))) sql = "DELETE FROM environment_statistics_daily WHERE environment_id = %d AND day = '%s'" % (environment_id, PREV_BACKUP_DAY_ISO) print sql curs.execute(sql) sql = """ INSERT INTO environment_statistics_daily ( day, environment_id, client_count, windows_count, linux_count, solaris_count, aix_count, osf_count, bsd_count, ndmp_count, openvms_count, other_count, policy_count, stunit_count, db_size, log_size, image_count, copy_count, fragment_count, file_count, image_data, copy_data, fragment_data, full_data, incr_data, cinc_data, ubak_data, uarc_data, compressed_data, tir_data, added ) VALUES ( '%s', %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, '%s' ) """ % ( PREV_BACKUP_DAY_ISO, environment_id, client_count, windows_count, linux_count, solaris_count, aix_count, osf_count, bsd_count, ndmp_count, openvms_count, other_count, policy_count, stunit_count, db_size, log_size, image_count, copy_count, fragment_count, file_count, image_data, copy_data, fragment_data, full_data, incr_data, cinc_data, ubak_data, uarc_data, compressed_data, tir_data, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') ) #print sql curs.execute(sql) conn.commit() sql = "DELETE FROM image_statistics_daily WHERE environment_id = %d AND day = '%s'" % (environment_id, PREV_BACKUP_DAY_ISO) print sql curs.execute(sql) for policy_key in image_statistics.keys(): policy_id = global_policy_ids[policy_key] for client in image_statistics[policy_key].keys(): client_id = client_ids[client] for retlevel in image_statistics[policy_key][client].keys(): retlevel_id = retlevel_ids[retlevel] sql = """ INSERT INTO image_statistics_daily ( day, environment_id, policy_id, client_id, retlevel_id, image_count, copy_count, fragment_count, file_count, image_data, copy_data, fragment_data, full_data, incr_data, cinc_data, ubak_data, uarc_data, compressed_data, added ) VALUES ( '%s', %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, '%s' ) """ % ( PREV_BACKUP_DAY_ISO, environment_id, policy_id, client_id, retlevel_id, image_statistics[policy_key][client][retlevel]['image_count'], image_statistics[policy_key][client][retlevel]['copy_count'], image_statistics[policy_key][client][retlevel]['fragment_count'], image_statistics[policy_key][client][retlevel]['file_count'], image_statistics[policy_key][client][retlevel]['image_data'], image_statistics[policy_key][client][retlevel]['copy_data'], image_statistics[policy_key][client][retlevel]['fragment_data'], image_statistics[policy_key][client][retlevel]['full_data'], image_statistics[policy_key][client][retlevel]['incr_data'], image_statistics[policy_key][client][retlevel]['cinc_data'], image_statistics[policy_key][client][retlevel]['ubak_data'], image_statistics[policy_key][client][retlevel]['uarc_data'], image_statistics[policy_key][client][retlevel]['compressed_data'], datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') ) #print sql curs.execute(sql) conn.commit() # # unknown metric # else: pass #WARN('Unknown metric %s!' % (metric))