"""Parsers for Veritas NetBackup ``bpdbjobs`` command output.

``bpdbjobs -report -all_columns`` emits one job record per line; each record
is a comma-separated, backslash-escaped row.

NOTE(review): this is Python 2 code (``long``, package-relative star
imports).  Names such as ``NewLineStream``, ``ExtendedDict``, ``re_integer``
and ``ParseError`` are supplied by the ``from ... import *`` lines below --
confirm against the sibling modules.
"""

import csv
import sys
import datetime
import re

from ....builtins import *
from ...exceptions import *
from ...expressions import *
from ...streams import *
from ....dependencies.odict import OrderedDict

# Increase csv field size limit; default is 131072 which can choke on some NBU jobs.
csv.field_size_limit(2147483647)


##
## bpdbjobs has one record per-line
##
def stream(stream):
    """Wrap *stream* so that iteration yields one bpdbjobs record per line."""
    return NewLineStream(stream)


##
## NBU Job Types
##
job_types = {
    '0'  : 'backup',
    '1'  : 'archive',
    '2'  : 'restore',
    '3'  : 'verify',
    '4'  : 'duplication',       # Changed to 'duplicate' in NBU 6.5
    '5'  : 'import',
    '6'  : 'dbbackup',          # Changed to 'catalog backup' in NBU 6.5
    '7'  : 'vault',
    '8'  : 'label',
    '9'  : 'erase',
    '10' : 'tpreq',             # Changed to 'tape request' in NBU 6.5
    '11' : 'tpclean',           # Changed to 'clean' in NBU 6.5
    '12' : 'tpformat',          # Changed to 'format tape' in NBU 6.5
    '13' : 'vmphyinv',          # Changed to 'physical inventory' in NBU 6.5
    '14' : 'dqts',              # Changed to 'qualification' in NBU 6.5
    '15' : 'dbrecover',         # Not documented in NBU 6.5
    '16' : 'mcontents',         # Not documented in NBU 6.5
    '17' : 'image_cleanup',     # DSSU image cleanup
}

##
## NBU Job States
##
job_states = {
    '0' : 'queued',
    '1' : 'active',
    '2' : 'wait for retry',
    '3' : 'done',
    '4' : 'suspended',
    '5' : 'incomplete',
    '6' : '6',                  # meaning unknown; passed through verbatim
    '7' : '7',                  # meaning unknown; passed through verbatim
}

##
## NBU Schedule Types
##
schedule_types = {
    '0' : 'FULL',
    '1' : 'INCR',
    '2' : 'UBAK',
    '3' : 'UARC',
    '4' : 'CINC',
}

##
## NBU Job Subtypes
##
subtypes = {
    '0' : 'immediate',
    '1' : 'scheduled',
    '2' : 'user-initiated',
    '3' : 'quick erase',
    '4' : 'long erase',
}

##
## NBU Retention Units
##
retention_units = {
    '0' : 'Unknown',
    '1' : 'Days',
}

##
## NBU Class / Policy Types
##
## http://seer.entsupport.symantec.com/docs/261264.htm
##
class_types = {
    '0'  : 'Standard',
    '1'  : 'Proxy',                     # NetBackup internal setting
    '2'  : 'Non-Standard',              # NetBackup internal setting
    '3'  : 'Apollo-wbak',               # NetBackup DataCenter only
    '4'  : 'Oracle',
    '5'  : 'Any policy type',
    '6'  : 'Informix-On-BAR',
    '7'  : 'Sybase',
    '8'  : 'MS-Sharepoint',             # NetBackup Server / Enterprise Server only
    '9'  : 'MS-Windows',                # Not active, use MS-Windows-NT (13) for Windows 2000/NT/XP/2003 clients.
    '10' : 'NetWare',
    '11' : 'DataTools-SQL-BackTrack',
    '12' : 'Auspex-FastBackup',
    '13' : 'MS-Windows-NT',
    '14' : 'OS/2',
    '15' : 'MS-SQL-Server',
    '16' : 'MS-Exchange-Server',
    '17' : 'SAP',                       # NetBackup Server / Enterprise Server only
    '18' : 'DB2',                       # NetBackup Server / Enterprise Server only
    '19' : 'NDMP',
    '20' : 'FlashBackup',               # NetBackup Server / Enterprise Server only
    '21' : 'Split-Mirror',              # NetBackup Server / Enterprise Server only
    '22' : 'AFS',                       # NetBackup Server / Enterprise Server only
    '23' : 'DFS',                       # Not an active policy type.
    '24' : 'DataStore',
    '25' : 'Lotus-Notes',
    '26' : 'NCR-Teradata',              # No longer supported, contact NCR to see if they can provide an extension.
    '27' : 'OpenVMS',
    '28' : 'MPE/iX',
    '29' : 'FlashBackup-Windows',       # NetBackup Server / Enterprise Server only
    '30' : 'Vault',
    '31' : 'BE-MS-SQL-Server',
    '32' : 'BE-MS-Exchange-Server',
    '33' : 'Macintosh',                 # Not active, use Standard (0) for Macintosh OSX clients.
    '34' : 'Disk Staging',              # NetBackup Server / Enterprise Server only
    '35' : 'NBU-Catalog',               # NetBackup 6.0 only
}
policy_types = class_types              # 'class' was renamed 'policy' in newer NBU releases

##
## Operation Types
##
operation_types = {
    '0'  : 'mount',
    '1'  : 'position',
    '2'  : 'connect',
    '3'  : 'write',
    '4'  : 'vault initialize',
    '5'  : 'vault duplication',
    '6'  : 'vault duplication complete',
    '7'  : 'vault catalog backup',
    '8'  : 'vault eject',
    '9'  : '9',                         # meaning unknown; passed through verbatim
    '10' : 'report',
    '11' : 'duplicate',
    '12' : 'import',
    '13' : 'verify',
    '14' : 'restore',
    '15' : 'catalog-backup',
    '16' : 'vault',
    '17' : 'label',
    '18' : 'erase',
}


##
## Parse a bpdbjobs record
##
## bpdbjobs -report -all_columns
##
def parse(record, format='bpdbjobs -report -all_columns', version=None, tz=None):
    """Parse one ``bpdbjobs`` record line into an ExtendedDict.

    record  -- one line of bpdbjobs output (comma-separated, backslash-escaped)
    format  -- the bpdbjobs invocation that produced the record; only the
               '-all_columns' / '-most_columns' layouts are supported
    version -- accepted for interface compatibility; currently unused
    tz      -- optional tzinfo passed to datetime.datetime.fromtimestamp()
               for the started/ended fields

    Returns an ExtendedDict with fixed leading columns, a variable-length
    file list, a variable-length list of tries (each an ExtendedDict), and
    the trailing columns.  Raises ParseError for an unknown *format*.
    """
    if format in ['bpdbjobs -report -all_columns', 'bpdbjobs -report -most_columns']:
        record = record.replace('\r', '')   # compensate for embedded carriage-returns
        # next(reader) rather than reader.next(): identical on Python 2.6+,
        # and not a syntax/attribute error on Python 3.
        record = next(csv.reader([record], escapechar='\\'))
        i = 0
        job = ExtendedDict()

        #
        # Leading Columns (fixed positions 0-30)
        #
        job['jobid'] = int(record[0]) if re_integer.match(record[0]) else None
        job['jobtype'] = job_types[record[1]] if record[1] in job_types else None
        # Convenience booleans derived from jobtype/state.
        job.backup = job.jobtype == 'backup'
        job.duplication = job.jobtype == 'duplication'
        job['state'] = job_states[record[2]] if record[2] in job_states else None
        job.active = job.state == 'active'
        job.queued = job.state == 'queued'
        job.done = job.state == 'done'
        job['status'] = int(record[3]) if re_integer.match(record[3]) else None
        job['policy'] = record[4] if record[4] else None        # historically named 'class'
        job['schedule'] = record[5] if record[5] else None
        job['client'] = record[6] if record[6] else None
        job['server'] = record[7] if record[7] else None
        job['started'] = datetime.datetime.fromtimestamp(int(record[8]), tz) if re_integer.match(record[8]) else None
        job['elapsed'] = int(record[9]) if re_integer.match(record[9]) else None
        job['ended'] = datetime.datetime.fromtimestamp(int(record[10]), tz) if re_integer.match(record[10]) else None
        job['stunit'] = record[11] if record[11] else None
        job['try'] = int(record[12]) if re_integer.match(record[12]) else None
        job['operation'] = operation_types[record[13]] if record[13] in operation_types else None
        job['kbytes'] = long(record[14]) if re_integer.match(record[14]) else None
        job['files'] = long(record[15]) if re_integer.match(record[15]) else None
        job['pathlastwritten'] = record[16] if record[16] else None
        job['percent'] = int(record[17]) if re_integer.match(record[17]) else None
        job['jobpid'] = int(record[18]) if re_integer.match(record[18]) else None
        job['owner'] = record[19] if record[19] else None
        job['subtype'] = subtypes[record[20]] if record[20] in subtypes else None
        job['policytype'] = policy_types[record[21]] if record[21] in policy_types else None    # historically named 'classtype'
        job['schedule_type'] = schedule_types[record[22]] if record[22] in schedule_types else None
        job['priority'] = record[23] if record[23] else None
        job['group'] = record[24] if record[24] else None
        job['masterserver'] = record[25] if record[25] else None
        job['retentionunits'] = retention_units[record[26]] if record[26] in retention_units else None
        job['retentionperiod'] = record[27] if record[27] else None
        job['compression'] = record[28] if record[28] else None
        job['kbyteslastwritten'] = long(record[29]) if re_integer.match(record[29]) else None
        job['fileslastwritten'] = long(record[30]) if re_integer.match(record[30]) else None

        #
        # Files: record[31] is the count, followed by that many path fields.
        # After this section i points at the trycount field.
        #
        filelistcount = int(record[31]) if re_integer.match(record[31]) else None
        filelist = []
        i = 32
        if filelistcount is not None and filelistcount > 0:
            for i in range(i, i + filelistcount):
                path = record[i]        # renamed from 'file' to avoid shadowing the builtin
                if path:
                    filelist.append(path)
            i += 1
        job['filelistcount'] = filelistcount
        job['filelist'] = filelist

        #
        # Tries: record[i] is the count; each try is 10 fixed fields, a
        # variable number of status lines, then kbytes and fileswritten.
        #
        trycount = int(record[i]) if record[i] else None
        tries = []
        if trycount:
            for t in range(trycount):
                attempt = ExtendedDict()
                attempt['pid'] = record[i+1] if record[i+1] else None
                attempt['stunit'] = record[i+2] if record[i+2] else None
                attempt['server'] = record[i+3] if record[i+3] else None
                attempt['started'] = datetime.datetime.fromtimestamp(int(record[i+4]), tz) if re_integer.match(record[i+4]) else None
                attempt['elapsed'] = int(record[i+5]) if re_integer.match(record[i+5]) else None
                attempt['ended'] = datetime.datetime.fromtimestamp(int(record[i+6]), tz) if re_integer.match(record[i+6]) else None
                attempt['status'] = int(record[i+7]) if re_integer.match(record[i+7]) else None
                attempt['statusdescription'] = record[i+8] if record[i+8] else None
                trystatuscount = int(record[i+9]) if re_integer.match(record[i+9]) else None
                trystatuslines = []
                i += 10
                #
                # Process status lines if there are any
                #
                if trystatuscount is not None and trystatuscount > 0:
                    for i in range(i, i + trystatuscount):
                        trystatuslines.append(record[i])
                attempt['statuscount'] = trystatuscount
                attempt['statuslines'] = trystatuslines
                # NOTE(review): when there are zero status lines, i has already
                # been advanced by 10, so i+1/i+2 here may read one field past
                # kbytes -- verify against the real bpdbjobs layout for tries
                # with no status lines before changing anything.
                attempt['kbytes'] = long(record[i+1]) if re_integer.match(record[i+1]) else None
                attempt['fileswritten'] = long(record[i+2]) if re_integer.match(record[i+2]) else None
                tries.append(attempt)
                #
                # Only increment by 1 if there were no status lines
                #
                if trystatuscount is not None and trystatuscount > 0:
                    i += 2
                else:
                    i += 1
        job['trycount'] = trycount
        job['tries'] = tries

        #
        # Trailing Columns (relative to the final value of i)
        #
        job['parentjob'] = record[i+1] if record[i+1] else None
        job['kbpersec'] = record[i+2] if record[i+2] else None
        job['copy'] = record[i+3] if record[i+3] else None
        job['robot'] = record[i+4] if record[i+4] else None
        job['vault'] = record[i+5] if record[i+5] else None
        job['profile'] = record[i+6] if record[i+6] else None
        job['session'] = record[i+7] if record[i+7] else None
        job['ejecttapes'] = record[i+8] if record[i+8] else None
        job['srcstunit'] = record[i+9] if record[i+9] else None
        job['srcserver'] = record[i+10] if record[i+10] else None
        job['srcmedia'] = record[i+11] if record[i+11] else None
        job['dstmedia'] = record[i+12] if record[i+12] else None
        job['stream'] = record[i+13] if record[i+13] else None
        job['suspendable'] = record[i+14] if record[i+14] else None
        job['resumable'] = record[i+15] if record[i+15] else None
        job['restartable'] = record[i+16] if record[i+16] else None
        job['datamovement'] = record[i+17] if record[i+17] else None
        job['frozenimage'] = record[i+18] if record[i+18] else None
        job['backupid'] = record[i+19] if record[i+19] else None
        job['killable'] = record[i+20] if record[i+20] else None
        job['controllinghost'] = record[i+21] if record[i+21] else None

        return job
    else:
        # Instance-call raise form: identical behavior on Python 2, and not a
        # syntax error on Python 3 (unlike `raise ParseError, msg`).
        raise ParseError('Unknown format %s' % (format))