#!/usr/bin/env python3

"""Low level database options.

Functions:

1) Show information on available database connections
2) Database console access
3) Execute SQL statements from the command line
4) Execute SQL statements from a file
5) Dump/restore tables to CSV file(s)
6) Generate basic user table statistics report

"""
  15
import os
import sys
import csv
import tty
import logging
import termios
import pkg_resources
from datetime import datetime
from collections import defaultdict
from collections import Counter
from operator import itemgetter
from fnmatch import fnmatch

from chart.common.path import Path
from chart.common.ip import is_ip_address
from chart.common.ip import lookup_ip
from chart.common.ip import ping
from chart.common.ip import reverse_ip
from chart.common.ip import UnknownServer
from chart.common.prettyprint import Table
from chart.db.model.table import TableInfo
from chart.db.model.table import TableType
from chart.common.util import creatable
from chart.db import ts
from chart.db.connection import db_connect
from chart.db.exceptions import ConnectionError
from chart.db.exceptions import DatabaseError
from chart.db.settings import DatabaseEngine
from chart.project import settings
from chart.project import SID
from chart.common.traits import Datatype
from chart.db.model.table import TableStorage
from chart.db.exceptions import DuplicateDataError
from chart.db.exceptions import IntegrityError
from chart.common.xml import datetime_to_xml
from chart.common.xml import xml_to_datetime
from chart.common.traits import name_of_thing
from chart.common.args import ArgumentParser
from chart import settings
  59
  60logger = logging.getLogger()
  61
  62
def keyboard_confirm(message='Confirm?',
                     cancel_message='Cancelled'):
    """Let the user confirm an action by pressing 'y' (or '1').

    Any other key cancels the action."""

    old_attrs = termios.tcgetattr(sys.stdin)
    print('{message} (n) '.format(message=message), end='')
    sys.stdout.flush()
    tty.setraw(sys.stdin)
    res = sys.stdin.read(1)
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_attrs)
    print(res)
    if len(res) > 0 and (res[0] == '1' or res[0].lower() == 'y'):
        return True

    else:
        print(cancel_message)
        return False
  82
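# Illustrative usage (not from the original code): keyboard_confirm() is meant to
# gate a destructive action on a single key press, e.g.
#
#     if keyboard_confirm('Really delete all rows?'):
#         db_connect().execute('DELETE FROM example_table')  # hypothetical table name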
  83
  84# def dump(target, table, sid, start_time, stop_time):  # (unused arg) pylint:disable=W0613
  85#     """Write all values from `table` for `sid` between `start_time` and `stop_time`
  86#     to handle `target` as a CSV file.
  87#     Should handle non-TS tables
  88#     The first line gives the field names.
  89#     """
  90
  91#     logger.info('Dump {table} to {target} for {sid} from {start} to {stop}'.format(
  92#         table=table, target=target, sid=sid, start=start_time, stop=stop_time))
  93
  94#     # this is weird we do it earlier but it gets reset to 10 somewhere
  95#     logger.setLevel(logging.CRITICAL)
  96#     # it might be nice to direct logging to stderr instead but I don't want to plough through
  97#     # the morass of the logging module internals
  98
  99#     # basically a comment
 100#     logger.info('Dump table {t} source {s} from {start} to {stop}'.format(
 101#             t=table, s=sid, start=start_time, stop=stop_time))
 102
 103#     if isinstance(target, Path):
 104#         handle = target.open('w')
 105
 106#     else:
 107#         handle = target
 108
 109#     # pull the fields list from db meta
 110#     # using xml definitions would be better because of column ordering and excluding removed
 111#     # columns
 112#     # no support for non-oracle engines
 113
 114#     # handle.write(table + '\n')
 115#     csv_writer = csv.writer(handle, dialect='excel')
 116
 117#     db_conn = db_connect()  # must work with stats table)
 118
 119#     conds = [sid.sql_where()]
 120#     binds = {}
 121#     if start_time is not None:
 122#         conds.append('SENSING_TIME>=:start_time')
 123#         binds['start_time'] = start_time
 124
 129#     if stop_time is not None:
 130#         conds.append('SENSING_TIME<:stop_time')
 131#         binds['stop_time'] = stop_time
 132
 133#     sql = 'SELECT * FROM {table} WHERE {conds} ORDER BY sensing_time'.format(
 134#         table=table, conds=' AND '.join(conds))
 135#     cur = db_conn.query(sql, binds)
 136#     # print(cur.description)
 137#     # return
 138
 139#     meta = ['table:{table}'.format(table=table.upper()), 'sid:{sid}'.format(sid=sid)]
 140#     if start_time is not None:
 141#         meta.append('start:{start}'.format(start=datetime_to_xml(start_time)))
 142
 143#     if stop_time is not None:
 144#         meta.append('stop:{stop}'.format(stop=datetime_to_xml(stop_time)))
 145
 146#     csv_writer.writerow([' '.join(meta)])
 147
 148#     csv_writer.writerow([r[0] for r in cur.description if r[0] != 'DURATION'])
 149
 150#     import cx_Oracle
 151#     for row in cur:
 152#         out = []
 153#         for desc, value in zip(cur.description, row):
 154#             if desc[1] is cx_Oracle.STRING:
 155#                 out.append("'{v}'".format(v=value))
 156
 157#             elif desc[1] is cx_Oracle.TIMESTAMP:
 158#                 out.append("timestamp'{v}'".format(v=value))
 159
 160#             elif desc[1] is cx_Oracle.INTERVAL:
 161#                 # DURATION in stats tables
 162#                 # out.append('NULL')
 163#                 # we strip out DURATION values from stats tables because a) they are not needed
 164#                 # and b) Oracle weirdness
 165#                 # -- actually Oracle weirdness might just be DURATION doesn't exist in DEV database
 166#                 pass
 167
 168#             elif value is None:
 169#                 out.append('NULL')
 170
 171#             else:
 172#                 out.append(value)
 173
 174#         if cur.rowcount % 500 == 0:
 175#             print('Processed {cc} rows'.format(cc=cur.rowcount), file=sys.stderr)
 176
 177#         csv_writer.writerow(out)
 178
 179#     logger.info('Written {rows} rows to {f}'.format(rows=cur.rowcount,
 180                                                     # f=str(target)))
 181
 182
 183def restore(table_name, path):
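    """Restore rows into `table_name` from the CSV file at `path`.

    If `table_name` is None the table name is taken from the file stem.
    The first row of the CSV file must give the column names; the remaining
    rows are inserted one at a time.
    """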
 184    if table_name is None:
 185        table_name = path.stem
 186
 187    table_name = name_of_thing(table_name)
 188
 189    db_conn = db_connect()
 190
 191    db_table = None
 192    for t in db_conn.all_tables():
 193        if table_name.lower() == t.name.lower():
 194            db_table = t
 195
 196    if db_table is None:
 197        raise ValueError('Could not find table {t} in database'.format(t=table_name))
 198
 199    db_conn.read_table(db_table)
 200    # for f in db_table.fields:
 201        # print(f)
 202
 203    logger.info('Restoring to {t}'.format(t=table_name))
    c = 0
 205    ins_cur = None
 206    fieldnames = None
 207    for line in csv.reader(path.open('r'), dialect='excel'):
 208        # print(line)
 209        if ins_cur is None:
 210            fieldnames = line
 211            sql = 'INSERT INTO {table} ({fields}) VALUES ({values})'.format(
 212                table=db_table.name,
 213                fields=', '.join(fieldnames),
 214                values=', '.join(':{i}'.format(i=i) for i in range(1, len(fieldnames) + 1)))
 215            # print(sql)
 216            ins_cur = db_conn.prepared_cursor(sql)
 217            continue
 218
 219        # data = [db_conn.imp().str_to_ins(db_table.fields[f], i) for f, i in zip(fieldnames, line)]
 220        data = []
 221        for fieldname, cell in zip(fieldnames, line):
 222            if fieldname == 'SENSING_TIME':
 223                data.append(cell)
 224
 225            elif fieldname == 'SID_NUM':
 226                data.append(cell)
 227
 228            else:
 229                # print(db_table.fields.keys())
 230                data.append(db_conn.imp().str_to_ins(db_table.fields[fieldname.lower()], cell))
 231
 232        ins_cur.execute(None, data)
 233        # logger.debug('insert ' + str(data))
 234        db_conn.commit()
 235        c += 1
 236        if c % 100 == 0:
 237            logger.info('Inserted {cc} rows'.format(cc=c))
 238
 239    logger.info('Finished {cc} rows'.format(cc=c))
 240
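# Illustrative usage (assumption, not in the original code): reload a CSV dump
# whose name matches the target table.
#
#     restore(None, Path('dumps/TM_EXAMPLE.csv'))  # hypothetical dump file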
 241
 242# def restore(handle):
 243#     csv_reader = csv.reader(handle, dialect='excel')
 244#     csv_iter = iter(csv_reader.__iter__())
 245#     # csv_iter = iter(csv_reader)
 246
 247#     table = None
 248#     start_time = None
 249#     stop_time = None
 250#     sid = None
 251#     for kv in next(csv_iter)[0].split(' '):
 252#         key, _, value = kv.partition(':')
 253#         # meta[key] = value
 254#         if key == 'table':
 255#             table = value
 256
 257#         elif key == 'sid':
 258#             sid = SID.from_string(value)
 259
 260#         elif key == 'start':
 261#             start_time = xml_to_datetime(value)
 262
 263#         elif key == 'stop':
 264#             stop_time = xml_to_datetime(value)
 265
 266#     # if not table.endswith('_STATS'):
 267#         # ts.delete(table=table, sid=sid, sensing_start=start, sensing_stop=stop)
 268
 269#     db_conn = db_connect()  # table)
 270#     conds = [sid.sql_where()]
 271#     binds = {}
 272#     if start_time is not None:
 273#         conds.append('SENSING_TIME>=:start_time')
 274#         binds['start_time'] = start_time
 275
 280#     if stop_time is not None:
 281#         conds.append('SENSING_TIME<:stop_time')
 282#         binds['stop_time'] = stop_time
 283
 284#     sql = 'DELETE FROM {table} WHERE {conds}'.format(table=table, conds=' AND '.join(conds))
 285#     logging.info('SQL {sql} BIND {bind}'.format(sql=sql, bind=binds))
 286#     db_conn.execute(sql, binds)
 287#     # db_conn.commit()
 288
 289#     # fields = ', '.join("'{f}'".format(f=f) for f in csv_iter.next())
 290#     fields = ', '.join("{f}".format(f=f) for f in next(csv_iter))
 291#     # print('fields ' + fields)
 292
 293#     BATCH = 20
 294#     COMMIT = 1000
 295
 296#     # see http://stackoverflow.com/questions/39576/best-way-to-do-multi-row-insert-in-oracle
 297#     # for source of this weird SQL
 298
 299#     # Oracle weirdness - the batched-up insert method is really fast on our DEV database,
 300#     # but unuseably slow on VAL. However running individual INSERT statements is fine
 301#     # on VAL.
 302#     # Update: ... or maybe not
 303
 304#     rows = []
 305
 306#     for cc, row in enumerate(csv_reader, 1):
 307#         rows.append('SELECT {values} FROM DUAL'.format(
 308#             values=', '.join('\'NaN\'' if r == 'nan' else r for r in row)))
 309#         if cc % BATCH == 0:
 310#             # logging.debug('Begin flush of {batch} count {cc}'.format(batch=BATCH, cc=cc))
 311#             sql = 'INSERT INTO {table} ({fields}) {rows}'.format(
 312#                 table=table,
 313#                 fields=fields,
 314#                 rows=' UNION ALL '.join(rows))
 315#             db_conn.query(sql)
 316#             logging.info('Flushed {batch} count {cc}'.format(batch=BATCH, cc=cc))
 317#             rows = []
 318
 319#         if cc % COMMIT == 0:
#             logging.debug('Precommit')
#             db_conn.commit()
#             logging.debug('Postcommit')
 323
 324#     db_conn.commit()
 325#     logging.info('Wrote {cc} rows'.format(cc=cc))
 326
 327
 328# def restore_orb_histo(source, fieldnames, types):
 329#     """Special ingester for ORB_HISTO.
 330#     This is not good design, with a bit more care restore() could be rewritten
 331#     to handle SYS tables as well as TS.
 332#     """
 333
 334#     print(fieldnames)
 335#     print(types)
 336#     ins_cur = ts.insert(table='ORB_HISTO', fields=fieldnames)
 337#     for row in source:
 338#         print(row)
 339#         ins_cur.execute(None, [row[0], datetime.strptime(row[1], '%Y-%m-%d %H:%M:%S')] +
 340#                         [float(f) for f in row[2:]])
 341
 342#     # db_conn.commit()
 343
 344
 345# def restore(target):
 346#     """Read rows from `target` into a table.
 347#     The header consists of one line giving table name then one line giving field names.
 348#     The data rows each give sid, sensing_time then fields.
 349#     `target` may be a filename or file like object.
 350
 351#     Example:
 352
 353#     ADCS_MSG
 354#     ADCSMSG_MESS_OK,ADCSMSG_MESS_ERROR
 355#     'M02',2010-10-01T06:00:00,1,2
 356#     ...
 357
 358#     """
 359
 360#     if isinstance(target, basestring):
 361#         if os.path.splitext(target)[1] == '.bz2':
 362#             import bz2
 363#             handle = bz2.BZ2File(target)
 364
 365#         else:
 366#             handle = open(target, 'rb')
 367
 368#     elif isinstance(target, Path):
 369#         handle = target.open('rb')
 370
 371#     else:
 372#         handle = target
 373
 374#     # read table name from first line
 375#     table_name = handle.readline().strip()
 376#     table_info = TableInfo(table_name)
 377#     logger.info('Reading into table {t}'.format(t=table_info.name))
 378
 379#     c = csv.reader(handle, dialect='excel')
 380
 381#     fieldnames = c.next()
 382#     # print('names ' + str(fieldnames))
 383#     logger.info('Header has {cc} field names'.format(cc=len(fieldnames)))
 384
 385#     if table_info.table_type is TableType.SYS:
 386#         db_conn = db_connect(table_info)
 387#         db_conn.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'")
 388#         cc = 0
 389#         for row in target:
 390#             sql = 'INSERT INTO {t} ({f}) VALUES ({v})'.format(
 391#                 t=table_info.name,
 392#                 f=', '.join(fieldnames),
 393#                 v=', '.join('\'{r}\''.format(r=r) for r in row.split(',')))
 394#             # print(sql)
 395#             try:
 396#                 db_conn.execute(sql)
 397#             except IntegrityError:
 398#                 # ok we should handle duplicates here. However for the current task it's not needed
 399#                 # (we can skip over them) so I'm leaving it as this
 400#                 # print('dup')
 401#                 pass
 402
 403#             cc += 1
 404
 405#         db_conn.commit()
 406#         logging.info('Inserted {cc} rows'.format(cc=cc))
 407#         return
 408
 409#     def conv_float(s):
 410#         """Convert `s` to float, or None if empty."""
 411#         if len(s) == 0:
 412#             return None
 413
 414#         else:
 415#             return float(s)
 416
 417#     def conv_int(s):
 418#         """Convert `s` to int, or None if empty."""
 419#         if len(s) == 0:
 420#             return None
 421
 422#         else:
 423#             return int(s)
 424
 425#     # Get the datatype (only integer, float handled) for each field
 426#     types = []
 427#     for f in fieldnames[2:]:
 428#         if table_info.fields[f].datatype is Datatype.FLOAT:
 429#             types.append(conv_float)
 430
 431#         else:
 432#             types.append(conv_int)
 433
 434#     if table_name.upper() == 'ORB_HISTO':
 435#         return restore_orb_histo(c, fieldnames, types)
 436
 437#     db_conn = db_connect(table_info)
 438#     ins_cur = db_conn.prepared_cursor('INSERT INTO {table} ({fields}) VALUES ({binds})'.format(
 439#             table=table_info.name,
 440#             fields=','.join(fieldnames),
 441#             binds=','.join(':' + str(i) for i in xrange(len(fieldnames)))))
 442#     db_conn.setinputsizes(ins_cur, [str, datetime] + [int] * (len(fieldnames) - 2))
 443
 444#     if table_info.is_timeseries:
 445#         del_cur = ts.prepare_delete(table=table_info, sensing_time='sensing_time')
 446
 447#     else:
 448#         del_cur = None
 449
 450#     # if "MHS_DC_OFF_VALID_FL" in fieldnames:
 451#         # print 'MHS_DC_OFF_VALH1 # ', fieldnames.index("MHS_DC_OFF_VAL_H2")
 452
 453#     first_time = None
 454#     last_time = None
 455#     resets = 0
 456#     drops = 0
 457#     inserts = 0
 458#     for row in c:
 459#         # print len(fieldnames), fieldnames[652], type(fieldnames[652]),
 460#         # len(fieldnames[652]), 'x'+row[652]+'x'
 461#         if len(fieldnames) >= 652 and fieldnames[652] == 'MHS_OBT' and \
 462#                 (row[652] is None or len(row[652]) == 0):
 463
 464#             # logger.debug('Setting MHS_OBT to 1')
 465#             row[652] = '1'
 466#             resets += 1
 467
 468#         # drop rows with empty MHS_DC_OFF_VALID_FL
 469#         if len(fieldnames) > 43 and fieldnames[43] == 'MHS_DC_OFF_VALID_FL' and len(row[43]) == 0:
 470#             drops += 1
 471#             continue
 472
 473#         if len(fieldnames) > 46 and fieldnames[46] == 'MHS_DC_OFF_VAL_H1' and len(row[46]) == 0:
 474#             drops += 1
 475#             continue
 476
 477#         if len(fieldnames) > 47 and fieldnames[47] == 'MHS_DC_OFF_VAL_H2' and len(row[47]) == 0:
 478#             drops += 1
 479#             continue
 480
 481#         if len(fieldnames) > 48 and fieldnames[48] == 'MHS_DC_OFF_VAL_H3' and len(row[48]) == 0:
 482#             drops += 1
 483#             continue
 484
 485#         if len(fieldnames) > 49 and fieldnames[49] == 'MHS_DC_OFF_VAL_H4' and len(row[49]) == 0:
 486#             drops += 1
 487#             continue
 488
 489#         if len(fieldnames) > 50 and fieldnames[50] == 'MHS_DC_OFF_VAL_H5' and len(row[50]) == 0:
 490#             drops += 1
 491#             continue
 492
 493#         # drop rows containing nulls
 494#         # print 'testing ', row
 495#         null = False
 496#         for r in row:
 497#             # print 't x'+str(r)+'x l' + str(len(r))
 498#             if len(r) == 0:
 499#                 null = True
 500
 501#         # print 'Null ', null
 502#         if null:
 503#             drops += 1
 504#             # print 'dropline'
 505#             continue
 506
 507#         # else:
 508#             # print 'not drop line'
 509
 510#         sid = SID(row[0])
 511#         if '.' in row[1]:
 512#             sensing_time = datetime.strptime(row[1], '%Y-%m-%d %H:%M:%S.%f')
 513
 514#         else:
 515#             sensing_time = datetime.strptime(row[1], '%Y-%m-%d %H:%M:%S')
 516
 517#         if del_cur is not None:
 518#             # print(del_cur.statement)
 519#             del_cur.execute(None, scid=sid.scid, sensing_time=sensing_time)
 520
 521#         ins_cur.execute(None,
 522#                         [sid.scid, sensing_time] + [conv(i) for conv, i in zip(types, row[2:])])
 523#         if first_time is None:
 524#             first_time = sensing_time
 525
 526#         last_time = sensing_time
 527#         inserts += 1
 528
 529#     db_conn.commit()
 530#     logger.info('Inserted {cc} rows from {fr} to {to}'.format(cc=inserts,
 531#                                                            fr=first_time,
 532#                                                            to=last_time))
 533
 534#     if resets > 0:
 535#         logger.info('Reset {resets} values from null'.format(resets=resets))
 536
 537#     if drops > 0:
 538        # logger.info('Dropped {drops} rows'.format(drops=drops))
 539
 540
 541def find_exe(name):
 542    """Locate path for executable `name`."""
 543    # List of directories to search for `exe`
 544    search_dirs = [
 545        '/opt/oracle/is32',
 546        '/usr/bin',
 547        '/usr/lib/oracle/xe/app/oracle/product/10.2.0/server/bin',
 548        ] + os.environ['PATH'].split(':')
 549
 550    for d in search_dirs:
 551        if d.endswith('commands'):
 552            continue
 553
 554        exe = os.path.join(d, name)
 555        if os.path.exists(exe) and os.access(exe, os.X_OK):
 556            return exe
 557
 558    return None
 559
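# Illustrative usage (assumption): locate a client binary before launching it.
#
#     psql = find_exe('psql')  # absolute path, or None if not found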
 560
def launch_sqlplus(script=None, echo=False):
    """If Oracle is the configured database, launch `sqlplus`.
    Optionally execute `script`.
    We attempt to use `rlwrap` to give readline support to the tool.
    """

    print('Launch sqlplus. For better tables enter "set lin 130" and "set pagesize 50000"')
 568
 569    # locate the sqlplus binary from the Oracle client
 570    sqlplus = find_exe('sqlplus')
 571
 572    if sqlplus is None:
 573        raise IOError('Cannot locate Oracle sqlplus executable')
 574
 575    if sqlplus == '/opt/oracle/is32/sqlplus':
 576        # running on TCE AIX machine
 577        os.environ['LIBPATH'] = '/opt/oracle/is32:' + os.environ.get('LIBPATH', '')
 578
 579    #print sqlplus
 580
 581    params = settings.DATABASES['default']
 582<<<hidden due to potential security issue>>>
 583
 584    # construct command line arguments
 585<<<hidden due to potential security issue>>>
 586            user=params['USER'],
 587<<<hidden due to potential security issue>>>
 588            host=params['HOST'],
 589            name=params['NAME'])]
 590
 591    # Oracle doesn't like SYS to connect as an ordinary user
 592    if params.get('DBA'):
 593        args += ['AS', 'SYSDBA']
 594
 595    # add any extra options from the user such as @script.sql
 596    # args += sys.argv[1:]
 597
 598    if script is not None:
 599        args += ['@' + script]
 600
 601    # look for rlwrap
 602    rlwrap = find_exe('rlwrap')
 603    if rlwrap is None:
 604        exe = sqlplus
 605
 606    else:
 607        exe = rlwrap
 608        args.insert(0, rlwrap)
 609
 610    print('Accessing {name} as {user} on {host}'.format(
 611        name=params['NAME'],
 612        user=params['USER'],
 613        host=params['HOST']))
 614
 615    if echo:
 616        # print(str(exe) + ' ARGS ' + str(args))
 617        print('> {cmd}'.format(cmd=' '.join(args)))
 618
 619    try:
 620        os.execv(exe, args)
 621    except Exception:
 622        print('Cannot access {exe} {args}'.format(exe=exe, args=args))
 623
 624
 625def launch_psql(script=None):
 626    """If PostgreSQL is configured as the database, launch `psql`.
 627    Run `script` if passed."""
 628
 629<<<hidden due to potential security issue>>>
 630    # so we use an expect script
 631    EXE = pkg_resources.resource_filename(__name__, 'psql.expect')
 632
 633    db_conn = db_connect()
 634    cmd = [EXE,
 635           db_conn.params['HOST'],
 636           db_conn.params['PORT'],
 637           db_conn.params['NAME'],
 638           db_conn.params['USER'],
 639<<<hidden due to potential security issue>>>
 640
 641    if script is not None:
 642        cmd.extend(['-f', script])
 643
 644    # check_postgres -H 127.0.0.1 -p 5432 --dbname=chart --dbuser=developer
 645    # --dbpass=Jut8ThU6rEsw --action=dbstats
 646
 647    os.execv(EXE, cmd)
 648
 649
def launch_mysql(script=None):    # mja - inserted for mysql
    """If MySQL is configured as the database, launch `mysql`.
    Run `script` if passed."""
 653
 654    EXE = '/usr/bin/mysql'
 655
 656    db_conn = db_connect()
 657    cmd = [EXE,
 658           db_conn.params['HOST'],
 659           db_conn.params['PORT'],
 660           db_conn.params['NAME'],
 661           db_conn.params['USER'],
 662<<<hidden due to potential security issue>>>
 663
 664    if script is not None:
 665        cmd.extend(['-f', script])
 666
 669
 670    os.execv(EXE, cmd)
 671
 672
 673def launch_sqlite(script=None):
 674    """Launch sqlite3 CLI. Run `script` if passed."""
 675    EXE = '/usr/bin/sqlite3'
 676
 677    cmd = [EXE, str(settings.DATABASES['default']['NAME'])]
 678
 679    if script is not None:
 680        cmd.extend(['-f', script])
 681
 682    os.execv(EXE, cmd)
 683
 684
 685def show_database(db_name):
 686    """Show information on a named database connection."""
 687
 688    t = Table(title='Primary connection details', title_underline=None)
 689    t.append(('ID', db_name))
 690
 691    if db_name not in settings.DATABASES:
 692        raise ValueError('No such database {name}'.format(name=db_name))
 693
 694<<<hidden due to potential security issue>>>
 695<<<hidden due to potential security issue>>>
 696
 697    for k, v in settings.DATABASES[db_name].items():
 698        # print(k, v)
 699
 700        if isinstance(v, dict):
 701            # there can be an EXTRAS dict
 702            v = str(v)
 703
 704        if k == 'HOST':
 705            if is_ip_address(v):
 706                v = '{v} ({host})'.format(v=v, host=reverse_ip(v))
 707
 708            else:
 709                try:
 710                    v = '{v} ({addr})'.format(v=v, addr=lookup_ip(v))
 711                except UnknownServer:
 712                    v = '{v} (unknown)'.format(v=v)
 713
 714<<<hidden due to potential security issue>>>
 715            if isinstance(v, str):
 716                v = 'xxx'
 717
 718            else:
 719                v = v.describe()
 720
 721        t.append((k, v))
 722
 723    t.write()
 724
 725    if len(settings.DB_SPLIT_TABLES) > 0:
 726        local = Table(title='Local tables',
 727                      headings=('Table', 'Connection'),
 728                      title_underline=None,
 729                      indent='  ')
 730        for k in sorted(settings.DB_SPLIT_TABLES.keys()):
 731            v = settings.DB_SPLIT_TABLES[k]
 732            local.append((k, v))
 733
 734        local.write()
 735
 736    else:
 737        print('No split tables\n')
 738
 739    t = Table(title='Testing database', title_underline=None, indent='  ')
 740
    if settings.DATABASES[db_name]['ENGINE'] == 'django.db.backends.sqlite3':
        # if it's an SQLite database then check the file exists
        # t.append(('Database file', settings.DB_AVAILABLE[db_name]['NAME']))
        filename = settings.DATABASES[db_name]['NAME']
        t.append(('Path', filename))
        t.append(('File exists', Path(filename).exists()))
        t.append(('File can be created', creatable(Path(filename))))
 748
 749    else:
 750        # otherwise check the host and service exists on the server
 751        # settings.DB_AVAILABLE[db_name]['HOST'] += 'x'
 752        ping_res = ping(settings.DATABASES[db_name]['HOST'])
 753        # print('ping res ' + str(ping_res))
 754        t.append(('Ping server {host}'.format(host=settings.DATABASES[db_name]['HOST']),
 755                  'ok' if ping_res is True else ping_res))
 756
 757        db_conn = db_connect()
 758        try:
 759            db_conn.query('SELECT 0 FROM dual').fetchall()
 760            db_res = 'ok'
 761
 762        except ConnectionError as e:
 763            db_res = str(e)
 764
 765        except DatabaseError as e:
 766            db_res = str(e)
 767
 768        t.append(('Test database availability', db_res))
 769
 770    t.write()
 771
 772
 773def list_databases():
 774    """Display a table showing names and descriptions of all configured databases."""
 775    t = Table(title='Database sources',
 776              headings=('Name', 'Default', 'Description'))
 777              # header_sep='-',
 778              # header_column_sep=' | ',
 779              # cross='-^-')
 780    # t = Table(headings=('Name', 'Description'))#, column_sep=' | ',header_sep='-', cross='-+-')
 781    for dbname in list(settings.DATABASES.keys()):
 782        if dbname == 'default':
 783            continue
 784
 785        description = settings.DATABASES[dbname].get('DESCRIPTION', '')
 786        in_use = ''
 787        if settings.DATABASES['default']['DB_NAME'] == dbname:
 788            # description += ' (in use)'
 789            in_use = 'Yes'
 790
 791        t.append((dbname, in_use, description))
 792
 793    t.write()
 794    show_overrides()
 795
 796
 797def show_overrides():
 798    """Display a list of tables which are accessed from alternate data sources."""
 799
 800    if len(settings.DB_SPLIT_TABLES) == 0:
 801        print('No table overrides in use')
 802
 803    else:
 804        t = Table(title='The following tables are accessed via alternative sources',
 805                  headings=('Table', 'Source'))
 806        for k, v in settings.DB_SPLIT_TABLES.items():
 807            t.append((k, v))
 808
 809        t.write()
 810
 811
def sql(statement, no_ms=None, kv=None, pp=None, no_prompt=False, no_head=False):
    """Execute a SELECT statement and show the results in various forms.

    Args:
        `statement` (str): SQL to run.
        `kv` (bool): Transpose the output, showing one row per field.
        `no_ms` (bool): Strip milliseconds from datetime values.
        `pp` (bool): Pretty-print the output as a table instead of CSV.
        `no_prompt` (bool): Commit changes without an "are you sure?" prompt.
        `no_head` (bool): Hide the header row in CSV output.
    """
 823
 824    db_conn = db_connect()
 825
 826    if statement.endswith(';'):
 827        statement = statement[:-1]
 828
 829    try:
 830        cur = db_conn.query(statement)
 831
 832    except DatabaseError as e:
 833        print(e)
 834        return
 835
 836    if cur is None:
 837        raise ValueError('Query returned None object')
 838
 839    if cur.description is None:
 840        if no_prompt:
 841            db_conn.commit()
            print('Committed {cc} changes'.format(cc=cur.rowcount))
 843
 844        else:
 845            # could be a DDL command
 846            print('Command executed successfully with rowcount of {cc}'.format(
 847                    cc=cur.rowcount))
 848            if keyboard_confirm('Commit?'):
 849                print('Committing')
 850                db_conn.commit()
 851
 852            else:
 853                print('Not committing')
 854
 855        return
 856
 857    if not kv and not pp:
 858        f = csv.writer(sys.stdout, dialect='excel')
 859        if not no_head:
 860            f.writerow([x[0] for x in cur.description])
 861
        if no_ms:
            for row in cur:
                # strip milliseconds, otherwise Excel 2003 gets a bit funny
                r = list(row)
                for rr in range(len(r)):
                    if isinstance(r[rr], datetime):
                        r[rr] = r[rr].replace(microsecond=0)

                f.writerow(r)
 870
 871        else:
 872            for row in cur:
 873                f.writerow(row)
 874
 875    elif kv and not pp:
 876        rows = cur.fetchall()
 877        t = Table(headings=['Field'] + ['Row ' + str(i) for i in range(len(rows))])
 878        for vals in zip((i[0] for i in cur.description), *rows):
 879            t.append(vals)
 880
 881        t.write()
 882
 883    elif not kv and pp:
 884        rows = cur.fetchall()
 885        t = Table(headings=[i[0] for i in cur.description])
 886        for row in rows:
 887            t.append(row)
 888
 889        t.write()
 890
 891    else:
 892        raise ValueError('bad args')
 893
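# Illustrative usage (assumption, the table name is hypothetical):
#
#     sql('SELECT * FROM events', pp=True)        # pretty-printed table
#     sql('SELECT * FROM events', kv=True)        # transposed, one output row per field
#     sql('SELECT * FROM events', no_head=True)   # CSV on stdout without a header row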
 894
 895def status_table(table_name):
 896    """Show some information about the table and its partitions.
 897    Oracle only.
 898    """
 899    db_conn = db_connect()
 900
 901    # look up any synonym
 902    schema = db_conn.params['USER']
 903    row = db_conn.query('SELECT TABLE_OWNER FROM ALL_SYNONYMS WHERE OWNER=:owner '
 904                          'AND TABLE_NAME=:table_name',
 905                          owner=schema.upper(),
 906                          table_name=table_name.upper()).fetchone()
 907    if row is None:
 908        owner = schema
 909        print('No synonym so using {owner} as schema'.format(owner=owner))
 910
 911    else:
 912        owner = row[0]
 913        print('Following synonym to {owner}'.format(owner=owner))
 914
 915    t = Table(title='Fields of {owner}.{table}'.format(owner=owner, table=table_name),
 916              headings=('Name', 'Type', 'Length', 'Nullable'),
 917              )
 918    for row in db_conn.query('SELECT COLUMN_NAME, DATA_TYPE, DATA_LENGTH, NULLABLE '
 919                             'FROM ALL_TAB_COLUMNS WHERE OWNER=:owner '
 920                             'AND TABLE_NAME=:table_name',
 921                             owner=owner,
 922                             table_name=table_name):
 923        #, LAST_ANALYZED, AVG_COL_LEN
 924        t.append(row)
 925
 926    t.write()
 927
 928    return
 929
 930    # Table info
 931    # row = db_conn.query('SELECT t.table_name, t.owner, t.tablespace_name, '
 932    #                '  p.partitioning_type, p.subpartitioning_type, p.partition_count '
 933    #                'from all_tables t, all_part_tables p '
 934    #                'where t.table_name=p.table_name '
 935    #                'and t.table_name=:table_name',
 936    #                table_name=table_name).fetchone()
 937    # t = Table()  # title='Information for table {tab}'.format(tab=table_name))
 938
 939    # if row is None:
 940    #     print('No partition information found')
 941
 942    # else:
 943    #     name, owner, tablespace, part, subpart, count = row
 944    #     t.append(('Table name', name))
 945    #     t.append(('Owner', owner))
 946    #     t.append(('Partitioning type', part))
 947    #     t.append(('Subpartitioning type', subpart))
 948    #     t.append(('Partition count', count))
 949    #     t.append(('Tablespace', tablespace))
 950
 951    # t.write()
 952
 953    # # Partition and subparttion info
 954    # print('Partitions:')  # (partition, subpartition, value, tablespace, rows,
 955    # total size (bytes)')
 956    # t = Table(indent='    ')
 957    # # headings=('Partition', 'Subpartition', 'Value', 'Tablespace', 'Rows', 'Size'))
 958    # t.append(('Part', 'Subpart', 'Value', 'Tablespace', 'Rows', 'Size', 'Segsize'))
 959    # t.append([None] * 7)
 960    # for partition_name, high_value, tablespace, num_rows, _, avg_row_len in db_conn.query(
 961    #     'SELECT partition_name, high_value, tablespace_name, num_rows, blocks, avg_row_len '
 962    #     'FROM all_tab_partitions WHERE table_name=:table_name', table_name=table_name):
 963
 964    #     # t = Table(indent='    ', coda='')#, title='Partition')
 965    #     # t.append(('Name', partition_name))
 966    #     # t.append(('Rows', num_rows))
 967    #     # t.write()
 968    #     t.append((partition_name, None, high_value, tablespace, nvl(num_rows, 0),
 969    #               nvl(num_rows, 0) * nvl(avg_row_len, 0)))
 970    #     # print('    Subpartitions:')
 971    #     for subpartition_name, high_value, tablespace, num_rows, _, avg_row_len in db_conn.query(
 972    #         'SELECT subpartition_name, high_value, tablespace_name, num_rows, blocks,
 973    # avg_row_len '
 974    #         'FROM all_tab_subpartitions '
 975    #         'WHERE table_name=:table_name '
 976    #         'AND partition_name=:partname', table_name=table_name, partname=partition_name):
 977    #         # t.append((subpartition_name, num_rows))
 978
 979    #         row = db_conn.query('select blocks, bytes from dba_segments '
 980    #                        'where segment_type=:segtype '
 981    #                        'and segment_name=:segname '
 982    #                        'and partition_name=:partname',
 983    #                        segtype='TABLE SUBPARTITION',
 984    #                        segname=table_name,
 985    #                        partname=subpartition_name).fetchone()
 986    #         seg_size = '{blocks}/{bytes}'.format(blocks=row[0],
 987    #                                              bytes=row[1]) if row is not None else None
 988
 989    #         t.append((None,
 990    #                   subpartition_name,
 991    #                   high_value,
 992    #                   tablespace,
 993    #                   nvl(num_rows, 0),
 994    #                   nvl(num_rows, 0) * nvl(avg_row_len, 0),
 995    #                   seg_size))
 996
 997    #     t.append([None] * 7)
 998
 999    # t.write()
1000
1001    # row counts
1002    # TABLE_OWNER,TABLE_NAME,PARTITION_NAME,SUBPARTITION_NAME,HIGH_VALUE,HIGH_VALUE_LENGTH,
1003    # SUBPARTITION_POSITION,TABLESPACE_NAME,PCT_FREE,PCT_USED,INI_TRANS,MAX_TRANS,INITIAL_EXTENT,
1004    # NEXT_EXTENT,MIN_EXTENT,MAX_EXTENT,MAX_SIZE,PCT_INCREASE,FREELISTS,FREELIST_GROUPS,LOGGING,
1005    # COMPRESSION,COMPRESS_FOR,NUM_ROWS,BLOCKS,EMPTY_BLOCKS,AVG_SPACE,CHAIN_CNT,AVG_ROW_LEN,
1006    # SAMPLE_SIZE,LAST_ANALYZED,BUFFER_POOL,FLASH_CACHE,CELL_FLASH_CACHE,GLOBAL_STATS,USER_STATS,
1007    # INTERVAL,SEGMENT_CREATED
1008    # MSG_OPE,BATREC,P_MSG1,SP_MSG1_PRIM,'PRIM',6,1,MSG_OPE_MSG1_PRIM,0,,1,255,,,1,,,,,,YES,ENABLED,
1009    #BASIC,,,,,,,,,DEFAULT,DEFAULT,DEFAULT,NO,NO,NO,NO
1010    # "select table_name,partition_name,tablespace_name,num_rows,subpartition_count from all_tab_pa
1011    # rtitions where table_owner='MSG_OPE'
1012
1013    # and subpartitions
1014
1015
def status_database():
    """Show database status (Oracle only).
    We show basic information on all datafiles and all tablespaces.
    """
1020
1021    db_conn = db_connect()
1022
1023    t = Table(title='Datafiles', headings=('Name', 'Tablespace', 'Size'))
1024    for name, tablespace, size in db_conn.query('SELECT file_name, tablespace_name, bytes '
1025                                     'FROM dba_data_files'):
1026        t.append((name, tablespace, size))
1027
1028    t.write()
1029
1030    t = Table(title='Tablespaces', headings=('Name', 'Block size'))
1031    for name, size in db_conn.query('SELECT tablespace_name, block_size FROM dba_tablespaces'):
1032        t.append((name, size))
1033
1034    t.write()
1035
1036    # dba_extents ? where owner=SCHEMA
1037    # dba_data_files
1038
1039    # all
1040
1041    # t = Table(title='Datafiles')#headings=())
1042    # for
1043
1044
1045def stats_table(tables, sid, start_time, stop_time):
1046    """Show some stats about a set of tables - Oracle only."""
1047
1048    db_conn = db_connect()
1049
1050    t = Table(
1051        title='Growth Rate Stats for {sid} over time range {start} : {stop}'.format(
1052            sid=sid, start=start_time, stop=stop_time), indent='  ',
1053        headings=(
1054            'TableName', 'NumberRows', 'AveRow_Len', 'Growth (MB)', 'No Cols', '% Occupancy'),)
1055
1056    total_growth_rate = 0.0
1057
    for table_info in tables:
1059        if table_info.table_type is TableType.VIEW:
1060            logging.info('Skipping view {n}'.format(n=table_info.name))
1061            continue
1062
1063        if table_info.storage is TableStorage.KEYVALUE:
1064            logging.info('Skipping key-value store {n}'.format(n=table_info.name))
1065            continue
1066
1067        table_name = table_info.name
1068
1069        #logger.info('Reading into table {t}'.format(t=table_info.name))
1070
        # follow any synonym to find the owning schema; fall back to the connection user
        row = db_conn.query('SELECT table_owner FROM user_synonyms WHERE table_name=:tablename',
                            tablename=table_name).fetchone()
        schema = row[0] if row is not None else db_conn.params['USER']
        # logging.debug('Using schema {schema} for table {table}'.format(schema=schema, table=table_name))
1074
1075        # filter by OWNER column in case the database has multiple schemas
1076        avg_row_len = db_conn.query(
1077            'SELECT AVG_ROW_LEN FROM ALL_TABLES WHERE OWNER=:owner AND TABLE_NAME=:table_name',
1078            owner=schema, table_name=table_name).fetchone()
1079
1080        if avg_row_len is None or avg_row_len[0] is None:
1081            raise ValueError('Table {table} not found'.format(table=table_name))
1082
1083        # for TS tables this needs SID fields added
1084        fields = list(table_info.fields.keys())
1085
1086        cols = 0
1087        nulls = 0
1088
1089        cur = ts.select(table=table_info,
1090                    fields=fields,
1091                    sid=sid,
1092                    sensing_start=start_time,
1093                    sensing_stop=stop_time,
1094                    calibrated=False)  # take raw data as raw
1095        for row in cur:
1096            cols += len(row)
1097            nulls += row.count(None)
1098
1099        no_rows = cur.rowcount
1100        if no_rows > 0:
1101            logger.info(
1102                'Table {table} rows {rows} avg len {av} columns {cols} nulls {nulls}'.format(
1103                    rows=no_rows, table=table_name, av=avg_row_len[0], cols=cols, nulls=nulls))
1104
            no_cols = cols // no_rows

            if cols > 0:
                per_occ = ((cols - nulls) / cols) * 100.0
1109
1110            else:
1111                per_occ = 0
1112
1113            per_nulls = 100.0 - per_occ
1114
1115            # calculate growth rate ~= avg_row_len * no_rows
1116            growth_rate = (avg_row_len[0] * no_rows) / (1024.0 * 1024.0)  # MB
1117            total_growth_rate += growth_rate
1118            t.append((table_name, no_rows, avg_row_len[0],
1119                      format(growth_rate, '.3f'),
1120                      no_cols,
1121                      format(per_occ, '.2f')))
1122
1123        else:
            logger.info('No data in table {table} during allotted time range'.format(
                table=table_name))
            t.append((table_name, 0, avg_row_len[0], 0.0, '-', '-'))
1127
1128    # sort table on Growth Value
1129    t.sort_numerically(3, True)
1130    t.write()
1131
1132    # write Total ...
1133    t = Table(coda='Total data growth for time range = {g} MB\n'.format(
1134        g=format(total_growth_rate, '.3f')), indent='    ')
1135    t.write()
1136
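# Illustrative usage (assumption - the SID and time range are hypothetical):
#
#     stats_table(TableInfo.all(), SID('M02'), datetime(2020, 1, 1), datetime(2020, 1, 2))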
1137
1138def table_arg(table_arg, inc_sys=False):
1139    """Convert list of table names to a list of TableInfos, expanding wildcards."""
1140
1141    table_infos = []
1142    for tablename in table_arg:
        table_infos += TableInfo.all(ts=True, name=tablename.upper(), sys=inc_sys)
1144
1145    return table_infos
1146
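# Illustrative usage (assumption - the wildcard is hypothetical):
#
#     infos = table_arg(['TM_*'], inc_sys=False)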
1147
def describe_table(table_name):
    """Describe `table_name` (placeholder, not yet implemented)."""
    pass
1150
1151
def list_tabledefs(sys=False, target=sys.stdout):
    """List timeseries or system table definitions."""
    t = Table(title='System tables' if sys else 'Timeseries tables',
              headings=('Name', 'Description'))
    for table_info in TableInfo.all(sys=sys):
        t.append((table_info.name, table_info.description))

    t.write()
1160
1161
def table_db_tables(specs):
    """Show a table and summary of basic size information for database tables matching `specs`.

    PostgreSQL only (uses pg_total_relation_size).
    """
1164    # should use a db.drivers. function to list actual tables
1165    # table_catalog | table_schema | table_name | table_type | self_referencing_column_name |
1166    # reference_generation | user_defined_type_catalog | user_defined_type_schema |
1167    # user_defined_type_name | is_insertable_into | is_typed | commit_action
1168    db_conn = db_connect()
1169    conv_specs = [s.lower().replace('%', '*') for s in specs]
1170    matches = set()
1171    for tablename, in db_conn.query('SELECT table_name FROM information_schema.tables'):
1172        for c in conv_specs:
1173            if fnmatch(tablename.lower(), c):
1174                # print('matched', tablename)
1175                matches.add(tablename)
1176
1177    output = Table(headings=('Name', 'Rows', 'Totalsize', 'Rowsize'))
1178    total_total_bytes = 0
1179    total_rowsize = 0
1180    for table in sorted(matches):
1181        rows, total_bytes = db_conn.query('SELECT count(*), pg_total_relation_size(\'{tbl}\') from {tbl}'.format(
1182            tbl=table)).fetchone()
1183        rowsize = total_bytes / rows if rows > 0 else 0
1184        total_total_bytes += total_bytes
1185        total_rowsize += rowsize
1186        output.append((table, rows, total_bytes, rowsize))
1187
1188    output.write()
1189
1190    summary = Table()
1191    summary.append(('Table count', len(matches)))
1192    summary.append(('Size of all tables', total_total_bytes))
1193    if len(matches) > 0:
1194        summary.append(('Average table size', total_total_bytes / len(matches)))
1195        summary.append(('Average rowsize', total_rowsize / len(matches)))
1196
1197    summary.write()
1198
1199    # stats on cell compression used
1200    # select pg_column_compression('stats'),count(*) from tm_stats1k_kc_999 group by pg_column_compression('stats');
1201
1202
1203def main():
1204    """Command line entry point."""
1205
1206    # clean up logging
1207    from chart.common.log import init_log
1208    init_log(stderr=True)
1209    # init_log(messages_only=True)
1210    # logger.setLevel(logging.CRITICAL)
1211
1212    parser = ArgumentParser(__doc__)
1213
1214    # required input for several functions...
1215
1216    parser.add_argument('--sid', '-s',
1217                        type=ArgumentParser.sid)
1218    parser.add_argument('--start',
1219                        type=ArgumentParser.start_time,
1220                        metavar='TIME',
1221                        help='Begin search at TIME.')
1222    parser.add_argument('--stop',
1223                        type=ArgumentParser.stop_time,
1224                        metavar='TIME',
1225                        help='End search at TIME.')
1226    parser.add_argument('--table', '-t',
1227                        nargs='+',
1228                        #type=ArgumentParser.table,
                        help='Supply table name(s); wildcards allowed')
1230
1231    # Connection...
1232
1233    conn_group = parser.add_argument_group(
1234        'Connection',
1235        'Options used to select or view available db connections')
1236    conn_group.add_argument('--db',
1237                        metavar='CONN',
                        help='Use database connection CONN')
1239    conn_group.add_argument('--list', '-l',
1240                        action='store_true',
1241                        help='List available database connections')
1242    conn_group.add_argument('--show', '--info', '-i',
1243                            metavar='CONN',
1244                            nargs='*',
1245                            help='Show connection information for database CONN')
1246    conn_group.add_argument('--echo', '-e',
1247                            action='store_true',
1248                            help='Show all shell commands before execution')
1249
1250    # REPL ...
1251
1252    exe_group = parser.add_argument_group(
1253        'REPL',
1254        'Options for starting up a database REPL environment')
1255    exe_group.add_argument('--file', '-f',
1256                        metavar='FILE',
1257                        help='Execute FILE before starting interpreter')
1258    exe_group.add_argument('--repl',
1259                           action='store_true',
1260                        help=('Start a command line terminal connection to the database '
1261                              '(implied if no SQL statement given)'))
1262    exe_group.add_argument('--ddl',
1263                           help='Describe table')
1264    exe_group.add_argument('--no-head',
1265                           action='store_true',
1266                           help='Do not show header row in output')
1267
1268    # SQL ...
1269
1270    sql_group = parser.add_argument_group(
1271        'SQL',
1272        'Options for executing SQL statements')
1273    sql_group.add_argument('--no-ms',
1274                           action='store_true',
1275                           help='Remove milliseconds from output (may help import to Excel)')
1276    sql_group.add_argument('--kv',
1277                           action='store_true',
1278                           help='Alternate output format')
1279    sql_group.add_argument('--pp',
1280                           action='store_true',
1281                           help='Print output as a table')
1282    sql_group.add_argument('--no-prompt',
1283                           action='store_true',
1284                           help='Auto commit any changes without prompting')
1285
1286    # Dump/restore...
1287
1288    dump_group = parser.add_argument_group(
1289        'Dump/restore',
1290        'Options for controlling data dump and restore to CSV functionality')
1291    # dump_group.add_argument('--dump',
1292                            # type=ArgumentParser.output_filename,
1293                            # action='store_true',
1294                            # help='data dump / restore to CSV')
1295    dump_group.add_argument('--restore',
1296                            type=ArgumentParser.input_filename,
1297                            help='Reload data from file')
1298
1299    # Status
1300
1301    status_group = parser.add_argument_group(
1302        'Database status',
1303        'Options for viewing current database status information')
1304    status_group.add_argument('--table-status', '--desc',
1305                                help='Show information about a table')
1306    status_group.add_argument('--db-status',
1307                              action='store_true',
1308                              help='Show information about the database')
1309    status_group.add_argument('--db-tables',
1310                              nargs='+',
1311                              help='Show table and summary of size and basic info about database tables')
1312    # partitions, datafiles, table sizes
1313
1314    # Statistics
1315
1316    # stats_group = parser.add_argument_group(
1317        # 'Database Statistics',
1318        # 'Options for viewing current database User Table(s) Size and Growth statistics information')
1319    # stats_group.add_argument('--stats',
1320                             # action='store_true',
1321                             # help='Show stats about all tables, unless --table defined')
1322
1323    # Table definitions
1324    ddl_group = parser.add_argument_group(
1325        'Table definitions')
1326    ddl_group.add_argument('--list-tabledefs',
1327                           action='store_true',
1328                           help='List all table definitions')
1329    ddl_group.add_argument('--sys',
1330                           action='store_true',
1331                           help='Show system tables instead of timeseries')
1332
1333    parser.add_argument('SQL', nargs='*')
1334    args = parser.parse_args()
1335
1336    if args.list_tabledefs:
1337        list_tabledefs(sys=args.sys)
1338        parser.exit()
1339
1340    # switch databases (must be first option handled)
1341    if args.db:
1342        settings.set_db_name(args.db)
1343
1344    # after allowing user specified db
1345    # db_conn = db_connect()
1346
1347    if args.db_tables:
1348        table_db_tables(args.db_tables)
1349        parser.exit()
1350
1351    if args.table:
1352        # convert table input to list of TableInfos
1353        tables = []
1354        for tablename in args.table:
1355            tables += TableInfo.all(ts=True, name=tablename)
1356
1357    # connection options
1358    if args.list:
1359        list_databases()
1360        parser.exit()
1361
1362    if args.ddl:
        describe_table(args.ddl)
1364        parser.exit()
1365
1366    if args.show is not None:
1367        # print('settings.DATABASES ' + str(settings.DATABASES.keys()))
1368        if len(args.show) == 0:
1369            show_database('default')
1370            # show_database(settings.DATABASES['default']['DB_NAME'])
1371
1372        elif len(args.show) == 1:
1373            show_database(args.show[0])
1374
1375        else:
1376            parser.error('Can only specify one --show option')
1377
1378        parser.exit()
1379
1380    if args.table_status:
1381        status_table(args.table_status)
1382        parser.exit()
1383
1384    if args.db_status:
1385        status_database()
1386        parser.exit()
1387
1388    if args.SQL:
        sql(' '.join(args.SQL), no_ms=args.no_ms, kv=args.kv, pp=args.pp,
            no_prompt=args.no_prompt, no_head=args.no_head)
1390        parser.exit()
1391
1392    # if args.dump:
1393        # if args.sid is None:
1394            # parser.error('No --sid specified')
1395
1396        # if args.table is None:
1397            # parser.error('No --table specified')
1398
1399        # if len(args.dump) > 1:
1400            # parser.error('Only give one output parameter to --dump option')
1401
1402        # elif len(args.dump) == 1:
1403        # dump(args.dump, args.table[0], args.sid, args.start, args.stop)
1404
1405        # else:
1406        # dump(sys.stdout, args.table[0], args.sid, args.start, args.stop)
1407
1408        # parser.exit()
1409
1410    if args.restore:
1411        if args.table is not None and len(args.table) != 1:
1412            parser.error('If restoring to explicit table, specify only one table')
1413
1414        restore(args.table[0] if args.table is not None else None, args.restore)
1415        parser.exit()
1416
1417    # if args.stats:
1418    #     if args.table is None:
1419    #         tables = list(TableInfo.all())
1420
1421    #     stats_table(tables, args.sid, args.start, args.stop)
1422    #     parser.exit()
1423
1424    # repl options
1425    # print(args.sql)
1426    # if len(args.sql) > 0:
1427        # statement = ' '.join(args.sql)
1428        # print('Executing statement {s}'.format(s=statement))
1429        # sql(statement=statement,
1430            # no_ms=args.no_ms,
1431            # kv=args.kv,
1432            # pp=args.pp)
1433
1434    db_conn = db_connect()
1435
1436    if db_conn.engine is DatabaseEngine.ORACLE:
1437        launch_sqlplus(args.file, args.echo)
1438
1439    elif db_conn.engine is DatabaseEngine.POSTGRESQL:
1440        launch_psql(args.file)
1441
1442    elif db_conn.engine is DatabaseEngine.MYSQL:
1443        launch_mysql(args.file)
1444
1445    elif db_conn.engine is DatabaseEngine.SQLITE:
1446        launch_sqlite(args.file)
1447
1448    else:
1449        parser.error('Unsupported database type: {type}'.format(type=db_conn.engine.name))
1450
1451
1452if __name__ == '__main__':
1453    main()