#!/usr/bin/env python3 # -*- encoding: UTF8 -*- # test_SDP.py # Tests SDP (Server Deployment Package) on Linux VMs # Intended to be run from with a Vagrant VM. # See documentation and run_tests.sh in /sdp/main/test/README.md from __future__ import print_function import os, sys, re, socket, time, shutil, logging, time, pwd, stat import P4 import unittest import fileinput, subprocess import argparse import glob LOGGER_NAME = 'SDPTest' mkdirs_config = '/hxdepots/sdp/Server/Unix/setup/mkdirs.cfg' mkdirs_file = '/hxdepots/sdp/Server/Unix/setup/mkdirs.sh' MAILTO = 'mailto-admin@example.com' MAILFROM = 'mailfrom-admin@example.com' logger = logging.getLogger(LOGGER_NAME) single_instance = None class NotSudo(Exception): pass def get_host_ipaddress(): try: address = socket.gethostbyname(socket.gethostname()) # On my system, this always gives me 127.0.0.1. Hence... except: address = '' if not address or address.startswith('127.'): # ...the hard way. s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(('4.2.2.1', 0)) address = s.getsockname()[0] s.detach() logger.debug('IPAddress: %s' % address) return address def init_logging(): global logger logger.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s: %(message)s') fh = logging.FileHandler('/tmp/%s.log' % LOGGER_NAME, mode='w') fh.setLevel(logging.DEBUG) fh.setFormatter(formatter) logger.addHandler(fh) def do_unlink(filename): "Unlink file if it exists" if os.path.lexists(filename): os.unlink(filename) def substitute_unix_vars(line, instance, port): line = line.rstrip() if line.startswith('export MAILTO='): print("export MAILTO=%s" % MAILTO) elif line.startswith('export SSL_PREFIX=ssl:'): print("export SSL_PREFIX=") elif line.startswith('export MAILFROM='): print("export MAILFROM=%s" % MAILFROM) elif line.startswith('export P4PORTNUM='): print("export P4PORTNUM=%s" % port) elif line.startswith('export KEEPLOGS='): print("export KEEPLOGS=3") elif line.startswith('export 
KEEPCKPS='): print("export KEEPCKPS=3") elif line.startswith('export KEEPJNLS='): print("export KEEPJNLS=3") else: print(line) def configure_p4_vars(instance, port): "Configure p4_vars" for line in fileinput.input('/p4/common/bin/p4_vars', inplace=True): substitute_unix_vars(line, instance, port) def configure_instance_vars(instance, port): "Configure instance vars" for line in fileinput.input('/p4/common/config/p4_%s.vars' % instance, inplace=True): substitute_unix_vars(line, instance, port) class SDPTest_base(unittest.TestCase): "Generic test class for others to inherit" def setup_everything(self): if 'perforce' != pwd.getpwuid(os.getuid())[0]: raise Exception("This test harness should be run as user 'perforce'") try: result = subprocess.check_call("sudo ls > /dev/null", shell=True, timeout=20) except Exception as e: raise NotSudo("This test harness must be run as user perforce with sudo privileges or it will not work") def setUp(self): self.setup_everything() def run_test(self): pass def run_cmd(self, cmd, get_output=True, timeout=35, stop_on_error=True): "Run cmd logging input and output" output = "" try: logger.debug("Running: %s" % cmd) if get_output: p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, shell=True) output, err = p.communicate(timeout=timeout) rc = p.returncode logger.debug("Output:\n%s" % output) else: result = subprocess.check_call(cmd, stderr=subprocess.STDOUT, shell=True, timeout=timeout) logger.debug('Result: %d' % result) except subprocess.CalledProcessError as e: logger.debug("Output: %s" % e.output) if stop_on_error: msg = 'Failed sudo_cmd: %s' % (e.returncode, str(e)) logger.debug(msg) self.fail(msg) except Exception as e: logger.debug("Output: %s" % output) if stop_on_error: msg = 'Failed sudo_cmd: %s' % str(e) logger.debug(msg) self.fail(msg) return output def sudo_cmd(self, cmd, get_output=True, stop_on_error=True): "Run cmd with sudo" output = self.run_cmd("sudo %s" % cmd, 
get_output=get_output, stop_on_error=stop_on_error) return output def configure_mkdirs(self, instance): "Configure mkdirs.sh with a couple of key variables" ipaddr = get_host_ipaddress() for line in fileinput.input(mkdirs_config, inplace=True): line = line.rstrip() if line.startswith('P4DNSNAME'): print("P4DNSNAME=%s" % ipaddr) elif line.startswith('P4ADMINPASS'): print("P4ADMINPASS=Password1") elif line.startswith('MASTERINSTANCE'): print("MASTERINSTANCE=%s" % instance) elif line.startswith('HOST_IP'): print("HOST_IP=%s" % ipaddr) else: print(line) def run_mkdirs(self, instance): "Runs the mkdirs script" output = self.sudo_cmd("%s %s" % (mkdirs_file, instance)) valid_lines = ["Verified: Running as root.", "Warning: No p4p in /hxdepots/sdp/Server/Unix/p4/common/bin", "Warning: No p4broker in /hxdepots/sdp/Server/Unix/p4/common/bin", "Verified: Preflight checks passed.", "Setting permissions on depot files - this may take some time ...", "Setting ownership on depot files - this may take some time ...", "It is recommended that the perforce's umask be changed to 0026 to block world access to Perforce files.", "Add umask 0026 to perforce's .bash_profile to make this change."] for line in output.split('\n'): line = line.strip() if line and line not in valid_lines: self.fail('Unexpected line in mkdirs output:\n%s' % line) #--- Test Cases class configure_master(SDPTest_base): def check_dirs(self, rootdir, dirlist): "Checks specified directories are present" found_dirs = self.run_cmd("find %s -type d" % rootdir, stop_on_error=False).split() for d in [x.strip() for x in dirlist.split()]: self.assertIn(d, found_dirs) def p4service(self, cmd, instance, stop_on_error=True): "Start or stop service" self.run_cmd("/p4/%s/bin/p4d_%s_init %s" % (instance, instance, cmd), get_output=False, stop_on_error=stop_on_error) def remove_test_dirs(self, instances): "Remove all appropriate directories created" dirs_to_remove = "/hxdepots/sdp /hxdepots/p4 /hxmetadata1/p4 /hxmetadata2/p4 
/hxlogs/p4".split() for d in dirs_to_remove: if os.path.exists(d): self.sudo_cmd("rm -rf %s" % d) for instance in instances: for f in ["/p4/%s" % instance, "/p4/common"]: if os.path.lexists(f): self.sudo_cmd("unlink %s" % f) def liveCheckpointTest(self, instance): "Test live checkpoint script" self.assertFalse(os.path.exists('/p4/%s/offline_db/db.domain' % instance)) self.run_cmd('/p4/common/bin/live_checkpoint.sh %s' % instance) # Quick check on c=log file contents log_contents = self.readLog('checkpoint.log', instance) self.assertRegex(log_contents, "Checkpointing to /p4/%s/checkpoints/p4_%s.ckp" % (instance, instance)) self.assertRegex(log_contents, "journal") # Make sure offline db is present self.assertTrue(os.path.exists('/p4/%s/offline_db/db.domain' % instance)) def recreateOfflineDBTest(self, instance): "Test recreate_offline_db script" self.assertTrue(os.path.exists('/p4/%s/offline_db/db.domain' % instance)) self.sudo_cmd("rm -rf /p4/%s/offline_db/db.*" % instance) self.run_cmd('/p4/common/bin/recreate_offline_db.sh %s' % instance) # Quick check on log file contents logPattern = '/p4/%s/logs/recreate_offline_db.log.*' % instance logfiles = glob.glob(logPattern) self.assertEqual(1, len(logfiles)) log_contents = self.readLog(os.path.basename(logfiles[0]), instance) self.assertRegex(log_contents, "Start p4_%s recreate of offline db" % (instance)) self.assertRegex(log_contents, "Recovering from /p4/%s/checkpoints/p4_%s.ckp" % ( instance, instance)) # Make sure offline db is present self.assertTrue(os.path.exists('/p4/%s/offline_db/db.domain' % instance)) def failedDailyBackupTest(self, instance): "Test daily backup script - expected to fail due to lack of offline db" logger.debug("failedDailyBackupTest") jnl_counter = self.p4run('counter', 'journal')[0]['value'] self.run_cmd('/p4/common/bin/daily_checkpoint.sh %s' % instance, stop_on_error=False) # Quick check on c=log file contents log_contents = self.readLog('checkpoint.log', instance) 
self.assertRegex(log_contents, "Offline database not in a usable state") new_jnl_counter = self.p4run('counter', 'journal')[0]['value'] self.assertEqual(int(new_jnl_counter), int(jnl_counter)) def dailyBackupTest(self, instance): "Test daily backup script" jnl_counter = self.p4run('counter', 'journal')[0]['value'] logger.debug("dailyBackupTest") self.run_cmd('/p4/common/bin/daily_checkpoint.sh %s' % instance) # Quick check on c=log file contents log_contents = self.readLog('checkpoint.log', instance) self.assertRegex(log_contents, "Dumping to /p4/%s/checkpoints/p4_%s.ckp" % (instance, instance)) self.assertRegex(log_contents, "journal") new_jnl_counter = self.p4run('counter', 'journal')[0]['value'] self.assertEqual(int(new_jnl_counter), int(jnl_counter) + 1) def readLog(self, log_name, instance): "Read the appropriate log file contents" with open('/p4/%s/logs/%s' % (instance, log_name), 'r') as fh: log_contents = fh.read() return log_contents def verifyTest(self, instance): "Test verify script" logger.debug("verifyTest") verify_cmd = '/p4/common/bin/p4verify.sh %s' % instance self.run_cmd(verify_cmd) log_contents = self.readLog('p4verify.log', instance) for depot in ["depot", "specs"]: verify_ok = re.compile("verify -qz //%s/...\nexit: 0" % depot, re.MULTILINE) self.assertRegex(log_contents, verify_ok) # Make sure we check for shelves in the 'depot' depot. verify_ok = re.compile("verify -qS //depot/...\nexit: 0", re.MULTILINE) # Check that we use '-U' for unload depot, and do not do -qS (as there are no shelves # in an unload depot). Also, we expect errors about their not being any files in the # unload depot. verify_ok = re.compile("verify -U -q //unload/...\nerror: //unload/... 
- no such unloaded", re.MULTILINE) self.assertRegex(log_contents, verify_ok) # Streams depot doesn't have any files so gives an error - we just search for it verify_ok = re.compile("verify -qz //streams/...\nexit: 0", re.MULTILINE) self.assertRegex(log_contents, verify_ok) # Now create verify errors and make sure we see them orig_depot_name = '/p4/%s/depots/depot' % instance new_depot_name = orig_depot_name + '.new' os.rename(orig_depot_name, new_depot_name) self.run_cmd(verify_cmd, stop_on_error=False) log_contents = self.readLog('p4verify.log', instance) for depot in ["depot"]: verify_ok = re.compile("verify -qz //%s/...\nerror: [^\n]*MISSING!\nexit: 1" % depot, re.MULTILINE) self.assertRegex(log_contents, verify_ok) # Rename things back again and all should be well! os.rename(new_depot_name, orig_depot_name) time.sleep(1) # Make sure verify log is OK. self.run_cmd(verify_cmd, stop_on_error=True) log_contents = self.readLog('p4verify.log', instance) for depot in ["depot", "specs"]: verify_ok = re.compile("verify -qz //%s/...\nexit: 0" % depot, re.MULTILINE) self.assertRegex(log_contents, verify_ok) def failedrecreateDBFromOfflineTest(self, instance): "Test recreate DB from offline script - expected to fail due to lack of offline db" logger.debug("failedrecreateDBFromOfflineTest") jnl_counter = self.p4run('counter', 'journal')[0]['value'] self.run_cmd('/p4/common/bin/recreate_db_checkpoint.sh %s' % instance, stop_on_error=False) # Quick check on c=log file contents log_contents = self.readLog('checkpoint.log', instance) self.assertRegex(log_contents, "Offline database not in a usable state") self.p4.disconnect() # Need to reconnect as script has restarted p4d self.p4.connect() new_jnl_counter = self.p4run('counter', 'journal')[0]['value'] self.assertEqual(int(new_jnl_counter), int(jnl_counter)) def recreateDBFromOfflineTest(self, instance): "Test recreate db from offline backup script" logger.debug("recreateDBFromOfflineTest") jnl_counter = self.p4run('counter', 
'journal')[0]['value'] self.run_cmd('/p4/common/bin/recreate_db_checkpoint.sh %s' % instance) # Quick check on c=log file contents log_contents = self.readLog('checkpoint.log', instance) self.assertRegex(log_contents, "Dumping to /p4/%s/checkpoints/p4_%s.ckp" % (instance, instance)) self.assertRegex(log_contents, "journal") self.p4.disconnect() # Need to reconnect as script has restarted p4d self.p4.connect() new_jnl_counter = self.p4run('counter', 'journal')[0]['value'] # p4d_base now rotates the log on startup self.assertEqual(int(new_jnl_counter), int(jnl_counter) + 2) def configureServer(self, instance): "Set various configurables for master" configurables = """ security=3 run.users.authorize=1 db.peeking=2 dm.user.noautocreate=2 dm.user.resetpassword=1 filesys.P4ROOT.min=1G filesys.depot.min=1G filesys.P4JOURNAL.min=1G p4 configure unset monitor server=3 net.tcpsize=256k lbr.bufsize=256k server.commandlimits=2 serverlog.retain.3=7 serverlog.retain.7=7 serverlog.retain.8=7""".split("\n") instance_configurables = """ journalPrefix=/p4/${SDP_INSTANCE}/checkpoints/p4_${SDP_INSTANCE} server.depot.root=/p4/${SDP_INSTANCE}/depots serverlog.file.3=/p4/${SDP_INSTANCE}/logs/errors.csv serverlog.file.7=/p4/${SDP_INSTANCE}/logs/events.csv serverlog.file.8=/p4/${SDP_INSTANCE}/logs/integrity.csv""".split("\n") for c in [x.strip() for x in configurables]: self.p4run("configure set %s" % c) for ic in instance_configurables: ic = ic.strip() ic.replace("${SDP_INSTANCE}", instance) self.p4run("configure set %s" % ic) def configureReplication(self): "Configures stuff required for replication" def p4run(self, *args): "Run the command logging" logger.debug('p4 cmd: %s' % ",".join([str(x) for x in args])) result = self.p4.run(args) logger.debug('result: %s' % str(result)) return result def resetTest(self, instances): for instance in instances: self.sudo_cmd("ps -ef | grep p4d_%s | awk '{print $2}' | xargs kill > /dev/null 2>&1" % instance, stop_on_error=False) 
self.remove_test_dirs(instances) self.sudo_cmd("cp -R /sdp /hxdepots/sdp") self.sudo_cmd("sudo chown -R perforce:perforce /hxdepots/sdp") for f in ["/p4/p4.crontab", "/p4/p4.crontab.replica", "/p4/p4.crontab.edge"]: if os.path.exists(f): os.remove(f) for instance in instances: filename = "/p4/%s" %instance do_unlink(filename) do_unlink(filename.lower()) def configureInstance(self, instance, port): "Configure the master instance" # Stop the Perforce service if currently running from a previous run in case it is accessing dirs self.resetTest(instance) self.configure_mkdirs(instance) self.run_mkdirs(instance) depotdata_dir_list = """ /hxdepots/p4 /hxdepots/p4/SDP_INSTANCE /hxdepots/p4/SDP_INSTANCE/depots /hxdepots/p4/SDP_INSTANCE/bin /hxdepots/p4/SDP_INSTANCE/checkpoints /hxdepots/p4/common /hxdepots/p4/common/bin /hxdepots/p4/common/bin/triggers /hxdepots/p4/common/lib""".replace("SDP_INSTANCE", instance) logdata_dir_list = """ /hxlogs /hxlogs/p4 /hxlogs/p4/SDP_INSTANCE /hxlogs/p4/SDP_INSTANCE/tmp /hxlogs/p4/SDP_INSTANCE/logs""".replace("SDP_INSTANCE", instance) metadata1_dir_list = """ /hxmetadata1 /hxmetadata1/p4 /hxmetadata1/p4/SDP_INSTANCE /hxmetadata1/p4/SDP_INSTANCE/db1 /hxmetadata1/p4/SDP_INSTANCE/db1/save""".replace("SDP_INSTANCE", instance) metadata2_dir_list = """ /hxmetadata2 /hxmetadata2/p4 /hxmetadata2/p4/SDP_INSTANCE /hxmetadata2/p4/SDP_INSTANCE/db2 /hxmetadata2/p4/SDP_INSTANCE/db2/save""".replace("SDP_INSTANCE", instance) self.check_dirs('/hxdepots', depotdata_dir_list) self.check_dirs('/hxlogs', logdata_dir_list) self.check_dirs('/hxmetadata1', metadata1_dir_list) self.check_dirs('/hxmetadata2', metadata2_dir_list) configure_instance_vars(instance, port) configure_p4_vars(instance, port) def assertLogCount(self, expected, logPattern): logger.debug("Looking for logs: %s" % logPattern) logs = glob.glob(logPattern) self.assertEqual(3, len(logs)) def configure_master_p4d_instance(self, p4, instance): # Create our user and set password 
logger.debug('Creating user and setting password') # Create our user and set password user = p4.fetch_user('perforce') p4.save_user(user) p4.run_password('Password1', 'Password1') p4.password = 'Password1' p4.run_login() # Make him superuser prot = p4.fetch_protect() p4.save_protect(prot) # Things to setup # - create spec depot # - create a workspace and add at least one file # - configure the various tunables # - create server definitions - master and replica # - create service user for replica p4.run('configure', 'set', 'server.depot.root=/p4/%s/depots' % instance) p4.run('admin', 'restart') p4.disconnect() # New depot won't show up unless we do this time.sleep(1) p4.connect() if instance == 'Master': if not os.path.lexists("/p4/%s" % instance.lower()): self.run_cmd("ln -s /p4/%s /p4/%s" % (instance, instance.lower())) depot = p4.fetch_depot('specs') self.assertEqual(depot['Map'], 'specs/...') depot['Type'] = 'spec' p4.save_depot(depot) depot = p4.fetch_depot('unload') self.assertEqual(depot['Map'], 'unload/...') depot['Type'] = 'unload' p4.save_depot(depot) depot = p4.fetch_depot('archive') self.assertEqual(depot['Map'], 'archive/...') depot['Type'] = 'archive' p4.save_depot(depot) depot = p4.fetch_depot('streams') self.assertEqual(depot['Map'], 'streams/...') depot['Type'] = 'stream' p4.save_depot(depot) p4.disconnect() # New depot won't show up unless we do this p4.connect() depots = p4.run_depots() self.assertEqual(5, len(depots)) ws_name = 'test_ws' ws = p4.fetch_client(ws_name) ws['Root'] = '/tmp/test_ws' ws['View'] = ['//depot/main/... //%s/...' 
% ws_name] p4.save_client(ws) p4.client = ws_name if not os.path.exists(ws['Root']): os.mkdir(ws['Root']) fname = '/tmp/%s/file1' % ws_name if os.path.exists(fname): os.chmod(fname, stat.S_IWRITE) os.unlink(fname) with open(fname, 'w') as fh: fh.write('test data\n') p4.run_add(fname) chg = p4.fetch_change() chg['Description'] = 'Initial file' p4.save_submit(chg) changes = p4.run_changes() self.assertEqual(1, len(changes)) def instanceTest(self, instance, port): # So now we want to start up the Perforce service self.p4service("start", instance) p4 = P4.P4() self.p4 = p4 p4.port = 'localhost:%s' % port p4.user = 'perforce' p4.connect() # Create our user and set password user = p4.fetch_user('perforce') p4.save_user(user) p4.run_password('', 'Password1') p4.password = 'Password1' p4.run_login() # Make him superuser prot = p4.fetch_protect() p4.save_protect(prot) # Things to setup # - create spec depot # - create a workspace and add at least one file # - configure the various tunables # - create server definitions - master and replica # - create service user for replica # - run daily_checkpoint - check for error # - run live_checkpoint - make sure offline_db seeded # - run daily checkpoint more than once - create change lists in between times # - run recreate_db - check result self.configure_master_p4d_instance(p4, instance) ckpLogPattern = '/p4/%s/logs/checkpoint.log*' % instance logPattern = '/p4/%s/logs/log*' % instance # Following 2 tests should fail due to lack of offline_db self.failedDailyBackupTest(instance) time.sleep(1) self.failedrecreateDBFromOfflineTest(instance) time.sleep(1) self.liveCheckpointTest(instance) time.sleep(1) # Now the offline_db should exist # Run enough times to ensure logs get removed - (KEEPCKPS + 1) self.dailyBackupTest(instance) self.assertLogCount(3, ckpLogPattern) time.sleep(1) self.dailyBackupTest(instance) self.assertLogCount(3, ckpLogPattern) time.sleep(1) self.dailyBackupTest(instance) self.assertLogCount(3, ckpLogPattern) 
time.sleep(1) # Manually rotate journals again and ensure daily backup handles that self.p4run('admin', 'journal', '/p4/%s/checkpoints/p4_%s' % (instance, instance)) self.dailyBackupTest(instance) self.assertLogCount(3, ckpLogPattern) time.sleep(1) self.verifyTest(instance) # Tests: # - totalusers.py # - the various _init scripts # print('\n\nAbout to run recreate db from offline which sleeps for 30 seconds, so be patient...!') self.recreateDBFromOfflineTest(instance) self.assertLogCount(3, ckpLogPattern) self.assertLogCount(3, logPattern) time.sleep(1) self.dailyBackupTest(instance) self.assertLogCount(3, ckpLogPattern) self.assertLogCount(3, logPattern) # Delete offline_db and check we can recreate self.recreateOfflineDBTest(instance) self.assertLogCount(3, ckpLogPattern) self.assertLogCount(3, logPattern) time.sleep(1) self.dailyBackupTest(instance) # Note Daily doesn't increase the journal number so there are 2 with latest self.assertLogCount(3, ckpLogPattern) self.assertLogCount(3, logPattern) # print(p4.run_admin('stop')) def runTest(self): "Configure the master instance" all_instances = {"1": "1667", "Master": "2667"} if single_instance: if single_instance in all_instances: instances = {single_instance: all_instances[single_instance]} else: instances = all_instances for instance, port in instances.items(): self.resetTest(all_instances.keys()) self.configureInstance(instance, port) self.instanceTest(instance, port) if __name__ == "__main__": init_logging() parser = argparse.ArgumentParser(add_help=False) parser.add_argument('--instance', default=None) options, args = parser.parse_known_args() testrunner = None if (options.instance): single_instance = options.instance unittest.main(testRunner=testrunner, argv=sys.argv[:1] + args)
# | Change | User | Description | Committed | |
---|---|---|---|---|---|
#1 | 24292 | gmc | "Forking branch Dev of perforce-software-sdp to gmc-sdp." | ||
//guest/perforce_software/sdp/dev/Server/test/test_SDP.py | |||||
#42 | 23792 | C. Thomas Tyler |
Removed test assertion for p4verify.s expecting to see '//archive' depot referenced. The verification logic avoids depots of type archive entirely. |
||
#41 | 23763 | C. Thomas Tyler | Updated test assertions. | ||
#40 | 23305 | C. Thomas Tyler |
Adjusted test for running mkdirs.sh to accept more verbose mkdirs.sh output. |
||
#39 | 22387 | Robert Cowham |
Fix journal rotation off-by-one error Also make sure that -jj rotation specifies prefix. |
||
#38 | 22342 | Robert Cowham | Fixed hang when spawning subprocess - introduced by previous attempt to get output | ||
#37 | 22249 | Robert Cowham | Improve error messages when tests fail for emails etc. | ||
#36 | 22151 | Robert Cowham | Fix failing tests with new mkdirs.cfg | ||
#35 | 21308 | C. Thomas Tyler | Attempted to fix failing tests. | ||
#34 | 21267 | Robert Cowham |
Fix failing tests. Changed default filesystem names to: DB1=hxmetadata1 DB2=hxmetadata2 DD=hxdepots LG=hxlogs hx=Helix. Updated docs to reflect this |
||
#33 | 20946 | Robert Cowham | Fix tests | ||
#32 | 20878 | Robert Cowham | Refactor - on the way to possible cross platform script. | ||
#31 | 20435 | C. Thomas Tyler |
Tweaked the test so it now recognizes as valid some new output from mkdirs.sh. |
||
#30 | 19851 | Robert Cowham |
Check for usable offline_db before creating checkpoint work file. This avoids an error right at the start locking out the utility which will fix said error! |
||
#29 | 19527 | Russell C. Jackson (Rusty) | Added KEEPJNLS substitution. | ||
#28 | 19410 | Russell C. Jackson (Rusty) |
Made out on setting permissions consistent in mkdirs. Added the new output as valid in test_SDP. |
||
#27 | 19114 | Russell C. Jackson (Rusty) | Updated names for checkpoint scripts. | ||
#26 | 19111 | Russell C. Jackson (Rusty) | Removed instance in journal check. | ||
#25 | 19109 | Russell C. Jackson (Rusty) | just check for text: journal | ||
#24 | 19108 | Russell C. Jackson (Rusty) | Fix live_checkpoint test output. | ||
#23 | 19107 | Russell C. Jackson (Rusty) | Put fix in correct test for new journal rotation. | ||
#22 | 19106 | Russell C. Jackson (Rusty) | Change to handle different output on journal rotation with new command. | ||
#21 | 18936 | C. Thomas Tyler |
De-emphasizing changeType=restricted, including deprecating associated triggers. |
||
#20 | 18623 | Russell C. Jackson (Rusty) | Account for new crontab file in cleanup. | ||
#19 | 18600 | Russell C. Jackson (Rusty) | Change number of expected logs to 3. | ||
#18 | 18595 | Russell C. Jackson (Rusty) |
Fixed a log rotation bug that has been around for a long time. If you rotated the journal more times than KEEPCKPS and KEEPLOGS, the old method would remove all of your logs and checkpoints because it didn't actually look at how many were on disk. Found the bug while reviewing the test harness with Robert. Adjusted the test harness to account for the change. (Stole from Robert's shelf.) |
||
#17 | 18590 | Robert Cowham |
Fix failing tests. Change log filename format to use - instead of : as separator for date/time component |
||
#16 | 18589 | Russell C. Jackson (Rusty) | Changed number of expected logs to 7 since I removed the lines that block the delete of 0-7 from backup_functions.sh | ||
#15 | 18539 | Robert Cowham | Fix timing issue and test assertions for log cleaning. | ||
#14 | 18537 | Robert Cowham |
Tests for recreate_offline_db.sh Refactor log file assertions. |
||
#13 | 18536 | Robert Cowham | Check that log files are getting deleted as appropriate. | ||
#12 | 18535 | Robert Cowham | Add option to specify single instance to run - useful for testing in some cases. | ||
#11 | 16784 | C. Thomas Tyler |
Routine Merge Down to dev from main using: p4 -s merge -n -b perforce_software-sdp-dev |
||
#10 | 16563 | C. Thomas Tyler |
Routine Merge Down to dev from main using: p4 merge -b perforce_software-sdp-dev p4 resolve -as |
||
#9 | 16373 | C. Thomas Tyler |
Routine Merge Down to dev from main using: p4 merge -b perforce_software-sdp-dev |
||
#8 | 16335 | C. Thomas Tyler |
Routine Merge Down to dev from main using: p4 merge -b perforce_software-sdp-dev |
||
#7 | 14136 | C. Thomas Tyler |
Routine merge down to dev from main for SDP using perforce_software-sdp-dev. |
||
#6 | 13910 | C. Thomas Tyler | Updated test suite. | ||
#5 | 12245 | C. Thomas Tyler | Routine merge down from main. | ||
#4 | 12116 | Russell C. Jackson (Rusty) | Update dev from main. | ||
#3 | 12028 | C. Thomas Tyler | Refreshed SDP dev branch, merging down from main. | ||
#2 | 11541 | Russell C. Jackson (Rusty) | Keeping dev up to date. | ||
#1 | 10638 | C. Thomas Tyler | Populate perforce_software-sdp-dev. | ||
//guest/perforce_software/sdp/main/Server/test/test_SDP.py | |||||
#1 | 10148 | C. Thomas Tyler | Promoted the Perforce Server Deployment Package to The Workshop. |