
Commit

Merge branch 'development' into setup
altendky committed Apr 6, 2021
2 parents 34000cf + 3e1f950 commit fceb2be
Showing 3 changed files with 136 additions and 98 deletions.
9 changes: 9 additions & 0 deletions config.yaml
@@ -1,3 +1,12 @@
# Options for display and rendering
user_interface:
        # Call out to the `stty` program to determine terminal size, instead of
        # relying on what is reported by the curses library. In some cases,
        # the curses library fails to update on SIGWINCH signals. If the
        # `plotman interactive` curses interface does not properly adjust when
        # you resize the terminal window, you can try setting this to True.
        use_stty_size: True

# Where to plot and log.
directories:
        # One directory in which to store all plot job logs (the STDOUT/
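For reference, a minimal standalone sketch of the stty-based strategy this option enables, using the same `stty size` subprocess call that the commit adds to interactive.py below; the shutil.get_terminal_size() fallback here is only an illustration (it appears as a TODO in the code), not something this commit adds:

import shutil
import subprocess

def terminal_size(use_stty_size):
    """Return (rows, cols) for the controlling terminal."""
    if use_stty_size:
        # `stty size` prints "<rows> <cols>".
        completed = subprocess.run(
            ['stty', 'size'], check=True, encoding='utf-8',
            stdout=subprocess.PIPE)
        (rows, cols) = [int(v) for v in completed.stdout.split()]
        return (rows, cols)
    # Illustration-only fallback; the code below uses stdscr.getmaxyx() instead.
    size = shutil.get_terminal_size()
    return (size.lines, size.columns)
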
222 changes: 126 additions & 96 deletions src/plotman/interactive.py
@@ -67,6 +67,7 @@ def curses_main(stdscr):
# duplicating the code here.
with open('config.yaml', 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
ui_cfg = cfg['user_interface']
dir_cfg = cfg['directories']
sched_cfg = cfg['scheduling']
plotting_cfg = cfg['plotting']
@@ -77,30 +78,6 @@ def curses_main(stdscr):
archiving_configured = 'archive' in dir_cfg
archiving_active = archiving_configured

(n_rows, n_cols) = map(int, stdscr.getmaxyx())

# Page layout.
if (n_rows < 24):
raise Exception(f'Terminal has only {n_rows} lines; requires 24. '
'Try a larger terminal window.')
if (n_cols < 80):
raise Exception(f'Terminal has only {n_cols} lines; requires 80. '
'Try a larger terminal window.')

n_tmpdirs = len(dir_cfg['tmp'])
n_tmpdirs_half = int(n_tmpdirs / 2)

header_height = 3
dirs_height = n_tmpdirs_half + 8 # arch dirs & headers
remainder = n_rows - (header_height + dirs_height)
jobs_height = max(5, math.floor(remainder * 0.6))
logscreen_height = n_rows - (header_height + jobs_height + dirs_height)

header_pos = 0
jobs_pos = header_pos + header_height
dirs_pos = jobs_pos + jobs_height
logscreen_pos = dirs_pos + dirs_height

plotting_status = '<startup>' # todo rename these msg?
archiving_status = '<startup>'

@@ -109,41 +86,31 @@ def curses_main(stdscr):
stdscr.nodelay(True) # make getch() non-blocking
stdscr.timeout(2000)

try:
header_win = curses.newwin(header_height, n_cols, header_pos, 0)
log_win = curses.newwin(logscreen_height, n_cols, logscreen_pos, 0)
jobs_win = curses.newwin(jobs_height, n_cols, jobs_pos, 0)
dirs_win = curses.newwin(dirs_height, n_cols, dirs_pos, 0)
except Exception:
raise Exception('Failed to initialize curses windows, try a larger '
'terminal window.')
# Create windows. We'll size them in the main loop when we have their content.
header_win = curses.newwin(1, 1, 1, 0)
log_win = curses.newwin(1, 1, 1, 0)
jobs_win = curses.newwin(1, 1, 1, 0)
dirs_win = curses.newwin(1, 1, 1, 0)

jobs = Job.get_running_jobs(dir_cfg['log'])
last_refresh = datetime.datetime.now()
last_refresh = None

pressed_key = '' # For debugging

arch_report = '<initializing>'
archdir_freebytes = None

while True:

# TODO: handle resizing. Need to (1) figure out how to reliably get
# the terminal size -- the recommended method doesn't seem to work:
# (n_rows, n_cols) = [int(v) for v in stdscr.getmaxyx()]
# Consider instead:
# ...[int(v) for v in os.popen('stty size', 'r').read().split()]
# and then (2) implement the logic to resize all the subwindows as above

stdscr.clear()
linecap = n_cols - 1
logscreen_height = n_rows - (header_height + jobs_height + dirs_height)

elapsed = (datetime.datetime.now() - last_refresh).total_seconds()

# A full refresh scans for and reads info for running jobs from
# scratch (i.e., reread their logfiles). Otherwise we'll only
# initialize new jobs, and mostly rely on cached info.
do_full_refresh = elapsed >= refresh_period
do_full_refresh = False
elapsed = 0 # Time since last refresh, or zero if no prev. refresh
if last_refresh is None:
do_full_refresh = True
else:
elapsed = (datetime.datetime.now() - last_refresh).total_seconds()
do_full_refresh = elapsed >= refresh_period

if not do_full_refresh:
jobs = Job.get_running_jobs(dir_cfg['log'], cached_jobs=jobs)
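As context for the refresh handling above: a full refresh (rereading job logfiles) now happens on the first pass, when last_refresh is None, and afterwards only once refresh_period seconds have elapsed; otherwise cached job info is reused. A small standalone sketch of that pattern, with hypothetical full_scan()/cached_update() helpers:

import datetime

def maybe_refresh(last_refresh, refresh_period, full_scan, cached_update):
    """Run full_scan() if refresh_period seconds have passed, else cached_update()."""
    now = datetime.datetime.now()
    if last_refresh is None or (now - last_refresh).total_seconds() >= refresh_period:
        full_scan()
        return now          # becomes the new last_refresh
    cached_update()
    return last_refresh
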
@@ -153,41 +120,130 @@ def curses_main(stdscr):
jobs = Job.get_running_jobs(dir_cfg['log'])

if plotting_active:
(started, msg) = manager.maybe_start_new_plot(dir_cfg, sched_cfg, plotting_cfg)
(started, msg) = manager.maybe_start_new_plot(
dir_cfg, sched_cfg, plotting_cfg)
if (started):
log.log(msg)
plotting_status = '<just started job>'
jobs = Job.get_running_jobs(dir_cfg['log'], cached_jobs=jobs)
else:
plotting_status = msg

if archiving_configured and archiving_active:
# Look for running archive jobs. Be robust to finding more than one
# even though the scheduler should only run one at a time.
arch_jobs = archive.get_running_archive_jobs(dir_cfg['archive'])
if arch_jobs:
archiving_status = 'pid: ' + ', '.join(map(str, arch_jobs))
else:
(should_start, status_or_cmd) = archive.archive(dir_cfg, jobs)
if not should_start:
archiving_status = status_or_cmd
if archiving_configured:
if archiving_active:
# Look for running archive jobs. Be robust to finding more than one
# even though the scheduler should only run one at a time.
arch_jobs = archive.get_running_archive_jobs(dir_cfg['archive'])
if arch_jobs:
archiving_status = 'pid: ' + ', '.join(map(str, arch_jobs))
else:
cmd = status_or_cmd
log.log('Starting archive: ' + cmd)
(should_start, status_or_cmd) = archive.archive(dir_cfg, jobs)
if not should_start:
archiving_status = status_or_cmd
else:
cmd = status_or_cmd
log.log('Starting archive: ' + cmd)

# TODO: do something useful with output instead of DEVNULL
p = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
start_new_session=True)

archdir_freebytes = archive.get_archdir_freebytes(dir_cfg['archive'])


# Get terminal size. Recommended method is stdscr.getmaxyx(), but this
# does not seem to work on some systems. It may be a bug in Python
# curses, maybe having to do with registering sigwinch handlers in
# multithreaded environments. See e.g.
# https://stackoverflow.com/questions/33906183#33906270
# Alternative option is to call out to `stty size`. For now, we
# support both strategies, selected by a config option.
# TODO: also try shutil.get_terminal_size()
n_rows: int
n_cols: int
if 'use_stty_size' in ui_cfg and ui_cfg['use_stty_size']:
completed_process = subprocess.run(['stty', 'size'], check=True,
encoding='utf-8', stdout=subprocess.PIPE)
elements = completed_process.stdout.split()
(n_rows, n_cols) = [int(v) for v in elements]
else:
(n_rows, n_cols) = map(int, stdscr.getmaxyx())

stdscr.clear()
stdscr.resize(n_rows, n_cols)
curses.resize_term(n_rows, n_cols)

# TODO: do something useful with output instead of DEVNULL
p = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
start_new_session=True)
#
# Obtain and measure content
#

# Directory prefixes, for abbreviation
tmp_prefix = os.path.commonpath(dir_cfg['tmp'])
dst_prefix = os.path.commonpath(dir_cfg['dst'])
if archiving_configured:
arch_prefix = dir_cfg['archive']['rsyncd_path']

n_tmpdirs = len(dir_cfg['tmp'])
n_tmpdirs_half = int(n_tmpdirs / 2)

# Directory reports.
tmp_report_1 = reporting.tmp_dir_report(
jobs, dir_cfg, sched_cfg, n_cols, 0, n_tmpdirs_half, tmp_prefix)
tmp_report_2 = reporting.tmp_dir_report(
jobs, dir_cfg, sched_cfg, n_cols, n_tmpdirs_half, n_tmpdirs, tmp_prefix)
dst_report = reporting.dst_dir_report(
jobs, dir_cfg['dst'], n_cols, dst_prefix)
if archiving_configured:
arch_report = reporting.arch_dir_report(archdir_freebytes, n_cols, arch_prefix)
if not arch_report:
arch_report = '<no archive dir info>'
else:
arch_report = '<archiving not configured>'

#
# Layout
#

tmp_h = max(len(tmp_report_1.splitlines()),
len(tmp_report_2.splitlines()))
tmp_w = len(max(tmp_report_1.splitlines() +
tmp_report_2.splitlines(), key=len)) + 1
dst_h = len(dst_report.splitlines())
dst_w = len(max(dst_report.splitlines(), key=len)) + 1
arch_h = len(arch_report.splitlines()) + 1
arch_w = n_cols

header_h = 3
dirs_h = max(tmp_h, dst_h) + arch_h
remainder = n_rows - (header_h + dirs_h)
jobs_h = max(5, math.floor(remainder * 0.6))
logs_h = n_rows - (header_h + jobs_h + dirs_h)

header_pos = 0
jobs_pos = header_pos + header_h
stdscr.resize(n_rows, n_cols)
dirs_pos = jobs_pos + jobs_h
logscreen_pos = dirs_pos + dirs_h

linecap = n_cols - 1
logs_h = n_rows - (header_h + jobs_h + dirs_h)

try:
header_win = curses.newwin(header_h, n_cols, header_pos, 0)
log_win = curses.newwin(logs_h, n_cols, logscreen_pos, 0)
jobs_win = curses.newwin(jobs_h, n_cols, jobs_pos, 0)
dirs_win = curses.newwin(dirs_h, n_cols, dirs_pos, 0)
except Exception:
raise Exception('Failed to initialize curses windows, try a larger '
'terminal window.')

#
# Write
#

# Header
header_win.addnstr(0, 0, 'Plotman', linecap, curses.A_BOLD)
timestamp = datetime.datetime.now().strftime("%H:%M:%S")
@@ -221,37 +277,11 @@ def curses_main(stdscr):


# Jobs
jobs_win.addstr(0, 0, reporting.status_report(jobs, n_cols, jobs_height,
jobs_win.addstr(0, 0, reporting.status_report(jobs, n_cols, jobs_h,
tmp_prefix, dst_prefix))
jobs_win.chgat(0, 0, curses.A_REVERSE)

# Dirs. Collect reports as strings, then lay out.
tmp_report_1 = reporting.tmp_dir_report(
jobs, dir_cfg, sched_cfg, n_cols, 0, n_tmpdirs_half, tmp_prefix)
tmp_report_2 = reporting.tmp_dir_report(
jobs, dir_cfg, sched_cfg, n_cols, n_tmpdirs_half, n_tmpdirs, tmp_prefix)

dst_report = reporting.dst_dir_report(
jobs, dir_cfg['dst'], n_cols, dst_prefix)

if do_full_refresh:
if archiving_configured:
arch_report = reporting.arch_dir_report(
archive.get_archdir_freebytes(dir_cfg['archive']), n_cols, arch_prefix)
if not arch_report:
arch_report = '<no archive dir info>'
else:
arch_report = '<archiving not configured>'

tmp_h = max(len(tmp_report_1.splitlines()),
len(tmp_report_2.splitlines()))
tmp_w = len(max(tmp_report_1.splitlines() +
tmp_report_2.splitlines(), key=len)) + 1
dst_h = len(dst_report.splitlines())
dst_w = len(max(dst_report.splitlines(), key=len)) + 1
arch_h = len(arch_report.splitlines()) + 1
arch_w = n_cols

# Dirs
tmpwin_12_gutter = 3
tmpwin_dstwin_gutter = 6

@@ -285,7 +315,7 @@ def curses_main(stdscr):
# this seems easier.
log_win.addnstr(0, 0, ('Log: %d (<up>/<down>/<end> to scroll)\n' % log.get_cur_pos() ),
linecap, curses.A_REVERSE)
for i, logline in enumerate(log.cur_slice(logscreen_height - 1)):
for i, logline in enumerate(log.cur_slice(logs_h - 1)):
log_win.addnstr(i + 1, 0, logline, linecap)

stdscr.noutrefresh()
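To make the resizing change above concrete, here is the same layout arithmetic run on hypothetical report sizes (a 40x120 terminal, a 6-line tmp report, a 5-line dst report, and a 3-line archive report); the values are illustrative only:

import math

n_rows, n_cols = 40, 120          # hypothetical terminal size
tmp_h, dst_h, arch_h = 6, 5, 3    # hypothetical report heights

header_h = 3
dirs_h = max(tmp_h, dst_h) + arch_h             # 6 + 3 = 9
remainder = n_rows - (header_h + dirs_h)        # 40 - 12 = 28
jobs_h = max(5, math.floor(remainder * 0.6))    # floor(16.8) = 16
logs_h = n_rows - (header_h + jobs_h + dirs_h)  # 40 - 28 = 12

assert header_h + jobs_h + dirs_h + logs_h == n_rows
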
3 changes: 1 addition & 2 deletions src/plotman/reporting.py
@@ -71,7 +71,7 @@ def status_report(jobs, width, height=None, tmp_prefix='', dst_prefix=''):
abbreviate_jobs_list = True

if abbreviate_jobs_list:
n_rows = height - 2 # One for header, one for elipsis
n_rows = height - 2 # Minus one for header, one for ellipsis
n_begin_rows = int(n_rows / 2)
n_end_rows = n_rows - n_begin_rows
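
The abbreviation logic above reserves one row for the header and one for the ellipsis, then shows the first and last halves of the job list. A small illustrative sketch (abbreviate_rows is a hypothetical helper, not part of reporting.py):

def abbreviate_rows(rows, height):
    """Elide the middle of `rows` so it fits in `height` lines including a header."""
    if len(rows) <= height - 1:         # everything fits under the header
        return rows
    n_rows = height - 2                 # minus one for header, one for ellipsis
    n_begin_rows = int(n_rows / 2)
    n_end_rows = n_rows - n_begin_rows
    return rows[:n_begin_rows] + ['...'] + rows[-n_end_rows:]

# abbreviate_rows(['job%d' % i for i in range(10)], height=6)
# -> ['job0', 'job1', '...', 'job8', 'job9']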

@@ -195,4 +195,3 @@ def dirs_report(jobs, dir_cfg, sched_cfg, width):
'archive dirs free space:\n' +
arch_dir_report(archive.get_archdir_freebytes(arch_cfg), width) + '\n')

