|
@@ -1,5 +1,5 @@
|
|
|
#!/usr/bin/python3
|
|
|
-#
|
|
|
+
|
|
|
import time
|
|
|
import threading
|
|
|
import subprocess
|
|
@@ -8,7 +8,8 @@ import sys
|
|
|
import os
|
|
|
import pickle
|
|
|
import gzip
|
|
|
-#
|
|
|
+import argparse
|
|
|
+
|
|
|
PROC_STAT_HEADERS = ('user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq', 'steal', 'guest', 'guest_nice')
|
|
|
PROC_PID_STAT_HEADERS = ('pid', 'comm', 'state', 'ppid', 'pgrp', 'session', 'tty_nr', 'tpgid', 'flags', 'minflt',
|
|
|
'cminflt', 'majflt', 'cmajflt', 'utime', 'stime', 'cutime', 'cstime', 'priority', 'nice',
|
|
@@ -17,224 +18,257 @@ PROC_PID_STAT_HEADERS = ('pid', 'comm', 'state', 'ppid', 'pgrp', 'session', 'tty
|
|
|
'sigcatch', 'wchan', 'nswap', 'cnswap', 'exit_signal', 'processor', 'rt_priority',
|
|
|
'policy', 'delayacct_blkio_ticks', 'guest_time', 'cguest_time', 'start_data', 'end_data',
|
|
|
'start_brk', 'arg_start', 'arg_end', 'env_start', 'env_end', 'exit_code')
|
|
|
-#
|
|
|
-def get_cpu_stats(path='/proc/stat'):
|
|
|
- """
|
|
|
- Get CPU statistics from /proc/stat. Output is returned for the system and each CPU.
|
|
|
|
|
|
- Ex:
|
|
|
+pid_stat_regex = re.compile(r'(\(.*\)|\S+)', flags=re.DOTALL)
|
|
|
|
|
|
- {'system': {'user': 8696397, 'nice': 22431, ...},
|
|
|
- 'cpus': {
|
|
|
- 0: {'user': 4199206, 'nice': 11615, ...},
|
|
|
- 1: {'user': 4199308, 'nice': 10642, ...}
|
|
|
- }
|
|
|
- }
|
|
|
- """
|
|
|
- #
|
|
|
- with open(path, 'r') as f:
|
|
|
- lines = f.readlines()
|
|
|
- #
|
|
|
- cpu_lines = [l for l in lines if l.startswith('cpu')]
|
|
|
- stats = {'system':None, 'cpus':{}}
|
|
|
+def read_proc_stat_file(f):
|
|
|
+ f.seek(0)
|
|
|
+
|
|
|
+ cpu_lines = []
|
|
|
+ found_cpu_line = False
|
|
|
+ for line in f:
|
|
|
+ # only read as much of the file as we need
|
|
|
+ if len(line) == 0:
|
|
|
+ continue
|
|
|
+ #
|
|
|
+ if line[0:3] == 'cpu':
|
|
|
+ cpu_lines.append(line)
|
|
|
+ found_cpu_line = True
|
|
|
+ elif found_cpu_line:
|
|
|
+ break
|
|
|
+ #
|
|
|
#
|
|
|
+
|
|
|
+ return cpu_lines
|
|
|
+#
|
|
|
+
|
|
|
+def parse_proc_stat_file(cpu_lines):
|
|
|
+ stats = {'system': None, 'cpus': {}}
|
|
|
+
|
|
|
for l in cpu_lines:
|
|
|
l_split = l.split()
|
|
|
- cpu_index = l_split[0][3:]
|
|
|
+ cpu_index = int(l_split[0][3:]) if len(l_split[0][3:]) != 0 else None
|
|
|
cpu_stats = {x[0]: int(x[1]) for x in zip(PROC_STAT_HEADERS, l_split[1:])}
|
|
|
- #
|
|
|
- if cpu_index == '':
|
|
|
+
|
|
|
+        if cpu_index is None:
|
|
|
stats['system'] = cpu_stats
|
|
|
else:
|
|
|
- stats['cpus'][int(cpu_index)] = cpu_stats
|
|
|
+ stats['cpus'][cpu_index] = cpu_stats
|
|
|
#
|
|
|
#
|
|
|
+
|
|
|
return stats
|
|
|
#
|
|
|
-def parse_stat_file(path):
|
|
|
- with open(path, 'r') as f:
|
|
|
- contents = f.read()
|
|
|
- #
|
|
|
- raw_stats = re.findall("(\(.*\)|\S+)", contents, flags=re.DOTALL)
|
|
|
+
|
|
|
+def read_proc_pid_stat_file(f):
|
|
|
+ f.seek(0)
|
|
|
+ return f.read()
|
|
|
+#
|
|
|
+
|
|
|
+def parse_proc_pid_stat_file(contents):
|
|
|
+ raw_stats = pid_stat_regex.findall(contents)
|
|
|
proc_stats = {x[0]: x[1] for x in zip(PROC_PID_STAT_HEADERS, raw_stats)}
|
|
|
+
|
|
|
for k in proc_stats:
|
|
|
if k != 'comm' and k != 'state':
|
|
|
proc_stats[k] = int(proc_stats[k])
|
|
|
#
|
|
|
#
|
|
|
+
|
|
|
return proc_stats
|
|
|
#
|
|
|
-def get_proc_stats(pid):
|
|
|
- pid = int(pid)
|
|
|
- path = os.path.join('/proc', str(pid), 'stat')
|
|
|
- #
|
|
|
- return parse_stat_file(path)
|
|
|
-#
|
|
|
-def get_thread_stats(tid):
|
|
|
- tid = int(tid)
|
|
|
- path = os.path.join('/proc', str(tid), 'task', str(tid), 'stat')
|
|
|
- #
|
|
|
- return parse_stat_file(path)
|
|
|
-#
|
|
|
-def calculate_cpu_stats(stats):
|
|
|
+
|
|
|
+def calculate_cpu_idle_stats(stats):
|
|
|
idle = stats['idle'] + stats['iowait']
|
|
|
non_idle = stats['user'] + stats['nice'] + stats['system'] + stats['irq'] + stats['softirq'] + stats['steal']
|
|
|
- #
|
|
|
+
|
|
|
return (idle, non_idle)
|
|
|
#
|
|
|
-def calculate_cpu_usage(initial, current):
|
|
|
+
|
|
|
+def calculate_core_cpu_usage(initial, current):
|
|
|
"""
|
|
|
Calculation adapted from: https://stackoverflow.com/questions/23367857/accurate-calculation-of-cpu-usage-given-in-percentage-in-linux/
|
|
|
"""
|
|
|
- #
|
|
|
- (initial_idle, initial_non_idle) = calculate_cpu_stats(initial)
|
|
|
+
|
|
|
+ (initial_idle, initial_non_idle) = calculate_cpu_idle_stats(initial)
|
|
|
initial_total = initial_idle + initial_non_idle
|
|
|
- #
|
|
|
- (current_idle, current_non_idle) = calculate_cpu_stats(current)
|
|
|
+
|
|
|
+ (current_idle, current_non_idle) = calculate_cpu_idle_stats(current)
|
|
|
current_total = current_idle + current_non_idle
|
|
|
- #
|
|
|
- return (current_non_idle-initial_non_idle)/(current_total-initial_total)
|
|
|
+
|
|
|
+ clock_ticks = current_non_idle-initial_non_idle
|
|
|
+ fraction = clock_ticks/(current_total-initial_total)
|
|
|
+ return (clock_ticks, fraction)
|
|
|
#
|
|
|
+
|
|
|
def calculate_process_cpu_usage(process_initial, process_current, cpu_initial, cpu_current):
|
|
|
- (initial_idle, initial_non_idle) = calculate_cpu_stats(cpu_initial)
|
|
|
- initial_total = initial_idle + initial_non_idle
|
|
|
- #
|
|
|
- (current_idle, current_non_idle) = calculate_cpu_stats(cpu_current)
|
|
|
- current_total = current_idle + current_non_idle
|
|
|
- #
|
|
|
process_initial_non_idle = process_initial['utime'] + process_initial['stime']
|
|
|
process_current_non_idle = process_current['utime'] + process_current['stime']
|
|
|
+
|
|
|
+ used_cores = (process_initial['processor'], process_current['processor'])
|
|
|
+ core_totals = []
|
|
|
+ for core in used_cores:
|
|
|
+ (initial_idle, initial_non_idle) = calculate_cpu_idle_stats(cpu_initial[core])
|
|
|
+ initial_total = initial_idle + initial_non_idle
|
|
|
+
|
|
|
+ (current_idle, current_non_idle) = calculate_cpu_idle_stats(cpu_current[core])
|
|
|
+ current_total = current_idle + current_non_idle
|
|
|
+
|
|
|
+ core_totals.append(current_total-initial_total)
|
|
|
#
|
|
|
- return (process_current_non_idle-process_initial_non_idle)/(current_total-initial_total)
|
|
|
+
|
|
|
+ clock_ticks = process_current_non_idle-process_initial_non_idle
|
|
|
+ fraction = clock_ticks/(sum(x**2 for x in core_totals)/sum(core_totals))
|
|
|
+ return (clock_ticks, fraction)
|
|
|
#
|
|
|
-def calculate_cpu_usage_continuous(stats):
|
|
|
- cpu_usages = []
|
|
|
+
|
|
|
+def calculate_core_cpu_usage_continuous(stats):
|
|
|
+ ticks = []
|
|
|
+ fractions = []
|
|
|
+
|
|
|
for i in range(len(stats)-1):
|
|
|
- cpu_usages.append(calculate_cpu_usage(stats[i], stats[i+1]))
|
|
|
+ (clock_ticks, fraction) = calculate_core_cpu_usage(stats[i], stats[i+1])
|
|
|
+ ticks.append(clock_ticks)
|
|
|
+ fractions.append(fraction)
|
|
|
#
|
|
|
- return cpu_usages
|
|
|
+
|
|
|
+ return {'ticks': ticks, 'fractions': fractions}
|
|
|
#
|
|
|
+
|
|
|
def calculate_process_cpu_usage_continuous(process_stats, cpu_stats):
|
|
|
- process_usages = []
|
|
|
+ ticks = []
|
|
|
+ fractions = []
|
|
|
+
|
|
|
assert all([len(process_stats) == len(cpu_stats[i]) for i in cpu_stats])
|
|
|
+
|
|
|
for i in range(len(process_stats)-1):
|
|
|
- using_core_0 = process_stats[i]['processor']
|
|
|
- using_core_1 = process_stats[i+1]['processor']
|
|
|
- usage_0 = calculate_process_cpu_usage(process_stats[i], process_stats[i+1], cpu_stats[using_core_0][i], cpu_stats[using_core_0][i+1])
|
|
|
- usage_1 = calculate_process_cpu_usage(process_stats[i], process_stats[i+1], cpu_stats[using_core_1][i], cpu_stats[using_core_1][i+1])
|
|
|
- process_usages.append((usage_0+usage_1)/2)
|
|
|
+ (clock_ticks, fraction) = calculate_process_cpu_usage(process_stats[i], process_stats[i+1], {core: cpu_stats[core][i] for core in cpu_stats}, {core: cpu_stats[core][i+1] for core in cpu_stats})
|
|
|
+ ticks.append(clock_ticks)
|
|
|
+ fractions.append(fraction)
|
|
|
#
|
|
|
- return process_usages
|
|
|
-#
|
|
|
-#def get_running_processes():
|
|
|
-# lines = subprocess.check_output(['ps', '-a', '-x', '-o', 'pid,state,args', '--no-headers']).decode('utf-8').split('\n')
|
|
|
-# lines = [line.strip() for line in lines]
|
|
|
-# lines = [line.split(' ', 2) for line in lines if len(line) != 0]
|
|
|
-# #
|
|
|
-# data = []
|
|
|
-# for line in lines:
|
|
|
-# data.append({'pid':int(line[0]), 'state':line[1], 'args':line[2]})
|
|
|
-# #
|
|
|
-# return data
|
|
|
+
|
|
|
+ return {'ticks': ticks, 'fractions': fractions}
|
|
|
#
|
|
|
+
|
|
|
def log_cpu_stats(path, interval, pids, stop_event):
|
|
|
- """
|
|
|
- Log the cpu stats to a gz compressed JSON file. Storing JSON
|
|
|
- seems to only use about 10% more disk space than storing
|
|
|
- bytes directly (4 bytes per value), so JSON is used for
|
|
|
- simplicity.
|
|
|
-
|
|
|
- path: file to save to
|
|
|
- interval: how many seconds to wait before getting more data
|
|
|
- stop_event: a threading.Event which stops the function
|
|
|
- """
|
|
|
- #
|
|
|
- pids = [int(pid) for pid in pids]
|
|
|
- threads = {pid: [int(tid) for tid in os.listdir('/proc/{}/task'.format(pid))] for pid in pids}
|
|
|
- stats = {'timestamps':[],
|
|
|
- 'cpu':{'system':[],
|
|
|
- 'id':{x: [] for x in get_cpu_stats()['cpus'].keys()}},
|
|
|
- 'process':{x: {'pid': [],
|
|
|
- 'tid': {y: [] for y in threads[x]}} for x in pids}}
|
|
|
- #
|
|
|
+ pids = sorted([int(pid) for pid in pids])
|
|
|
+ tids = {pid: sorted([int(tid) for tid in os.listdir('/proc/{}/task'.format(pid))]) for pid in pids}
|
|
|
+
|
|
|
+ stat_file = open('/proc/stat', 'r')
|
|
|
+ pid_files = {pid: open('/proc/{}/stat'.format(pid), 'r') for pid in pids}
|
|
|
+ tid_files = {pid: {tid: open('/proc/{}/task/{}/stat'.format(pid, tid), 'r') for tid in tids[pid]} for pid in pids}
|
|
|
+
|
|
|
+ raw_stats = {'timestamps': [],
|
|
|
+ 'timestamps_finished': [],
|
|
|
+ 'system': [],
|
|
|
+ 'process': {x: {'pid': [],
|
|
|
+ 'tid': {y: [] for y in tid_files[x]}} for x in pid_files}}
|
|
|
+
|
|
|
+ # begin collecting data
|
|
|
+
|
|
|
while not stop_event.is_set():
|
|
|
- current_time = time.time()
|
|
|
- stats['timestamps'].append(current_time)
|
|
|
+ start_time = time.time()
|
|
|
+ raw_stats['timestamps'].append(start_time)
|
|
|
+
|
|
|
+ t_0 = time.time()
|
|
|
+ contents = read_proc_stat_file(stat_file)
|
|
|
+ t_1 = time.time()
|
|
|
+ raw_stats['system'].append((contents, t_1, t_1-t_0))
|
|
|
+
|
|
|
+ for pid in pids:
|
|
|
+ t_0 = time.time()
|
|
|
+ contents = read_proc_pid_stat_file(pid_files[pid])
|
|
|
+ t_1 = time.time()
|
|
|
+ raw_stats['process'][pid]['pid'].append((contents, t_1, t_1-t_0))
|
|
|
+ for tid in tids[pid]:
|
|
|
+ t_0 = time.time()
|
|
|
+ contents = read_proc_pid_stat_file(tid_files[pid][tid])
|
|
|
+ t_1 = time.time()
|
|
|
+ raw_stats['process'][pid]['tid'][tid].append((contents, t_1, t_1-t_0))
|
|
|
+ #
|
|
|
#
|
|
|
- current_stats = get_cpu_stats()
|
|
|
- stats['cpu']['system'].append(current_stats['system'])
|
|
|
+
|
|
|
+ finished_time = time.time()
|
|
|
+ raw_stats['timestamps_finished'].append(finished_time)
|
|
|
+
|
|
|
+ wait_time = max(0, interval-(time.time()-finished_time))
|
|
|
+ stop_event.wait(wait_time)
|
|
|
+ #
|
|
|
+
|
|
|
+ # begin formatting data
|
|
|
+
|
|
|
+ stats = {'timestamps': raw_stats['timestamps'],
|
|
|
+ 'timestamps_finished': raw_stats['timestamps_finished'],
|
|
|
+ 'cpu':{'system': [],
|
|
|
+ 'id': {cpu: [] for cpu in parse_proc_stat_file(raw_stats['system'][0][0])['cpus'].keys()}},
|
|
|
+ 'process': {pid: {'pid': [],
|
|
|
+ 'tid': {tid: [] for tid in tids[pid]}} for pid in pids}}
|
|
|
+
|
|
|
+ for x in range(len(raw_stats['timestamps'])):
|
|
|
+ current_stats = parse_proc_stat_file(raw_stats['system'][x][0])
|
|
|
+
|
|
|
+ system_stats = current_stats['system']
|
|
|
+ system_stats['read_time'] = raw_stats['system'][x][1]
|
|
|
+ system_stats['read_duration'] = raw_stats['system'][x][2]
|
|
|
+ stats['cpu']['system'].append(system_stats)
|
|
|
+
|
|
|
for cpu in current_stats['cpus']:
|
|
|
stats['cpu']['id'][cpu].append(current_stats['cpus'][cpu])
|
|
|
#
|
|
|
+
|
|
|
for pid in pids:
|
|
|
- stats['process'][pid]['pid'].append(get_proc_stats(pid))
|
|
|
- for tid in threads[pid]:
|
|
|
- stats['process'][pid]['tid'][tid].append(get_thread_stats(tid))
|
|
|
+ pid_stats = parse_proc_pid_stat_file(raw_stats['process'][pid]['pid'][x][0])
|
|
|
+ pid_stats['read_time'] = raw_stats['process'][pid]['pid'][x][1]
|
|
|
+ pid_stats['read_duration'] = raw_stats['process'][pid]['pid'][x][2]
|
|
|
+ stats['process'][pid]['pid'].append(pid_stats)
|
|
|
+
|
|
|
+ for tid in tids[pid]:
|
|
|
+ tid_stats = parse_proc_pid_stat_file(raw_stats['process'][pid]['tid'][tid][x][0])
|
|
|
+ tid_stats['read_time'] = raw_stats['process'][pid]['tid'][tid][x][1]
|
|
|
+ tid_stats['read_duration'] = raw_stats['process'][pid]['tid'][tid][x][2]
|
|
|
+ stats['process'][pid]['tid'][tid].append(tid_stats)
|
|
|
#
|
|
|
#
|
|
|
- wait_time = max(0, interval-(time.time()-current_time))
|
|
|
- stop_event.wait(wait_time)
|
|
|
#
|
|
|
+
|
|
|
with gzip.GzipFile(path, 'wb') as f:
|
|
|
pickle.dump(stats, f, protocol=4)
|
|
|
#
|
|
|
#
|
|
|
+
|
|
|
def load_cpu_stats(path):
|
|
|
with gzip.GzipFile(path, 'rb') as f:
|
|
|
return pickle.load(f)
|
|
|
#
|
|
|
#
|
|
|
-'''
|
|
|
-def log_cpu_stats(path, interval, stop_event):
|
|
|
- path: file to save to
|
|
|
- interval: how many seconds to wait before getting more data
|
|
|
- stop_event: a threading.Event which stops the function
|
|
|
- #
|
|
|
- with gzip.GzipFile(path+'.2.gz', 'w') as f:
|
|
|
- f.write(' '.join(PROC_STAT_HEADERS).encode('utf-8'))
|
|
|
- f.write('\n\n'.encode('utf-8'))
|
|
|
- #
|
|
|
- while not stop_event.is_set():
|
|
|
- f.write(str(time.time()).encode('utf-8'))
|
|
|
- f.write('\n'.encode('utf-8'))
|
|
|
- stats = get_cpu_stats()
|
|
|
- f.write('cpu '.encode('utf-8'))
|
|
|
- #f.write(' '.join([str(stats['system'][x]) for x in PROC_STAT_HEADERS]).encode('utf-8'))
|
|
|
- f.write(b''.join([stats['system'][x].to_bytes(4, byteorder='big') for x in PROC_STAT_HEADERS]))
|
|
|
- f.write('\n'.encode('utf-8'))
|
|
|
- for cpu in stats['cpus']:
|
|
|
- f.write('cpu{} '.format(cpu).encode('utf-8'))
|
|
|
- #f.write(' '.join([str(stats['cpus'][cpu][x]) for x in PROC_STAT_HEADERS]).encode('utf-8'))
|
|
|
- f.write(b''.join([stats['cpus'][cpu][x].to_bytes(4, byteorder='big') for x in PROC_STAT_HEADERS]))
|
|
|
- f.write('\n'.encode('utf-8'))
|
|
|
- #
|
|
|
- f.write('\n'.encode('utf-8'))
|
|
|
- time.sleep(interval)
|
|
|
- #
|
|
|
- #
|
|
|
-#
|
|
|
-'''
|
|
|
+
|
|
|
if __name__ == '__main__':
|
|
|
stop_event = threading.Event()
|
|
|
- #
|
|
|
- assert len(sys.argv) >= 3
|
|
|
- interval = float(sys.argv[1])
|
|
|
- file_name = sys.argv[2]
|
|
|
- if len(sys.argv) > 3:
|
|
|
- pids = sys.argv[3].split(',')
|
|
|
+
|
|
|
+ parser = argparse.ArgumentParser(description='Log CPU usage data and save as a gzipped pickle file.',
|
|
|
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
|
|
+ parser.add_argument('--interval', type=float, default=1, help='interval for data collection')
|
|
|
+ parser.add_argument('--pids', type=str, help='comma-separated list of processes to log')
|
|
|
+ parser.add_argument('file_out', metavar='file-out', type=str, help='where to save the data')
|
|
|
+ args = parser.parse_args()
|
|
|
+
|
|
|
+    if args.pids is not None:
|
|
|
+ pids = [int(pid) for pid in args.pids.split(',')]
|
|
|
else:
|
|
|
pids = []
|
|
|
#
|
|
|
- t = threading.Thread(target=log_cpu_stats, args=(file_name, interval, pids, stop_event))
|
|
|
+
|
|
|
+ t = threading.Thread(target=log_cpu_stats, args=(args.file_out, args.interval, pids, stop_event))
|
|
|
t.start()
|
|
|
- #
|
|
|
+
|
|
|
try:
|
|
|
- while True:
|
|
|
- time.sleep(100)
|
|
|
+ while t.is_alive():
|
|
|
+ t.join(timeout=100)
|
|
|
#
|
|
|
except KeyboardInterrupt:
|
|
|
stop_event.set()
|
|
|
print()
|
|
|
#
|
|
|
+
|
|
|
t.join()
|
|
|
#
|