Commit 6defdbc9 authored by Andrey Filippov

Handle SSD rollover and analyze split runs

parent aeb17752
#!/usr/bin/env python3
import argparse
import os
import re
from collections import Counter
TIMESTAMP_RE = re.compile(r"^(\d+)_(\d+)$")
def parse_timestamp(name):
    """Return the microsecond timestamp encoded in a 'sec_usec' name, or None.

    Directory names are '<seconds>_<microseconds>'; anything else yields None.
    """
    parsed = re.match(r"^(\d+)_(\d+)$", name)
    if parsed is None:
        return None
    seconds, micros = parsed.groups()
    return int(seconds) * 1000000 + int(micros)
def get_suffix(file_name):
    """Return the text after the last underscore of the file stem.

    Returns None when the stem (name without extension) has no underscore.
    """
    stem = os.path.splitext(file_name)[0]
    if "_" not in stem:
        return None
    return stem[stem.rfind("_") + 1:]
def scan_results(root, ext_filter):
    """Summarize every timestamp-named subdirectory of *root*.

    Only files whose names end with *ext_filter* (when non-empty) are counted.
    Returns {dir_name: {"ts": microseconds, "count": n, "suffixes": tuple}},
    where "suffixes" is the sorted tuple of per-file suffixes found.
    """
    entries = {}
    for entry in os.scandir(root):
        if not entry.is_dir():
            continue
        ts = parse_timestamp(entry.name)
        if ts is None:
            continue  # not a timestamp directory
        found = []
        for child in os.scandir(entry.path):
            if not child.is_file():
                continue
            if ext_filter and not child.name.endswith(ext_filter):
                continue
            suffix = get_suffix(child.name)
            if suffix is not None:
                found.append(suffix)
        found = tuple(sorted(found))
        entries[entry.name] = {
            "ts": ts,
            "count": len(found),
            "suffixes": found,
        }
    return entries
def sorted_names(entries):
    """Return directory names ordered by ascending parsed timestamp."""
    return sorted(entries, key=lambda name: entries[name]["ts"])
def find_expected_suffixes(entries, expected_files):
    """Return the most common suffix tuple among complete directories.

    A directory is complete when its file count equals *expected_files*.
    Returns None when no directory is complete.
    """
    complete = Counter(
        entries[name]["suffixes"]
        for name in sorted(entries, key=lambda n: entries[n]["ts"])
        if entries[name]["count"] == expected_files
    )
    if not complete:
        return None
    return complete.most_common(1)[0][0]
def group_runs(entries, expected_files, max_gap_us):
    """Group incomplete directories into temporally contiguous runs.

    Returns (bad, runs): *bad* is a time-ordered list of
    (ts, name, count, suffixes) tuples for directories whose file count
    differs from *expected_files*; *runs* partitions *bad* into slices where
    consecutive timestamps are no more than *max_gap_us* apart.
    """
    ordered = sorted(entries, key=lambda name: entries[name]["ts"])
    bad = [
        (entries[name]["ts"], name, entries[name]["count"], entries[name]["suffixes"])
        for name in ordered
        if entries[name]["count"] != expected_files
    ]
    runs = []
    current = []
    prev_ts = None
    for record in bad:
        ts = record[0]
        # A gap larger than max_gap_us closes the current run.
        if prev_ts is not None and (ts - prev_ts) > max_gap_us:
            runs.append(current)
            current = []
        current.append(record)
        prev_ts = ts
    if current:
        runs.append(current)
    return bad, runs
def summarize_patterns(entries, expected_files, limit, expected_suffixes):
    """Print the most frequent suffix patterns among incomplete directories.

    When *expected_suffixes* is known, also print which suffixes each pattern
    is missing relative to it.
    """
    incomplete = Counter(
        entries[name]["suffixes"]
        for name in sorted_names(entries)
        if entries[name]["count"] != expected_files
    )
    print("patterns:")
    for suffixes, count in incomplete.most_common(limit):
        if expected_suffixes is None:
            print(f" {count} present={suffixes}")
        else:
            missing = tuple(sorted(set(expected_suffixes) - set(suffixes)))
            print(f" {count} present={suffixes} missing={missing}")
def run_signature(run):
    """Return (dominant_suffixes, dominant_count, patterns) for one run.

    *patterns* is a Counter of the suffix tuples in the run; the dominant
    pattern is the most frequent one.
    """
    patterns = Counter(record[3] for record in run)
    (dominant_suffixes, dominant_count), = patterns.most_common(1)
    return dominant_suffixes, dominant_count, patterns
def summarize_runs(entries, runs, expected_suffixes, limit):
    """Print run-length statistics and details of the longest bad runs."""
    run_hist = Counter(len(run) for run in runs)
    buckets = {
        "1-5": sum(1 for run in runs if len(run) <= 5),
        "6-30": sum(1 for run in runs if 6 <= len(run) <= 30),
        "31+": sum(1 for run in runs if len(run) > 30),
    }
    print("run_buckets {}".format(buckets))
    print(f"top_run_lengths {run_hist.most_common(limit)}")
    print("top_runs:")
    for run in sorted(runs, key=len, reverse=True)[:limit]:
        first_name = run[0][1]
        last_name = run[-1][1]
        dominant_suffixes, dominant_count, patterns = run_signature(run)
        if expected_suffixes is None:
            missing = ()
        else:
            missing = tuple(sorted(set(expected_suffixes) - set(dominant_suffixes)))
        print(
            " {} {} {} present={} missing={} dominant={}/{} patterns={}".format(
                len(run),
                first_name,
                last_name,
                dominant_suffixes,
                missing,
                dominant_count,
                len(run),
                dict(patterns),
            )
        )
def analyze(root, expected_files, max_gap_us, limit, ext_filter):
    """Scan one results tree and print completeness/run statistics.

    root: results directory containing timestamp-named subdirectories.
    expected_files: file count that marks a directory as complete.
    max_gap_us: maximum timestamp gap (microseconds) merged into one run.
    limit: maximum number of patterns/runs to print.
    ext_filter: only count files ending with this extension ("" disables).
    """
    entries = scan_results(root, ext_filter)
    # Most common suffix tuple among complete directories; None if none complete.
    expected_suffixes = find_expected_suffixes(entries, expected_files)
    bad, runs = group_runs(entries, expected_files, max_gap_us)
    # Histogram of file counts seen in incomplete directories (item[2] is count).
    count_hist = Counter(item[2] for item in bad)
    print(root)
    print(f"directories {len(entries)}")
    print(f"bad_dirs {len(bad)}")
    if expected_suffixes is not None:
        print(f"expected_suffixes {expected_suffixes}")
    print(f"count_hist {dict(sorted(count_hist.items()))}")
    print(f"runs {len(runs)}")
    summarize_patterns(entries, expected_files, limit, expected_suffixes)
    summarize_runs(entries, runs, expected_suffixes, limit)
    print()
def main():
    """Parse command-line options and analyze each requested results tree."""
    parser = argparse.ArgumentParser(
        description=(
            "Analyze extracted result trees and report incomplete-scene patterns, "
            "contiguous bad runs, and dominant missing channels."
        )
    )
    parser.add_argument("roots", nargs="+", help="One or more results directories")
    parser.add_argument("--expected-files", type=int, default=4,
                        help="Expected number of files in each timestamp directory")
    parser.add_argument("--max-gap-us", type=int, default=20000,
                        help="Maximum timestamp gap to merge incomplete directories into one run")
    parser.add_argument("--limit", type=int, default=12,
                        help="Maximum number of patterns and runs to print")
    parser.add_argument("--ext", default=".tiff",
                        help="Only count files with this extension (default: .tiff)")
    options = parser.parse_args()
    for root_dir in options.roots:
        analyze(root_dir, options.expected_files, options.max_gap_us,
                options.limit, options.ext)


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
import argparse
import os
import re
from statistics import mean
TIMESTAMP_RE = re.compile(r"^(\d+)_(\d+)$")
def parse_timestamp(name):
    """Return sec * 1e6 + usec for a 'sec_usec' directory name, else None."""
    parsed = re.match(r"^(\d+)_(\d+)$", name)
    if parsed is None:
        return None
    return int(parsed.group(1)) * 1000000 + int(parsed.group(2))
def scan_results(root):
    """Map timestamp-named subdirectories of *root* to their file listings.

    Returns {dir_name: {"ts": microseconds, "count": n, "files": sorted list}}.
    """
    entries = {}
    for entry in os.scandir(root):
        if not entry.is_dir():
            continue
        ts = parse_timestamp(entry.name)
        if ts is None:
            continue  # skip directories that are not timestamps
        file_names = sorted(
            child.name for child in os.scandir(entry.path) if child.is_file()
        )
        record = {"ts": ts, "count": len(file_names), "files": file_names}
        entries[entry.name] = record
    return entries
def sorted_names(entries):
    """Directory names in ascending timestamp order."""
    return sorted(entries, key=lambda name: entries[name]["ts"])
def gap_stats(entries):
    """Return (prev_name, next_name, gap_us) for each adjacent timestamp pair."""
    names = sorted(entries, key=lambda name: entries[name]["ts"])
    return [
        (prev_name, next_name,
         entries[next_name]["ts"] - entries[prev_name]["ts"])
        for prev_name, next_name in zip(names, names[1:])
    ]
def summarize_tree(label, entries, expected_files, limit):
    """Print one tree's size, incomplete directories and timestamp-gap stats."""
    names = sorted_names(entries)
    print(f"{label}: directories={len(names)}")
    if not names:
        return
    bad = [name for name in names if entries[name]["count"] != expected_files]
    print(f" directories with file count != {expected_files}: {len(bad)}")
    for name in bad[:limit]:
        print(f" {name}: files={entries[name]['count']}")
    gaps = gap_stats(entries)
    if not gaps:
        return
    gap_values = [gap for _, _, gap in gaps]
    print(
        " gap_us min={} max={} avg={:.1f}".format(
            min(gap_values), max(gap_values), mean(gap_values)
        )
    )
    print(" largest gaps:")
    widest = sorted(gaps, key=lambda item: item[2], reverse=True)[:limit]
    for prev_name, next_name, gap in widest:
        print(f" {prev_name} -> {next_name}: {gap}")
def print_only(label, names, entries, limit):
    """Print up to *limit* of *names* with their file counts under *label*."""
    print(f"{label}: {len(names)}")
    shown = names[:limit]
    for entry_name in shown:
        print(f" {entry_name} files={entries[entry_name]['count']}")
def compare_trees(left_label, left_entries, right_label, right_entries, limit):
    """Report directories common to both trees, count mismatches, and one-sided names."""
    left_names = set(left_entries)
    right_names = set(right_entries)
    common = sorted(left_names & right_names, key=lambda name: left_entries[name]["ts"])
    only_left = sorted(left_names - right_names, key=lambda name: left_entries[name]["ts"])
    only_right = sorted(right_names - left_names, key=lambda name: right_entries[name]["ts"])
    print(f"common directories: {len(common)}")
    mismatched = [
        name
        for name in common
        if left_entries[name]["count"] != right_entries[name]["count"]
    ]
    print(f"common directories with file-count mismatch: {len(mismatched)}")
    for name in mismatched[:limit]:
        print(
            f" {name}: {left_label}={left_entries[name]['count']} {right_label}={right_entries[name]['count']}"
        )
    print_only(f"only in {left_label}", only_left, left_entries, limit)
    print_only(f"only in {right_label}", only_right, right_entries, limit)
def main():
    """Entry point: scan both trees, summarize each, then compare them."""
    parser = argparse.ArgumentParser(
        description="Compare extracted TIFF/JP4 result trees by timestamp directory names."
    )
    parser.add_argument("left", help="Left results directory")
    parser.add_argument("right", help="Right results directory")
    parser.add_argument("--expected-files", type=int, default=4,
                        help="Expected number of files in each timestamp directory")
    parser.add_argument("--limit", type=int, default=20,
                        help="Maximum number of anomalies to print per section")
    options = parser.parse_args()
    left_entries = scan_results(options.left)
    right_entries = scan_results(options.right)
    summarize_tree("left", left_entries, options.expected_files, options.limit)
    summarize_tree("right", right_entries, options.expected_files, options.limit)
    compare_trees("left", left_entries, "right", right_entries, options.limit)


if __name__ == "__main__":
    main()
......@@ -54,6 +54,7 @@ $processed_subdir = "processed";
$forced_ext = "";
$add_to_chn = -1; // do not use and do not create scene directories
$marker_debug = false;
function print_help(){
global $argv;
......@@ -69,6 +70,7 @@ function print_help(){
* move-processed-files - 0(default) or 1 - if not 1 - will not move the processed files
* forced-ext - string - override extensions from exifs with this one
* add-to-chn - integer - add to decoded channel number
* marker_debug - 0(default) or 1 - write file_xxx.img.markers.txt with marker deltas and stats
* Examples:
** Split all *.img, *.bin and *.mov files in the current dir and 1 dir down, puts results to '0/':
......@@ -123,6 +125,10 @@ if (isset($_GET['chn_offs'])){
$add_to_chn = (integer) $_GET['chn_offs'];
}
if (isset($_GET['marker_debug'])){
$marker_debug = ($_GET['marker_debug'] == "1");
}
$list = preg_grep('/^([^.])/', scandir($path));
sort($list); // also re-indexes
//$list=array();
......@@ -190,6 +196,7 @@ function split_file($path, $file, $destination, $add_to_chn = - 1, $next_file =
global $chunksize;
global $input_exts;
global $forced_ext;
global $marker_debug;
if (in_array(get_ext("$path/$file"), $input_exts)) {
$next_path = $next_file ? "$path/$next_file" : "";
......@@ -257,6 +264,9 @@ function split_file($path, $file, $destination, $add_to_chn = - 1, $next_file =
$tmp_name = $file_type? "$path/$destination/header.tmp": "$path/$destination/image.tmp";
echo " images found: " . (count($markers) - 1) . "\n";
if ($marker_debug) {
write_marker_debug("$path/$file.markers.txt", $markers, $file_type, $next_file);
}
// second scan
$dbg_last = 0;
......@@ -361,6 +371,58 @@ function split_file($path, $file, $destination, $add_to_chn = - 1, $next_file =
}
}
function write_marker_debug($debug_file, $markers, $file_type, $next_file = "")
{
    // Write a per-file marker report to $debug_file: a header, delta statistics,
    // a histogram of deltas, and one line per frame with marker offsets.
    // $markers holds count($markers)-1 frames; frame i spans markers[i]..markers[i+1].
    // $file_type selects the label only (truthy => "TIFF", else "JPEG/JP4").
    $frame_count = count($markers) - 1;
    $lines = array();
    $lines[] = "# marker debug";
    $lines[] = "# type=" . ($file_type ? "TIFF" : "JPEG/JP4");
    $lines[] = "# next_file=" . ($next_file ? $next_file : "-");
    $lines[] = "# images_found=" . $frame_count;
    if ($frame_count <= 0) {
        // Nothing to analyze; still write a stub file for the record.
        $lines[] = "# no markers found";
        file_put_contents($debug_file, implode(PHP_EOL, $lines) . PHP_EOL);
        return;
    }
    // Single pass over consecutive marker pairs: min/max/sum and a delta histogram.
    $sum_delta = 0;
    $min_delta = null;
    $max_delta = null;
    $hist = array();
    for ($i = 0; $i < $frame_count; $i ++) {
        $delta = $markers[$i + 1] - $markers[$i];
        $sum_delta += $delta;
        if (($min_delta === null) || ($delta < $min_delta)) {
            $min_delta = $delta;
        }
        if (($max_delta === null) || ($delta > $max_delta)) {
            $max_delta = $delta;
        }
        if (!isset($hist[$delta])) {
            $hist[$delta] = 0;
        }
        $hist[$delta] ++;
    }
    $avg_delta = $sum_delta / $frame_count;
    arsort($hist, SORT_NUMERIC); // most frequent delta first
    $lines[] = sprintf("# stats min=%d max=%d avg=%.2f sum=%d", $min_delta, $max_delta, $avg_delta, $sum_delta);
    $lines[] = "# most_common_deltas delta_bytes count";
    $hist_count = 0;
    foreach ($hist as $delta => $count) {
        $lines[] = sprintf("# %d %d", $delta, $count);
        $hist_count ++;
        if ($hist_count >= 16) { // cap the histogram section at 16 entries
            break;
        }
    }
    // Per-frame detail: index, this marker, next marker, byte delta.
    $lines[] = "# index marker_offset next_marker_offset delta_bytes";
    for ($i = 0; $i < $frame_count; $i ++) {
        $lines[] = sprintf("%d %d %d %d", $i, $markers[$i], $markers[$i + 1], $markers[$i + 1] - $markers[$i]);
    }
    file_put_contents($debug_file, implode(PHP_EOL, $lines) . PHP_EOL);
}
function elphel_specific_result_name($file, &$image_length, $add_to_chn=-10){
global $forced_ext;
......
......@@ -80,6 +80,26 @@ print (args)
cams = []
dirs = []
def pointer_offset_blocks(info):
    """Blocks between the partition start and the current write pointer (0 when info is None)."""
    return 0 if info is None else info["current_lba"] - info["start_lba"]
def pointer_range_segments(start_info, end_info):
    """Translate camogm.disk pointer records into download segments.

    Returns (segments, total_blocks, start_offset) where each segment is a
    (skip_blocks, load_blocks) pair relative to the partition start. When the
    end pointer is behind the start pointer the ring buffer has wrapped, so
    two segments are produced (tail of the ring, then head up to the end).
    Raises Exception if the two pointer files describe different partitions.
    """
    if end_info is None:
        return [], 0, 0
    end_offset = end_info["current_lba"] - end_info["start_lba"]
    if start_info is None:
        # No start pointer: take everything from the partition start.
        return [(0, end_offset)], end_offset, 0
    if ((start_info["start_lba"] != end_info["start_lba"]) or
            (start_info["end_lba"] != end_info["end_lba"])):
        raise Exception("Pointer files are from different partitions")
    start_offset = start_info["current_lba"] - start_info["start_lba"]
    ring_blocks = end_info["end_lba"] - end_info["start_lba"]
    if end_offset >= start_offset:
        # Linear range, one segment.
        return [(start_offset, end_offset - start_offset)], end_offset - start_offset, start_offset
    # Wrapped range: ring tail first, then the head up to the end pointer.
    tail_blocks = ring_blocks - start_offset
    return [(start_offset, tail_blocks), (0, end_offset)], tail_blocks + end_offset, start_offset
if args.camera!="":
tmp = args.camera.split("@")
if len(tmp)==2:
......@@ -160,48 +180,55 @@ for i,cam in enumerate(cams):
if d==p[0]:
# p[1] == sdb2
# hardcoded /dev/sd?1
part1 = p[1][0:-1]+"1"
if args.n==0:
if (args.lan):
# data_size_blocks = pc.read_camogm_disk_file_blocks_lan(cams[i].ip, p[1][0:-1]+"1", args.file_end)
# data_skip_blocks = pc.read_camogm_disk_file_blocks_lan(cams[i].ip, p[1][0:-1]+"1", args.file_start)
data_size_blocks = cam['obj'].read_camogm_disk_file_blocks(p[1][0:-1]+"1", args.file_end)
data_skip_blocks = cam['obj'].read_camogm_disk_file_blocks(p[1][0:-1]+"1", args.file_start)
#read_camogm_disk_file_blocks
end_info = cam['obj'].read_camogm_disk_file_info(part1, args.file_end)
start_info = cam['obj'].read_camogm_disk_file_info(part1, args.file_start) if args.file_start else None
else:
data_size_blocks = pc.read_camogm_disk_file_blocks("/dev/"+p[1][0:-1]+"1", args.file_end)
data_skip_blocks = pc.read_camogm_disk_file_blocks("/dev/"+p[1][0:-1]+"1", args.file_start)
data_size_blocks -= data_skip_blocks # before it included skipped !
end_info = pc.read_camogm_disk_file_info("/dev/"+part1, args.file_end)
start_info = pc.read_camogm_disk_file_info("/dev/"+part1, args.file_start) if args.file_start else None
segments, data_size_blocks, data_skip_blocks = pointer_range_segments(start_info, end_info)
else:
segments = [(args.skip, args.n)]
data_size_blocks = args.n
data_skip_blocks = args.skip
if (data_size_blocks < 0):
data_size_blocks = 0 # if camogm.disk could not be read?
print ("data_size_blocks=%d, data_skip_blocks=%d"%(data_size_blocks,data_skip_blocks))
if len(segments) > 1:
print("Wrapped ring buffer range, downloading %d segments" % len(segments))
chunk_blocks=32768 # 4096
block_size= 512 # 4096
data_size_gb = (data_size_blocks * block_size) / (1024 * 1024 * 1024)
file_gb = args.bs*args.bc // 1024 # just for compatibility, change to parameter
print("Data size: %d %d-byte blocks (%f GB)"%(data_size_blocks, block_size, data_size_gb))
if (args.lan):
print(cam)
pc.download_blocks_lan(
cam,
args.dest,
"/dev/"+p[1],
blocks_load=data_size_blocks,
blocks_skip= data_skip_blocks,
file_gb=file_gb,
chunk_blocks=chunk_blocks,
block_size=block_size)
else:
pc.download_blocks( # after testing use download_blocks_lan (rename/delete download_blocks) with cam=None
args.dest,
"/dev/"+p[1],
blocks_load=data_size_blocks,
blocks_skip= data_skip_blocks,
file_gb=file_gb,
chunk_blocks=chunk_blocks,
block_size=block_size)
num_file = 0
for segment_skip, segment_load in segments:
if segment_load <= 0:
continue
if (args.lan):
print(cam)
num_file = pc.download_blocks_lan(
cam,
args.dest,
"/dev/"+p[1],
blocks_load=segment_load,
blocks_skip=segment_skip,
file_gb=file_gb,
chunk_blocks=chunk_blocks,
block_size=block_size,
num_file=num_file)
else:
num_file = pc.download_blocks( # after testing use download_blocks_lan (rename/delete download_blocks) with cam=None
args.dest,
"/dev/"+p[1],
blocks_load=segment_load,
blocks_skip=segment_skip,
file_gb=file_gb,
chunk_blocks=chunk_blocks,
block_size=block_size,
num_file=num_file)
dirs.remove(d)
proceed_to_next = True
......
......@@ -36,12 +36,28 @@ class bcolors:
BOLDWHITE = '\033[1;37m'
UNDERLINE = '\033[4m'
def parse_camogm_disk_data(lines):
    """Parse a two-line camogm.disk file into LBA pointer fields.

    Line 2 must contain at least: device start_lba current_lba end_lba.
    Returns a dict with those fields (LBAs converted to int), or None when the
    line count, field count or numeric conversion fails.
    """
    if len(lines) != 2:
        return None
    fields = lines[1].split()
    if len(fields) < 4:
        return None
    try:
        start_lba, current_lba, end_lba = (int(field) for field in fields[1:4])
    except ValueError:
        return None
    return {
        "device": fields[0],
        "start_lba": start_lba,
        "current_lba": current_lba,
        "end_lba": end_lba,
    }
class Camera:
    def __init__(self, user="root", ip="192.168.0.9"):
        """Store camera credentials and precompute ssh/scp command prefixes."""
        self.user = user
        self.ip = ip
        self.sshcmd = "ssh "+user+"@"+ip
        # NOTE(review): -O was added to force the legacy SCP protocol —
        # presumably for camera-side compatibility with newer OpenSSH; confirm.
        self.scpcmd = "scp -O "+user+"@"+ip+":"
        # imgsrv raw-SSD endpoint on port 2323
        self.lanurl = "http://"+ip+":2323/ssd"
        self.disable = False
        # prefix used to match /dev/disk/by-id entries
        self.pattern = "ata-"
......@@ -194,6 +210,20 @@ class Camera:
shutil.rmtree(tmp_mount_point, ignore_errors=True) # non-empty, may contain read-only
return result
def read_camogm_disk_file_info(self, part, fname="camogm.disk"):
result = None
tmp_mount_point = tempfile.mkdtemp()
shout(self.scpcmd+"/mnt/"+part+"/"+fname+" "+tmp_mount_point)
try:
with open (tmp_mount_point+"/"+fname, "r") as myfile:
result = parse_camogm_disk_data(myfile.readlines())
if result is None:
print(tmp_mount_point+"/"+fname+" INVALID")
except IOError:
print(tmp_mount_point+"/"+fname+" NOT FOUND")
shutil.rmtree(tmp_mount_point, ignore_errors=True)
return result
class PC():
def __init__(self):
self.pattern = "ata-"
......@@ -238,8 +268,8 @@ class PC():
def read_camogm_disk_file_lan(self, ip, part, fname="camogm.disk"):
result = 0
tmp_mount_point = tempfile.mkdtemp()
print("scp root@"+ip+":/mnt/"+part+"/"+fname+" "+tmp_mount_point)
shout("scp root@"+ip+":/mnt/"+part+"/"+fname+" "+tmp_mount_point)
print("scp -O root@"+ip+":/mnt/"+part+"/"+fname+" "+tmp_mount_point)
shout("scp -O root@"+ip+":/mnt/"+part+"/"+fname+" "+tmp_mount_point)
try:
with open (tmp_mount_point+"/"+fname, "r") as myfile:
data=myfile.readlines()
......@@ -291,8 +321,8 @@ class PC():
def read_camogm_disk_file_blocks_lan(self, ip, part,fname="camogm.disk"):
result = 0
tmp_mount_point = tempfile.mkdtemp()
print("scp root@"+ip+":/mnt/"+part+"/"+fname+" "+tmp_mount_point)
shout("scp root@"+ip+":/mnt/"+part+"/"+fname+" "+tmp_mount_point)
print("scp -O root@"+ip+":/mnt/"+part+"/"+fname+" "+tmp_mount_point)
shout("scp -O root@"+ip+":/mnt/"+part+"/"+fname+" "+tmp_mount_point)
try:
with open (tmp_mount_point+"/"+fname, "r") as myfile:
# print("reading "+tmp_mount_point+"/"+fname)
......@@ -311,6 +341,24 @@ class PC():
shutil.rmtree(tmp_mount_point, ignore_errors=True) # non-empty, may contain read-only
return result
def read_camogm_disk_file_info(self, part, fname="camogm.disk"):
result = None
tmp_mount_point = tempfile.mkdtemp()
print("mounting "+part+" to "+tmp_mount_point)
shout("sudo mount "+part+" "+tmp_mount_point)
print("mounted "+part+" to "+tmp_mount_point)
time.sleep(1)
try:
with open (tmp_mount_point+"/"+fname, "r") as myfile:
result = parse_camogm_disk_data(myfile.readlines())
if result is None:
print(tmp_mount_point+"/"+fname+" INVALID")
except IOError:
print(tmp_mount_point+"/"+fname+" NOT FOUND")
shout("sudo umount "+tmp_mount_point)
os.rmdir(tmp_mount_point)
return result
def is_raw(self,part):
res = shout("sudo blkid | grep "+str(part))
typ = " TYPE=" # added space, otherwise: /dev/sdd2: PTTYPE="atari" PARTUUID="e174570b-02"
......@@ -349,7 +397,7 @@ class PC():
if i>=dl_skip:
shout("sudo dd if="+part+" "+" of="+fname+" bs="+str(dl_bs)+"M count="+str(dl_bc)+" skip="+str(skip))
def download_blocks(self, dest, part, blocks_load, blocks_skip= 0, file_gb=10, chunk_blocks=32768, block_size=512): #4096):
def download_blocks(self, dest, part, blocks_load, blocks_skip= 0, file_gb=10, chunk_blocks=32768, block_size=512, num_file=0): #4096):
chunk_bytes = block_size * chunk_blocks
file_chunks = (file_gb * 1024 * 1024 * 1024) // chunk_bytes
self.is_raw(part)
......@@ -361,7 +409,6 @@ class PC():
dirname = dest+"/"+dirname
if not os.path.isdir(dirname):
os.mkdir(dirname)
num_file = 0
# optional first file to align skip to chunk_blocks, 1 block at a time
if (blocks_skip > 0) and ((blocks_skip % chunk_blocks) > 0):
bwrite = chunk_blocks - (blocks_skip % chunk_blocks)
......@@ -396,11 +443,13 @@ class PC():
fname = "%s/file_%03d.img" %(dirname, num_file) #dirname+"/"+"file_"+str(num_file)+".img"
print("Downloading last %d %d-byte blocks, skipping %d blocks to %s"%(blocks_load, block_size, blocks_skip, fname))
shout("sudo dd if="+part+" "+" of="+fname+" bs="+str(block_size)+" count="+str(blocks_load)+" skip="+str(blocks_skip))
num_file += 1
return num_file
#time ssh root@192.168.0.41 "dd if=/dev/sda2 bs=16777216 count=409 skip=322" | dd of=/home/elphel/lwir16-proc/test_dd/file_0001.img
# res = shout(self.sshcmd+" 'ls -all /dev/disk/by-id | grep '"+self.pattern+"' | grep '"+partition[-4:]+"''")
#slow, replaced by download_blocks_lan using modified imgsrv 2023-08-02
def download_blocks_ssh(self, cam, dest, part, blocks_load, blocks_skip= 0, file_gb=10, chunk_blocks=32768, block_size=512): #4096):
def download_blocks_ssh(self, cam, dest, part, blocks_load, blocks_skip= 0, file_gb=10, chunk_blocks=32768, block_size=512, num_file=0): #4096):
chunk_bytes = block_size * chunk_blocks
file_chunks = (file_gb * 1024 * 1024 * 1024) // chunk_bytes
if (cam==None):
......@@ -418,7 +467,6 @@ class PC():
dirname = dest+"/"+dirname
if not os.path.isdir(dirname):
os.mkdir(dirname)
num_file = 0
# optional first file to align skip to chunk_blocks, 1 block at a time
if (blocks_skip > 0) and ((blocks_skip % chunk_blocks) > 0):
bwrite = chunk_blocks - (blocks_skip % chunk_blocks)
......@@ -465,6 +513,8 @@ class PC():
else:
print(cam['obj'].sshcmd+" 'dd if="+part+" bs="+str(block_size)+" count="+str(blocks_load)+" skip="+str(blocks_skip)+"' | dd of="+fname)
shout(cam['obj'].sshcmd+" 'dd if="+part+" bs="+str(block_size)+" count="+str(blocks_load)+" skip="+str(blocks_skip)+"' | dd of="+fname)
num_file += 1
return num_file
def download_with_imgsrv(self, cam, out_file, offs, count ):
# self.lanurl = "http://"+ip+":2323/ssd"
......@@ -479,7 +529,7 @@ class PC():
#if chunk:
f.write(chunk)
def download_blocks_lan(self, cam, dest, part, blocks_load, blocks_skip= 0, file_gb=10, chunk_blocks=32768, block_size=512): #4096):
def download_blocks_lan(self, cam, dest, part, blocks_load, blocks_skip= 0, file_gb=10, chunk_blocks=32768, block_size=512, num_file=0): #4096):
chunk_bytes = block_size * chunk_blocks
file_chunks = (file_gb * 1024 * 1024 * 1024) // chunk_bytes
if (cam==None):
......@@ -502,7 +552,6 @@ class PC():
dirname = dest+"/"+dirname
if not os.path.isdir(dirname):
os.mkdir(dirname)
num_file = 0
# optional first file to align skip to chunk_blocks, 1 block at a time
if (blocks_skip > 0) and ((blocks_skip % chunk_blocks) > 0):
bwrite = chunk_blocks - (blocks_skip % chunk_blocks)
......@@ -552,6 +601,8 @@ class PC():
# print(cam['obj'].sshcmd+" 'dd if="+part+" bs="+str(block_size)+" count="+str(blocks_load)+" skip="+str(blocks_skip)+"' | dd of="+fname)
# shout(cam['obj'].sshcmd+" 'dd if="+part+" bs="+str(block_size)+" count="+str(blocks_load)+" skip="+str(blocks_skip)+"' | dd of="+fname)
self.download_with_imgsrv(cam, fname, blocks_skip * block_size, blocks_load * block_size )
num_file += 1
return num_file
def partname(self,partition):
cmd = "ls /dev/disk/by-id/ -all | grep '"+self.pattern+"' | grep '"+partition[-4:]+"'"
......@@ -560,4 +611,3 @@ class PC():
if name[0:len(self.pattern)]==self.pattern:
return name[len(self.pattern):]
return ""
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment