wooo pretty colors

parent 6dffbcd320
commit dd6f3ff02c

1 changed file with 43 additions and 16 deletions
@@ -107,10 +107,37 @@ SUGGESTED_WORKS_COUNT = 10
 READONLY_FILE = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
 READONLY_DIR = READONLY_FILE | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
 
+ANSI_RED = '\x1b[1;31m'
+ANSI_GREEN = '\x1b[1;32m'
+ANSI_YELLOW = '\x1b[1;33m'
+ANSI_GRAY = '\x1b[1;90m'
+ANSI_NORMAL = '\x1b[0m'
+ANSI_LINECLEAR = '\x1b[2K\r'
+
 debug_mode = False
 def debug(s):
     if debug_mode:
-        print(f'{time.strftime("%Y-%m-%d %H:%M:%S")} - {s}')
+        print(f'{ANSI_GRAY}{time.strftime("%Y-%m-%d %H:%M:%S")} - {s}{ANSI_NORMAL}')
+
+def succ(s):
+    print(f'{ANSI_GREEN}{s}{ANSI_NORMAL}')
+
+def warn(s):
+    print(f'{ANSI_YELLOW}{s}{ANSI_NORMAL}')
+
+def err(s):
+    print(f'{ANSI_RED}{s}{ANSI_NORMAL}')
+
+def count_progress(idx, count, thing):
+    if idx + 1 < count:
+        pref = ''
+        suf = '...'
+        end = ''
+    else:
+        pref = ANSI_GREEN
+        suf = ANSI_NORMAL
+        end = '\n'
+    print(f'{ANSI_LINECLEAR}{pref}{idx+1}/{count} {thing}{suf}', end=end)
 
 def open_zipfile_with_encoding(path):
     for enc in ["utf-8", "shift-jis", "shift-jisx0213"]:
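The new helpers are easiest to sanity-check in isolation. A minimal standalone sketch (same escape codes and logic as the hunk above; the demo loop and the flush=True are mine, not part of the commit — the commit relies on the terminal's own flushing):

import time

ANSI_GREEN = '\x1b[1;32m'      # bold green, used for the final progress line
ANSI_NORMAL = '\x1b[0m'        # reset all attributes
ANSI_LINECLEAR = '\x1b[2K\r'   # erase the whole line, return cursor to column 0

def count_progress(idx, count, thing):
    # Mid-run: rewrite the current line in place; on the last item, print it
    # green and emit the newline that was withheld until now.
    if idx + 1 < count:
        pref, suf, end = '', '...', ''
    else:
        pref, suf, end = ANSI_GREEN, ANSI_NORMAL, '\n'
    print(f'{ANSI_LINECLEAR}{pref}{idx+1}/{count} {thing}{suf}', end=end, flush=True)

for i in range(5):
    time.sleep(0.2)  # stand-in for real work so the in-place update is visible
    count_progress(i, 5, 'pages analyzed')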
@@ -119,7 +146,7 @@ def open_zipfile_with_encoding(path):
         except UnicodeDecodeError:
             pass
 
-    print(f'{path} contains filenames with unknown character encoding!')
+    err(f'{path} contains filenames with unknown character encoding!')
     exit(1)
 
 def open_rarfile_with_encoding(path):
@@ -128,7 +155,7 @@ def open_rarfile_with_encoding(path):
         if all('�' not in info.filename for info in rf.infolist()):
             return rf
 
-    print(f'{path} contains filenames with unknown character encoding!')
+    err(f'{path} contains filenames with unknown character encoding!')
     exit(1)
 
 def readonly(path):
@@ -408,7 +435,7 @@ def collate(args):
                 break
 
         if collation_result and collator.index > 0:
-            print(f'Collated {collator.index} pages for {work_id}')
+            succ(f'Collated {collator.index} pages for {work_id}')
             work_staging_dir.rename(work_collation_dir)
         else:
             if work_staging_dir.is_dir():
@@ -503,20 +530,20 @@ class Collator:
 
     def link_pdf(self, src):
         with fitz.open(src) as pdf:
-            images = pdf_images(pdf, self.args.pdf_strategy)
-            if images is None:
+            image_extractors = pdf_image_extractors(pdf, self.args.pdf_strategy)
+            if image_extractors is None:
                 print(f'Failed to enumerate page images in PDF {src}')
                 return None
 
             self.dest.mkdir(parents=True, exist_ok=True)
 
             print(f'0 pages collated...', end='')
-            for (idx, image) in enumerate(images, start=self.index):
+            for (idx, extractor) in enumerate(image_extractors, start=self.index):
+                image = extractor()
                 file_path = self.dest / f'{idx:04d}.{image["ext"]}'
                 with open(file_path, 'wb') as f:
                     f.write(image["image"])
-                print(f'\x1b[2K\r{idx+1-self.index} pages collated...', end='')
-            print()
+                count_progress(idx, len(image_extractors), 'pages collated')
 
             self.index += pdf.page_count
             return True
@@ -725,7 +752,7 @@ def display_sixel_pixmap(pixmap_bytes):
     finally:
         sixel_output_unref(output)
 
-def pdf_images(pdf, strategy):
+def pdf_image_extractors(pdf, strategy):
     print(f'0/{pdf.page_count} pages analyzed...', end='')
     image_extractors = []
     for (idx, page) in enumerate(pdf):
@@ -762,9 +789,9 @@ def pdf_images(pdf, strategy):
                         display_sixel_pixmap(pixmap.tobytes('png'))
 
                 choice = input(f'[N]ope out / [c]onvert page{"" if xref is None else " / e[x]tract image"} / [d]rop page / [s]how page? [n/c{"" if xref is None else "/x"}/d/s] ')
-        print(f'\x1b[2K\r{idx+1}/{pdf.page_count} pages analyzed...', end=('' if idx+1 < pdf.page_count else '\n'))
+        count_progress(idx, pdf.page_count, 'pages analyzed')
 
-    return (extractor() for extractor in image_extractors)
+    return image_extractors
 
 def nfc(s):
     return unicodedata.normalize('NFC', s)
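Why the pdf_images → pdf_image_extractors reshuffle in this hunk and the link_pdf hunk above: the old version returned a generator of already-extracted images, which has no len(), so the collation loop could only print a bare running count. Returning the list of extractor callables gives the caller a total to feed count_progress while still deferring the actual extraction to one page at a time. A toy sketch of the pattern, under the assumption that per-page extraction is the expensive step (extract_page here is a hypothetical stand-in for the PyMuPDF work the real code does):

def extract_page(n):
    # Hypothetical stand-in for the real per-page PDF image extraction.
    return {'ext': 'png', 'image': b'bytes for page %d' % n}

def make_extractors(page_numbers):
    # Note the p=p default argument: without it, every lambda would close over
    # the same loop variable and extract the last page repeatedly.
    return [lambda p=p: extract_page(p) for p in page_numbers]

extractors = make_extractors(range(3))
total = len(extractors)     # known up front, unlike with a generator
for idx, extractor in enumerate(extractors):
    image = extractor()     # extraction cost is paid here, one page at a time
    print(f'{idx+1}/{total} pages collated ({len(image["image"])} bytes)')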
@@ -929,7 +956,7 @@ def standalone_image_size(filepath):
         with Image.open(filepath) as im:
             return im.size
     except UnidentifiedImageError:
-        print(f'Warning: PIL failed to load image {filepath}! Retrying with less strict settings')
+        warn(f'PIL failed to load image {filepath}! Retrying with less strict settings')
         PIL.ImageFile.LOAD_TRUNCATED_IMAGES = True
         try:
             with Image.open(filepath) as im:
@@ -1171,7 +1198,7 @@ def generate(args):
         }
         works.append(work)
 
-        print(f'\x1b[2K\r{idx+1} database entries read...', end='')
+        print(f'{ANSI_LINECLEAR}{idx+1} database entries read...', end='')
     print()
 
     for (idx, work) in enumerate(works):
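This progress line keeps a plain print rather than count_progress, presumably because the entries are streamed and the total isn't known until the loop ends, so there is no count to pass. A sketch of that indeterminate variant (rows is a hypothetical stand-in for the database cursor the real code iterates):

ANSI_LINECLEAR = '\x1b[2K\r'

rows = iter(range(250))  # hypothetical stand-in for a database cursor
for idx, row in enumerate(rows):
    # No total available, so just clear and rewrite the running count.
    print(f'{ANSI_LINECLEAR}{idx+1} database entries read...', end='')
print()  # finish the line once the iterator is exhausted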
@@ -1191,7 +1218,7 @@ def generate(args):
         with open(viewer_dir / 'index.html', 'w') as f:
             f.write(viewer_template.render(depth=3, work=work, title=work['title']))
 
-        print(f'\x1b[2K\r{idx+1}/{len(works)} works processed...', end=('' if idx+1 < len(works) else '\n'))
+        count_progress(idx, len(works), 'works processed')
     cache_con.commit()
 
     uca = pyuca.Collator().sort_key
@@ -1214,7 +1241,7 @@ def generate(args):
                     title=cat,
                     categorization=categorization,
                 ))
-            print(f'\x1b[2K\r{idx+1}/{len(cats)} {categorization} processed...', end=('' if idx+1 < len(cats) else '\n'))
+            count_progress(idx, len(cats), f'{categorization} processed')
 
         categorization_dir.mkdir(parents=True, exist_ok=True)
         with open(categorization_dir / 'index.html', 'w') as f: