black: Reformat skipping string normalization.
commit fba21bb00d
parent 5580c68ae5
178 changed files with 6562 additions and 4469 deletions
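The commit does not record the exact black invocation or configuration, so the snippet below is only a hedged sketch of what "skipping string normalization" means, using black's public Python API (black.format_str with Mode(string_normalization=False)); the line_length value is an assumed placeholder rather than this repository's real setting, and the command-line equivalent is black's -S / --skip-string-normalization flag.

# Hedged sketch (not part of the commit): black with string normalization disabled
# keeps existing quote characters and only normalizes layout. line_length=100 is an
# assumption for illustration, not the repository's recorded setting.
import black

src = "x = {'pattern': r'\\s+$',   'description': 'Fix trailing whitespace'}\n"
mode = black.Mode(line_length=100, string_normalization=False)
print(black.format_str(src, mode=mode), end='')
# -> x = {'pattern': r'\s+$', 'description': 'Fix trailing whitespace'}
#    (single quotes preserved; only the extra spaces are collapsed)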
|
@@ -8,89 +8,97 @@ if MYPY:
|
||||
whitespace_rules = [
|
||||
# This linter should be first since bash_rules depends on it.
|
||||
{'pattern': r'\s+$',
|
||||
'strip': '\n',
|
||||
'description': 'Fix trailing whitespace'},
|
||||
{'pattern': '\t',
|
||||
'strip': '\n',
|
||||
'description': 'Fix tab-based whitespace'},
|
||||
{'pattern': r'\s+$', 'strip': '\n', 'description': 'Fix trailing whitespace'},
|
||||
{'pattern': '\t', 'strip': '\n', 'description': 'Fix tab-based whitespace'},
|
||||
] # type: List[Rule]
|
||||
|
||||
markdown_whitespace_rules = list([rule for rule in whitespace_rules if rule['pattern'] != r'\s+$']) + [
|
||||
markdown_whitespace_rules = list(
|
||||
[rule for rule in whitespace_rules if rule['pattern'] != r'\s+$']
|
||||
) + [
|
||||
# Two spaces trailing a line with other content is okay--it's a markdown line break.
|
||||
# This rule finds one space trailing a non-space, three or more trailing spaces, and
|
||||
# spaces on an empty line.
|
||||
{'pattern': r'((?<!\s)\s$)|(\s\s\s+$)|(^\s+$)',
|
||||
'strip': '\n',
|
||||
'description': 'Fix trailing whitespace'},
|
||||
{'pattern': r'^#+[A-Za-z0-9]',
|
||||
'strip': '\n',
|
||||
'description': 'Missing space after # in heading'},
|
||||
{
|
||||
'pattern': r'((?<!\s)\s$)|(\s\s\s+$)|(^\s+$)',
|
||||
'strip': '\n',
|
||||
'description': 'Fix trailing whitespace',
|
||||
},
|
||||
{
|
||||
'pattern': r'^#+[A-Za-z0-9]',
|
||||
'strip': '\n',
|
||||
'description': 'Missing space after # in heading',
|
||||
},
|
||||
]
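A quick, self-contained check of the markdown trailing-whitespace pattern introduced above; it is illustrative only and not part of the commit. It confirms the behavior described in the comment: a two-space markdown line break is allowed, while a single trailing space, three or more trailing spaces, and whitespace-only lines are flagged (lines are tested after the trailing newline is stripped, per 'strip': '\n').

# Illustrative check of the markdown trailing-whitespace rule (not part of the commit).
import re

pattern = re.compile(r'((?<!\s)\s$)|(\s\s\s+$)|(^\s+$)')
for line in ['text ', 'text  ', 'text   ', '   ', 'text']:
    print(repr(line), bool(pattern.search(line)))
# 'text '    True   (one space after non-space)
# 'text  '   False  (two spaces: markdown line break, allowed)
# 'text   '  True   (three or more trailing spaces)
# '   '      True   (whitespace-only line)
# 'text'     False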
|
||||
|
||||
python_rules = RuleList(
|
||||
langs=['py'],
|
||||
rules=[
|
||||
{'pattern': r'".*"%\([a-z_].*\)?$',
|
||||
'description': 'Missing space around "%"'},
|
||||
{'pattern': r"'.*'%\([a-z_].*\)?$",
|
||||
'description': 'Missing space around "%"'},
|
||||
{'pattern': r'".*"%\([a-z_].*\)?$', 'description': 'Missing space around "%"'},
|
||||
{'pattern': r"'.*'%\([a-z_].*\)?$", 'description': 'Missing space around "%"'},
|
||||
# This rule is constructed with + to avoid triggering on itself
|
||||
{'pattern': r" =" + r'[^ =>~"]',
|
||||
'description': 'Missing whitespace after "="'},
|
||||
{'pattern': r'":\w[^"]*$',
|
||||
'description': 'Missing whitespace after ":"'},
|
||||
{'pattern': r"':\w[^']*$",
|
||||
'description': 'Missing whitespace after ":"'},
|
||||
{'pattern': r"^\s+[#]\w",
|
||||
'strip': '\n',
|
||||
'description': 'Missing whitespace after "#"'},
|
||||
{'pattern': r"assertEquals[(]",
|
||||
'description': 'Use assertEqual, not assertEquals (which is deprecated).'},
|
||||
{'pattern': r'self: Any',
|
||||
'description': 'you can omit Any annotation for self',
|
||||
'good_lines': ['def foo (self):'],
|
||||
'bad_lines': ['def foo(self: Any):']},
|
||||
{'pattern': r"== None",
|
||||
'description': 'Use `is None` to check whether something is None'},
|
||||
{'pattern': r"type:[(]",
|
||||
'description': 'Missing whitespace after ":" in type annotation'},
|
||||
{'pattern': r"# type [(]",
|
||||
'description': 'Missing : after type in type annotation'},
|
||||
{'pattern': r"#type",
|
||||
'description': 'Missing whitespace after "#" in type annotation'},
|
||||
{'pattern': r'if[(]',
|
||||
'description': 'Missing space between if and ('},
|
||||
{'pattern': r", [)]",
|
||||
'description': 'Unnecessary whitespace between "," and ")"'},
|
||||
{'pattern': r"% [(]",
|
||||
'description': 'Unnecessary whitespace between "%" and "("'},
|
||||
{'pattern': r" =" + r'[^ =>~"]', 'description': 'Missing whitespace after "="'},
|
||||
{'pattern': r'":\w[^"]*$', 'description': 'Missing whitespace after ":"'},
|
||||
{'pattern': r"':\w[^']*$", 'description': 'Missing whitespace after ":"'},
|
||||
{'pattern': r"^\s+[#]\w", 'strip': '\n', 'description': 'Missing whitespace after "#"'},
|
||||
{
|
||||
'pattern': r"assertEquals[(]",
|
||||
'description': 'Use assertEqual, not assertEquals (which is deprecated).',
|
||||
},
|
||||
{
|
||||
'pattern': r'self: Any',
|
||||
'description': 'you can omit Any annotation for self',
|
||||
'good_lines': ['def foo (self):'],
|
||||
'bad_lines': ['def foo(self: Any):'],
|
||||
},
|
||||
{'pattern': r"== None", 'description': 'Use `is None` to check whether something is None'},
|
||||
{'pattern': r"type:[(]", 'description': 'Missing whitespace after ":" in type annotation'},
|
||||
{'pattern': r"# type [(]", 'description': 'Missing : after type in type annotation'},
|
||||
{'pattern': r"#type", 'description': 'Missing whitespace after "#" in type annotation'},
|
||||
{'pattern': r'if[(]', 'description': 'Missing space between if and ('},
|
||||
{'pattern': r", [)]", 'description': 'Unnecessary whitespace between "," and ")"'},
|
||||
{'pattern': r"% [(]", 'description': 'Unnecessary whitespace between "%" and "("'},
|
||||
# This next check could have false positives, but it seems pretty
|
||||
# rare; if we find any, they can be added to the exclude list for
|
||||
# this rule.
|
||||
{'pattern': r' % [a-zA-Z0-9_.]*\)?$',
|
||||
'description': 'Used % comprehension without a tuple'},
|
||||
{'pattern': r'.*%s.* % \([a-zA-Z0-9_.]*\)$',
|
||||
'description': 'Used % comprehension without a tuple'},
|
||||
{'pattern': r'__future__',
|
||||
'include_only': {'zulip_bots/zulip_bots/bots/'},
|
||||
'description': 'Bots no longer need __future__ imports.'},
|
||||
{'pattern': r'#!/usr/bin/env python$',
|
||||
'include_only': {'zulip_bots/'},
|
||||
'description': 'Python shebangs must be python3'},
|
||||
{'pattern': r'(^|\s)open\s*\(',
|
||||
'description': 'open() should not be used in Zulip\'s bots. Use functions'
|
||||
' provided by the bots framework to access the filesystem.',
|
||||
'include_only': {'zulip_bots/zulip_bots/bots/'}},
|
||||
{'pattern': r'pprint',
|
||||
'description': 'Used pprint, which is most likely a debugging leftover. For user output, use print().'},
|
||||
{'pattern': r'\(BotTestCase\)',
|
||||
'bad_lines': ['class TestSomeBot(BotTestCase):'],
|
||||
'description': 'Bot test cases should directly inherit from BotTestCase *and* DefaultTests.'},
|
||||
{'pattern': r'\(DefaultTests, BotTestCase\)',
|
||||
'bad_lines': ['class TestSomeBot(DefaultTests, BotTestCase):'],
|
||||
'good_lines': ['class TestSomeBot(BotTestCase, DefaultTests):'],
|
||||
'description': 'Bot test cases should inherit from BotTestCase before DefaultTests.'},
|
||||
{
|
||||
'pattern': r' % [a-zA-Z0-9_.]*\)?$',
|
||||
'description': 'Used % comprehension without a tuple',
|
||||
},
|
||||
{
|
||||
'pattern': r'.*%s.* % \([a-zA-Z0-9_.]*\)$',
|
||||
'description': 'Used % comprehension without a tuple',
|
||||
},
|
||||
{
|
||||
'pattern': r'__future__',
|
||||
'include_only': {'zulip_bots/zulip_bots/bots/'},
|
||||
'description': 'Bots no longer need __future__ imports.',
|
||||
},
|
||||
{
|
||||
'pattern': r'#!/usr/bin/env python$',
|
||||
'include_only': {'zulip_bots/'},
|
||||
'description': 'Python shebangs must be python3',
|
||||
},
|
||||
{
|
||||
'pattern': r'(^|\s)open\s*\(',
|
||||
'description': 'open() should not be used in Zulip\'s bots. Use functions'
|
||||
' provided by the bots framework to access the filesystem.',
|
||||
'include_only': {'zulip_bots/zulip_bots/bots/'},
|
||||
},
|
||||
{
|
||||
'pattern': r'pprint',
|
||||
'description': 'Used pprint, which is most likely a debugging leftover. For user output, use print().',
|
||||
},
|
||||
{
|
||||
'pattern': r'\(BotTestCase\)',
|
||||
'bad_lines': ['class TestSomeBot(BotTestCase):'],
|
||||
'description': 'Bot test cases should directly inherit from BotTestCase *and* DefaultTests.',
|
||||
},
|
||||
{
|
||||
'pattern': r'\(DefaultTests, BotTestCase\)',
|
||||
'bad_lines': ['class TestSomeBot(DefaultTests, BotTestCase):'],
|
||||
'good_lines': ['class TestSomeBot(BotTestCase, DefaultTests):'],
|
||||
'description': 'Bot test cases should inherit from BotTestCase before DefaultTests.',
|
||||
},
|
||||
*whitespace_rules,
|
||||
],
|
||||
max_length=140,
|
||||
|
@@ -99,9 +107,11 @@ python_rules = RuleList(
bash_rules = RuleList(
|
||||
langs=['sh'],
|
||||
rules=[
|
||||
{'pattern': r'#!.*sh [-xe]',
|
||||
'description': 'Fix shebang line with proper call to /usr/bin/env for Bash path, change -x|-e switches'
|
||||
' to set -x|set -e'},
|
||||
{
|
||||
'pattern': r'#!.*sh [-xe]',
|
||||
'description': 'Fix shebang line with proper call to /usr/bin/env for Bash path, change -x|-e switches'
|
||||
' to set -x|set -e',
|
||||
},
|
||||
*whitespace_rules[0:1],
|
||||
],
|
||||
)
|
||||
|
@@ -116,20 +126,27 @@ json_rules = RuleList(
# version of the tab-based whitespace rule (we can't just use
|
||||
# exclude in whitespace_rules, since we only want to ignore
|
||||
# JSON files with tab-based whitespace, not webhook code).
|
||||
rules= whitespace_rules[0:1],
|
||||
rules=whitespace_rules[0:1],
|
||||
)
|
||||
|
||||
prose_style_rules = [
|
||||
{'pattern': r'[^\/\#\-"]([jJ]avascript)', # exclude usage in hrefs/divs
|
||||
'description': "javascript should be spelled JavaScript"},
|
||||
{'pattern': r'''[^\/\-\."'\_\=\>]([gG]ithub)[^\.\-\_"\<]''', # exclude usage in hrefs/divs
|
||||
'description': "github should be spelled GitHub"},
|
||||
{'pattern': r'[oO]rganisation', # exclude usage in hrefs/divs
|
||||
'description': "Organization is spelled with a z"},
|
||||
{'pattern': r'!!! warning',
|
||||
'description': "!!! warning is invalid; it's spelled '!!! warn'"},
|
||||
{'pattern': r'[^-_]botserver(?!rc)|bot server',
|
||||
'description': "Use Botserver instead of botserver or Botserver."},
|
||||
{
|
||||
'pattern': r'[^\/\#\-"]([jJ]avascript)', # exclude usage in hrefs/divs
|
||||
'description': "javascript should be spelled JavaScript",
|
||||
},
|
||||
{
|
||||
'pattern': r'''[^\/\-\."'\_\=\>]([gG]ithub)[^\.\-\_"\<]''', # exclude usage in hrefs/divs
|
||||
'description': "github should be spelled GitHub",
|
||||
},
|
||||
{
|
||||
'pattern': r'[oO]rganisation', # exclude usage in hrefs/divs
|
||||
'description': "Organization is spelled with a z",
|
||||
},
|
||||
{'pattern': r'!!! warning', 'description': "!!! warning is invalid; it's spelled '!!! warn'"},
|
||||
{
|
||||
'pattern': r'[^-_]botserver(?!rc)|bot server',
|
||||
'description': "Use Botserver instead of botserver or Botserver.",
|
||||
},
|
||||
] # type: List[Rule]
|
||||
|
||||
markdown_docs_length_exclude = {
|
||||
|
@@ -141,8 +158,10 @@ markdown_rules = RuleList(
rules=[
|
||||
*markdown_whitespace_rules,
|
||||
*prose_style_rules,
|
||||
{'pattern': r'\[(?P<url>[^\]]+)\]\((?P=url)\)',
|
||||
'description': 'Linkified markdown URLs should use cleaner <http://example.com> syntax.'}
|
||||
{
|
||||
'pattern': r'\[(?P<url>[^\]]+)\]\((?P=url)\)',
|
||||
'description': 'Linkified markdown URLs should use cleaner <http://example.com> syntax.',
|
||||
},
|
||||
],
|
||||
max_length=120,
|
||||
length_exclude=markdown_docs_length_exclude,
|
||||
|
|
tools/deploy (100 changed lines)
@@ -18,6 +18,7 @@ bold = '\033[1m' # type: str
|
||||
bots_dir = '.bots' # type: str
|
||||
|
||||
|
||||
def pack(options: argparse.Namespace) -> None:
|
||||
# Basic sanity checks for input.
|
||||
if not options.path:
|
||||
|
@@ -53,15 +54,20 @@ def pack(options: argparse.Namespace) -> None:
# Pack the zuliprc
|
||||
zip_file.write(options.config, 'zuliprc')
|
||||
# Pack the config file for the botfarm.
|
||||
bot_config = textwrap.dedent('''\
|
||||
bot_config = textwrap.dedent(
|
||||
'''\
|
||||
[deploy]
|
||||
bot={}
|
||||
zuliprc=zuliprc
|
||||
'''.format(options.main))
|
||||
'''.format(
|
||||
options.main
|
||||
)
|
||||
)
|
||||
zip_file.writestr('config.ini', bot_config)
|
||||
zip_file.close()
|
||||
print('pack: Created zip file at: {}.'.format(zip_file_path))
|
||||
|
||||
|
||||
def check_common_options(options: argparse.Namespace) -> None:
|
||||
if not options.server:
|
||||
print('tools/deploy: URL to Botfarm server not specified.')
|
||||
|
@@ -70,18 +76,20 @@ def check_common_options(options: argparse.Namespace) -> None:
print('tools/deploy: Botfarm deploy token not specified.')
|
||||
sys.exit(1)
|
||||
|
||||
def handle_common_response_without_data(response: Response,
|
||||
operation: str,
|
||||
success_message: str) -> bool:
|
||||
|
||||
def handle_common_response_without_data(
|
||||
response: Response, operation: str, success_message: str
|
||||
) -> bool:
|
||||
return handle_common_response(
|
||||
response=response,
|
||||
operation=operation,
|
||||
success_handler=lambda r: print('{}: {}'.format(operation, success_message))
|
||||
success_handler=lambda r: print('{}: {}'.format(operation, success_message)),
|
||||
)
|
||||
|
||||
def handle_common_response(response: Response,
|
||||
operation: str,
|
||||
success_handler: Callable[[Dict[str, Any]], Any]) -> bool:
|
||||
|
||||
def handle_common_response(
|
||||
response: Response, operation: str, success_handler: Callable[[Dict[str, Any]], Any]
|
||||
) -> bool:
|
||||
if response.status_code == requests.codes.ok:
|
||||
response_data = response.json()
|
||||
if response_data['status'] == 'success':
|
||||
|
@@ -99,6 +107,7 @@ def handle_common_response(response: Response,
print('{}: Error {}. Aborting.'.format(operation, response.status_code))
|
||||
return False
|
||||
|
||||
|
||||
def upload(options: argparse.Namespace) -> None:
|
||||
check_common_options(options)
|
||||
file_path = os.path.join(bots_dir, options.botname + '.zip')
|
||||
|
@@ -109,10 +118,13 @@ def upload(options: argparse.Namespace) -> None:
headers = {'key': options.token}
|
||||
url = urllib.parse.urljoin(options.server, 'bots/upload')
|
||||
response = requests.post(url, files=files, headers=headers)
|
||||
result = handle_common_response_without_data(response, 'upload', 'Uploaded the bot package to botfarm.')
|
||||
result = handle_common_response_without_data(
|
||||
response, 'upload', 'Uploaded the bot package to botfarm.'
|
||||
)
|
||||
if result is False:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def clean(options: argparse.Namespace) -> None:
|
||||
file_path = os.path.join(bots_dir, options.botname + '.zip')
|
||||
if os.path.exists(file_path):
|
||||
|
@@ -121,42 +133,53 @@ def clean(options: argparse.Namespace) -> None:
else:
|
||||
print('clean: File \'{}\' not found.'.format(file_path))
|
||||
|
||||
|
||||
def process(options: argparse.Namespace) -> None:
|
||||
check_common_options(options)
|
||||
headers = {'key': options.token}
|
||||
url = urllib.parse.urljoin(options.server, 'bots/process')
|
||||
payload = {'name': options.botname}
|
||||
response = requests.post(url, headers=headers, json=payload)
|
||||
result = handle_common_response_without_data(response, 'process', 'The bot has been processed by the botfarm.')
|
||||
result = handle_common_response_without_data(
|
||||
response, 'process', 'The bot has been processed by the botfarm.'
|
||||
)
|
||||
if result is False:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def start(options: argparse.Namespace) -> None:
|
||||
check_common_options(options)
|
||||
headers = {'key': options.token}
|
||||
url = urllib.parse.urljoin(options.server, 'bots/start')
|
||||
payload = {'name': options.botname}
|
||||
response = requests.post(url, headers=headers, json=payload)
|
||||
result = handle_common_response_without_data(response, 'start', 'The bot has been started by the botfarm.')
|
||||
result = handle_common_response_without_data(
|
||||
response, 'start', 'The bot has been started by the botfarm.'
|
||||
)
|
||||
if result is False:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def stop(options: argparse.Namespace) -> None:
|
||||
check_common_options(options)
|
||||
headers = {'key': options.token}
|
||||
url = urllib.parse.urljoin(options.server, 'bots/stop')
|
||||
payload = {'name': options.botname}
|
||||
response = requests.post(url, headers=headers, json=payload)
|
||||
result = handle_common_response_without_data(response, 'stop', 'The bot has been stopped by the botfarm.')
|
||||
result = handle_common_response_without_data(
|
||||
response, 'stop', 'The bot has been stopped by the botfarm.'
|
||||
)
|
||||
if result is False:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def prepare(options: argparse.Namespace) -> None:
|
||||
pack(options)
|
||||
upload(options)
|
||||
clean(options)
|
||||
process(options)
|
||||
|
||||
|
||||
def log(options: argparse.Namespace) -> None:
|
||||
check_common_options(options)
|
||||
headers = {'key': options.token}
|
||||
|
@@ -171,16 +194,20 @@ def log(options: argparse.Namespace) -> None:
if result is False:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def delete(options: argparse.Namespace) -> None:
|
||||
check_common_options(options)
|
||||
headers = {'key': options.token}
|
||||
url = urllib.parse.urljoin(options.server, 'bots/delete')
|
||||
payload = {'name': options.botname}
|
||||
response = requests.post(url, headers=headers, json=payload)
|
||||
result = handle_common_response_without_data(response, 'delete', 'The bot has been removed from the botfarm.')
|
||||
result = handle_common_response_without_data(
|
||||
response, 'delete', 'The bot has been removed from the botfarm.'
|
||||
)
|
||||
if result is False:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def list_bots(options: argparse.Namespace) -> None:
|
||||
check_common_options(options)
|
||||
headers = {'key': options.token}
|
||||
|
@@ -190,10 +217,13 @@ def list_bots(options: argparse.Namespace) -> None:
pretty_print = False
|
||||
url = urllib.parse.urljoin(options.server, 'bots/list')
|
||||
response = requests.get(url, headers=headers)
|
||||
result = handle_common_response(response, 'ls', lambda r: print_bots(r['bots']['list'], pretty_print))
|
||||
result = handle_common_response(
|
||||
response, 'ls', lambda r: print_bots(r['bots']['list'], pretty_print)
|
||||
)
|
||||
if result is False:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def print_bots(bots: List[Any], pretty_print: bool) -> None:
|
||||
if pretty_print:
|
||||
print_bots_pretty(bots)
|
||||
|
@@ -201,6 +231,7 @@ def print_bots(bots: List[Any], pretty_print: bool) -> None:
for bot in bots:
|
||||
print('{}\t{}\t{}\t{}'.format(bot['name'], bot['status'], bot['email'], bot['site']))
|
||||
|
||||
|
||||
def print_bots_pretty(bots: List[Any]) -> None:
|
||||
if len(bots) == 0:
|
||||
print('ls: No bots found on the botfarm')
|
||||
|
@@ -231,6 +262,7 @@ def print_bots_pretty(bots: List[Any]) -> None:
)
|
||||
print(row)
|
||||
|
||||
|
||||
def main() -> None:
|
||||
usage = """tools/deploy <command> <bot-name> [options]
|
||||
|
||||
|
@@ -267,23 +299,25 @@ To list user's bots, use:
parser = argparse.ArgumentParser(usage=usage)
|
||||
parser.add_argument('command', help='Command to run.')
|
||||
parser.add_argument('botname', nargs='?', help='Name of bot to operate on.')
|
||||
parser.add_argument('--server', '-s',
|
||||
metavar='SERVERURL',
|
||||
default=os.environ.get('SERVER', ''),
|
||||
help='Url of the Zulip Botfarm server.')
|
||||
parser.add_argument('--token', '-t',
|
||||
default=os.environ.get('TOKEN', ''),
|
||||
help='Deploy Token for the Botfarm.')
|
||||
parser.add_argument('--path', '-p',
|
||||
help='Path to the bot directory.')
|
||||
parser.add_argument('--config', '-c',
|
||||
help='Path to the zuliprc file.')
|
||||
parser.add_argument('--main', '-m',
|
||||
help='Path to the bot\'s main file, relative to the bot\'s directory.')
|
||||
parser.add_argument('--lines', '-l',
|
||||
help='Number of lines in log required.')
|
||||
parser.add_argument('--format', '-f', action='store_true',
|
||||
help='Print user\'s bots in human readable format')
|
||||
parser.add_argument(
|
||||
'--server',
|
||||
'-s',
|
||||
metavar='SERVERURL',
|
||||
default=os.environ.get('SERVER', ''),
|
||||
help='Url of the Zulip Botfarm server.',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--token', '-t', default=os.environ.get('TOKEN', ''), help='Deploy Token for the Botfarm.'
|
||||
)
|
||||
parser.add_argument('--path', '-p', help='Path to the bot directory.')
|
||||
parser.add_argument('--config', '-c', help='Path to the zuliprc file.')
|
||||
parser.add_argument(
|
||||
'--main', '-m', help='Path to the bot\'s main file, relative to the bot\'s directory.'
|
||||
)
|
||||
parser.add_argument('--lines', '-l', help='Number of lines in log required.')
|
||||
parser.add_argument(
|
||||
'--format', '-f', action='store_true', help='Print user\'s bots in human readable format'
|
||||
)
|
||||
options = parser.parse_args()
|
||||
if not options.command:
|
||||
print('tools/deploy: No command specified.')
|
||||
|
@@ -308,5 +342,7 @@ To list user's bots, use:
commands[options.command](options)
|
||||
else:
|
||||
print('tools/deploy: No command \'{}\' found.'.format(options.command))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
|
|
@@ -11,84 +11,253 @@ from gitlint.rules import CommitMessageTitle, LineRule, RuleViolation
# License: MIT
|
||||
# Ref: fit_commit/validators/tense.rb
|
||||
WORD_SET = {
|
||||
'adds', 'adding', 'added',
|
||||
'allows', 'allowing', 'allowed',
|
||||
'amends', 'amending', 'amended',
|
||||
'bumps', 'bumping', 'bumped',
|
||||
'calculates', 'calculating', 'calculated',
|
||||
'changes', 'changing', 'changed',
|
||||
'cleans', 'cleaning', 'cleaned',
|
||||
'commits', 'committing', 'committed',
|
||||
'corrects', 'correcting', 'corrected',
|
||||
'creates', 'creating', 'created',
|
||||
'darkens', 'darkening', 'darkened',
|
||||
'disables', 'disabling', 'disabled',
|
||||
'displays', 'displaying', 'displayed',
|
||||
'documents', 'documenting', 'documented',
|
||||
'drys', 'drying', 'dryed',
|
||||
'ends', 'ending', 'ended',
|
||||
'enforces', 'enforcing', 'enforced',
|
||||
'enqueues', 'enqueuing', 'enqueued',
|
||||
'extracts', 'extracting', 'extracted',
|
||||
'finishes', 'finishing', 'finished',
|
||||
'fixes', 'fixing', 'fixed',
|
||||
'formats', 'formatting', 'formatted',
|
||||
'guards', 'guarding', 'guarded',
|
||||
'handles', 'handling', 'handled',
|
||||
'hides', 'hiding', 'hid',
|
||||
'increases', 'increasing', 'increased',
|
||||
'ignores', 'ignoring', 'ignored',
|
||||
'implements', 'implementing', 'implemented',
|
||||
'improves', 'improving', 'improved',
|
||||
'keeps', 'keeping', 'kept',
|
||||
'kills', 'killing', 'killed',
|
||||
'makes', 'making', 'made',
|
||||
'merges', 'merging', 'merged',
|
||||
'moves', 'moving', 'moved',
|
||||
'permits', 'permitting', 'permitted',
|
||||
'prevents', 'preventing', 'prevented',
|
||||
'pushes', 'pushing', 'pushed',
|
||||
'rebases', 'rebasing', 'rebased',
|
||||
'refactors', 'refactoring', 'refactored',
|
||||
'removes', 'removing', 'removed',
|
||||
'renames', 'renaming', 'renamed',
|
||||
'reorders', 'reordering', 'reordered',
|
||||
'replaces', 'replacing', 'replaced',
|
||||
'requires', 'requiring', 'required',
|
||||
'restores', 'restoring', 'restored',
|
||||
'sends', 'sending', 'sent',
|
||||
'sets', 'setting',
|
||||
'separates', 'separating', 'separated',
|
||||
'shows', 'showing', 'showed',
|
||||
'simplifies', 'simplifying', 'simplified',
|
||||
'skips', 'skipping', 'skipped',
|
||||
'sorts', 'sorting',
|
||||
'speeds', 'speeding', 'sped',
|
||||
'starts', 'starting', 'started',
|
||||
'supports', 'supporting', 'supported',
|
||||
'takes', 'taking', 'took',
|
||||
'testing', 'tested', # 'tests' excluded to reduce false negative
|
||||
'truncates', 'truncating', 'truncated',
|
||||
'updates', 'updating', 'updated',
|
||||
'uses', 'using', 'used',
|
||||
'adds',
|
||||
'adding',
|
||||
'added',
|
||||
'allows',
|
||||
'allowing',
|
||||
'allowed',
|
||||
'amends',
|
||||
'amending',
|
||||
'amended',
|
||||
'bumps',
|
||||
'bumping',
|
||||
'bumped',
|
||||
'calculates',
|
||||
'calculating',
|
||||
'calculated',
|
||||
'changes',
|
||||
'changing',
|
||||
'changed',
|
||||
'cleans',
|
||||
'cleaning',
|
||||
'cleaned',
|
||||
'commits',
|
||||
'committing',
|
||||
'committed',
|
||||
'corrects',
|
||||
'correcting',
|
||||
'corrected',
|
||||
'creates',
|
||||
'creating',
|
||||
'created',
|
||||
'darkens',
|
||||
'darkening',
|
||||
'darkened',
|
||||
'disables',
|
||||
'disabling',
|
||||
'disabled',
|
||||
'displays',
|
||||
'displaying',
|
||||
'displayed',
|
||||
'documents',
|
||||
'documenting',
|
||||
'documented',
|
||||
'drys',
|
||||
'drying',
|
||||
'dryed',
|
||||
'ends',
|
||||
'ending',
|
||||
'ended',
|
||||
'enforces',
|
||||
'enforcing',
|
||||
'enforced',
|
||||
'enqueues',
|
||||
'enqueuing',
|
||||
'enqueued',
|
||||
'extracts',
|
||||
'extracting',
|
||||
'extracted',
|
||||
'finishes',
|
||||
'finishing',
|
||||
'finished',
|
||||
'fixes',
|
||||
'fixing',
|
||||
'fixed',
|
||||
'formats',
|
||||
'formatting',
|
||||
'formatted',
|
||||
'guards',
|
||||
'guarding',
|
||||
'guarded',
|
||||
'handles',
|
||||
'handling',
|
||||
'handled',
|
||||
'hides',
|
||||
'hiding',
|
||||
'hid',
|
||||
'increases',
|
||||
'increasing',
|
||||
'increased',
|
||||
'ignores',
|
||||
'ignoring',
|
||||
'ignored',
|
||||
'implements',
|
||||
'implementing',
|
||||
'implemented',
|
||||
'improves',
|
||||
'improving',
|
||||
'improved',
|
||||
'keeps',
|
||||
'keeping',
|
||||
'kept',
|
||||
'kills',
|
||||
'killing',
|
||||
'killed',
|
||||
'makes',
|
||||
'making',
|
||||
'made',
|
||||
'merges',
|
||||
'merging',
|
||||
'merged',
|
||||
'moves',
|
||||
'moving',
|
||||
'moved',
|
||||
'permits',
|
||||
'permitting',
|
||||
'permitted',
|
||||
'prevents',
|
||||
'preventing',
|
||||
'prevented',
|
||||
'pushes',
|
||||
'pushing',
|
||||
'pushed',
|
||||
'rebases',
|
||||
'rebasing',
|
||||
'rebased',
|
||||
'refactors',
|
||||
'refactoring',
|
||||
'refactored',
|
||||
'removes',
|
||||
'removing',
|
||||
'removed',
|
||||
'renames',
|
||||
'renaming',
|
||||
'renamed',
|
||||
'reorders',
|
||||
'reordering',
|
||||
'reordered',
|
||||
'replaces',
|
||||
'replacing',
|
||||
'replaced',
|
||||
'requires',
|
||||
'requiring',
|
||||
'required',
|
||||
'restores',
|
||||
'restoring',
|
||||
'restored',
|
||||
'sends',
|
||||
'sending',
|
||||
'sent',
|
||||
'sets',
|
||||
'setting',
|
||||
'separates',
|
||||
'separating',
|
||||
'separated',
|
||||
'shows',
|
||||
'showing',
|
||||
'showed',
|
||||
'simplifies',
|
||||
'simplifying',
|
||||
'simplified',
|
||||
'skips',
|
||||
'skipping',
|
||||
'skipped',
|
||||
'sorts',
|
||||
'sorting',
|
||||
'speeds',
|
||||
'speeding',
|
||||
'sped',
|
||||
'starts',
|
||||
'starting',
|
||||
'started',
|
||||
'supports',
|
||||
'supporting',
|
||||
'supported',
|
||||
'takes',
|
||||
'taking',
|
||||
'took',
|
||||
'testing',
|
||||
'tested', # 'tests' excluded to reduce false negative
|
||||
'truncates',
|
||||
'truncating',
|
||||
'truncated',
|
||||
'updates',
|
||||
'updating',
|
||||
'updated',
|
||||
'uses',
|
||||
'using',
|
||||
'used',
|
||||
}
|
||||
|
||||
imperative_forms = [
|
||||
'add', 'allow', 'amend', 'bump', 'calculate', 'change', 'clean', 'commit',
|
||||
'correct', 'create', 'darken', 'disable', 'display', 'document', 'dry',
|
||||
'end', 'enforce', 'enqueue', 'extract', 'finish', 'fix', 'format', 'guard',
|
||||
'handle', 'hide', 'ignore', 'implement', 'improve', 'increase', 'keep',
|
||||
'kill', 'make', 'merge', 'move', 'permit', 'prevent', 'push', 'rebase',
|
||||
'refactor', 'remove', 'rename', 'reorder', 'replace', 'require', 'restore',
|
||||
'send', 'separate', 'set', 'show', 'simplify', 'skip', 'sort', 'speed',
|
||||
'start', 'support', 'take', 'test', 'truncate', 'update', 'use',
|
||||
'add',
|
||||
'allow',
|
||||
'amend',
|
||||
'bump',
|
||||
'calculate',
|
||||
'change',
|
||||
'clean',
|
||||
'commit',
|
||||
'correct',
|
||||
'create',
|
||||
'darken',
|
||||
'disable',
|
||||
'display',
|
||||
'document',
|
||||
'dry',
|
||||
'end',
|
||||
'enforce',
|
||||
'enqueue',
|
||||
'extract',
|
||||
'finish',
|
||||
'fix',
|
||||
'format',
|
||||
'guard',
|
||||
'handle',
|
||||
'hide',
|
||||
'ignore',
|
||||
'implement',
|
||||
'improve',
|
||||
'increase',
|
||||
'keep',
|
||||
'kill',
|
||||
'make',
|
||||
'merge',
|
||||
'move',
|
||||
'permit',
|
||||
'prevent',
|
||||
'push',
|
||||
'rebase',
|
||||
'refactor',
|
||||
'remove',
|
||||
'rename',
|
||||
'reorder',
|
||||
'replace',
|
||||
'require',
|
||||
'restore',
|
||||
'send',
|
||||
'separate',
|
||||
'set',
|
||||
'show',
|
||||
'simplify',
|
||||
'skip',
|
||||
'sort',
|
||||
'speed',
|
||||
'start',
|
||||
'support',
|
||||
'take',
|
||||
'test',
|
||||
'truncate',
|
||||
'update',
|
||||
'use',
|
||||
]
|
||||
imperative_forms.sort()
|
||||
|
||||
|
||||
def head_binary_search(key: str, words: List[str]) -> str:
|
||||
""" Find the imperative mood version of `word` by looking at the first
|
||||
3 characters. """
|
||||
"""Find the imperative mood version of `word` by looking at the first
|
||||
3 characters."""
|
||||
|
||||
# Edge case: 'disable' and 'display' have the same 3 starting letters.
|
||||
if key in ['displays', 'displaying', 'displayed']:
|
||||
|
@@ -114,16 +283,18 @@ def head_binary_search(key: str, words: List[str]) -> str:
|
||||
|
||||
class ImperativeMood(LineRule):
|
||||
""" This rule will enforce that the commit message title uses imperative
|
||||
"""This rule will enforce that the commit message title uses imperative
|
||||
mood. This is done by checking if the first word is in `WORD_SET`, if so
|
||||
show the word in the correct mood. """
|
||||
show the word in the correct mood."""
|
||||
|
||||
name = "title-imperative-mood"
|
||||
id = "Z1"
|
||||
target = CommitMessageTitle
|
||||
|
||||
error_msg = ('The first word in commit title should be in imperative mood '
|
||||
'("{word}" -> "{imperative}"): "{title}"')
|
||||
error_msg = (
|
||||
'The first word in commit title should be in imperative mood '
|
||||
'("{word}" -> "{imperative}"): "{title}"'
|
||||
)
|
||||
|
||||
def validate(self, line: str, commit: GitCommit) -> List[RuleViolation]:
|
||||
violations = []
|
||||
|
@@ -134,11 +305,14 @@ class ImperativeMood(LineRule):
|
||||
if first_word in WORD_SET:
|
||||
imperative = head_binary_search(first_word, imperative_forms)
|
||||
violation = RuleViolation(self.id, self.error_msg.format(
|
||||
word=first_word,
|
||||
imperative=imperative,
|
||||
title=commit.message.title,
|
||||
))
|
||||
violation = RuleViolation(
|
||||
self.id,
|
||||
self.error_msg.format(
|
||||
word=first_word,
|
||||
imperative=imperative,
|
||||
title=commit.message.title,
|
||||
),
|
||||
)
|
||||
|
||||
violations.append(violation)
|
||||
|
||||
|
|
tools/lint (26 changed lines)
@@ -12,6 +12,7 @@ EXCLUDED_FILES = [
'zulip/integrations/perforce/git_p4.py',
|
||||
]
|
||||
|
||||
|
||||
def run() -> None:
|
||||
parser = argparse.ArgumentParser()
|
||||
add_default_linter_arguments(parser)
|
||||
|
@@ -19,15 +20,23 @@ def run() -> None:
|
||||
linter_config = LinterConfig(args)
|
||||
|
||||
by_lang = linter_config.list_files(file_types=['py', 'sh', 'json', 'md', 'txt'],
|
||||
exclude=EXCLUDED_FILES)
|
||||
by_lang = linter_config.list_files(
|
||||
file_types=['py', 'sh', 'json', 'md', 'txt'], exclude=EXCLUDED_FILES
|
||||
)
|
||||
|
||||
linter_config.external_linter('mypy', [sys.executable, 'tools/run-mypy'], ['py'], pass_targets=False,
|
||||
description="Static type checker for Python (config: mypy.ini)")
|
||||
linter_config.external_linter('flake8', ['flake8'], ['py'],
|
||||
description="Standard Python linter (config: .flake8)")
|
||||
linter_config.external_linter('gitlint', ['tools/lint-commits'],
|
||||
description="Git Lint for commit messages")
|
||||
linter_config.external_linter(
|
||||
'mypy',
|
||||
[sys.executable, 'tools/run-mypy'],
|
||||
['py'],
|
||||
pass_targets=False,
|
||||
description="Static type checker for Python (config: mypy.ini)",
|
||||
)
|
||||
linter_config.external_linter(
|
||||
'flake8', ['flake8'], ['py'], description="Standard Python linter (config: .flake8)"
|
||||
)
|
||||
linter_config.external_linter(
|
||||
'gitlint', ['tools/lint-commits'], description="Git Lint for commit messages"
|
||||
)
|
||||
|
||||
@linter_config.lint
|
||||
def custom_py() -> int:
|
||||
|
@@ -45,5 +54,6 @@ def run() -> None:
|
||||
linter_config.do_lint()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
run()
|
||||
|
|
|
@@ -15,32 +15,39 @@ green = '\033[92m'
end_format = '\033[0m'
|
||||
bold = '\033[1m'
|
||||
|
||||
|
||||
def main():
|
||||
usage = """./tools/provision
|
||||
|
||||
Creates a Python virtualenv. Its Python version is equal to
|
||||
the Python version this command is executed with."""
|
||||
parser = argparse.ArgumentParser(usage=usage)
|
||||
parser.add_argument('--python-interpreter', '-p',
|
||||
metavar='PATH_TO_PYTHON_INTERPRETER',
|
||||
default=os.path.abspath(sys.executable),
|
||||
help='Path to the Python interpreter to use when provisioning.')
|
||||
parser.add_argument('--force', '-f', action='store_true',
|
||||
help='create venv even with outdated Python version.')
|
||||
parser.add_argument(
|
||||
'--python-interpreter',
|
||||
'-p',
|
||||
metavar='PATH_TO_PYTHON_INTERPRETER',
|
||||
default=os.path.abspath(sys.executable),
|
||||
help='Path to the Python interpreter to use when provisioning.',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--force', '-f', action='store_true', help='create venv even with outdated Python version.'
|
||||
)
|
||||
options = parser.parse_args()
|
||||
|
||||
base_dir = os.path.abspath(os.path.join(__file__, '..', '..'))
|
||||
py_version_output = subprocess.check_output([options.python_interpreter, '--version'],
|
||||
stderr=subprocess.STDOUT, universal_newlines=True)
|
||||
py_version_output = subprocess.check_output(
|
||||
[options.python_interpreter, '--version'], stderr=subprocess.STDOUT, universal_newlines=True
|
||||
)
|
||||
# The output has the format "Python 1.2.3"
|
||||
py_version_list = py_version_output.split()[1].split('.')
|
||||
py_version = tuple(int(num) for num in py_version_list[0:2])
|
||||
venv_name = 'zulip-api-py{}-venv'.format(py_version[0])
|
||||
|
||||
if py_version <= (3, 1) and (not options.force):
|
||||
print(red + "Provision failed: Cannot create venv with outdated Python version ({}).\n"
|
||||
"Maybe try `python3 tools/provision`."
|
||||
.format(py_version_output.strip()) + end_format)
|
||||
print(
|
||||
red + "Provision failed: Cannot create venv with outdated Python version ({}).\n"
|
||||
"Maybe try `python3 tools/provision`.".format(py_version_output.strip()) + end_format
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
venv_dir = os.path.join(base_dir, venv_name)
|
||||
|
@@ -48,18 +55,24 @@ the Python version this command is executed with."""
try:
|
||||
return_code = subprocess.call([options.python_interpreter, '-m', 'venv', venv_dir])
|
||||
except OSError:
|
||||
print("{red}Installation with venv failed. Probable errors are: "
|
||||
"You are on Ubuntu and you haven't installed python3-venv,"
|
||||
"or you are running an unsupported python version"
|
||||
"or python is not installed properly{end_format}"
|
||||
.format(red=red, end_format=end_format))
|
||||
print(
|
||||
"{red}Installation with venv failed. Probable errors are: "
|
||||
"You are on Ubuntu and you haven't installed python3-venv,"
|
||||
"or you are running an unsupported python version"
|
||||
"or python is not installed properly{end_format}".format(
|
||||
red=red, end_format=end_format
|
||||
)
|
||||
)
|
||||
sys.exit(1)
|
||||
raise
|
||||
else:
|
||||
# subprocess.call returns 0 if a script executed successfully
|
||||
if return_code:
|
||||
raise OSError("The command `{} -m venv {}` failed. Virtualenv not created!"
|
||||
.format(options.python_interpreter, venv_dir))
|
||||
raise OSError(
|
||||
"The command `{} -m venv {}` failed. Virtualenv not created!".format(
|
||||
options.python_interpreter, venv_dir
|
||||
)
|
||||
)
|
||||
print("New virtualenv created.")
|
||||
else:
|
||||
print("Virtualenv already exists.")
|
||||
|
@@ -85,10 +98,21 @@ the Python version this command is executed with."""
pip_path = os.path.join(venv_dir, venv_exec_dir, 'pip')
|
||||
# We first install a modern version of pip that supports --prefix
|
||||
subprocess.call([pip_path, 'install', 'pip>=9.0'])
|
||||
if subprocess.call([pip_path, 'install', '--prefix', venv_dir, '-r',
|
||||
os.path.join(base_dir, requirements_filename)]):
|
||||
raise OSError("The command `pip install -r {}` failed. Dependencies not installed!"
|
||||
.format(os.path.join(base_dir, requirements_filename)))
|
||||
if subprocess.call(
|
||||
[
|
||||
pip_path,
|
||||
'install',
|
||||
'--prefix',
|
||||
venv_dir,
|
||||
'-r',
|
||||
os.path.join(base_dir, requirements_filename),
|
||||
]
|
||||
):
|
||||
raise OSError(
|
||||
"The command `pip install -r {}` failed. Dependencies not installed!".format(
|
||||
os.path.join(base_dir, requirements_filename)
|
||||
)
|
||||
)
|
||||
|
||||
install_dependencies('requirements.txt')
|
||||
|
||||
|
@@ -105,10 +129,7 @@ the Python version this command is executed with."""
|
||||
print(green + 'Success!' + end_format)
|
||||
|
||||
activate_command = os.path.join(base_dir,
|
||||
venv_dir,
|
||||
venv_exec_dir,
|
||||
'activate')
|
||||
activate_command = os.path.join(base_dir, venv_dir, venv_exec_dir, 'activate')
|
||||
# We make the path look like a Unix path, because most Windows users
|
||||
# are likely to be running in a bash shell.
|
||||
activate_command = activate_command.replace(os.sep, '/')
|
||||
|
|
|
@@ -13,6 +13,7 @@ import twine.commands.upload
|
||||
REPO_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
|
||||
@contextmanager
|
||||
def cd(newdir):
|
||||
prevdir = os.getcwd()
|
||||
|
@@ -22,6 +23,7 @@ def cd(newdir):
finally:
|
||||
os.chdir(prevdir)
|
||||
|
||||
|
||||
def _generate_dist(dist_type, setup_file, package_name, setup_args):
|
||||
message = 'Generating {dist_type} for {package_name}.'.format(
|
||||
dist_type=dist_type,
|
||||
|
@@ -40,13 +42,13 @@ def _generate_dist(dist_type, setup_file, package_name, setup_args):
)
|
||||
print(crayons.green(message, bold=True))
|
||||
|
||||
|
||||
def generate_bdist_wheel(setup_file, package_name, universal=False):
|
||||
if universal:
|
||||
_generate_dist('bdist_wheel', setup_file, package_name,
|
||||
['bdist_wheel', '--universal'])
|
||||
_generate_dist('bdist_wheel', setup_file, package_name, ['bdist_wheel', '--universal'])
|
||||
else:
|
||||
_generate_dist('bdist_wheel', setup_file, package_name,
|
||||
['bdist_wheel'])
|
||||
_generate_dist('bdist_wheel', setup_file, package_name, ['bdist_wheel'])
|
||||
|
||||
|
||||
def twine_upload(dist_dirs):
|
||||
message = 'Uploading distributions under the following directories:'
|
||||
|
@ -55,14 +57,12 @@ def twine_upload(dist_dirs):
|
|||
print(crayons.yellow(dist_dir))
|
||||
twine.commands.upload.main(dist_dirs)
|
||||
|
||||
|
||||
def cleanup(package_dir):
|
||||
build_dir = os.path.join(package_dir, 'build')
|
||||
temp_dir = os.path.join(package_dir, 'temp')
|
||||
dist_dir = os.path.join(package_dir, 'dist')
|
||||
egg_info = os.path.join(
|
||||
package_dir,
|
||||
'{}.egg-info'.format(os.path.basename(package_dir))
|
||||
)
|
||||
egg_info = os.path.join(package_dir, '{}.egg-info'.format(os.path.basename(package_dir)))
|
||||
|
||||
def _rm_if_it_exists(directory):
|
||||
if os.path.isdir(directory):
|
||||
|
@@ -74,6 +74,7 @@ def cleanup(package_dir):
_rm_if_it_exists(dist_dir)
|
||||
_rm_if_it_exists(egg_info)
|
||||
|
||||
|
||||
def set_variable(fp, variable, value):
|
||||
fh, temp_abs_path = tempfile.mkstemp()
|
||||
with os.fdopen(fh, 'w') as new_file, open(fp) as old_file:
|
||||
|
@@ -90,10 +91,10 @@ def set_variable(fp, variable, value):
os.remove(fp)
|
||||
shutil.move(temp_abs_path, fp)
|
||||
|
||||
message = 'Set {variable} in {fp} to {value}.'.format(
|
||||
fp=fp, variable=variable, value=value)
|
||||
message = 'Set {variable} in {fp} to {value}.'.format(fp=fp, variable=variable, value=value)
|
||||
print(crayons.white(message, bold=True))
|
||||
|
||||
|
||||
def update_requirements_in_zulip_repo(zulip_repo_dir, version, hash_or_tag):
|
||||
common = os.path.join(zulip_repo_dir, 'requirements', 'common.in')
|
||||
prod = os.path.join(zulip_repo_dir, 'requirements', 'prod.txt')
|
||||
|
@@ -115,10 +116,8 @@ def update_requirements_in_zulip_repo(zulip_repo_dir, version, hash_or_tag):
|
||||
url_zulip = 'git+https://github.com/zulip/python-zulip-api.git@{tag}#egg={name}=={version}_git&subdirectory={name}\n'
|
||||
url_zulip_bots = 'git+https://github.com/zulip/python-zulip-api.git@{tag}#egg={name}=={version}+git&subdirectory={name}\n'
|
||||
zulip_bots_line = url_zulip_bots.format(tag=hash_or_tag, name='zulip_bots',
|
||||
version=version)
|
||||
zulip_line = url_zulip.format(tag=hash_or_tag, name='zulip',
|
||||
version=version)
|
||||
zulip_bots_line = url_zulip_bots.format(tag=hash_or_tag, name='zulip_bots', version=version)
|
||||
zulip_line = url_zulip.format(tag=hash_or_tag, name='zulip', version=version)
|
||||
|
||||
_edit_reqs_file(prod, zulip_bots_line, zulip_line)
|
||||
_edit_reqs_file(dev, zulip_bots_line, zulip_line)
|
||||
|
@@ -135,6 +134,7 @@ def update_requirements_in_zulip_repo(zulip_repo_dir, version, hash_or_tag):
message = 'Updated zulip API package requirements in the main repo.'
|
||||
print(crayons.white(message, bold=True))
|
||||
|
||||
|
||||
def parse_args():
|
||||
usage = """
|
||||
Script to automate the PyPA release of the zulip, zulip_bots and
|
||||
|
@@ -176,26 +176,36 @@ And you're done! Congrats!
"""
|
||||
parser = argparse.ArgumentParser(usage=usage)
|
||||
|
||||
parser.add_argument('--cleanup', '-c',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Remove build directories (dist/, build/, egg-info/, etc).')
|
||||
parser.add_argument(
|
||||
'--cleanup',
|
||||
'-c',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Remove build directories (dist/, build/, egg-info/, etc).',
|
||||
)
|
||||
|
||||
parser.add_argument('--build', '-b',
|
||||
metavar='VERSION_NUM',
|
||||
help=('Build sdists and wheels for all packages with the'
|
||||
'specified version number.'
|
||||
' sdists and wheels are stored in <package_name>/dist/*.'))
|
||||
parser.add_argument(
|
||||
'--build',
|
||||
'-b',
|
||||
metavar='VERSION_NUM',
|
||||
help=(
|
||||
'Build sdists and wheels for all packages with the'
|
||||
'specified version number.'
|
||||
' sdists and wheels are stored in <package_name>/dist/*.'
|
||||
),
|
||||
)
|
||||
|
||||
parser.add_argument('--release', '-r',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Upload the packages to PyPA using twine.')
|
||||
parser.add_argument(
|
||||
'--release',
|
||||
'-r',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Upload the packages to PyPA using twine.',
|
||||
)
|
||||
|
||||
subparsers = parser.add_subparsers(dest='subcommand')
|
||||
parser_main_repo = subparsers.add_parser(
|
||||
'update-main-repo',
|
||||
help='Update the zulip/requirements/* in the main zulip repo.'
|
||||
'update-main-repo', help='Update the zulip/requirements/* in the main zulip repo.'
|
||||
)
|
||||
parser_main_repo.add_argument('repo', metavar='PATH_TO_ZULIP_DIR')
|
||||
parser_main_repo.add_argument('version', metavar='version number of the packages')
|
||||
|
@@ -203,6 +213,7 @@ And you're done! Congrats!
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def main():
|
||||
options = parse_args()
|
||||
|
||||
|
@@ -239,11 +250,10 @@ def main():
|
||||
if options.subcommand == 'update-main-repo':
|
||||
if options.hash:
|
||||
update_requirements_in_zulip_repo(options.repo, options.version,
|
||||
options.hash)
|
||||
update_requirements_in_zulip_repo(options.repo, options.version, options.hash)
|
||||
else:
|
||||
update_requirements_in_zulip_repo(options.repo, options.version,
|
||||
options.version)
|
||||
update_requirements_in_zulip_repo(options.repo, options.version, options.version)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
|
tools/review (11 changed lines)
@@ -9,24 +9,29 @@ def exit(message: str) -> None:
print(message)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def run(command: str) -> None:
|
||||
print('\n>>> ' + command)
|
||||
subprocess.check_call(command.split())
|
||||
|
||||
|
||||
def check_output(command: str) -> str:
|
||||
return subprocess.check_output(command.split()).decode('ascii')
|
||||
|
||||
|
||||
def get_git_branch() -> str:
|
||||
command = 'git rev-parse --abbrev-ref HEAD'
|
||||
output = check_output(command)
|
||||
return output.strip()
|
||||
|
||||
|
||||
def check_git_pristine() -> None:
|
||||
command = 'git status --porcelain'
|
||||
output = check_output(command)
|
||||
if output.strip():
|
||||
exit('Git is not pristine:\n' + output)
|
||||
|
||||
|
||||
def ensure_on_clean_master() -> None:
|
||||
branch = get_git_branch()
|
||||
if branch != 'master':
|
||||
|
@@ -35,6 +40,7 @@ def ensure_on_clean_master() -> None:
run('git fetch upstream master')
|
||||
run('git rebase upstream/master')
|
||||
|
||||
|
||||
def create_pull_branch(pull_id: int) -> None:
|
||||
run('git fetch upstream pull/%d/head' % (pull_id,))
|
||||
run('git checkout -B review-%s FETCH_HEAD' % (pull_id,))
|
||||
|
@@ -44,8 +50,8 @@ def create_pull_branch(pull_id: int) -> None:
|
||||
print()
|
||||
print('PR: %d' % (pull_id,))
|
||||
print(subprocess.check_output(['git', 'log', 'HEAD~..',
|
||||
'--pretty=format:Author: %an']))
|
||||
print(subprocess.check_output(['git', 'log', 'HEAD~..', '--pretty=format:Author: %an']))
|
||||
|
||||
|
||||
def review_pr() -> None:
|
||||
try:
|
||||
|
@@ -56,5 +62,6 @@ def review_pr() -> None:
ensure_on_clean_master()
|
||||
create_pull_branch(pull_id)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
review_pr()
|
||||
|
|
|
@@ -99,38 +99,78 @@ force_include = [
"zulip_bots/zulip_bots/bots/front/front.py",
|
||||
"zulip_bots/zulip_bots/bots/front/test_front.py",
|
||||
"tools/custom_check.py",
|
||||
"tools/deploy"
|
||||
"tools/deploy",
|
||||
]
|
||||
|
||||
parser = argparse.ArgumentParser(description="Run mypy on files tracked by git.")
|
||||
parser.add_argument('targets', nargs='*', default=[],
|
||||
help="""files and directories to include in the result.
|
||||
If this is not specified, the current directory is used""")
|
||||
parser.add_argument('-m', '--modified', action='store_true', default=False, help='list only modified files')
|
||||
parser.add_argument('-a', '--all', dest='all', action='store_true', default=False,
|
||||
help="""run mypy on all python files, ignoring the exclude list.
|
||||
This is useful if you have to find out which files fail mypy check.""")
|
||||
parser.add_argument('--no-disallow-untyped-defs', dest='disallow_untyped_defs', action='store_false', default=True,
|
||||
help="""Don't throw errors when functions are not annotated""")
|
||||
parser.add_argument('--scripts-only', dest='scripts_only', action='store_true', default=False,
|
||||
help="""Only type check extensionless python scripts""")
|
||||
parser.add_argument('--warn-unused-ignores', dest='warn_unused_ignores', action='store_true', default=False,
|
||||
help="""Use the --warn-unused-ignores flag with mypy""")
|
||||
parser.add_argument('--no-ignore-missing-imports', dest='ignore_missing_imports', action='store_false', default=True,
|
||||
help="""Don't use the --ignore-missing-imports flag with mypy""")
|
||||
parser.add_argument('--quick', action='store_true', default=False,
|
||||
help="""Use the --quick flag with mypy""")
|
||||
parser.add_argument(
|
||||
'targets',
|
||||
nargs='*',
|
||||
default=[],
|
||||
help="""files and directories to include in the result.
|
||||
If this is not specified, the current directory is used""",
|
||||
)
|
||||
parser.add_argument(
|
||||
'-m', '--modified', action='store_true', default=False, help='list only modified files'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-a',
|
||||
'--all',
|
||||
dest='all',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help="""run mypy on all python files, ignoring the exclude list.
|
||||
This is useful if you have to find out which files fail mypy check.""",
|
||||
)
|
||||
parser.add_argument(
|
||||
'--no-disallow-untyped-defs',
|
||||
dest='disallow_untyped_defs',
|
||||
action='store_false',
|
||||
default=True,
|
||||
help="""Don't throw errors when functions are not annotated""",
|
||||
)
|
||||
parser.add_argument(
|
||||
'--scripts-only',
|
||||
dest='scripts_only',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help="""Only type check extensionless python scripts""",
|
||||
)
|
||||
parser.add_argument(
|
||||
'--warn-unused-ignores',
|
||||
dest='warn_unused_ignores',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help="""Use the --warn-unused-ignores flag with mypy""",
|
||||
)
|
||||
parser.add_argument(
|
||||
'--no-ignore-missing-imports',
|
||||
dest='ignore_missing_imports',
|
||||
action='store_false',
|
||||
default=True,
|
||||
help="""Don't use the --ignore-missing-imports flag with mypy""",
|
||||
)
|
||||
parser.add_argument(
|
||||
'--quick', action='store_true', default=False, help="""Use the --quick flag with mypy"""
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.all:
|
||||
exclude = []
|
||||
|
||||
# find all non-excluded files in current directory
|
||||
files_dict = cast(Dict[str, List[str]],
|
||||
lister.list_files(targets=args.targets, ftypes=['py', 'pyi'],
|
||||
use_shebang=True, modified_only=args.modified,
|
||||
exclude = exclude + ['stubs'], group_by_ftype=True,
|
||||
extless_only=args.scripts_only))
|
||||
files_dict = cast(
|
||||
Dict[str, List[str]],
|
||||
lister.list_files(
|
||||
targets=args.targets,
|
||||
ftypes=['py', 'pyi'],
|
||||
use_shebang=True,
|
||||
modified_only=args.modified,
|
||||
exclude=exclude + ['stubs'],
|
||||
group_by_ftype=True,
|
||||
extless_only=args.scripts_only,
|
||||
),
|
||||
)
|
||||
|
||||
for inpath in force_include:
|
||||
try:
|
||||
|
@@ -140,10 +180,13 @@ for inpath in force_include:
files_dict[ext].append(inpath)
|
||||
|
||||
pyi_files = set(files_dict['pyi'])
|
||||
python_files = [fpath for fpath in files_dict['py']
|
||||
if not fpath.endswith('.py') or fpath + 'i' not in pyi_files]
|
||||
python_files = [
|
||||
fpath for fpath in files_dict['py'] if not fpath.endswith('.py') or fpath + 'i' not in pyi_files
|
||||
]
|
||||
|
||||
repo_python_files = OrderedDict([('zulip', []), ('zulip_bots', []), ('zulip_botserver', []), ('tools', [])])
|
||||
repo_python_files = OrderedDict(
|
||||
[('zulip', []), ('zulip_bots', []), ('zulip_botserver', []), ('tools', [])]
|
||||
)
|
||||
for file_path in python_files:
|
||||
repo = PurePath(file_path).parts[0]
|
||||
if repo in repo_python_files:
|
||||
|
|
|
@@ -1,4 +1,3 @@
|
||||
import argparse
|
||||
import os
|
||||
import shutil
|
||||
|
@@ -10,21 +9,26 @@ import pytest
TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
os.chdir(os.path.dirname(TOOLS_DIR))
|
||||
|
||||
|
||||
def handle_input_and_run_tests_for_package(package_name, path_list):
|
||||
parser = argparse.ArgumentParser(description="Run tests for {}.".format(package_name))
|
||||
parser.add_argument('--coverage',
|
||||
nargs='?',
|
||||
const=True,
|
||||
default=False,
|
||||
help='compute test coverage (--coverage combine to combine with previous reports)')
|
||||
parser.add_argument('--pytest', '-p',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help="run tests with pytest")
|
||||
parser.add_argument('--verbose', '-v',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='show verbose output (with pytest)')
|
||||
parser.add_argument(
|
||||
'--coverage',
|
||||
nargs='?',
|
||||
const=True,
|
||||
default=False,
|
||||
help='compute test coverage (--coverage combine to combine with previous reports)',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--pytest', '-p', default=False, action='store_true', help="run tests with pytest"
|
||||
)
|
||||
parser.add_argument(
|
||||
'--verbose',
|
||||
'-v',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='show verbose output (with pytest)',
|
||||
)
|
||||
options = parser.parse_args()
|
||||
|
||||
test_session_title = ' Running tests for {} '.format(package_name)
|
||||
|
@@ -33,6 +37,7 @@ def handle_input_and_run_tests_for_package(package_name, path_list):
|
||||
if options.coverage:
|
||||
import coverage
|
||||
|
||||
cov = coverage.Coverage(config_file="tools/.coveragerc")
|
||||
if options.coverage == 'combine':
|
||||
cov.load()
|
||||
|
@ -42,11 +47,11 @@ def handle_input_and_run_tests_for_package(package_name, path_list):
|
|||
location_to_run_in = os.path.join(TOOLS_DIR, '..', *path_list)
|
||||
paths_to_test = ['.']
|
||||
pytest_options = [
|
||||
'-s', # show output from tests; this hides the progress bar though
|
||||
'-x', # stop on first test failure
|
||||
'-s', # show output from tests; this hides the progress bar though
|
||||
'-x', # stop on first test failure
|
||||
'--ff', # runs last failure first
|
||||
]
|
||||
pytest_options += (['-v'] if options.verbose else [])
|
||||
pytest_options += ['-v'] if options.verbose else []
|
||||
os.chdir(location_to_run_in)
|
||||
result = pytest.main(paths_to_test + pytest_options)
|
||||
if result != 0:
|
||||
|
|
|
@@ -31,33 +31,37 @@ the tests for xkcd and wikipedia bots):
"""
|
||||
parser = argparse.ArgumentParser(description=description)
|
||||
|
||||
parser.add_argument('bots_to_test',
|
||||
metavar='bot',
|
||||
nargs='*',
|
||||
default=[],
|
||||
help='specific bots to test (default is all)')
|
||||
parser.add_argument('--coverage',
|
||||
nargs='?',
|
||||
const=True,
|
||||
default=False,
|
||||
help='compute test coverage (--coverage combine to combine with previous reports)')
|
||||
parser.add_argument('--exclude',
|
||||
metavar='bot',
|
||||
nargs='*',
|
||||
default=[],
|
||||
help='bot(s) to exclude')
|
||||
parser.add_argument('--error-on-no-init',
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="whether to exit if a bot has tests which won't run due to no __init__.py")
|
||||
parser.add_argument('--pytest', '-p',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help="run tests with pytest")
|
||||
parser.add_argument('--verbose', '-v',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='show verbose output (with pytest)')
|
||||
parser.add_argument(
|
||||
'bots_to_test',
|
||||
metavar='bot',
|
||||
nargs='*',
|
||||
default=[],
|
||||
help='specific bots to test (default is all)',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--coverage',
|
||||
nargs='?',
|
||||
const=True,
|
||||
default=False,
|
||||
help='compute test coverage (--coverage combine to combine with previous reports)',
|
||||
)
|
||||
parser.add_argument('--exclude', metavar='bot', nargs='*', default=[], help='bot(s) to exclude')
|
||||
parser.add_argument(
|
||||
'--error-on-no-init',
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="whether to exit if a bot has tests which won't run due to no __init__.py",
|
||||
)
|
||||
parser.add_argument(
|
||||
'--pytest', '-p', default=False, action='store_true', help="run tests with pytest"
|
||||
)
|
||||
parser.add_argument(
|
||||
'--verbose',
|
||||
'-v',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='show verbose output (with pytest)',
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
|
@@ -76,6 +80,7 @@ def main():
|
||||
if options.coverage:
|
||||
import coverage
|
||||
|
||||
cov = coverage.Coverage(config_file="tools/.coveragerc")
|
||||
if options.coverage == 'combine':
|
||||
cov.load()
|
||||
|
@@ -94,11 +99,11 @@ def main():
excluded_bots = ['merels']
|
||||
pytest_bots_to_test = sorted([bot for bot in bots_to_test if bot not in excluded_bots])
|
||||
pytest_options = [
|
||||
'-s', # show output from tests; this hides the progress bar though
|
||||
'-x', # stop on first test failure
|
||||
'-s', # show output from tests; this hides the progress bar though
|
||||
'-x', # stop on first test failure
|
||||
'--ff', # runs last failure first
|
||||
]
|
||||
pytest_options += (['-v'] if options.verbose else [])
|
||||
pytest_options += ['-v'] if options.verbose else []
|
||||
os.chdir(bots_dir)
|
||||
result = pytest.main(pytest_bots_to_test + pytest_options)
|
||||
if result != 0:
|
||||
|
@@ -116,7 +121,9 @@ def main():
test_suites.append(loader.discover(top_level + name, top_level_dir=top_level))
|
||||
except ImportError as exception:
|
||||
print(exception)
|
||||
print("This likely indicates that you need a '__init__.py' file in your bot directory.")
|
||||
print(
|
||||
"This likely indicates that you need a '__init__.py' file in your bot directory."
|
||||
)
|
||||
if options.error_on_no_init:
|
||||
sys.exit(1)
|
||||
|
||||
|
@@ -134,5 +141,6 @@ def main():
cov.html_report()
|
||||
print("HTML report saved under directory 'htmlcov'.")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
|