CI: switch formatter to black. #763

Merged · 3 commits · Apr 23, 2023
2 changes: 2 additions & 0 deletions .git-blame-ignored-commits
@@ -0,0 +1,2 @@
+#This commit just switched the whole codebase to black format
+c1d91c7c7e3dfa5f6c698c8591b2900d27203ab5
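The ignore file only takes effect when git is told about it; each clone opts in explicitly. A minimal sketch of the two standard ways to use it (both are stock git features, available since git 2.23; `some/file.py` is a placeholder path):

```sh
# Register the ignore list once per clone, so every `git blame` skips
# the bulk-reformatting commit listed above.
git config blame.ignoreRevsFile .git-blame-ignored-commits

# Or pass the file explicitly for a single blame invocation.
git blame --ignore-revs-file .git-blame-ignored-commits some/file.py
```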
.github/workflows/… (CI formatting workflow)
@@ -1,7 +1,7 @@
 # This workflow will install Python dependencies, run tests and lint with a single version of Python
 # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

-name: yapf formatting
+name: black formatting

 on:
   # Trigger the workflow on push or pull request,
@@ -31,7 +31,7 @@ jobs:
         run: |
           python -m pip install --upgrade pip
           pip install -r requirements-optional.txt
-      - name: Check format with yapf
+      - name: Check format with black
        run: |
          # Returns an error code if the files are not formatted
-          yapf --diff -r .
+          black --diff --color --check .
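The renamed step is easy to reproduce before pushing: with `--check`, black exits with a non-zero status when any file would be reformatted, and `--diff` prints the proposed changes instead of applying them. A sketch of the two usual invocations:

```sh
# Same check CI runs: print a colored diff, fail if anything would change.
black --diff --color --check .

# Apply the formatting in place instead.
black .
```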
5 changes: 1 addition & 4 deletions .github/workflows/update_xdsl_pyodide_build.py
@@ -25,9 +25,6 @@
         sha256_hash.update(byte_block)

 # Make it build the local xDSL, not the PyPi release. The pyodide build still requires the SHA256 sum.
-yaml_doc["source"] = {
-    "url": f"file://{xdsl_sdist}",
-    "sha256": sha256_hash.hexdigest()
-}
+yaml_doc["source"] = {"url": f"file://{xdsl_sdist}", "sha256": sha256_hash.hexdigest()}
 with open(meta_yaml_path, "w") as f:
     yaml.dump(yaml_doc, f)
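The collapse of the `source` dict is standard black behavior: the one-line form fits within black's default 88-character limit, and the original literal carries no trailing comma. Under black's "magic trailing comma" rule, a comma after the last entry would have kept the literal exploded. A small illustration, reusing the names from the script above:

```python
# No trailing comma: black joins the literal when it fits in 88 columns.
yaml_doc["source"] = {"url": f"file://{xdsl_sdist}", "sha256": sha256_hash.hexdigest()}

# Trailing comma after the last entry: black keeps one element per line.
yaml_doc["source"] = {
    "url": f"file://{xdsl_sdist}",
    "sha256": sha256_hash.hexdigest(),
}
```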
29 changes: 16 additions & 13 deletions bench/parser/bench_lexer.py
@@ -29,8 +29,9 @@ def run_on_files(file_names: Iterable[str]):
         try:
             contents = open(file_name, "r").read()
             input = Input(contents, file_name)
-            file_time = timeit.timeit(lambda: lex_file(input),
-                                      number=args.num_iterations)
+            file_time = timeit.timeit(
+                lambda: lex_file(input), number=args.num_iterations
+            )
             total_time += file_time / args.num_iterations
             print("Time taken: " + str(file_time))
         except Exception as e:
@@ -45,20 +46,22 @@ def run_on_files(file_names: Iterable[str]):
 arg_parser.add_argument(
     "root_directory",
     type=str,
-    help="Path to the root directory containing MLIR files.")
-arg_parser.add_argument("--num_iterations",
-                        type=int,
-                        required=False,
-                        default=1,
-                        help="Number of times to lex each file.")
-arg_parser.add_argument("--profile",
-                        action="store_true",
-                        help="Enable profiling metrics.")
+    help="Path to the root directory containing MLIR files.",
+)
+arg_parser.add_argument(
+    "--num_iterations",
+    type=int,
+    required=False,
+    default=1,
+    help="Number of times to lex each file.",
+)
+arg_parser.add_argument(
+    "--profile", action="store_true", help="Enable profiling metrics."
+)

 args = arg_parser.parse_args()

-file_names = list(
-    glob.iglob(args.root_directory + "/**/*.mlir", recursive=True))
+file_names = list(glob.iglob(args.root_directory + "/**/*.mlir", recursive=True))
 print("Found " + str(len(file_names)) + " files to lex.")

 if args.profile:
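For orientation, a hypothetical invocation of the reformatted lexer benchmark, built from the arguments declared above (the corpus path is a placeholder):

```sh
# Lex every .mlir file under the tree ten times and enable profiling.
python bench/parser/bench_lexer.py /path/to/mlir/corpus --num_iterations 10 --profile
```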
53 changes: 29 additions & 24 deletions bench/parser/bench_parser.py
@@ -54,19 +54,20 @@ def run_on_files(file_names: Iterable[str], mlir_path: str, ctx: MLContext):

     # Parse each sub-file separately.
     for sub_contents in splitted_contents:
-
         # First, parse the file with MLIR to check that it is valid, and
         # print it back in generic form.
-        res = subprocess.run([
-            mlir_path,
-            "--allow-unregistered-dialect",
-            "-mlir-print-op-generic",
-            "-mlir-print-local-scope",
-        ],
-                             input=sub_contents,
-                             text=True,
-                             capture_output=True,
-                             timeout=60)
+        res = subprocess.run(
+            [
+                mlir_path,
+                "--allow-unregistered-dialect",
+                "-mlir-print-op-generic",
+                "-mlir-print-local-scope",
+            ],
+            input=sub_contents,
+            text=True,
+            capture_output=True,
+            timeout=60,
+        )
         if res.returncode != 0:
             continue
         n_total_files += 1
@@ -78,7 +79,8 @@ def run_on_files(file_names: Iterable[str], mlir_path: str, ctx: MLContext):
         try:
             file_time = timeit.timeit(
                 lambda: parse_file(generic_sub_contents, ctx),
-                number=args.num_iterations)
+                number=args.num_iterations,
+            )
             total_time += file_time / args.num_iterations
             print("Time taken: " + str(file_time))
             n_parsed_files += 1
@@ -96,27 +98,30 @@ def run_on_files(file_names: Iterable[str], mlir_path: str, ctx: MLContext):
 arg_parser.add_argument(
     "root_directory",
     type=str,
-    help="Path to the root directory containing MLIR files.")
+    help="Path to the root directory containing MLIR files.",
+)
 arg_parser.add_argument("--mlir-path", type=str, help="Path to mlir-opt.")
-arg_parser.add_argument("--num_iterations",
-                        type=int,
-                        required=False,
-                        default=1,
-                        help="Number of times to parse each file.")
-arg_parser.add_argument("--profile",
-                        action="store_true",
-                        help="Enable profiling metrics.")
+arg_parser.add_argument(
+    "--num_iterations",
+    type=int,
+    required=False,
+    default=1,
+    help="Number of times to parse each file.",
+)
+arg_parser.add_argument(
+    "--profile", action="store_true", help="Enable profiling metrics."
+)
 arg_parser.add_argument(
     "--timeout",
     type=int,
     required=False,
     default=60,
-    help="Timeout for processing each sub-program with MLIR. (in seconds)")
+    help="Timeout for processing each sub-program with MLIR. (in seconds)",
+)

 args = arg_parser.parse_args()

-file_names = list(
-    glob.iglob(args.root_directory + "/**/*.mlir", recursive=True))
+file_names = list(glob.iglob(args.root_directory + "/**/*.mlir", recursive=True))
 print("Found " + str(len(file_names)) + " files to parse.")

 ctx = MLContext()
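Likewise, a hypothetical run of the parser benchmark; `--mlir-path` must point at a local `mlir-opt` build, and both paths below are placeholders:

```sh
# Round-trip each sub-program through mlir-opt, then time xDSL's parser on it.
python bench/parser/bench_parser.py /path/to/mlir/corpus \
    --mlir-path /usr/bin/mlir-opt --timeout 30
```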