Formatting and add workflows 🧹
mmazurekgda committed Nov 22, 2023 · 1 parent 311b5bb · commit f760f26
Showing 7 changed files with 125 additions and 24 deletions.
29 changes: 29 additions & 0 deletions .github/workflows/pre-commit.yaml
@@ -0,0 +1,29 @@

name: Pre-commit

on:
push:
branches:
- main
pull_request:
branches:
- main

jobs:
pre-commit:
runs-on: ubuntu-latest

steps:
- name: Checkout code
uses: actions/checkout@v3

- name: Set up Python
uses: actions/setup-python@v3
with:
python-version: 3.9.18

- name: Install pre-commit
run: pip install pre-commit

- name: Run pre-commit
run: pre-commit run --all-files
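
The same checks can be reproduced locally before pushing. A minimal sketch, assuming pre-commit has already been installed in the active environment (pip install pre-commit), mirroring the two workflow steps above:

import subprocess

# Run every configured hook against the whole repository, as the CI job does.
subprocess.run(["pre-commit", "run", "--all-files"], check=True)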
26 changes: 26 additions & 0 deletions .github/workflows/tests.yaml
@@ -0,0 +1,26 @@
name: Run pytests

on: [push]

jobs:
build-linux:
runs-on: ubuntu-latest
strategy:
max-parallel: 5

steps:
- uses: actions/checkout@v3
- name: Set up Python 3.9.18
uses: actions/setup-python@v3
with:
python-version: '3.9.18'
- name: Add conda to system path
run: |
# $CONDA is an environment variable pointing to the root of the miniconda directory
echo $CONDA/bin >> $GITHUB_PATH
- name: Install dependencies
run: |
conda env update --file environment.yml --name base
- name: Test with pytest
run: |
pytest test
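
The test job can be reproduced locally in the same way. A minimal sketch, assuming a conda installation is on PATH and that updating the "base" environment is acceptable (locally a dedicated environment name may be preferable):

import subprocess

# Install the project dependencies from environment.yml, as the CI step does.
subprocess.run(
    ["conda", "env", "update", "--file", "environment.yml", "--name", "base"],
    check=True,
)
# Run the test suite from the test/ directory.
subprocess.run(["pytest", "test"], check=True)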
10 changes: 10 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,10 @@
repos:
- repo: https://github.com/psf/black
rev: 23.9.1
hooks:
- id: black
language_version: python3.11
- repo: https://github.com/PyCQA/flake8
rev: 6.1.0
hooks:
- id: flake8
2 changes: 2 additions & 0 deletions pyproject.toml
@@ -0,0 +1,2 @@
[tool.black]
line-length = 79
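
Setting black's line-length to 79 keeps it consistent with flake8's default 79-character limit (flake8 6.1.0 is pinned in .pre-commit-config.yaml above), so the two hooks do not disagree over E501. A quick local check that the tree already satisfies the configured length, as a sketch assuming black is installed; black reads the [tool.black] section from pyproject.toml automatically:

import subprocess

# Verify formatting without rewriting files.
subprocess.run(["black", "--check", "."], check=True)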
65 changes: 41 additions & 24 deletions status_checker.py
@@ -16,7 +16,6 @@


class StatusChecker:

slots_to_check = [
"lhcb-sim10-dev",
"lhcb-sim10",
@@ -62,7 +61,7 @@ class StatusChecker:
}

hidden_platform_prefix = "x86_64_v2-centos7-gcc11"
hidden_platform_prefix_re = r'x86_64_v2(-centos7)?(-gcc11)?'
hidden_platform_prefix_re = r"x86_64_v2(-centos7)?(-gcc11)?"

# there is no way you can get the list of build ids
# from the API, so we have to use the main page...
@@ -97,7 +96,9 @@ def get_current_builds(self):
response.raise_for_status()
slots_reg = "|".join(self.slots_to_check)
slots_reg = rf"(?:{slots_reg})\/[0-9]{{1,4}}\/"
slot_candidates = re.findall(slots_reg, response.content.decode("utf-8"))
slot_candidates = re.findall(
slots_reg, response.content.decode("utf-8")
)
if not slot_candidates:
msg = (
f"No slots from the list '{self.slots_to_check}' "
@@ -127,17 +128,21 @@ def _fetch_build_info(
if parsed["aborted"]:
return df, parsed_date
for project in parsed["projects"]:
if project["name"] in self.projects_to_check and project["enabled"]:
if (
project["name"] in self.projects_to_check
and project["enabled"]
):
if df.empty:
short_platforms = [
# platform.replace(self.hidden_platform_prefix, "*")
re.sub(self.hidden_platform_prefix_re, '*', platform)
re.sub(self.hidden_platform_prefix_re, "*", platform)
for platform in self.platforms_to_check
if platform in project["results"]
]
nested_results_cols = [("Project", ""), ("Failed MRs", "")]
nested_results_cols += [
(platform, "BUILD / TEST") for platform in short_platforms
(platform, "BUILD / TEST")
for platform in short_platforms
]
df = pd.DataFrame(
columns=pd.MultiIndex.from_tuples(nested_results_cols)
@@ -156,25 +161,29 @@
f"{self.parsed_result_type[result_name]}"
f"{str(results[check_type][result_name])}"
)
except KeyError:
logging.debug(
f'Missing key [{check_type}][{result_name}] in {results}. Output will be incomplete')
tmp_tmp_res = ["UNKNOWN"]
break
except TypeError:
tmp_tmp_res = ["UNKNOWN"]
break
except KeyError:
logging.debug(f'Missing key [{check_type}][{result_name}] in {results}. Output will be incomplete')
logging.debug(
f"Missing key [{check_type}]"
f"[{result_name}] in {results}. "
"Output will be incomplete"
)
tmp_tmp_res = ["UNKNOWN"]
break
tmp_res.append(" ".join(tmp_tmp_res))
ptf_res.append(" / ".join(tmp_res))
failed_MRs = []
if project["checkout"] and "warnings" in project["checkout"]:
for warn in project["checkout"]["warnings"]:
tmp_res = re.findall(rf"{project['name']}\![0-9]{{1,5}}", warn)
failed_MRs += [r.replace(project["name"], "") for r in tmp_res]
tmp_res = re.findall(
rf"{project['name']}\![0-9]{{1,5}}",
warn,
)
failed_MRs += [
r.replace(project["name"], "") for r in tmp_res
]
df.loc[len(df.index)] = [
project["name"],
",".join(failed_MRs),
@@ -221,7 +230,9 @@ def check_status(
break
else:
msgs[slot][date_back]["build_id"] = tmp_build_id
msgs[slot][date_back]["df"] = df.reset_index(drop=True)
msgs[slot][date_back]["df"] = df.reset_index(
drop=True
)
break
except AttributeError as err:
logging.warning(
@@ -230,34 +241,40 @@
)
stream = ""
for slot, m_values in msgs.items():
sorted_m_values = dict(sorted(m_values.items(), key=lambda x: x[0]))
sorted_m_values = dict(
sorted(m_values.items(), key=lambda x: x[0])
)
if html:
stream += f"<h4 class='part'>{slot}</h4>\n"
for date_back, values in sorted_m_values.items():
parsed_date = date_back.strftime(self.date_format)
if values:
if html:
stream += f"<details><summary>{parsed_date}/{values['build_id']}</summary>"
stream += f"link to <a href=\"https://lhcb-nightlies.web.cern.ch/nightly/{slot}/{values['build_id']}/\">"
stream += f"<details><summary>{parsed_date}/{values['build_id']}</summary>" # noqa: E501
stream += f"link to <a href=\"https://lhcb-nightlies.web.cern.ch/nightly/{slot}/{values['build_id']}/\">" # noqa: E501
stream += f"{slot}/{values['build_id']}</a></br>"
pretty_df = values["df"].style.applymap(color_values)
stream += f"{pretty_df.to_html()}</details>"
else:
stream += f"-> {slot}/{parsed_date}/{values['build_id']}:\n"
stream += (
f"-> {slot}/{parsed_date}/{values['build_id']}:\n"
)
table = tabulate(
values["df"],
headers=list(map("\n".join, values["df"].columns.tolist())),
headers=list(
map("\n".join, values["df"].columns.tolist())
),
tablefmt="pretty",
)
stream += f"{table}\n"
else:
if html:
stream += (
f"<details><summary>{parsed_date}/(No build)</summary>"
)
stream += f"<details><summary>{parsed_date}/(No build)</summary>" # noqa: E501
stream += "No build available for this day.</details>"
else:
stream += f"-> {slot}/{parsed_date}: No slot available\n"
stream += (
f"-> {slot}/{parsed_date}: No slot available\n"
)
if filepath:
with open(filepath, "w") as f:
f.write(stream)
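
As a side note on the reflowed regular expressions: the hidden_platform_prefix_re substitution above is what produces the shortened platform column headers. A minimal sketch, using "x86_64_v2-centos7-gcc11-opt" purely as an illustrative platform string:

import re

# Illustrative only: shows how the prefix regex collapses a platform name.
hidden_platform_prefix_re = r"x86_64_v2(-centos7)?(-gcc11)?"
print(re.sub(hidden_platform_prefix_re, "*", "x86_64_v2-centos7-gcc11-opt"))
# prints "*-opt"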
Empty file added test/__init__.py
17 changes: 17 additions & 0 deletions test/test_status_checker.py
@@ -0,0 +1,17 @@
import subprocess


def test_cli_simple_training():
ex_generate = subprocess.run(
[
"python",
"run.py",
"current-status",
"--slots",
"lhcb-sim11",
],
check=True,
capture_output=True,
text=True,
)
assert ex_generate.returncode == 0
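
A natural follow-up check, sketched here only under the assumption that run.py writes the rendered status stream to stdout (not verified in this commit), would also look for the requested slot in the output:

import subprocess


def test_cli_simple_training_output_mentions_slot():
    # Hypothetical extra test; assumes the slot name appears in the CLI's stdout.
    result = subprocess.run(
        ["python", "run.py", "current-status", "--slots", "lhcb-sim11"],
        check=True,
        capture_output=True,
        text=True,
    )
    assert "lhcb-sim11" in result.stdout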
