diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 0000000..eeb6f20 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,72 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python + +name: tests + +on: + push: + branches: [ "main", "dev" ] + pull_request: + branches: [ "main", "dev" ] + +jobs: + # there is no python3.6 available on newer ubuntu instances + # so we have this one to run on ubuntu-20.04 + test-python36: + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + python-version: ["3.6"] + + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install flake8 pytest + pip install -r requirements.txt + pip install -r requirements_ci.txt + - name: Lint with flake8 + run: | + # stop the build if there are Python syntax errors or undefined names + flake8 . --count --exit-zero --select=E9,F63,F7,F82 --show-source --statistics + # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + - name: Test with pytest + run: | + pytest --cov=PyNomaly + + test: + + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] + + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install flake8 pytest + pip install -r requirements.txt + pip install -r requirements_ci.txt + - name: Lint with flake8 + run: | + # stop the build if there are Python syntax errors or undefined names + flake8 . --count --exit-zero --select=E9,F63,F7,F82 --show-source --statistics + # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + - name: Test with pytest + run: | + pytest --cov=PyNomaly diff --git a/readme.md b/readme.md index 1a23f85..77b1724 100644 --- a/readme.md +++ b/readme.md @@ -7,7 +7,7 @@ scores in the range of [0,1] that are directly interpretable as the probability [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![PyPi](https://img.shields.io/badge/pypi-0.3.3-blue.svg)](https://pypi.python.org/pypi/PyNomaly/0.3.3) ![](https://img.shields.io/pypi/dm/PyNomaly.svg?logoColor=blue) -[![Build Status](https://travis-ci.org/vc1492a/PyNomaly.svg?branch=main)](https://travis-ci.org/vc1492a/PyNomaly) +![Tests](https://github.com/vc1492a/PyNomaly/actions/workflows/tests.yml/badge.svg) [![Coverage Status](https://coveralls.io/repos/github/vc1492a/PyNomaly/badge.svg?branch=main)](https://coveralls.io/github/vc1492a/PyNomaly?branch=main) 
[![JOSS](http://joss.theoj.org/papers/f4d2cfe680768526da7c1f6a2c103266/status.svg)](http://joss.theoj.org/papers/f4d2cfe680768526da7c1f6a2c103266) diff --git a/tests/test_loop.py b/tests/test_loop.py index b7a880b..bb214da 100644 --- a/tests/test_loop.py +++ b/tests/test_loop.py @@ -4,8 +4,9 @@ from PyNomaly import loop import logging +from typing import Tuple import numpy as np -from numpy.testing import assert_array_equal +from numpy.testing import assert_array_equal, assert_array_almost_equal import pandas as pd import pytest from sklearn.datasets import load_iris @@ -52,7 +53,7 @@ def X_n8() -> np.ndarray: @pytest.fixture() -def X_n20_scores() -> tuple[np.ndarray, np.ndarray]: +def X_n20_scores() -> Tuple[np.ndarray, np.ndarray]: """ Fixture that returns a tuple containing a 20 element numpy array and the precalculated loOP scores based on that array. @@ -153,7 +154,7 @@ def test_regression(X_n20_scores) -> None: input_data, expected_scores = X_n20_scores clf = loop.LocalOutlierProbability(input_data).fit() scores = clf.local_outlier_probabilities - assert np.array_equal(scores, expected_scores) + assert_array_almost_equal(scores, expected_scores, decimal=6)