Release 1.3.2 (#33)
* add support and labels to stats output, clarify iou doc, fix example notebook

* Bump version: 1.3.1 → 1.3.2
zachcoleman authored Jan 28, 2023
1 parent 94a0ab3 commit 86cd59e
Showing 7 changed files with 163 additions and 74 deletions.
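The headline change in 1.3.2 is that fast_stats.stats now reports per-class labels and support alongside precision, recall and f1-score, and the example notebook was reworked to tabulate that output with pandas. A minimal sketch of the usage, assuming random integer labels in 0-7 as in the notebook (the data-generation cells are collapsed in this diff, so the size and dtype below are illustrative):

import numpy as np
import pandas as pd
import fast_stats

# Hypothetical inputs; the notebook's own setup cells are not shown in this diff.
y_true = np.random.randint(0, 8, size=100_000).astype(np.uint64)
y_pred = np.random.randint(0, 8, size=100_000).astype(np.uint64)

per_class = fast_stats.stats(y_true, y_pred)
# 1.3.2 adds 'labels' and 'support' next to 'precision', 'recall' and 'f1-score',
# so the result drops straight into a DataFrame, one row per class:
print(pd.DataFrame(per_class))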
2 changes: 1 addition & 1 deletion .bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
current_version = 1.3.1
current_version = 1.3.2
commit = True
tag = False

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "fast-stats"
version = "1.3.1"
version = "1.3.2"
edition = "2021"

[lib]
209 changes: 142 additions & 67 deletions examples/stats.ipynb
@@ -12,17 +12,9 @@
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%load_ext filprofiler"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import pandas as pd\n",
"import fast_stats"
]
},
@@ -35,7 +27,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -52,7 +44,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
@@ -62,42 +54,23 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"=fil-profile= Preparing to write to fil-result/tmpzxndpb6p\n",
"=fil-profile= Wrote flamegraph to \"fil-result/tmpzxndpb6p/peak-memory.svg\"\n",
"=fil-profile= Wrote flamegraph to \"fil-result/tmpzxndpb6p/peak-memory-reversed.svg\"\n"
]
},
{
"data": {
"text/html": [
"\n",
" <iframe\n",
" width=\"100%\"\n",
" height=\"600\"\n",
" src=\"fil-result/tmpzxndpb6p/peak-memory.svg\"\n",
" frameborder=\"0\"\n",
" allowfullscreen\n",
" \n",
" ></iframe>\n",
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x102c09600>"
"{'precision': 0.49997291558742274,\n",
" 'recall': 0.4999755857512341,\n",
" 'f1-score': 0.4999742506657633}"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "display_data"
"output_type": "execute_result"
}
],
"source": [
"%%filprofile\n",
"fast_stats.binary_stats(y_true, y_pred)"
]
},
@@ -110,7 +83,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -120,58 +93,160 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"=fil-profile= Preparing to write to fil-result/tmpcg_juijl\n",
"=fil-profile= Wrote flamegraph to \"fil-result/tmpcg_juijl/peak-memory.svg\"\n",
"=fil-profile= Wrote flamegraph to \"fil-result/tmpcg_juijl/peak-memory-reversed.svg\"\n"
]
},
{
"data": {
"text/html": [
"\n",
" <iframe\n",
" width=\"100%\"\n",
" height=\"600\"\n",
" src=\"fil-result/tmpcg_juijl/peak-memory.svg\"\n",
" frameborder=\"0\"\n",
" allowfullscreen\n",
" \n",
" ></iframe>\n",
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x1260a1e60>"
"{'precision': array([0.12462003, 0.1254621 , 0.1250603 , 0.12443714, 0.12611127,\n",
" 0.1245599 , 0.12519658, 0.12590499]),\n",
" 'recall': array([0.12497745, 0.12492857, 0.12564759, 0.12429353, 0.12595279,\n",
" 0.12490906, 0.12502554, 0.12561796]),\n",
" 'f1-score': array([0.12479849, 0.12519476, 0.12535326, 0.12436529, 0.12603198,\n",
" 0.12473424, 0.125111 , 0.12576131]),\n",
" 'labels': array([0, 1, 2, 3, 4, 5, 6, 7]),\n",
" 'support': array([327051, 328980, 326015, 328392, 327853, 327134, 327917, 328098])}"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "display_data"
"output_type": "execute_result"
}
],
"source": [
"%%filprofile\n",
"fast_stats.stats(y_true, y_pred)"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": []
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>precision</th>\n",
" <th>recall</th>\n",
" <th>f1-score</th>\n",
" <th>labels</th>\n",
" <th>support</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0.124620</td>\n",
" <td>0.124977</td>\n",
" <td>0.124798</td>\n",
" <td>0</td>\n",
" <td>327051</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>0.125462</td>\n",
" <td>0.124929</td>\n",
" <td>0.125195</td>\n",
" <td>1</td>\n",
" <td>328980</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>0.125060</td>\n",
" <td>0.125648</td>\n",
" <td>0.125353</td>\n",
" <td>2</td>\n",
" <td>326015</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>0.124437</td>\n",
" <td>0.124294</td>\n",
" <td>0.124365</td>\n",
" <td>3</td>\n",
" <td>328392</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>0.126111</td>\n",
" <td>0.125953</td>\n",
" <td>0.126032</td>\n",
" <td>4</td>\n",
" <td>327853</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5</th>\n",
" <td>0.124560</td>\n",
" <td>0.124909</td>\n",
" <td>0.124734</td>\n",
" <td>5</td>\n",
" <td>327134</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6</th>\n",
" <td>0.125197</td>\n",
" <td>0.125026</td>\n",
" <td>0.125111</td>\n",
" <td>6</td>\n",
" <td>327917</td>\n",
" </tr>\n",
" <tr>\n",
" <th>7</th>\n",
" <td>0.125905</td>\n",
" <td>0.125618</td>\n",
" <td>0.125761</td>\n",
" <td>7</td>\n",
" <td>328098</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" precision recall f1-score labels support\n",
"0 0.124620 0.124977 0.124798 0 327051\n",
"1 0.125462 0.124929 0.125195 1 328980\n",
"2 0.125060 0.125648 0.125353 2 326015\n",
"3 0.124437 0.124294 0.124365 3 328392\n",
"4 0.126111 0.125953 0.126032 4 327853\n",
"5 0.124560 0.124909 0.124734 5 327134\n",
"6 0.125197 0.125026 0.125111 6 327917\n",
"7 0.125905 0.125618 0.125761 7 328098"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"pd.DataFrame(fast_stats.stats(y_true, y_pred))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 with Fil",
"display_name": ".venv",
"language": "python",
"name": "filprofile"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
11 changes: 7 additions & 4 deletions fast_stats/iou.py
@@ -4,6 +4,7 @@
import numpy as np

from ._fast_stats_ext import _binary_f1_score_reqs
from .exceptions import ShapeError

Result = Union[None, float]

@@ -29,7 +30,7 @@ def iou(
array2: np.ndarray,
zero_division: ZeroDivision = ZeroDivision.NONE,
) -> Result:
"""Calculation for IoU
"""Calculation for IoU (Intersection over Union) for binary arrays.

Args:
array1 (np.ndarray): array of 0/1 values (must be bool or int types)
Expand All @@ -38,13 +39,15 @@ def iou(
Returns:
Result: None or float depending on values and zero division
"""
assert array1.shape == array2.shape, "y_true and y_pred must be same shape"
assert all(
if not all(
[
isinstance(array1, np.ndarray),
isinstance(array2, np.ndarray),
]
), "y_true and y_pred must be numpy arrays"
):
raise TypeError("y_true and y_pred must be numpy arrays")
if array1.shape != array2.shape:
raise ShapeError("y_true and y_pred must be same shape")
zero_division = ZeroDivision(zero_division)

tp, tp_fp, tp_fn = _binary_f1_score_reqs(array1, array2)
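With this change, iou validates its inputs with real exceptions instead of asserts: non-array inputs raise TypeError and mismatched shapes raise the library's ShapeError. A small sketch of the new behaviour, assuming iou is imported from the fast_stats.iou module and that bool arrays are accepted as the docstring states:

import numpy as np

from fast_stats.iou import iou
from fast_stats.exceptions import ShapeError

a = np.array([1, 0, 1, 1], dtype=bool)
b = np.array([1, 1, 0, 1], dtype=bool)
print(iou(a, b))  # 0.5 for these inputs: 2 overlapping positives out of 4 in the union

try:
    iou(a.tolist(), b)  # non-array input is now rejected with TypeError
except TypeError as err:
    print("TypeError:", err)

try:
    iou(a, b[:2])  # shape mismatch now raises ShapeError rather than failing an assert
except ShapeError as err:
    print("ShapeError:", err)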
5 changes: 5 additions & 0 deletions fast_stats/multiclass.py
@@ -282,4 +282,9 @@ def f1_from_ext(x, y, z):
{"f1-score": np.nanmean(f1_from_ext(x[:, 0], x[:, 1], x[:, 2])).item()}
)

# for none average add labels and support
if average == AverageType.NONE:
stats.update({"labels": labels})
stats.update({"support": x[:, 2]}) # support total y_true (TP + FN)

return stats
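When no averaging is applied, the returned dict now carries the class labels and their support (TP + FN, i.e. the true count per class) alongside the per-class metrics. A hypothetical call mirroring the "perfect" case in the updated tests below (the dtype is an assumption):

import numpy as np
import fast_stats

y_true = np.array([1, 2, 3, 1, 2, 3], dtype=np.uint64)
y_pred = y_true.copy()

result = fast_stats.stats(y_true, y_pred)  # un-averaged per-class stats, as in the example notebook
# Roughly what the "perfect" test case expects:
# {'precision': array([1., 1., 1.]),
#  'recall': array([1., 1., 1.]),
#  'f1-score': array([1., 1., 1.]),
#  'labels': array([1, 2, 3]),
#  'support': array([2, 2, 2])}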
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "fast-stats"
version = "1.3.1"
version = "1.3.2"
description = "A fast and simple library for calculating basic statistics"
readme = "README.md"
license = {text="Apache 2.0"}
6 changes: 6 additions & 0 deletions tests/test_multiclass.py
@@ -453,6 +453,8 @@ def test_f1_score(y_true, y_pred, kwargs, expected):
"precision": np.array([1.0, 1.0, 1.0]),
"recall": np.array([1.0, 1.0, 1.0]),
"f1-score": np.array([1.0, 1.0, 1.0]),
"labels": np.array([1, 2, 3]),
"support": np.array([2, 2, 2]),
},
), # perfect
(
@@ -483,6 +485,8 @@ def test_f1_score(y_true, y_pred, kwargs, expected):
"precision": np.array([0.5, 0.5, 0.5]),
"recall": np.array([0.5, 0.5, 0.5]),
"f1-score": np.array([0.5, 0.5, 0.5]),
"labels": np.array([1, 2, 3]),
"support": np.array([2, 2, 2]),
},
), # 50%
(
@@ -513,6 +517,8 @@ def test_f1_score(y_true, y_pred, kwargs, expected):
"precision": np.array([0.5, 0.5]),
"recall": np.array([0.5, 0.5]),
"f1-score": np.array([0.5, 0.5]),
"labels": np.array([1, 2]),
"support": np.array([2, 2]),
},
), # 50% subset
(
