Graders

The graders module contains all the grader classes for evaluating student work.

BaseGrader

Bases: object

Base class for all graders.

Parameters:

- submission (Any, required): Student's submission object.
- answer (Any, required): Correct answer object.
- points (int or float, default 1): Total point value awarded if the submission is correct. DO NOT CHANGE IF YOU PLAN TO USE ON WQET PLATFORM.
- score (int or float, default 0): Student's current score. Defaults to 0 because the submission has yet to be graded.
- passed (bool, default False): Whether the student's score is equal to or greater than the possible points. Defaults to False because the submission has yet to be graded.
- comment (str, default ''): Feedback on the student's submission. Defaults to an empty string because the submission has yet to be graded. Note that you can use Markdown syntax.
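
The sketch below shows how a grader might be instantiated. It is illustrative only and assumes the module is importable as grading_tools.graders; the constructor's type check raises a TypeError when the submission and answer types differ.

from grading_tools.graders import BaseGrader

# Minimal sketch: submission and answer must share a type,
# otherwise the constructor raises TypeError.
grader = BaseGrader(submission=[1, 2, 3], answer=[1, 2, 3])
print(grader.points)   # 1
print(grader.score)    # 0
print(grader.passed)   # False
print(grader.comment)  # ''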
Source code in grading_tools/graders.py
class BaseGrader(object):
    """Base class for all graders.

    Args:
        submission: Student's submission object.
        answer: Correct answer object.
        points (int or float, optional): Total point value awarded if submission
            is correct. Defaults to 1. DO NOT CHANGE IF YOU PLAN TO USE ON WQET
            PLATFORM.
        score (int or float, optional): Student's current score. Default is 0
            because submission has yet to be graded.
        passed (bool, optional): Whether student's score is equal to or greater
            than possible points. Default is False because submission has yet to
            be graded.
        comment (str, optional): Feedback on student's submission. Default is an
            empty string because submission has yet to be graded. Note that you
            can use Markdown syntax.
    """

    def __init__(
        self,
        submission: Any,
        answer: Any,
        points: int = 1,
        score: int = 0,
        passed: bool = False,
        comment: str = "",
    ):
        self.answer = answer
        self.comment = comment
        self.passed = passed
        self.points = points
        self.score = score
        self.submission = submission

        if not isinstance(
            self.submission, type(self.answer)
        ) and not isinstance(self.submission, tempfile._TemporaryFileWrapper):
            raise TypeError(
                f"Your submission needs to be type {type(self.answer).__name__}, "
                f"not type {type(self.submission).__name__}."
            )

    def __repr__(self) -> str:
        rep_dict = {
            "points": self.points,
            "submission dtype": type(self.submission),
            "answer dtype": type(self.answer),
            "current score": self.score,
            "passed": self.passed,
            "comment": self.comment,
        }

        return pprint.pformat(rep_dict, indent=2, sort_dicts=False)

    def positive_comment(self) -> None:
        """Generate positive comment.

        Assigns a randomly-chosen comment to the `comment` attribute of the grader
        object.

        Returns:
            None
        """
        comments = [
            "πŸ₯³",
            "Awesome work.",
            "Boom! You got it.",
            "Correct.",
            "Excellent work.",
            "Excellent! Keep going.",
            "Good work!",
            "Party time! πŸŽ‰πŸŽ‰πŸŽ‰",
            "Python master 😁",
            "Yes! Keep on rockin'. 🎸" "That's right.",
            "That's the right answer. Keep it up!",
            "Very impressive.",
            "Way to go!",
            "Wow, you're making great progress.",
            "Yes! Great problem solving.",
            "Yes! Your hard work is paying off.",
            "You = coding πŸ₯·",
            "You got it. Dance party time! πŸ•ΊπŸ’ƒπŸ•ΊπŸ’ƒ",
            "You're making this look easy. πŸ˜‰",
            "Yup. You got it.",
        ]

        self.comment = random.choice(comments)

    def add_to_score(self, points: Union[int, float] = 1) -> None:
        """Increment score.

        This method adds points to the grader's `score` attribute and checks if the `score`
        meets the threshold specified by `points`. If the threshold is met, a positive comment
        is added to the `comment` attribute.

        Args:
            points (int or float, optional): Number of points to add to the `score` attribute.

        Returns:
            None
        """
        self.score += points
        self.passed = self.score >= self.points
        if self.passed:
            self.positive_comment()

    def update_comment(self, new_comment: str, *args) -> None:
        """Change grader ``comment``.

        Change the comment of the grader object.

        Args:
            new_comment (str): Text of the new comment. Markdown syntax can be used.
            *args (str): Additional comments to add to the new_comment string.

        Returns:
            None

        """
        new_comment = " ".join([new_comment] + list(args))
        self.comment = new_comment

    def return_feedback(self, html=True) -> dict[str, Union[int, bool, str]]:
        """Return feedback to student.

        Parameters
        ----------
        html : bool, default=True
            If ``True``, converts comment text to HTML. This is only important if
            the comment has been written using [Markdown syntax]
            (https://daringfireball.net/projects/markdown/).

        Returns
        -------
        feedback_dict : dict
            Dictionary has three keys: ``{"score": self.score, "passed": self.passed,
            "comment": comment}``

        """
        if html:
            comment = markdown.markdown(self.comment)
        else:
            comment = self.comment
        feedback_dict = {
            "score": self.score,
            "passed": self.passed,
            "comment": comment,
        }

        if hasattr(self, "diff_path"):
            feedback_dict["image"] = self.diff_path

        return feedback_dict

add_to_score(points=1)

Increment score.

This method adds points to the grader's score attribute and checks if the score meets the threshold specified by points. If the threshold is met, a positive comment is added to the comment attribute.

Parameters:

- points (int or float, default 1): Number of points to add to the score attribute.

Returns:

- None
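
As an illustrative sketch (assuming the grader's default of points=1), a single call to add_to_score reaches the threshold, marks the submission as passed, and sets a positive comment:

from grading_tools.graders import BaseGrader

grader = BaseGrader(submission=[1, 2], answer=[1, 2])  # points defaults to 1
grader.add_to_score()   # score becomes 1, which meets the 1-point threshold
print(grader.passed)    # True
print(grader.comment)   # a randomly chosen positive comment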

Source code in grading_tools/graders.py
def add_to_score(self, points: Union[int, float] = 1) -> None:
    """Increment score.

    This method adds points to the grader's `score` attribute and checks if the `score`
    meets the threshold specified by `points`. If the threshold is met, a positive comment
    is added to the `comment` attribute.

    Args:
        points (int or float, optional): Number of points to add to the `score` attribute.

    Returns:
        None
    """
    self.score += points
    self.passed = self.score >= self.points
    if self.passed:
        self.positive_comment()

positive_comment()

Generate positive comment.

Assigns a randomly-chosen comment to the comment attribute of the grader object.

Returns:

- None
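
A minimal sketch of calling the method directly (normally add_to_score does this for you when the point threshold is met):

from grading_tools.graders import BaseGrader

grader = BaseGrader(submission="a", answer="a")
grader.positive_comment()
print(grader.comment)  # e.g. "Way to go!" (chosen at random)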

Source code in grading_tools/graders.py
def positive_comment(self) -> None:
    """Generate positive comment.

    Assigns a randomly-chosen comment to the `comment` attribute of the grader
    object.

    Returns:
        None
    """
    comments = [
        "πŸ₯³",
        "Awesome work.",
        "Boom! You got it.",
        "Correct.",
        "Excellent work.",
        "Excellent! Keep going.",
        "Good work!",
        "Party time! πŸŽ‰πŸŽ‰πŸŽ‰",
        "Python master 😁",
        "Yes! Keep on rockin'. 🎸" "That's right.",
        "That's the right answer. Keep it up!",
        "Very impressive.",
        "Way to go!",
        "Wow, you're making great progress.",
        "Yes! Great problem solving.",
        "Yes! Your hard work is paying off.",
        "You = coding πŸ₯·",
        "You got it. Dance party time! πŸ•ΊπŸ’ƒπŸ•ΊπŸ’ƒ",
        "You're making this look easy. πŸ˜‰",
        "Yup. You got it.",
    ]

    self.comment = random.choice(comments)

return_feedback(html=True)

Return feedback to student.

Parameters

html : bool, default=True
    If True, converts comment text to HTML. This is only important if the comment has been written using Markdown syntax (https://daringfireball.net/projects/markdown/).

Returns

feedback_dict : dict
    Dictionary has three keys: {"score": self.score, "passed": self.passed, "comment": comment}
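
A minimal sketch of building the feedback dict, assuming the comment was written in Markdown:

from grading_tools.graders import BaseGrader

grader = BaseGrader(submission=42, answer=42)
grader.update_comment("**Nice work** on this task.")
feedback = grader.return_feedback(html=True)
print(feedback["passed"])   # False (nothing has been graded yet)
print(feedback["comment"])  # "<p><strong>Nice work</strong> on this task.</p>"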

Source code in grading_tools/graders.py
def return_feedback(self, html=True) -> dict[str, Union[int, bool, str]]:
    """Return feedback to student.

    Parameters
    ----------
    html : bool, default=True
        If ``True``, converts comment text to HTML. This is only important if
        the comment has been written using [Markdown syntax]
        (https://daringfireball.net/projects/markdown/).

    Returns
    -------
    feedback_dict : dict
        Dictionary has three keys: ``{"score": self.score, "passed": self.passed,
        "comment": comment}``

    """
    if html:
        comment = markdown.markdown(self.comment)
    else:
        comment = self.comment
    feedback_dict = {
        "score": self.score,
        "passed": self.passed,
        "comment": comment,
    }

    if hasattr(self, "diff_path"):
        feedback_dict["image"] = self.diff_path

    return feedback_dict

update_comment(new_comment, *args)

Change grader comment.

Change the comment of the grader object.

Parameters:

- new_comment (str, required): Text of the new comment. Markdown syntax can be used.
- *args (str): Additional comments to add to the new_comment string.

Returns:

- None
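
A minimal sketch showing how additional strings are joined onto the new comment:

from grading_tools.graders import BaseGrader

grader = BaseGrader(submission=1.0, answer=2.0)
grader.update_comment("Not quite.", "Check your arithmetic.", "Try again!")
print(grader.comment)  # "Not quite. Check your arithmetic. Try again!"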

Source code in grading_tools/graders.py
def update_comment(self, new_comment: str, *args) -> None:
    """Change grader ``comment``.

    Change the comment of the grader object.

    Args:
        new_comment (str): Text of the new comment. Markdown syntax can be used.
        *args (str): Additional comments to add to the new_comment string.

    Returns:
        None

    """
    new_comment = " ".join([new_comment] + list(args))
    self.comment = new_comment

Evaluation

Data class for managing feedback passed to student machine.

Source code in grading_tools/graders.py
class Evaluation:
    """Data class for managing feedback passed to student machine."""

    def __init__(
        self,
        eval_code: str,
        points_awarded: Union[int, float] = 0,
        eval_context: Union[EvaluationContext, None] = None,
    ) -> None:
        """Istantiate ``Evaluation class``

        Args:
            eval_code (str): Eight-character error code passed to feedback module. If ``0``, student passes.
            points_awarded (Union[int, float], optional): Points awarded to student submission. Defaults to 0.
            eval_context (Union[EvaluationContext, None], optional): Additional context needed to generate feedback. Defaults to None.
        """
        self.eval_code = str(eval_code)
        self.eval_context = eval_context
        self.points_awarded = points_awarded
        self.__feedback = Feedback(self)

    @property
    def student_feedback(self):
        return self.__feedback.__dict__

__init__(eval_code, points_awarded=0, eval_context=None)

Instantiate Evaluation class.

Parameters:

- eval_code (str, required): Eight-character error code passed to the feedback module. If 0, the student passes.
- points_awarded (Union[int, float], default 0): Points awarded to the student submission.
- eval_context (Union[EvaluationContext, None], default None): Additional context needed to generate feedback.
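
A minimal sketch, assuming Evaluation and EvaluationContext are importable from grading_tools.graders and that the eval codes shown (borrowed from MatplotGrader below) are known to the feedback module:

from grading_tools.graders import Evaluation, EvaluationContext

# A failing evaluation with extra context for the feedback message.
failing = Evaluation(
    eval_code="MPA01E02",
    eval_context=EvaluationContext(object_name="scatter plot"),
)

# Code "0" means the submission passes and points are awarded.
passing = Evaluation(eval_code="0", points_awarded=1)
print(passing.student_feedback)  # dict built by the Feedback class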
Source code in grading_tools/graders.py
def __init__(
    self,
    eval_code: str,
    points_awarded: Union[int, float] = 0,
    eval_context: Union[EvaluationContext, None] = None,
) -> None:
    """Istantiate ``Evaluation class``

    Args:
        eval_code (str): Eight-character error code passed to feedback module. If ``0``, student passes.
        points_awarded (Union[int, float], optional): Points awarded to student submission. Defaults to 0.
        eval_context (Union[EvaluationContext, None], optional): Additional context needed to generate feedback. Defaults to None.
    """
    self.eval_code = str(eval_code)
    self.eval_context = eval_context
    self.points_awarded = points_awarded
    self.__feedback = Feedback(self)

MatplotGrader

Bases: BaseGrader

Grader for evaluating plots made with Matplotlib, pandas, or seaborn.

Feedback mechanism currently works differently than in other classes in this module. Instead of English feedback being generated in the class, it uses the Evaluation class.

Borrows heavily from matplotcheck: https://github.com/earthlab/matplotcheck.
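
A minimal usage sketch, assuming a matplotlib Axes for both the answer and the submission (the Evaluation/Feedback machinery determines the exact contents of the returned dict):

import matplotlib.pyplot as plt
from grading_tools.graders import MatplotGrader

# Build the answer axis and a hypothetical student submission axis.
_, ans_ax = plt.subplots()
ans_ax.scatter([1, 2, 3], [4, 5, 6])
ans_ax.set_title("Price vs. Area")

_, sub_ax = plt.subplots()
sub_ax.scatter([1, 2, 3], [4, 5, 6])
sub_ax.set_title("Price vs. Area")

g = MatplotGrader(submission=sub_ax, answer=ans_ax)
feedback = g.grade_plot(plot_type="scatter", check_xlabel=False, check_ylabel=False)
print(feedback)  # Evaluation-based feedback dict (eval code "0" here)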

Source code in grading_tools/graders.py
class MatplotGrader(BaseGrader):
    """Grader for evaluating plots made with Matplotlib, pandas, or seaborn.

    Feedback mechanism currently works differently than in other classes in this module.
    Instead of English feedback being generated in the class, it uses the ``Evaluation`` class.

    Borrows heavily from ``matplotcheck``: https://github.com/earthlab/matplotcheck.
    """

    def __is_scatter(self) -> bool:
        """Boolean expressing if ax contains scatter points.

        If plot contains scatter points as well as lines, the function will return
        True. From ``matplotcheck``.

        Returns
        -------
        is_scatter : boolean
            True if Axes ax is a scatter plot, False if not
        """
        if self.submission.collections:
            return True
        elif self.submission.lines:
            for line in self.submission.lines:
                if (
                    line.get_linestyle() == "None"
                    or line.get_linewidth() == "None"
                    or line.get_linewidth() == 0
                ):
                    return True
        return False

    def __is_line(self) -> bool:
        """Boolean expressing if ax contains scatter points.

        If plot contains scatter points and lines return True.
        From ``matplotcheck``.

        Returns
        -------
        is_line : boolean
            True if Axes ax is a line plot, False if not
        """
        if self.submission.lines:
            for line in self.submission.lines:
                if (
                    not line.get_linestyle()
                    or not line.get_linewidth()
                    or line.get_linewidth() > 0
                ):
                    return True

    def __is_bar(self) -> bool:
        """Boolean expressing if ax contains bar containers.

        Returns
        -------
        is_bar : boolean
            True if Axes ax is a bar chart or histogram, False if not
        """
        # Cast to bool so the return value matches the annotation.
        return bool(self.submission.containers)

    def get_xy(
        self, ax: Literal["submission", "answer"] = "submission", points_only=False
    ) -> pd.DataFrame:
        """Get (x, y) data from plot.

        Returns a pandas dataframe with columns "x" and "y" holding the x
        and y coords on the axis. From ``matplotcheck``.

        Parameters
        ----------
        ax : {"submission", "answer"}, default="submission"
            Which Axes to pull data from: the student's submission or the answer.
        points_only : boolean
            Set ``True`` to check only points, set ``False`` to check all data
            on plot.

        Returns
        -------
        df : pandas.DataFrame
            Pandas dataframe with columns "x" and "y" containing the x and y
            coords of each point on the axis.
        """
        if ax == "submission":
            ax = self.submission
        else:
            ax = self.answer

        if points_only:
            xy_coords = [
                val
                for line in ax.lines
                if (
                    line.get_linestyle() == "None"
                    or line.get_linewidth() == "None"
                )
                for val in line.get_xydata()
            ]  # .plot()
            xy_coords += [
                val
                for c in ax.collections
                if not isinstance(c, matplotlib.collections.PolyCollection)
                for val in c.get_offsets()
            ]  # .scatter()

        else:
            xy_coords = [
                val for line in ax.lines for val in line.get_xydata()
            ]  # .plot()
            xy_coords += [
                val for c in ax.collections for val in c.get_offsets()
            ]  # .scatter()
            xy_coords += [
                [(p.get_x() + (p.get_width() / 2)), p.get_height()]
                for p in ax.patches
            ]  # .bar()

        xy_data = pd.DataFrame(data=xy_coords, columns=["x", "y"]).dropna()

        # crop to limits
        lims = ax.get_xlim()
        xy_data = xy_data[xy_data["x"] >= lims[0]]
        xy_data = xy_data[xy_data["x"] <= lims[1]].reset_index(drop=True)

        # sort
        xy_data.sort_values(by=["x", "y"], inplace=True)

        return xy_data

    def grade_plot(
        self,
        plot_type: Literal[
            "acf", "bar", "barh", "box", "hist", "line", "pacf", "scatter"
        ],
        check_title=True,
        check_xlabel=True,
        check_ylabel=True,
        check_xticks=False,
        check_yticks=False,
        match_data=True,
        tolerance=0.01,
        data_source: Union[Literal["answer"], pd.DataFrame] = "answer",
    ) -> Union[dict, None]:
        """Grade submitted plot (axis) against answer.

        Args:
            plot_type (Literal[ "acf", "bar", "barh", "box", "hist", "line", "pacf", "scatter" ]): Plot type that submission should be.
            check_title (bool, optional): Whether to check submission axis title against answer. Defaults to True.
            check_xlabel (bool, optional): Whether to check submission x-axis label against answer. Defaults to True.
            check_ylabel (bool, optional): Whether to check submission y-axis label against answer. Defaults to True.
            check_xticks (bool, optional): Whether to check submission x-axis ticks against answer. Defaults to False.
            check_yticks (bool, optional): Whether to check submission y-axis ticks against answer. Defaults to False.
            match_data (bool, optional): Whether to check submission data points against answer. Defaults to True.
            tolerance (float, optional): Tolerance when checking submission data points or data source against answer. Defaults to 0.01.
            data_source (Union[Literal["answer"], pd.DataFrame], optional): If ``match_data`` set to ``True``, you can either set this parameter to ``"answer"`` to check submission against answer plot. Otherwise, you can supply a DataFrame to match against submission. Defaults to "answer".

        Returns:
            dict : Feedback dict to be passed to student machine.
        """
        if plot_type == "scatter" and not self.__is_scatter():
            self.evaluation = Evaluation(
                eval_code="MPA01E02",
                eval_context=EvaluationContext(object_name="scatter plot"),
            )
            return self.evaluation.student_feedback

        # Box plots in matplotlib consist of lines
        if plot_type in ("line", "box") and not self.__is_line():
            plot_type_dict = {"line": "line plot", "box": "boxplot"}
            plot_type_name = plot_type_dict.get(plot_type, "")
            self.evaluation = Evaluation(
                eval_code="MPA01E02",
                eval_context=EvaluationContext(object_name=plot_type_name),
            )
            return self.evaluation.student_feedback

        if plot_type in ("bar", "hist") and not self.__is_bar():
            plot_type_dict = {
                "bar": "bar chart",
                "barh": "horizontal bar chart",
                "hist": "histogram",
            }
            plot_type_name = plot_type_dict.get(plot_type, "")
            self.evaluation = Evaluation(
                eval_code="MPA01E02",
                eval_context=EvaluationContext(object_name=plot_type_name),
            )
            return self.evaluation.student_feedback

        if plot_type in ("acf", "pacf") and not (
            self.__is_scatter() and self.__is_line()
        ):
            plot_type_dict = {
                "acf": "ACF",
                "pacf": "PACF",
            }
            plot_type_name = plot_type_dict.get(plot_type, "")
            self.evaluation = Evaluation(
                eval_code="MPA01E02",
                eval_context=EvaluationContext(object_name=plot_type_name),
            )
            return self.evaluation.student_feedback

        if check_title:
            # Missing title
            if not self.submission.get_title() and self.answer.get_title():
                self.evaluation = Evaluation(eval_code="MPA02E01")
                return self.evaluation.student_feedback
            # Incorrect title
            if self.submission.get_title() != self.answer.get_title():
                self.evaluation = Evaluation(
                    eval_code="MPA02E03",
                    eval_context=EvaluationContext(
                        actual_object_val=self.submission.get_title(),
                        expected_object_val=self.answer.get_title(),
                    ),
                )
                return self.evaluation.student_feedback

        if check_xlabel:
            # Missing x-axis label
            if not self.submission.get_xlabel() and self.answer.get_xlabel():
                self.evaluation = Evaluation(eval_code="MPA03E01")
                return self.evaluation.student_feedback
            # Incorrect x-axis label
            if self.submission.get_xlabel() != self.answer.get_xlabel():
                self.evaluation = Evaluation(
                    eval_code="MPA03E03",
                    eval_context=EvaluationContext(
                        actual_object_val=self.submission.get_xlabel(),
                        expected_object_val=self.answer.get_xlabel(),
                    ),
                )
                return self.evaluation.student_feedback

        if check_ylabel:
            # Missing y-axis label
            if not self.submission.get_ylabel() and self.answer.get_ylabel():
                self.evaluation = Evaluation(eval_code="MPA04E01")
                return self.evaluation.student_feedback
            # Incorrect y-axis label
            if self.submission.get_ylabel() != self.answer.get_ylabel():
                self.evaluation = Evaluation(
                    eval_code="MPA04E03",
                    eval_context=EvaluationContext(
                        actual_object_val=self.submission.get_ylabel(),
                        expected_object_val=self.answer.get_ylabel(),
                    ),
                )
                return self.evaluation.student_feedback

        if match_data and data_source == "answer":
            pdg = PandasGrader(
                submission=self.get_xy(ax="submission").astype(float),
                answer=self.get_xy(ax="answer").astype(float),
            )
            if not pdg.grade_df(tolerance=tolerance, return_bool=True):
                self.evaluation = Evaluation(eval_code="MPA05E04")
                return self.evaluation.student_feedback

        # Everything is correct
        self.evaluation = Evaluation(eval_code="0", points_awarded=self.points)
        return self.evaluation.student_feedback

__is_bar()

Boolean expressing if ax contains bar containers.

Returns

is_bar : boolean
    True if Axes ax is a bar chart or histogram, False if not

Source code in grading_tools/graders.py
def __is_bar(self) -> bool:
    """Boolean expressing if ax contains bar containers.

    Returns
    -------
    is_bar : boolean
        True if Axes ax is a bar chart or histogram, False if not
    """
    # Cast to bool so the return value matches the annotation.
    return bool(self.submission.containers)

__is_line()

Boolean expressing if ax contains lines.

If plot contains lines as well as scatter points, returns True. From matplotcheck.

Returns

is_line : boolean
    True if Axes ax is a line plot, False if not

Source code in grading_tools/graders.py
def __is_line(self) -> bool:
    """Boolean expressing if ax contains scatter points.

    If plot contains scatter points and lines return True.
    From ``matplotcheck``.

    Returns
    -------
    is_line : boolean
        True if Axes ax is a line plot, False if not
    """
    if self.submission.lines:
        for line in self.submission.lines:
            if (
                not line.get_linestyle()
                or not line.get_linewidth()
                or line.get_linewidth() > 0
            ):
                return True

__is_scatter()

Boolean expressing if ax contains scatter points.

If plot contains scatter points as well as lines, the function will return True. From matplotcheck.

Returns

is_scatter : boolean
    True if Axes ax is a scatter plot, False if not

Source code in grading_tools/graders.py
def __is_scatter(self) -> bool:
    """Boolean expressing if ax contains scatter points.

    If plot contains scatter points as well as lines, the function will return
    True. From ``matplotcheck``.

    Returns
    -------
    is_scatter : boolean
        True if Axes ax is a scatter plot, False if not
    """
    if self.submission.collections:
        return True
    elif self.submission.lines:
        for line in self.submission.lines:
            if (
                line.get_linestyle() == "None"
                or line.get_linewidth() == "None"
                or line.get_linewidth() == 0
            ):
                return True
    return False

get_xy(ax='submission', points_only=False)

Get (x, y) data from plot.

Returns a pandas dataframe with columns "x" and "y" holding the x and y coords on the axis. From matplotcheck.

Parameters

ax : {"submission", "answer"}, default="submission"
    Which Axes to pull data from: the student's submission or the answer.
points_only : boolean
    Set True to check only points, set False to check all data on plot.

Returns

df : pandas.DataFrame
    Pandas dataframe with columns "x" and "y" containing the x and y coords of each point on the axis.
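
A minimal sketch of extracting plotted data, assuming a simple line plot on both axes (output shown approximately):

import matplotlib.pyplot as plt
from grading_tools.graders import MatplotGrader

_, ans_ax = plt.subplots()
ans_ax.plot([0, 1, 2], [10, 20, 30])

_, sub_ax = plt.subplots()
sub_ax.plot([0, 1, 2], [10, 20, 30])

g = MatplotGrader(submission=sub_ax, answer=ans_ax)
xy = g.get_xy(ax="submission")
print(xy)
#      x     y
# 0  0.0  10.0
# 1  1.0  20.0
# 2  2.0  30.0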

Source code in grading_tools/graders.py
def get_xy(
    self, ax: Literal["submission", "answer"] = "submission", points_only=False
) -> pd.DataFrame:
    """Get (x, y) data from plot.

    Returns a pandas dataframe with columns "x" and "y" holding the x
    and y coords on the axis. From ``matplotcheck``.

    Parameters
    ----------
    ax : {"submission", "answer"}, default="submission"
        Which Axes to pull data from: the student's submission or the answer.
    points_only : boolean
        Set ``True`` to check only points, set ``False`` to check all data
        on plot.

    Returns
    -------
    df : pandas.DataFrame
        Pandas dataframe with columns "x" and "y" containing the x and y
        coords of each point on the axis.
    """
    if ax == "submission":
        ax = self.submission
    else:
        ax = self.answer

    if points_only:
        xy_coords = [
            val
            for line in ax.lines
            if (
                line.get_linestyle() == "None"
                or line.get_linewidth() == "None"
            )
            for val in line.get_xydata()
        ]  # .plot()
        xy_coords += [
            val
            for c in ax.collections
            if not isinstance(c, matplotlib.collections.PolyCollection)
            for val in c.get_offsets()
        ]  # .scatter()

    else:
        xy_coords = [
            val for line in ax.lines for val in line.get_xydata()
        ]  # .plot()
        xy_coords += [
            val for c in ax.collections for val in c.get_offsets()
        ]  # .scatter()
        xy_coords += [
            [(p.get_x() + (p.get_width() / 2)), p.get_height()]
            for p in ax.patches
        ]  # .bar()

    xy_data = pd.DataFrame(data=xy_coords, columns=["x", "y"]).dropna()

    # crop to limits
    lims = ax.get_xlim()
    xy_data = xy_data[xy_data["x"] >= lims[0]]
    xy_data = xy_data[xy_data["x"] <= lims[1]].reset_index(drop=True)

    # sort
    xy_data.sort_values(by=["x", "y"], inplace=True)

    return xy_data

grade_plot(plot_type, check_title=True, check_xlabel=True, check_ylabel=True, check_xticks=False, check_yticks=False, match_data=True, tolerance=0.01, data_source='answer')

Grade submitted plot (axis) against answer.

Parameters:

- plot_type (Literal['acf', 'bar', 'barh', 'box', 'hist', 'line', 'pacf', 'scatter'], required): Plot type that the submission should be.
- check_title (bool, default True): Whether to check the submission's axis title against the answer.
- check_xlabel (bool, default True): Whether to check the submission's x-axis label against the answer.
- check_ylabel (bool, default True): Whether to check the submission's y-axis label against the answer.
- check_xticks (bool, default False): Whether to check the submission's x-axis ticks against the answer.
- check_yticks (bool, default False): Whether to check the submission's y-axis ticks against the answer.
- match_data (bool, default True): Whether to check the submission's data points against the answer.
- tolerance (float, default 0.01): Tolerance when checking the submission's data points or data source against the answer.
- data_source ("answer" or pd.DataFrame, default "answer"): If match_data is True, set this to "answer" to check the submission against the answer plot, or supply a DataFrame to match against the submission.

Returns:

- dict (Union[dict, None]): Feedback dict to be passed to the student machine.
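
A minimal sketch of a grading run where only the title is wrong; the eval codes mentioned in the comments come from the source below:

import matplotlib.pyplot as plt
from grading_tools.graders import MatplotGrader

_, ans_ax = plt.subplots()
ans_ax.plot([0, 1, 2], [1, 4, 9])
ans_ax.set_title("Squares")

_, sub_ax = plt.subplots()
sub_ax.plot([0, 1, 2], [1, 4, 9])
sub_ax.set_title("Cubes")  # deliberately wrong title

g = MatplotGrader(submission=sub_ax, answer=ans_ax)
feedback = g.grade_plot(plot_type="line", check_xlabel=False, check_ylabel=False)
# feedback reflects eval code "MPA02E03" (incorrect title); fixing the title
# and re-grading would yield code "0" and award the grader's points.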

Source code in grading_tools/graders.py
def grade_plot(
    self,
    plot_type: Literal[
        "acf", "bar", "barh", "box", "hist", "line", "pacf", "scatter"
    ],
    check_title=True,
    check_xlabel=True,
    check_ylabel=True,
    check_xticks=False,
    check_yticks=False,
    match_data=True,
    tolerance=0.01,
    data_source: Union[Literal["answer"], pd.DataFrame] = "answer",
) -> Union[dict, None]:
    """Grade submitted plot (axis) against answer.

    Args:
        plot_type (Literal[ "acf", "bar", "barh", "box", "hist", "line", "pacf", "scatter" ]): Plot type that submission should be.
        check_title (bool, optional): Whether to check submission axis title against answer. Defaults to True.
        check_xlabel (bool, optional): Whether to check submission x-axis label against answer. Defaults to True.
        check_ylabel (bool, optional): Whether to check submission y-axis label against answer. Defaults to True.
        check_xticks (bool, optional): Whether to check submission x-axis ticks against answer. Defaults to False.
        check_yticks (bool, optional): Whether to check submission y-axis ticks against answer. Defaults to False.
        match_data (bool, optional): Whether to check submission data points against answer. Defaults to True.
        tolerance (float, optional): Tolerance when checking submission data points or data source against answer. Defaults to 0.01.
        data_source (Union[Literal["answer"], pd.DataFrame], optional): If ``match_data`` set to ``True``, you can either set this parameter to ``"answer"`` to check submission against answer plot. Otherwise, you can supply a DataFrame to match against submission. Defaults to "answer".

    Returns:
        dict : Feedback dict to be passed to student machine.
    """
    if plot_type == "scatter" and not self.__is_scatter():
        self.evaluation = Evaluation(
            eval_code="MPA01E02",
            eval_context=EvaluationContext(object_name="scatter plot"),
        )
        return self.evaluation.student_feedback

    # Box plots in matplotlib consist of lines
    if plot_type in ("line", "box") and not self.__is_line():
        plot_type_dict = {"line": "line plot", "box": "boxplot"}
        plot_type_name = plot_type_dict.get(plot_type, "")
        self.evaluation = Evaluation(
            eval_code="MPA01E02",
            eval_context=EvaluationContext(object_name=plot_type_name),
        )
        return self.evaluation.student_feedback

    if plot_type in ("bar", "hist") and not self.__is_bar():
        plot_type_dict = {
            "bar": "bar chart",
            "barh": "horizontal bar chart",
            "hist": "histogram",
        }
        plot_type_name = plot_type_dict.get(plot_type, "")
        self.evaluation = Evaluation(
            eval_code="MPA01E02",
            eval_context=EvaluationContext(object_name=plot_type_name),
        )
        return self.evaluation.student_feedback

    if plot_type in ("acf", "pacf") and not (
        self.__is_scatter() and self.__is_line()
    ):
        plot_type_dict = {
            "acf": "ACF",
            "pacf": "PACF",
        }
        plot_type_name = plot_type_dict.get(plot_type, "")
        self.evaluation = Evaluation(
            eval_code="MPA01E02",
            eval_context=EvaluationContext(object_name=plot_type_name),
        )
        return self.evaluation.student_feedback

    if check_title:
        # Missing title
        if not self.submission.get_title() and self.answer.get_title():
            self.evaluation = Evaluation(eval_code="MPA02E01")
            return self.evaluation.student_feedback
        # Incorrect title
        if self.submission.get_title() != self.answer.get_title():
            self.evaluation = Evaluation(
                eval_code="MPA02E03",
                eval_context=EvaluationContext(
                    actual_object_val=self.submission.get_title(),
                    expected_object_val=self.answer.get_title(),
                ),
            )
            return self.evaluation.student_feedback

    if check_xlabel:
        # Missing x-axis label
        if not self.submission.get_xlabel() and self.answer.get_xlabel():
            self.evaluation = Evaluation(eval_code="MPA03E01")
            return self.evaluation.student_feedback
        # Incorrect x-axis label
        if self.submission.get_xlabel() != self.answer.get_xlabel():
            self.evaluation = Evaluation(
                eval_code="MPA03E03",
                eval_context=EvaluationContext(
                    actual_object_val=self.submission.get_xlabel(),
                    expected_object_val=self.answer.get_xlabel(),
                ),
            )
            return self.evaluation.student_feedback

    if check_ylabel:
        # Missing y-axis label
        if not self.submission.get_ylabel() and self.answer.get_ylabel():
            self.evaluation = Evaluation(eval_code="MPA04E01")
            return self.evaluation.student_feedback
        # Incorrect y-axis label
        if self.submission.get_ylabel() != self.answer.get_ylabel():
            self.evaluation = Evaluation(
                eval_code="MPA04E03",
                eval_context=EvaluationContext(
                    actual_object_val=self.submission.get_ylabel(),
                    expected_object_val=self.answer.get_ylabel(),
                ),
            )
            return self.evaluation.student_feedback

    if match_data and data_source == "answer":
        pdg = PandasGrader(
            submission=self.get_xy(ax="submission").astype(float),
            answer=self.get_xy(ax="answer").astype(float),
        )
        if not pdg.grade_df(tolerance=tolerance, return_bool=True):
            self.evaluation = Evaluation(eval_code="MPA05E04")
            return self.evaluation.student_feedback

    # Everything is correct
    self.evaluation = Evaluation(eval_code="0", points_awarded=self.points)
    return self.evaluation.student_feedback

PandasGrader

Bases: BaseGrader

Grader for evaluating objects from the pandas library (https://pandas.pydata.org/docs/index.html).

Source code in grading_tools/graders.py
class PandasGrader(BaseGrader):
    """Grader for evaluating objects from `pandas <https://pandas.pydata.org/docs/index.html>`_. library."""

    def __init__(self, submission, answer, points=1):
        super().__init__(submission, answer, points)

    # https://tinyurl.com/y3sg2umv
    @staticmethod
    def _clean_assert_message(message: AssertionError) -> str:
        """Make feedback student-friendly.

        Helper function used by ``grade_df`` and ``grade_series``.
        """
        message = str(message)
        s = ""

        if "DataFrame" in message:
            if 'Attribute "names"' in message:
                s = "The index name of your DataFrame doesn't"
            elif "index values" in message:
                s = "The index values of your DataFrame don't"
            elif "index classes" in message:
                s = "The class type of your DataFrame index doesn't"
            # These last two clauses look wrong, but they're right
            elif "columns values" in message:
                s = "The column names of your DataFrame don't"
            elif "column name" in message:
                p = re.compile(r'name=(".+?")')
                col = p.search(message).group(1)
                s = f"The values in the `{col}` column in your DataFrame don't"

        if "Series.index" in message:
            if 'Attribute "names"' in message:
                s = "The index name of your Series doesn't"
            elif "index values" in message:
                s = "The index values of your Series don't"
            elif "dtype" in message:
                s = "The dtype of your Series index doesn't"

        if message.startswith("Series are"):
            if 'Attribute "name"' in message:
                s = "The name of your Series doesn't"
            if "Series values" in message:
                s = "The values in your Series don't"

        if s == "":
            raise ValueError(
                "Pandas Assertion error doesn't have parseable text."
            )

        return s + " match the expected result."

    def grade_df(
        self,
        sort_values_by=None,
        match_index=True,
        match_index_col_order=False,
        match_colnames_only=False,
        tolerance=0.01,
        return_bool=False,
    ):
        """Evaluate submitted DataFrame.

        Parameters
        ----------
        sort_values_by : str, list, default=None
            If specified, submission and answer will be sorted by that column (or list
            of columns) before evaluation. If ``"all_cols"``, DataFrame will be sorted
            by a list of all columns (ordered alphabetically).

        match_index : bool, default=True
            Whether or not to consider the index of the submitted DataFrame. If
            ``False``, index is reset before it's evaluated.

        match_index_col_order : bool, default=False
            Whether or not to consider the order of the index and columns in the
            submitted DataFrame.

        match_colnames_only : bool, default=False
            If ``True`` only column names will be evaluated, not index labels or
            DataFrame values. Note that the index and column order will always be ignored.

        tolerance: int or float, default=0.01
            For numerical values, what is the maximum allowed
            difference between ``submission`` and ``answer``? If ``tolerance=0.0``, values must be
            identical. If ``tolerance=0.1``, values must be within 10% of each
            other (relative to the larger absolute value of the two).

        return_bool : bool, default=False
            Whether to return ``self.passed`` once grading is complete. You'll need this if
            you want to design your own grading workflow beyond the default.

        Examples
        --------
        Here are two DataFrames. The first ``ans_df`` is the expected answer, and the second
        ``sub_df`` is the student submission. Note that both have the same values, but order of the
        indices and columns is different.

        >>> import pandas as pd
        >>> ans_df = pd.DataFrame(
        ...     {"city": ["Puhi", "Napa", "Derby"], "pop": [3, 79, 13]}, index=[16, 14, 4]
        ... )
        >>> sub_df = pd.DataFrame(
        ...     {"pop": [79, 3, 13], "city": ["Napa", "Puhi", "Derby"]}, index=[14, 16, 4]
        ... )
        >>> print(ans_df)
             city  pop
        16   Puhi    3
        14   Napa   79
        4   Derby   13
        >>> print(sub_df)
            pop   city
        14   79   Napa
        16    3   Puhi
        4    13  Derby
        >>> g = PandasGrader(submission=sub_df, answer=ans_df)
        >>> g.grade_df(match_index_col_order=False, return_bool=True)
        True
        >>> g.grade_df(match_index_col_order=True, return_bool=True)
        False
        >>> print(g.comment)
        DataFrame.index are different
        DataFrame.index values are different (66.66667 %)
        [submission]:  Int64Index([14, 16, 4], dtype='int64')
        [answer]: Int64Index([16, 14, 4], dtype='int64')
        """
        if not isinstance(self.submission, pd.DataFrame):
            raise TypeError(
                f"grade_df method can only be used with DataFrames submissions, not {type(self.submission).__name__}."
            )

        if sort_values_by:
            try:
                if sort_values_by == "all_cols":
                    # Sorts df cols alphabetically and then sorts rows by all cols
                    self.submission = self.submission.sort_index(axis=1)
                    self.submission = self.submission.sort_values(
                        by=self.submission.columns.tolist(), axis=0
                    )
                    self.answer = self.answer.sort_index(axis=1)
                    self.answer = self.answer.sort_values(
                        by=self.answer.columns.tolist(), axis=0
                    )
                else:
                    self.submission.sort_values(sort_values_by, inplace=True)
                    self.answer.sort_values(sort_values_by, inplace=True)
            except KeyError:
                raise KeyError(
                    f"Either the submission or answer does not have column(s) {sort_values_by}."
                )

        if not match_index:
            self.submission = self.submission.reset_index(drop=True)
            self.answer = self.answer.reset_index(drop=True)

        if match_colnames_only:
            for c in self.answer.columns:
                if c not in self.submission.columns:
                    self.update_comment(
                        f"Your submission is missing a `'{c}'` column."
                    )
                    return False if return_bool else self.return_feedback()
            self.add_to_score()
            return True if return_bool else self.return_feedback()

        # Check shape
        if self.submission.shape != self.answer.shape:
            self.update_comment(
                f"The shape of your {type(self.submission).__name__} should be `{self.answer.shape}`,"
                f"not `{self.submission.shape}`."
            )
            if return_bool:
                return self.passed
            else:
                return None

        try:
            pd.testing.assert_frame_equal(
                self.submission,
                self.answer,
                check_like=not match_index_col_order,
                check_exact=not bool(tolerance),
                rtol=tolerance,
            )
            self.add_to_score()
            if return_bool:
                return self.passed
            else:
                return None

        except AssertionError as e:
            comment = self._clean_assert_message(e)
            self.update_comment(comment)
            if return_bool:
                return self.passed
            else:
                return None

    def grade_series(
        self,
        match_index=True,
        match_index_order=False,
        sort_values=False,
        match_names=True,
        tolerance=0.01,
        return_bool=False,
    ):
        """Evaluate submitted Series.

        Parameters
        ----------
        match_index : bool, default=True
            Whether to consider the submission's index when evaluating against
            answer.

        match_index_order : bool, default=False
            Whether to consider the submission's index order when evaluating
            against answer. If ``False``, both submission and answer are sorted
            ascending.

        sort_values : bool, default=False
            If true, submission and answer will be sorted ascending before being
            compared. This will override any option for ``match_index_order``.

        match_names : bool, default=True
            Whether to consider the submission's Series and Index names attributes.

        tolerance : int or float, default=0.01
            For numerical values, what is the maximum allowed
            difference between ``submission`` and ``answer``? If ``tolerance=0.0``,
            values must be identical. If ``tolerance=0.1``, values must be within 10%
            of each other (relative to the larger absolute value of the two).

        return_bool : bool, default=False
            Whether to return ``self.passed`` once grading is complete. You'll need
            this if you want to design your own grading workflow beyond the default.

        Examples
        --------
        >>> from grading_tools.graders import PandasGrader
        >>> import pandas as pd

        Let's create two Series: the ``ans`` and the ``sub``. The latter is in a
        different order, has a different name; its values are close to the answer but
        not exactly the same.

        >>> ans = pd.Series([10, 20, 30], name="pop", index=[1, 2, 3])
        >>> ans
        1    10
        2    20
        3    30
        Name: pop, dtype: int64
        >>> sub = pd.Series([22, 11, 33], name="wrong_name", index=[2, 1, 3])
        >>> sub
        2    22
        1    11
        3    33
        Name: wrong_name, dtype: int64

        If the Series are put into a ``PandasGrader`` and then ``grade_series`` is
        used with default arguments, the submission is evaluated as ``False``, and an
        informative comment is created.

        >>> g = PandasGrader(submission=sub, answer=ans)
        >>> g.grade_series(
        ...     match_index=True,
        ...     match_index_order=True,
        ...     match_names=True,
        ...     tolerance=0.0,
        ...     return_bool=True,
        ... )
        False
        >>> print(g.comment)
        The values in your Series don't match the expected result.

        If we add tolerance and remove requirements for index order and name
        matching, the submission is evaluated at passing.

        >>> g.grade_series(
        ...     match_index=True,
        ...     match_index_order=False,
        ...     match_names=False,
        ...     tolerance=0.1,
        ...     return_bool=True,
        ... )
        True
        >>> print(g.comment)
        Python master 😁

        """
        if not isinstance(self.submission, pd.Series):
            raise TypeError(
                f"grade_series method can only be used with Series submissions, not {type(self.submission).__name__}."
            )

        if not match_index_order:
            self.submission = self.submission.sort_index()
            self.answer = self.answer.sort_index()

        if sort_values:
            self.submission = self.submission.sort_values()
            self.answer = self.answer.sort_values()

        # Check shape
        if self.submission.shape != self.answer.shape:
            self.update_comment(
                f"The shape of your Series should be `{self.answer.shape}`,"
                f"not `{self.submission.shape}`."
            )
            if return_bool:
                return self.passed
            else:
                return None

        try:
            pd.testing.assert_series_equal(
                self.submission,
                self.answer,
                check_index=match_index,
                check_names=match_names,
                check_exact=not bool(tolerance),
                rtol=tolerance,
            )
            self.add_to_score()
            if return_bool:
                return self.passed
            else:
                return None
        except AssertionError as e:
            comment = self._clean_assert_message(e)
            self.update_comment(comment)
            if return_bool:
                return self.passed
            else:
                return None

grade_df(sort_values_by=None, match_index=True, match_index_col_order=False, match_colnames_only=False, tolerance=0.01, return_bool=False)

Evaluate submitted DataFrame.

Parameters

sort_values_by : str, list, default=None
    If specified, submission and answer will be sorted by that column (or list of columns) before evaluation. If "all_cols", the DataFrame will be sorted by a list of all columns (ordered alphabetically).

match_index : bool, default=True
    Whether or not to consider the index of the submitted DataFrame. If False, the index is reset before it's evaluated.

match_index_col_order : bool, default=False
    Whether or not to consider the order of the index and columns in the submitted DataFrame.

match_colnames_only : bool, default=False
    If True, only column names will be evaluated, not index labels or DataFrame values. Note that the index and column order will always be ignored.

tolerance : int or float, default=0.01
    For numerical values, what is the maximum allowed difference between submission and answer? If tolerance=0.0, values must be identical. If tolerance=0.1, values must be within 10% of each other (relative to the larger absolute value of the two).

return_bool : bool, default=False
    Whether to return self.passed once grading is complete. You'll need this if you want to design your own grading workflow beyond the default.

Examples

Here are two DataFrames. The first ans_df is the expected answer, and the second sub_df is the student submission. Note that both have the same values, but the order of the indices and columns is different.

>>> import pandas as pd
>>> ans_df = pd.DataFrame(
...     {"city": ["Puhi", "Napa", "Derby"], "pop": [3, 79, 13]}, index=[16, 14, 4]
... )
>>> sub_df = pd.DataFrame(
...     {"pop": [79, 3, 13], "city": ["Napa", "Puhi", "Derby"]}, index=[14, 16, 4]
... )
>>> print(ans_df)
     city  pop
16   Puhi    3
14   Napa   79
4   Derby   13
>>> print(sub_df)
    pop   city
14   79   Napa
16    3   Puhi
4    13  Derby
>>> g = PandasGrader(submission=sub_df, answer=ans_df)
>>> g.grade_df(match_index_col_order=False, return_bool=True)
True
>>> g.grade_df(match_index_col_order=True, return_bool=True)
False
>>> print(g.comment)
DataFrame.index are different
DataFrame.index values are different (66.66667 %)
[submission]:  Int64Index([14, 16, 4], dtype='int64')
[answer]: Int64Index([16, 14, 4], dtype='int64')

Source code in grading_tools/graders.py
def grade_df(
    self,
    sort_values_by=None,
    match_index=True,
    match_index_col_order=False,
    match_colnames_only=False,
    tolerance=0.01,
    return_bool=False,
):
    """Evaluate submitted DataFrame.

    Parameters
    ----------
    sort_values_by : str, list, default=None
        If specified, submission and answer will be sorted by that column (or list
        of columns) before evaluation. If ``"all_cols"``, DataFrame will be sorted
        by a list of all columns (ordered alphabetically).

    match_index : bool, default=True
        Whether or not to consider the index of the submitted DataFrame. If
        ``False``, index is reset before it's evaluated.

    match_index_col_order : bool, default=False
        Whether or not to consider the order of the index and columns in the
        submitted DataFrame.

    match_colnames_only : bool, default=False
        If ``True`` only column names will be evaluated, not index labels or
        DataFrame values. Note that the index and column order will always be ignored.

    tolerance: int or float, default=0.01
        For numerical values, what is the maximum allowed
        difference between ``submission`` and ``answer``? If ``tolerance=0.0``, values must be
        identical. If ``tolerance=0.1``, values must be within 10% of each
        other (relative to the larger absolute value of the two).

    return_bool : bool, default=False
        Whether to return ``self.passed`` once grading is complete. You'll need this if
        you want to design your own grading workflow beyond the default.

    Examples
    --------
    Here are two DataFrames. The first ``ans_df`` is the expected answer, and the second
    ``sub_df`` is the student submission. Note that both have the same values, but order of the
    indices and columns is different.

    >>> import pandas as pd
    >>> ans_df = pd.DataFrame(
    ...     {"city": ["Puhi", "Napa", "Derby"], "pop": [3, 79, 13]}, index=[16, 14, 4]
    ... )
    >>> sub_df = pd.DataFrame(
    ...     {"pop": [79, 3, 13], "city": ["Napa", "Puhi", "Derby"]}, index=[14, 16, 4]
    ... )
    >>> print(ans_df)
         city  pop
    16   Puhi    3
    14   Napa   79
    4   Derby   13
    >>> print(sub_df)
        pop   city
    14   79   Napa
    16    3   Puhi
    4    13  Derby
    >>> g = PandasGrader(submission=sub_df, answer=ans_df)
    >>> g.grade_df(match_index_col_order=False, return_bool=True)
    True
    >>> g.grade_df(match_index_col_order=True, return_bool=True)
    False
    >>> print(g.comment)
    DataFrame.index are different
    DataFrame.index values are different (66.66667 %)
    [submission]:  Int64Index([14, 16, 4], dtype='int64')
    [answer]: Int64Index([16, 14, 4], dtype='int64')
    """
    if not isinstance(self.submission, pd.DataFrame):
        raise TypeError(
            f"grade_df method can only be used with DataFrames submissions, not {type(self.submission).__name__}."
        )

    if sort_values_by:
        try:
            if sort_values_by == "all_cols":
                # Sorts df cols alphabetically and then sorts rows by all cols
                self.submission = self.submission.sort_index(axis=1)
                self.submission = self.submission.sort_values(
                    by=self.submission.columns.tolist(), axis=0
                )
                self.answer = self.answer.sort_index(axis=1)
                self.answer = self.answer.sort_values(
                    by=self.answer.columns.tolist(), axis=0
                )
            else:
                self.submission.sort_values(sort_values_by, inplace=True)
                self.answer.sort_values(sort_values_by, inplace=True)
        except KeyError:
            raise KeyError(
                f"Either the submission or answer does not have column(s) {sort_values_by}."
            )

    if not match_index:
        self.submission = self.submission.reset_index(drop=True)
        self.answer = self.answer.reset_index(drop=True)

    if match_colnames_only:
        for c in self.answer.columns:
            if c not in self.submission.columns:
                self.update_comment(
                    f"Your submission is missing a `'{c}'` column."
                )
                return False if return_bool else self.return_feedback()
        self.add_to_score()
        return True if return_bool else self.return_feedback()

    # Check shape
    if self.submission.shape != self.answer.shape:
        self.update_comment(
            f"The shape of your {type(self.submission).__name__} should be `{self.answer.shape}`,"
            f"not `{self.submission.shape}`."
        )
        if return_bool:
            return self.passed
        else:
            return None

    try:
        pd.testing.assert_frame_equal(
            self.submission,
            self.answer,
            check_like=not match_index_col_order,
            check_exact=not bool(tolerance),
            rtol=tolerance,
        )
        self.add_to_score()
        if return_bool:
            return self.passed
        else:
            return None

    except AssertionError as e:
        comment = self._clean_assert_message(e)
        self.update_comment(comment)
        if return_bool:
            return self.passed
        else:
            return None

grade_series(match_index=True, match_index_order=False, sort_values=False, match_names=True, tolerance=0.01, return_bool=False)

Evaluate submitted Series.

Parameters

match_index : bool, default=True

Whether to consider the submission's index when evaluating against answer.

match_index_order : bool, default=False

Whether to consider the submission's index order when evaluating against answer. If False, both submission and answer are sorted ascending.

sort_values : bool, default=False

If True, submission and answer will be sorted ascending before being compared. This will override any option for match_index_order.

match_names : bool, default=True

Whether to consider the submission's Series and Index names attributes.

tolerance : int or float, default=0.01

For numerical values, what is the maximum allowed difference between submission and answer? If tolerance=0.0, values must be identical. If tolerance=0.1, values must be within 10% of each other (relative to the larger absolute value of the two).

return_bool : bool, default=False

Whether to return self.passed once grading is complete. You'll need this if you want to design your own grading workflow beyond the default.

Examples

>>> from grading_tools.graders import PandasGrader
>>> import pandas as pd

Let's create two Series: ans and sub. The latter is in a different order and has a different name; its values are close to the answer but not exactly the same.

>>> ans = pd.Series([10, 20, 30], name="pop", index=[1, 2, 3])
>>> ans
1    10
2    20
3    30
Name: pop, dtype: int64
>>> sub = pd.Series([22, 11, 33], name="wrong_name", index=[2, 1, 3])
>>> sub
2    22
1    11
3    33
Name: wrong_name, dtype: int64

If the Series are put into a PandasGrader and then grade_series is used with strict settings, the submission is evaluated as False, and an informative comment is created.

>>> g = PandasGrader(submission=sub, answer=ans)
>>> g.grade_series(
...     match_index=True,
...     match_index_order=True,
...     match_names=True,
...     tolerance=0.0,
...     return_bool=True,
... )
False
>>> print(g.comment)
The values in your Series don't match the expected result.

If we add tolerance and remove the requirements for index order and name matching, the submission is evaluated as passing.

>>> g.grade_series(
...     match_index=True,
...     match_index_order=False,
...     match_names=False,
...     tolerance=0.1,
...     return_bool=True,
... )
True
>>> print(g.comment)
Python master 😁

Source code in grading_tools/graders.py
def grade_series(
    self,
    match_index=True,
    match_index_order=False,
    sort_values=False,
    match_names=True,
    tolerance=0.01,
    return_bool=False,
):
    """Evaluate submitted Series.

    Parameters
    ----------
    match_index : bool, default=True
        Whether to consider the submission's index when evaluating against
        answer.

    match_index_order : bool, default=False
        Whether to consider the submission's index order when evaluating
        against answer. If ``False``, both submission and answer are sorted
        ascending.

    sort_values : bool, default=False
        If true, submission and answer will be sorted ascending before being
        compared. This will override any option for ``match_index_order``.

    match_names : bool, default=True
        Whether to consider the submission's Series and Index names attributes.

    tolerance : int or float, default=0.01
        For numerical values, what is the maximum allowed
        difference between ``submission`` and ``answer``? If ``tolerance=0.0``,
        values must be identical. If ``tolerance=0.1``, values must be within 10%
        of each other (relative to the larger absolute value of the two).

    return_bool : bool, default=False
        Whether to return ``self.passed`` once grading is complete. You'll need
        this if you want to design your own grading workflow beyond the default.

    Examples
    --------
    >>> from grading_tools.graders import PandasGrader
    >>> import pandas as pd

    Let's create two Series: the ``ans`` and the ``sub``. The latter is in a
    different order, has a different name; its values are close to the answer but
    not exactly the same.

    >>> ans = pd.Series([10, 20, 30], name="pop", index=[1, 2, 3])
    >>> ans
    1    10
    2    20
    3    30
    Name: pop, dtype: int64
    >>> sub = pd.Series([22, 11, 33], name="wrong_name", index=[2, 1, 3])
    >>> sub
    2    22
    1    11
    3    33
    Name: wrong_name, dtype: int64

    If the Series are put into a ``PandasGrader`` and then ``grade_series`` is
    used with strict settings, the submission is evaluated as ``False``, and an
    informative comment is created.

    >>> g = PandasGrader(submission=sub, answer=ans)
    >>> g.grade_series(
    ...     match_index=True,
    ...     match_index_order=True,
    ...     match_names=True,
    ...     tolerance=0.0,
    ...     return_bool=True,
    ... )
    False
    >>> print(g.comment)
    The values in your Series don't match the expected result.

    If we add tolerance and remove requirements for index order and name
    matching, the submission is evaluated as passing.

    >>> g.grade_series(
    ...     match_index=True,
    ...     match_index_order=False,
    ...     match_names=False,
    ...     tolerance=0.1,
    ...     return_bool=True,
    ... )
    True
    >>> print(g.comment)
    Python master 😁

    """
    if not isinstance(self.submission, pd.Series):
        raise TypeError(
            f"grade_series method can only be used with Series submissions, not {type(self.submission).__name__}."
        )

    if not match_index_order:
        self.submission = self.submission.sort_index()
        self.answer = self.answer.sort_index()

    if sort_values:
        self.submission = self.submission.sort_values()
        self.answer = self.answer.sort_values()

    # Check shape
    if self.submission.shape != self.answer.shape:
        self.update_comment(
            f"The shape of your Series should be `{self.answer.shape}`,"
            f"not `{self.submission.shape}`."
        )
        if return_bool:
            return self.passed
        else:
            return None

    try:
        pd.testing.assert_series_equal(
            self.submission,
            self.answer,
            check_index=match_index,
            check_names=match_names,
            check_exact=not bool(tolerance),
            rtol=tolerance,
        )
        self.add_to_score()
        if return_bool:
            return self.passed
        else:
            return None
    except AssertionError as e:
        comment = self._clean_assert_message(e)
        self.update_comment(comment)
        if return_bool:
            return self.passed
        else:
            return None

PlotGrader

Bases: BaseGrader

Grader for evaluating images.

Source code in grading_tools/graders.py
class PlotGrader(BaseGrader):
    """Grader for evaluating images."""

    def __init__(self, submission, answer, points=1):
        if isinstance(submission, tempfile._TemporaryFileWrapper):
            submission = Image.open(submission)
        super().__init__(submission, answer, points)

    def grade_plot_image(
        self,
        threshold=0.0,
        return_diff=True,
        diff_path="./diff.png",
        highlight_size=50,
        return_bool=False,
    ):
        """Compare two images.

        Evaluates how similar two images are by calculating the root mean square error
        between their pixels.

        Inspired by Matplotlib's `compare_images <https://matplotlib.org/devdocs/api/testing_api.html#matplotlib.testing.compare.compare_images>`_.

        Parameters
        ----------
        threshold: float, default=0.0
            The RMSE under which the submitted image needs to score in order to still
            be considered correct.

        return_diff: bool, default=True
            Whether to generate a file highlighting difference between submission and
            answer. If ``True``, file is saved to ``diff_path``.

        diff_path: str, default="./diff.png"
            Location to which the diff image will be saved.

        highlight_size: int, default=50
            How large the overlayed highlights in the diff image should be.

        return_bool : bool, default=False
            Whether to return ``self.passed`` once grading is complete. You'll need this if
            you want to design your own grading workflow beyond the default.

        """
        # Check that submission type is correct
        if not isinstance(self.submission, PngImageFile):
            raise TypeError(
                "grade_plot_image method can only be used on `PIL.Image` objects, "
                f"not {type(self.submission).__name__} objects."
            )

        # The submission needs to be the same size as answer
        if self.submission.size != self.answer.size:
            self.update_comment(
                "Your submission plot is not the same size as the expected result."
                "Make sure your plot has the fig size specified in the instructions"
                "and that you're saving the plot with the correct `dpi`."
            )
            if return_bool:
                return self.passed
            else:
                return None

        # Turn images into B&W ndarrays
        sub_bw_array = np.asarray(self.submission.convert("1")).astype(
            np.int16
        )
        ans_bw_array = np.asarray(self.answer.convert("1")).astype(np.int16)

        # If arrays are equal, passed
        if threshold <= 0:
            if np.array_equal(sub_bw_array, ans_bw_array):
                self.add_to_score()
                if return_bool:
                    return self.passed
                else:
                    return None

        # Calculate the per-pixel errors, then compute the root mean square error.
        rmse = np.sqrt(
            ((sub_bw_array - ans_bw_array).astype(float) ** 2).mean()
        )

        # If RMSE below threshold, passed
        if rmse < threshold:
            self.add_to_score()
            if return_bool:
                return self.passed
            else:
                return None

        # If RMSE is above threshold and you want a diff image
        if rmse > threshold and return_diff:
            # If it's not below threshold, make diff image
            # Step 1: Create diff array
            diff_bw_array = np.abs(ans_bw_array - sub_bw_array)

            # Step 2. Create diff B&W highlights array
            s = np.ones((highlight_size, highlight_size))
            diff_hl_bw = ndimage.morphology.binary_dilation(
                diff_bw_array, structure=s
            ).astype(int)

            # Step 3. Create RGB version of diff
            # 3.1. Start with empty array. The `4` refers to RGB + alpha channels
            diff_hl_rgb = np.zeros(list(diff_hl_bw.shape) + [4])
            # 3.2. Add red pixels
            diff_hl_rgb[:, :, 0] = diff_hl_bw * 255
            # 3.3. Add alpha
            diff_hl_rgb[:, :, -1] = 125
            # 3.4. Add white pixels
            diff_hl_rgb[
                np.where((diff_hl_rgb == [0, 0, 0, 125]).all(axis=2))
            ] = [
                255,
                255,
                255,
                0,
            ]
            diff_hl_rgb = diff_hl_rgb.astype(np.uint8)

            # Step 4: Create diff image
            diff_hl_img = Image.fromarray(diff_hl_rgb)
            self.diff = self.submission.copy()
            self.diff.paste(diff_hl_img, (0, 0), mask=diff_hl_img)

            # Step 5: Save diff image
            self.diff.save(diff_path, format="png")
            self.diff_path = diff_path

            # Step 6: Update feedback
            self.update_comment(
                "Your submission doesn't match the expected result. "
                "Check the image below to see where your plot differs from the answer."
            )

        if rmse > threshold and not return_diff:
            self.update_comment(
                "Your submission doesn't match the expected result."
            )

        if return_bool:
            return self.passed
        else:
            return None

grade_plot_image(threshold=0.0, return_diff=True, diff_path='./diff.png', highlight_size=50, return_bool=False)

Compare two images.

Evaluates how similar two images are by calculating the root mean square error between their pixels.

Inspired by Matplotlib's compare_images (https://matplotlib.org/devdocs/api/testing_api.html#matplotlib.testing.compare.compare_images).

Parameters

threshold : float, default=0.0

The RMSE under which the submitted image needs to score in order to still be considered correct.

return_diff : bool, default=True

Whether to generate a file highlighting difference between submission and answer. If True, file is saved to diff_path.

diff_path : str, default="./diff.png"

Location to which the diff image will be saved.

highlight_size : int, default=50

How large the overlayed highlights in the diff image should be.

return_bool : bool, default=False

Whether to return self.passed once grading is complete. You'll need this if you want to design your own grading workflow beyond the default.
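
A minimal usage sketch (the PNG paths are hypothetical, and the outcome depends on how closely the two plots match):

>>> from PIL import Image
>>> from grading_tools.graders import PlotGrader
>>> sub_img = Image.open("student_plot.png")    # hypothetical student plot (PNG)
>>> ans_img = Image.open("expected_plot.png")   # hypothetical expected plot (PNG)
>>> g = PlotGrader(submission=sub_img, answer=ans_img)
>>> passed = g.grade_plot_image(threshold=5.0, return_diff=True, return_bool=True)

If the plots differ by more than the threshold and return_diff=True, a highlighted diff image is written to diff_path (./diff.png by default) and g.comment holds feedback for the student.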

Source code in grading_tools/graders.py
def grade_plot_image(
    self,
    threshold=0.0,
    return_diff=True,
    diff_path="./diff.png",
    highlight_size=50,
    return_bool=False,
):
    """Compare two images.

    Evaluates how similar two images are by calculating the root mean square error
    between their pixels.

    Inspired by Matplotlib's `compare_images <https://matplotlib.org/devdocs/api/testing_api.html#matplotlib.testing.compare.compare_images>`_.

    Parameters
    ----------
    threshold: float, default=0.0
        The RMSE under which the submitted image needs to score in order to still
        be considered correct.

    return_diff: bool, default=True
        Whether to generate a file highlighting difference between submission and
        answer. If ``True``, file is saved to ``diff_path``.

    diff_path: str, default="./diff.png"
        Location to which the diff image will be saved.

    highlight_size: int, default=50
        How large the overlayed highlights in the diff image should be.

    return_bool : bool, default=False
        Whether to return ``self.passed`` once grading is complete. You'll need this if
        you want to design your own grading workflow beyond the default.

    """
    # Check that submission type is correct
    if not isinstance(self.submission, PngImageFile):
        raise TypeError(
            "grade_plot_image method can only be used on `PIL.Image` objects, "
            f"not {type(self.submission).__name__} objects."
        )

    # The submission needs to be the same size as answer
    if self.submission.size != self.answer.size:
        self.update_comment(
            "Your submission plot is not the same size as the expected result."
            "Make sure your plot has the fig size specified in the instructions"
            "and that you're saving the plot with the correct `dpi`."
        )
        if return_bool:
            return self.passed
        else:
            return None

    # Turn images into B&W ndarrays
    sub_bw_array = np.asarray(self.submission.convert("1")).astype(
        np.int16
    )
    ans_bw_array = np.asarray(self.answer.convert("1")).astype(np.int16)

    # If arrays are equal, passed
    if threshold <= 0:
        if np.array_equal(sub_bw_array, ans_bw_array):
            self.add_to_score()
            if return_bool:
                return self.passed
            else:
                return None

    # Calculate the per-pixel errors, then compute the root mean square error.
    rmse = np.sqrt(
        ((sub_bw_array - ans_bw_array).astype(float) ** 2).mean()
    )

    # If RMSE below threshold, passed
    if rmse < threshold:
        self.add_to_score()
        if return_bool:
            return self.passed
        else:
            return None

    # If RMSE is above threshold and you want a diff image
    if rmse > threshold and return_diff:
        # If it's not below threshold, make diff image
        # Step 1: Create diff array
        diff_bw_array = np.abs(ans_bw_array - sub_bw_array)

        # Step 2. Create diff B&W highlights array
        s = np.ones((highlight_size, highlight_size))
        diff_hl_bw = ndimage.morphology.binary_dilation(
            diff_bw_array, structure=s
        ).astype(int)

        # Step 3. Create RGB version of diff
        # 3.1. Start with empty array. The `4` refers to RGB + alpha channels
        diff_hl_rgb = np.zeros(list(diff_hl_bw.shape) + [4])
        # 3.2. Add red pixels
        diff_hl_rgb[:, :, 0] = diff_hl_bw * 255
        # 3.3. Add alpha
        diff_hl_rgb[:, :, -1] = 125
        # 3.4. Add white pixels
        diff_hl_rgb[
            np.where((diff_hl_rgb == [0, 0, 0, 125]).all(axis=2))
        ] = [
            255,
            255,
            255,
            0,
        ]
        diff_hl_rgb = diff_hl_rgb.astype(np.uint8)

        # Step 4: Create diff image
        diff_hl_img = Image.fromarray(diff_hl_rgb)
        self.diff = self.submission.copy()
        self.diff.paste(diff_hl_img, (0, 0), mask=diff_hl_img)

        # Step 5: Save diff image
        self.diff.save(diff_path, format="png")
        self.diff_path = diff_path

        # Step 6: Update feedback
        self.update_comment(
            "Your submission doesn't match the expected result. "
            "Check the image below to see where your plot differs from the answer."
        )

    if rmse > threshold and not return_diff:
        self.update_comment(
            "Your submission doesn't match the expected result."
        )

    if return_bool:
        return self.passed
    else:
        return None

PythonGrader

Bases: BaseGrader

Evaluate data types from the Python standard library.

Source code in grading_tools/graders.py
class PythonGrader(BaseGrader):
    """Evaluate data types from the Python standard library."""

    def __init__(self, submission, answer, points=1):
        super().__init__(submission, answer, points)

    def grade_list(
        self, match_order=True, tolerance=0.0, return_bool=False
    ) -> Union[bool, None]:
        """
        Evaluates a student's submitted list against the answer list.

        Args:
            match_order (bool): Determines if the items in the submission need to be in the
                same order as those in the answer. Defaults to True.
            tolerance (float): The maximum allowable difference between items in `submission`
                and `answer`. A tolerance of 0.0 requires exact matches, while a higher value
                allows for some discrepancy in numerical items, assessed relatively to the
                larger absolute value between the two using `math.isclose()`. Defaults to 0.0.
            return_bool (bool): If True, returns whether the submission passed the grading
                after evaluation is complete. Useful for custom grading workflows.
                Defaults to False.

        Returns:
            bool: The result of the evaluation, indicating whether the submission passed
                the grading criteria.

        Examples:
            Exact match required, order not important:
            >>> g = PythonGrader(submission=[1, 0], answer=[0, 1])
            >>> g.grade_list(match_order=False, tolerance=0.0, return_bool=True)
            True

            Exact numerical match required, order important:
            >>> g = PythonGrader(submission=[1.1, 2.2], answer=[1, 2])
            >>> g.grade_list(match_order=True, tolerance=0.0, return_bool=True)
            False

            Approximate numerical match allowed, order important:
            >>> g = PythonGrader(submission=[1.1, 2.2], answer=[1, 2])
            >>> g.grade_list(match_order=True, tolerance=0.1, return_bool=True)
            True
        """
        if not isinstance(self.submission, list):
            raise TypeError(
                f"grade_list method can only be used with list submissions, not {type(self.submission).__name__}."
            )

        if len(self.submission) != len(self.answer):
            self.update_comment(
                f"Your submission should have `{len(self.answer)}` items, not `{len(self.submission)}`."
            )
            return

        if match_order is False:
            # For dealing with records
            if isinstance(self.submission[0], dict):
                sort_key = list(self.submission[0].keys())[0]
                self.submission.sort(key=lambda x: x[sort_key])
                self.answer.sort(key=lambda x: x[sort_key])
            else:
                self.submission.sort()
                self.answer.sort()

        if not tolerance and self.submission == self.answer:
            self.add_to_score()

        # If tolerance is included and list items match dtypes with each other
        if tolerance:
            for a, b in zip(self.submission, self.answer):
                # Check numbers using relative tolerance
                if (
                    isinstance(a, (float, int))
                    and isinstance(b, (float, int))
                    and isclose(a, b, rel_tol=tolerance)
                ):
                    pass
                # Check strings using Levenshtein distance
                elif (
                    (isinstance(a, str))
                    and (isinstance(b, str))
                    and (1 - fuzz.ratio(a, b) / 100 < tolerance)
                ):
                    pass
                # Return fail at first mismatch
                else:
                    self.update_comment(
                        f"Your list contains the item `{a}`, which doesn't match the expected result."
                    )
                    if return_bool:
                        return self.passed
                    else:
                        return
            self.add_to_score()

        if return_bool:
            return self.passed
        else:
            return

    def grade_dict(self, tolerance=0.0, return_bool=False):
        """Evaluates whether the submitted dictionary matches the expected answer.

        This function checks if each value in the `submission` dictionary matches its
        corresponding value in the `answer` dictionary within a specified tolerance.
        This is particularly useful for submissions containing floating-point numbers.
        The function uses `math.isclose()` for comparing numerical values.

        Args:
            submission (dict): The dictionary submitted by the student.
            answer (dict): The correct answer dictionary.
            tolerance (float, optional): The maximum allowed difference between values
                in the `submission` and `answer`. For example, a tolerance of 0.1 means
                values must be within 10% of each other, relative to the larger of the
                two values. Defaults to 0.0.
            return_bool (bool, optional): If True, returns `self.passed` after grading
                is complete. This is useful for custom grading workflows. Defaults to False.

        Examples:
            Check exact match between dictionaries:
            >>> g = PythonGrader(submission={"a": 1, "b": 2}, answer={"a": 1, "b": 2})
            >>> g.grade_dict(tolerance=0.0, return_bool=True)
            True

            Allow for approximate value matches:
            >>> g = PythonGrader(submission={"a": 1, "b": 2.2}, answer={"a": 1, "b": 2})
            >>> g.grade_dict(tolerance=0.1, return_bool=True)
            True

            Handle unmatched keys:
            >>> g = PythonGrader(submission={"a": 1, "z": 2}, answer={"a": 1, "b": 2})
            >>> g.grade_dict(tolerance=0.0, return_bool=False)
            >>> print(g.comment)
            One or more of the keys in your dictionary doesn't match the expected result.

            Handle value mismatches:
            >>> g = PythonGrader(submission={"a": 1, "b": 2.2}, answer={"a": 1, "b": 2})
            >>> g.grade_dict(tolerance=0.0, return_bool=False)
            >>> print(g.comment)
            The value for the key `b` doesn't match the expected result.
        """
        if not isinstance(self.submission, dict):
            raise TypeError(
                f"grade_dict method can only be used with dict submissions, not {type(self.submission).__name__}."
            )

        # Exact match, give point and done
        if self.submission == self.answer:
            self.add_to_score()
            if return_bool:
                return self.passed
            else:
                return

        # Is it the keys that don't match?
        if self.submission.keys() != self.answer.keys():
            self.update_comment(
                "One or more of the keys in your dictionary doesn't match the expected result."
            )
            if return_bool:
                return self.passed
            else:
                return

        # If keys match, iterate through keys and check values
        for k in self.submission.keys():
            # Flag set to True when vals don't match or not w/in tolerance
            break_flag = False
            sub = self.submission[k]
            ans = self.answer[k]
            sub_is_num = isinstance(sub, (int, float))
            key_val_comment = f"The value for the key `{k}` doesn't match the expected result."

            # For numerical values
            if sub_is_num and sub != ans:
                if isnan(sub) and isnan(ans):
                    self.passed = True
                elif (tolerance > 0) and isclose(sub, ans, rel_tol=tolerance):
                    # This will continue to be True as long as all vals are w/in tolerance
                    self.passed = True
                else:
                    self.update_comment(key_val_comment)
                    self.passed = False
                    break_flag = True

            # For non-numerical values
            if not sub_is_num and sub != ans:
                self.update_comment(key_val_comment)
                self.passed = False
                break_flag = True

            if break_flag:
                break

        # If submission got through loop with self.passed==True, all vals are w/in tolerance
        if self.passed:
            self.add_to_score()

        if return_bool:
            return self.passed
        else:
            return

    def grade_number(
        self, tolerance=0, return_bool=False
    ) -> Union[bool, None]:
        """Evaluates whether the submitted number matches the expected answer.

        This function determines if the submitted number (integer or float) is within a
        specified tolerance of the expected answer. It is suitable for grading numerical
        responses where exact matches may not always be feasible due to precision issues
        in floating-point representations.

        Args:
            submission (int or float): The number submitted by the student.
            answer (int or float): The correct answer.
            tolerance (int or float, optional): The maximum allowed difference between
                the `submission` and `answer`. For example, a tolerance of 0.1 allows
                the values to be within 10% of each other, relative to the larger absolute
                value of the two. Defaults to 0.0.
            return_bool (bool, optional): If True, returns `self.passed` after grading is
                complete. Useful for custom grading workflows. Defaults to False.
        """
        if not isclose(self.submission, self.answer, rel_tol=tolerance):
            self.update_comment(
                f"Your submission `{self.submission}` doesn't match the expected result."
            )
            return False if return_bool else self.return_feedback()

        self.add_to_score()
        return True if return_bool else self.return_feedback()

    def grade_bool(self, return_bool=False) -> Union[bool, None]:
        """Evaluate student's submitted number (int or float).

        Parameters:
            return_bool (bool, default=False): Whether to return `self.passed` once grading is complete. You'll need this if you want to design your own grading workflow beyond the default.
        """
        if self.submission != self.answer:
            self.update_comment(
                f"Your submission `{self.submission}` doesn't match the expected result."
            )
            return False if return_bool else self.return_feedback()

        self.add_to_score()
        return True if return_bool else self.return_feedback()

    def grade_string(
        self,
        ignore_case: bool = False,
        contains: Union[str, list, None] = None,
        return_bool: bool = False,
    ) -> Union[bool, None]:
        """Evaluates whether the submitted string matches the expected answer.

        By default, this function checks for an exact match between the submission and the
        answer. If the `contains` parameter is specified, the function will check if the
        submission includes the specified term or terms.

        Args:
            submission (str): The string submitted by the student.
            answer (str): The correct answer.
            ignore_case (bool, optional): If True, the case will be ignored when evaluating
                the submission. Defaults to False.
            contains (str or list, optional): If None, an exact match is required. If a str,
                only that substring must be present in the submission. If a list, all specified
                items must be present. Defaults to None.
            return_bool (bool, optional): If True, returns `self.passed` once grading is
                complete. Useful for custom grading workflows. Defaults to False.

        Examples:
            Check for an exact match:
            >>> g = PythonGrader(submission="Hello world", answer="Hello world")
            >>> g.grade_string(ignore_case=False, return_bool=True)
            True

            Check for a substring, ignoring case:
            >>> g = PythonGrader(submission="Hello world", answer="")
            >>> g.grade_string(contains="WORLD", ignore_case=True, return_bool=True)
            True

            Check for multiple substrings:
            >>> g = PythonGrader(submission="Hello world", answer="")
            >>> g.grade_string(contains=["Hello", "monkey"], ignore_case=False, return_bool=True)
            False
            >>> print(g.comment)
            Your submission is missing an important term: 'monkey'.
        """
        if not isinstance(self.submission, str):
            raise TypeError(
                f"grade_string method can only be used with str submissions, not {type(self.submission).__name__}."
            )

        if ignore_case:
            self.submission = self.submission.lower()
            self.answer = self.answer.lower()
            if isinstance(contains, str):
                contains = contains.lower()
            if isinstance(contains, Iterable):
                contains = [str(s).lower() for s in list(contains)]

        if (not contains) and (self.submission != self.answer):
            self.update_comment(
                "Your submission doesn't match the expected result"
            )
            return False if return_bool else self.return_feedback()

        if (isinstance(contains, str)) and contains not in self.submission:
            self.update_comment(
                f"Your submission is missing an important term: `'{contains}'`."
            )
            return False if return_bool else self.return_feedback()

        if isinstance(contains, Iterable):
            for s in list(contains):
                if str(s) not in self.submission:
                    self.update_comment(
                        f"Your submission is missing an important term: `'{s}'`."
                    )
                    return False if return_bool else self.return_feedback()

        self.add_to_score()
        return True if return_bool else self.return_feedback()

grade_bool(return_bool=False)

Evaluate student's submitted boolean.

Parameters:

Name Type Description Default
return_bool bool, default=False

Whether to return self.passed once grading is complete. You'll need this if you want to design your own grading workflow beyond the default.

False
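
A minimal usage sketch (hypothetical values, not from the library's docstring):

>>> g = PythonGrader(submission=True, answer=True)
>>> g.grade_bool(return_bool=True)
True
>>> g = PythonGrader(submission=False, answer=True)
>>> g.grade_bool(return_bool=True)
False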
Source code in grading_tools/graders.py
def grade_bool(self, return_bool=False) -> Union[bool, None]:
    """Evaluate student's submitted number (int or float).

    Parameters:
        return_bool (bool, default=False): Whether to return `self.passed` once grading is complete. You'll need this if you want to design your own grading workflow beyond the default.
    """
    if self.submission != self.answer:
        self.update_comment(
            f"Your submission `{self.submission}` doesn't match the expected result."
        )
        return False if return_bool else self.return_feedback()

    self.add_to_score()
    return True if return_bool else self.return_feedback()

grade_dict(tolerance=0.0, return_bool=False)

Evaluates whether the submitted dictionary matches the expected answer.

This function checks if each value in the submission dictionary matches its corresponding value in the answer dictionary within a specified tolerance. This is particularly useful for submissions containing floating-point numbers. The function uses math.isclose() for comparing numerical values.

Parameters:

Name Type Description Default
submission dict

The dictionary submitted by the student.

required
answer dict

The correct answer dictionary.

required
tolerance float

The maximum allowed difference between values in the submission and answer. For example, a tolerance of 0.1 means values must be within 10% of each other, relative to the larger of the two values. Defaults to 0.0.

0.0
return_bool bool

If True, returns self.passed after grading is complete. This is useful for custom grading workflows. Defaults to False.

False

Examples:

Check exact match between dictionaries:

>>> g = PythonGrader(submission={"a": 1, "b": 2}, answer={"a": 1, "b": 2})
>>> g.grade_dict(tolerance=0.0, return_bool=True)
True

Allow for approximate value matches:

>>> g = PythonGrader(submission={"a": 1, "b": 2.2}, answer={"a": 1, "b": 2})
>>> g.grade_dict(tolerance=0.1, return_bool=True)
True

Handle unmatched keys:

>>> g = PythonGrader(submission={"a": 1, "z": 2}, answer={"a": 1, "b": 2})
>>> g.grade_dict(tolerance=0.0, return_bool=False)
>>> print(g.comment)
One or more of the keys in your dictionary doesn't match the expected result.

Handle value mismatches:

>>> g = PythonGrader(submission={"a": 1, "b": 2.2}, answer={"a": 1, "b": 2})
>>> g.grade_dict(tolerance=0.0, return_bool=False)
>>> print(g.comment)
The value for the key `b` doesn't match the expected result.
Source code in grading_tools/graders.py
def grade_dict(self, tolerance=0.0, return_bool=False):
    """Evaluates whether the submitted dictionary matches the expected answer.

    This function checks if each value in the `submission` dictionary matches its
    corresponding value in the `answer` dictionary within a specified tolerance.
    This is particularly useful for submissions containing floating-point numbers.
    The function uses `math.isclose()` for comparing numerical values.

    Args:
        submission (dict): The dictionary submitted by the student.
        answer (dict): The correct answer dictionary.
        tolerance (float, optional): The maximum allowed difference between values
            in the `submission` and `answer`. For example, a tolerance of 0.1 means
            values must be within 10% of each other, relative to the larger of the
            two values. Defaults to 0.0.
        return_bool (bool, optional): If True, returns `self.passed` after grading
            is complete. This is useful for custom grading workflows. Defaults to False.

    Examples:
        Check exact match between dictionaries:
        >>> g = PythonGrader(submission={"a": 1, "b": 2}, answer={"a": 1, "b": 2})
        >>> g.grade_dict(tolerance=0.0, return_bool=True)
        True

        Allow for approximate value matches:
        >>> g = PythonGrader(submission={"a": 1, "b": 2.2}, answer={"a": 1, "b": 2})
        >>> g.grade_dict(tolerance=0.1, return_bool=True)
        True

        Handle unmatched keys:
        >>> g = PythonGrader(submission={"a": 1, "z": 2}, answer={"a": 1, "b": 2})
        >>> g.grade_dict(tolerance=0.0, return_bool=False)
        >>> print(g.comment)
        One or more of the keys in your dictionary doesn't match the expected result.

        Handle value mismatches:
        >>> g = PythonGrader(submission={"a": 1, "b": 2.2}, answer={"a": 1, "b": 2})
        >>> g.grade_dict(tolerance=0.0, return_bool=False)
        >>> print(g.comment)
        The value for the key `b` doesn't match the expected result.
    """
    if not isinstance(self.submission, dict):
        raise TypeError(
            f"grade_dict method can only be used with dict submissions, not {type(self.submission).__name__}."
        )

    # Exact match, give point and done
    if self.submission == self.answer:
        self.add_to_score()
        if return_bool:
            return self.passed
        else:
            return

    # Is it the keys that don't match?
    if self.submission.keys() != self.answer.keys():
        self.update_comment(
            "One or more of the keys in your dictionary doesn't match the expected result."
        )
        if return_bool:
            return self.passed
        else:
            return

    # If keys match, iterate through keys and check values
    for k in self.submission.keys():
        # Flag set to True when vals don't match or not w/in tolerance
        break_flag = False
        sub = self.submission[k]
        ans = self.answer[k]
        sub_is_num = isinstance(sub, (int, float))
        key_val_comment = f"The value for the key `{k}` doesn't match the expected result."

        # For numerical values
        if sub_is_num and sub != ans:
            if isnan(sub) and isnan(ans):
                self.passed = True
            elif (tolerance > 0) and isclose(sub, ans, rel_tol=tolerance):
                # This will continue to be True as long as all vals are w/in tolerance
                self.passed = True
            else:
                self.update_comment(key_val_comment)
                self.passed = False
                break_flag = True

        # For non-numerical values
        if not sub_is_num and sub != ans:
            self.update_comment(key_val_comment)
            self.passed = False
            break_flag = True

        if break_flag:
            break

    # If submission got through loop with self.passed==True, all vals are w/in tolerance
    if self.passed:
        self.add_to_score()

    if return_bool:
        return self.passed
    else:
        return

grade_list(match_order=True, tolerance=0.0, return_bool=False)

Evaluates a student's submitted list against the answer list.

Parameters:

Name Type Description Default
match_order bool

Determines if the items in the submission need to be in the same order as those in the answer. Defaults to True.

True
tolerance float

The maximum allowable difference between items in submission and answer. A tolerance of 0.0 requires exact matches, while a higher value allows for some discrepancy in numerical items, assessed relatively to the larger absolute value between the two using math.isclose(). Defaults to 0.0.

0.0
return_bool bool

If True, returns whether the submission passed the grading after evaluation is complete. Useful for custom grading workflows. Defaults to False.

False

Returns:

Name Type Description
bool Union[bool, None]

The result of the evaluation, indicating whether the submission passed the grading criteria.

Examples:

Exact match required, order not important:

>>> g = PythonGrader(submission=[1, 0], answer=[0, 1])
>>> g.grade_list(match_order=False, tolerance=0.0, return_bool=True)
True

Exact numerical match required, order important:

>>> g = PythonGrader(submission=[1.1, 2.2], answer=[1, 2])
>>> g.grade_list(match_order=True, tolerance=0.0, return_bool=True)
False

Approximate numerical match allowed, order important:

>>> g = PythonGrader(submission=[1.1, 2.2], answer=[1, 2])
>>> g.grade_list(match_order=True, tolerance=0.1, return_bool=True)
True
Source code in grading_tools/graders.py
def grade_list(
    self, match_order=True, tolerance=0.0, return_bool=False
) -> Union[bool, None]:
    """
    Evaluates a student's submitted list against the answer list.

    Args:
        match_order (bool): Determines if the items in the submission need to be in the
            same order as those in the answer. Defaults to True.
        tolerance (float): The maximum allowable difference between items in `submission`
            and `answer`. A tolerance of 0.0 requires exact matches, while a higher value
            allows for some discrepancy in numerical items, assessed relatively to the
            larger absolute value between the two using `math.isclose()`. Defaults to 0.0.
        return_bool (bool): If True, returns whether the submission passed the grading
            after evaluation is complete. Useful for custom grading workflows.
            Defaults to False.

    Returns:
        bool: The result of the evaluation, indicating whether the submission passed
            the grading criteria.

    Examples:
        Exact match required, order not important:
        >>> g = PythonGrader(submission=[1, 0], answer=[0, 1])
        >>> g.grade_list(match_order=False, tolerance=0.0, return_bool=True)
        True

        Exact numerical match required, order important:
        >>> g = PythonGrader(submission=[1.1, 2.2], answer=[1, 2])
        >>> g.grade_list(match_order=True, tolerance=0.0, return_bool=True)
        False

        Approximate numerical match allowed, order important:
        >>> g = PythonGrader(submission=[1.1, 2.2], answer=[1, 2])
        >>> g.grade_list(match_order=True, tolerance=0.1, return_bool=True)
        True
    """
    if not isinstance(self.submission, list):
        raise TypeError(
            f"grade_list method can only be used with list submissions, not {type(self.submission).__name__}."
        )

    if len(self.submission) != len(self.answer):
        self.update_comment(
            f"Your submission should have `{len(self.answer)}` items, not `{len(self.submission)}`."
        )
        return

    if match_order is False:
        # For dealing with records
        if isinstance(self.submission[0], dict):
            sort_key = list(self.submission[0].keys())[0]
            self.submission.sort(key=lambda x: x[sort_key])
            self.answer.sort(key=lambda x: x[sort_key])
        else:
            self.submission.sort()
            self.answer.sort()

    if not tolerance and self.submission == self.answer:
        self.add_to_score()

    # If tolerance is included and list items match dtypes with each other
    if tolerance:
        for a, b in zip(self.submission, self.answer):
            # Check numbers using relative tolerance
            if (
                isinstance(a, (float, int))
                and isinstance(b, (float, int))
                and isclose(a, b, rel_tol=tolerance)
            ):
                pass
            # Check strings using Levenshtein distance
            elif (
                (isinstance(a, str))
                and (isinstance(b, str))
                and (1 - fuzz.ratio(a, b) / 100 < tolerance)
            ):
                pass
            # Return fail at first mismatch
            else:
                self.update_comment(
                    f"Your list contains the item `{a}`, which doesn't match the expected result."
                )
                if return_bool:
                    return self.passed
                else:
                    return
        self.add_to_score()

    if return_bool:
        return self.passed
    else:
        return

grade_number(tolerance=0, return_bool=False)

Evaluates whether the submitted number matches the expected answer.

This function determines if the submitted number (integer or float) is within a specified tolerance of the expected answer. It is suitable for grading numerical responses where exact matches may not always be feasible due to precision issues in floating-point representations.

Parameters:

Name Type Description Default
submission int or float

The number submitted by the student.

required
answer int or float

The correct answer.

required
tolerance int or float

The maximum allowed difference between the submission and answer. For example, a tolerance of 0.1 allows the values to be within 10% of each other, relative to the larger absolute value of the two. Defaults to 0.0.

0
return_bool bool

If True, returns self.passed after grading is complete. Useful for custom grading workflows. Defaults to False.

False
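
A minimal usage sketch (hypothetical values, not from the library's docstring): with tolerance=0.01 the values below are close enough to pass, while tolerance=0 requires an exact match.

>>> g = PythonGrader(submission=3.14159, answer=3.14)
>>> g.grade_number(tolerance=0.01, return_bool=True)
True
>>> g = PythonGrader(submission=3.14159, answer=3.14)
>>> g.grade_number(tolerance=0, return_bool=True)
False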
Source code in grading_tools/graders.py
def grade_number(
    self, tolerance=0, return_bool=False
) -> Union[bool, None]:
    """Evaluates whether the submitted number matches the expected answer.

    This function determines if the submitted number (integer or float) is within a
    specified tolerance of the expected answer. It is suitable for grading numerical
    responses where exact matches may not always be feasible due to precision issues
    in floating-point representations.

    Args:
        submission (int or float): The number submitted by the student.
        answer (int or float): The correct answer.
        tolerance (int or float, optional): The maximum allowed difference between
            the `submission` and `answer`. For example, a tolerance of 0.1 allows
            the values to be within 10% of each other, relative to the larger absolute
            value of the two. Defaults to 0.0.
        return_bool (bool, optional): If True, returns `self.passed` after grading is
            complete. Useful for custom grading workflows. Defaults to False.
    """
    if not isclose(self.submission, self.answer, rel_tol=tolerance):
        self.update_comment(
            f"Your submission `{self.submission}` doesn't match the expected result."
        )
        return False if return_bool else self.return_feedback()

    self.add_to_score()
    return True if return_bool else self.return_feedback()

grade_string(ignore_case=False, contains=None, return_bool=False)

Evaluates whether the submitted string matches the expected answer.

By default, this function checks for an exact match between the submission and the answer. If the contains parameter is specified, the function will check if the submission includes the specified term or terms.

Parameters:

Name Type Description Default
submission str

The string submitted by the student.

required
answer str

The correct answer.

required
ignore_case bool

If True, the case will be ignored when evaluating the submission. Defaults to False.

False
contains str or list

If None, an exact match is required. If a str, only that substring must be present in the submission. If a list, all specified items must be present. Defaults to None.

None
return_bool bool

If True, returns self.passed once grading is complete. Useful for custom grading workflows. Defaults to False.

False

Examples:

Check for an exact match:

>>> g = PythonGrader(submission="Hello world", answer="Hello world")
>>> g.grade_string(ignore_case=False, return_bool=True)
True

Check for a substring, ignoring case:

>>> g = PythonGrader(submission="Hello world", answer="")
>>> g.grade_string(contains="WORLD", ignore_case=True, return_bool=True)
True

Check for multiple substrings:

>>> g = PythonGrader(submission="Hello world", answer="")
>>> g.grade_string(contains=["Hello", "monkey"], ignore_case=False, return_bool=True)
False
>>> print(g.comment)
Your submission is missing an important term: 'monkey'.
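
One more hedged sketch (same PythonGrader setup as above): an exact match with ignore_case=True, where both strings are lowercased before comparison.

>>> g = PythonGrader(submission="HELLO world", answer="hello WORLD")
>>> g.grade_string(ignore_case=True, return_bool=True)
True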
Source code in grading_tools/graders.py
def grade_string(
    self,
    ignore_case: bool = False,
    contains: Union[str, list, None] = None,
    return_bool: bool = False,
) -> Union[bool, None]:
    """Evaluates whether the submitted string matches the expected answer.

    By default, this function checks for an exact match between the submission and the
    answer. If the `contains` parameter is specified, the function will check if the
    submission includes the specified term or terms.

    Args:
        submission (str): The string submitted by the student.
        answer (str): The correct answer.
        ignore_case (bool, optional): If True, the case will be ignored when evaluating
            the submission. Defaults to False.
        contains (str or list, optional): If None, an exact match is required. If a str,
            only that substring must be present in the submission. If a list, all specified
            items must be present. Defaults to None.
        return_bool (bool, optional): If True, returns `self.passed` once grading is
            complete. Useful for custom grading workflows. Defaults to False.

    Examples:
        Check for an exact match:
        >>> g = PythonGrader(submission="Hello world", answer="Hello world")
        >>> g.grade_string(ignore_case=False, return_bool=True)
        True

        Check for a substring, ignoring case:
        >>> g = PythonGrader(submission="Hello world", answer="")
        >>> g.grade_string(contains="WORLD", ignore_case=True, return_bool=True)
        True

        Check for multiple substrings:
        >>> g = PythonGrader(submission="Hello world", answer="")
        >>> g.grade_string(contains=["Hello", "monkey"], ignore_case=False, return_bool=True)
        False
        >>> print(g.comment)
        Your submission is missing an important term: 'monkey'.
    """
    if not isinstance(self.submission, str):
        raise TypeError(
            f"grade_string method can only be used with str submissions, not {type(self.submission).__name__}."
        )

    if ignore_case:
        self.submission = self.submission.lower()
        self.answer = self.answer.lower()
        if isinstance(contains, str):
            contains = contains.lower()
        elif isinstance(contains, Iterable):
            contains = [str(s).lower() for s in list(contains)]

    if (not contains) and (self.submission != self.answer):
        self.update_comment(
            "Your submission doesn't match the expected result"
        )
        return False if return_bool else self.return_feedback()

    if (isinstance(contains, str)) and contains not in self.submission:
        self.update_comment(
            f"Your submission is missing an important term: `'{contains}'`."
        )
        return False if return_bool else self.return_feedback()

    if isinstance(contains, Iterable):
        for s in list(contains):
            if str(s) not in self.submission:
                self.update_comment(
                    f"Your submission is missing an important term: `'{s}'`."
                )
                return False if return_bool else self.return_feedback()

    self.add_to_score()
    return True if return_bool else self.return_feedback()

SklearnGrader

Bases: BaseGrader

Grader for evaluating objects from scikit-learn (https://scikit-learn.org/stable/).

Source code in grading_tools/graders.py
class SklearnGrader(BaseGrader):
    """Grader for evaluating objects from `sckit-learn <https://scikit-learn.org/stable/>`_."""

    def __init__(self, submission, answer, points=1):
        super().__init__(submission, answer, points)

    def grade_model_params(
        self,
        match_steps=False,
        match_hyperparameters=False,
        prune_hyperparameters=False,
        match_fitted=True,
        tolerance=0.0,
        return_bool=False,
    ):
        """Evaluate model parameters.

        Parameters
        ----------
        match_steps : bool, default=False
            For models that are type `sklearn.pipeline.Pipeline <https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html>`_.
            Whether to consider the steps of a Pipeline when evaluating submission.

        match_hyperparameters : bool, default=False
            Whether to consider the hyperparameter values when evaluating submission.

        prune_hyperparameters : bool, default=False
            When you pull hyperparameters out of a model, it can be a seriously messy
            dictionary, especially if there's some sort of encoder mapping. So when you
            set ``match_hyperparameters`` to ``True``, save yourself a headache and set
            this to ``True``, too.

        match_fitted : bool, default=True
            Whether to consider if the submission has or has not been fitted to
            training data.

        tolerance : int or float, default=0.0
            For numerical hyperparameter values, what is the maximum allowed
            difference between ``submission`` and ``answer``? If ``tolerance=0.0``,
            values must be identical. If ``tolerance=0.1``, values must be within 10%
            of each other (relative to the larger absolute value of the two).

        return_bool : bool, default=False
            Whether to return ``self.passed`` once grading is complete. You'll need this if
            you want to design your own grading workflow beyond the default.

        Examples
        --------
        Let's create two linear models that use different scalers. We'll then fit only
        the answer model to the California housing dataset.

        >>> from sklearn.datasets import fetch_california_housing
        >>> from sklearn.linear_model import LinearRegression
        >>> from sklearn.pipeline import make_pipeline
        >>> from sklearn.preprocessing import MinMaxScaler, StandardScaler
        >>> X, y = fetch_california_housing(return_X_y=True, as_frame=True)
        >>> sub_model = make_pipeline(MinMaxScaler(), LinearRegression())
        >>> ans_model = make_pipeline(StandardScaler(), LinearRegression())
        >>> ans_model.fit(X, y)
        Pipeline(steps=[('standardscaler', StandardScaler()),
                        ('linearregression', LinearRegression())])

        Next, we'll grade the submission.

        >>> from grading_tools.graders import SklearnGrader
        >>> g = SklearnGrader(sub_model, ans_model)
        >>> g.grade_model_params(return_bool=True)
        False
        >>> g.comment
        "Your model hasn't been trained. Fit it to the training data and resubmit it."

        If we train and re-grade the model, it passes.

        >>> sub_model.fit(X, y)
        Pipeline(steps=[('minmaxscaler', MinMaxScaler()),
                        ('linearregression', LinearRegression())])
        >>> g.grade_model_params(return_bool=True)
        True
        >>> g.comment
        'Good work!'

        Finally, if we re-grade the model, requiring that the steps match,
        the submission fails.

        >>> g.grade_model_params(match_steps=True, return_bool=True)
        False
        >>> g.comment
        "Step 1 in your model Pipeline doesn't match the expected result.
        Expected: `StandardScaler`. Received: `MinMaxScaler`."
        """
        # Is the answer model fitted?
        try:
            check_is_fitted(self.answer)
            ans_fitted = True
        except NotFittedError:
            ans_fitted = False

        if match_fitted and not ans_fitted:
            raise NotFittedError(
                "`match_fitted` cannot be set to `True` if answer model is not fitted."
            )

        # Do we need to check if the submission is fitted?
        if match_fitted and ans_fitted:
            try:
                check_is_fitted(self.submission)
            except NotFittedError:
                self.update_comment(
                    "Your model hasn't been trained. Fit it to the training data and resubmit it."
                )
                if return_bool:
                    return False
                else:
                    return

        # Is the model a pipeline (rather than just an estimator)?
        if isinstance(self.answer, Pipeline):
            is_pipeline = True
        else:
            is_pipeline = False

        if match_steps and not is_pipeline:
            raise ValueError(
                f"`match_steps` can only be `True` when answer Pipeline, not {type(self.answer).__name__}."
            )
        # Checking steps in pipeline models
        if match_steps and is_pipeline:
            sub_steps = [s for s in self.submission]
            ans_steps = [s for s in self.answer]

            # Wrong number of steps
            if len(sub_steps) != len(ans_steps):
                self.update_comment(
                    f"Your model Pipeline should have {len(ans_steps)} steps,",
                    f"not {len(sub_steps)}.",
                )
                if return_bool:
                    return False
                else:
                    return None

            # Wrong type of steps
            for idx, (sub, ans) in enumerate(
                zip(sub_steps, ans_steps), start=1
            ):
                if not isinstance(sub, type(ans)):
                    self.update_comment(
                        f"Step {idx} in your model Pipeline doesn't match the expected",
                        f"result. Expected: `{type(ans).__name__}`. Received:",
                        f"`{type(sub).__name__}`.",
                    )
                    if return_bool:
                        return False
                    else:
                        return None

        if match_hyperparameters:
            sub_params = self.submission.get_params()
            ans_params = self.answer.get_params()

            if prune_hyperparameters:
                sub_params = {
                    k: v
                    for k, v in sub_params.items()
                    if isinstance(v, (str, bool, float, int))
                }
                ans_params = {
                    k: v
                    for k, v in ans_params.items()
                    if isinstance(v, (str, bool, float, int))
                }

            g = PythonGrader(submission=sub_params, answer=ans_params)
            if not g.grade_dict(
                tolerance=tolerance,
                return_bool=True,
            ):
                self.update_comment(g.comment.replace("key", "hyperparameter"))
                if return_bool:
                    return False
                else:
                    return None

        self.add_to_score()
        if return_bool:
            return True
        else:
            return None

    def grade_model_performance(
        self,
        X_test: pd.DataFrame,
        y_test: pd.Series,
        metric: str,
        round_to=3,
        tolerance=0.0,
        return_bool=False,
    ):
        """Evaluate model's performance using the model itself.

        Parameters
        ----------
        X_test: pd.DataFrame
            The test data feature matrix.

        y_test: pd.Series
            The test data target vector.

        metric: str {"accuracy_score", "precision_score", "recall_score", "f1_score", "r2_score", "mean_absolute_error", "mean_squared_error"}
            Metric to use when evaluating model performance.

        round_to: int, default=3
            Number of decimal places to round metric to before comparing
            submission and answer model performance.

        tolerance : int or float, default=0.0
            What is the maximum allowed difference between submission and answer
            model performance. If ``tolerance=0.0``, submission and answer metrics
            must be identical. If ``tolerance=0.1``, values must be within 10%
            of each other (relative to the larger absolute value of the two).

        return_bool : bool, default=False
            Whether to return ``self.passed`` once grading is complete. You'll need this if
            you want to design your own grading workflow beyond the default.

        Examples
        --------
        Let's start by creating a dataset, and splitting it into train and test.

        >>> from sklearn.datasets import fetch_california_housing
        >>> from sklearn.dummy import DummyRegressor
        >>> from sklearn.model_selection import train_test_split
        >>> X, y = fetch_california_housing(return_X_y=True, as_frame=True)
        >>> X_train, X_test, y_train, y_test = train_test_split(
        ...     X, y, test_size=0.2, random_state=42
        ... )

        Next, we create an answer and submission model, and put them into a grader.

        >>> ans = DummyRegressor(strategy="constant", constant=2).fit(X_train, y_train)
        >>> sub = DummyRegressor(strategy="constant", constant=1).fit(X_train, y_train)
        >>> g = SklearnGrader(sub, ans)

        Finally, we grade model performance looking at mean absolute error (MAE).

        >>> g.grade_model_performance(
        ...     X_test=X_test,
        ...     y_test=y_test,
        ...     metric="mean_absolute_error",
        ...     return_bool=True,
        ... )
        False
        >>> print(g.comment)
        Your model's mean absolute error is `1.141`. You can do better. Try to
        beat `0.893`.

        If we allow for some tolerance, the model will pass.

        >>> g.grade_model_performance(
        ...     X_test=X_test,
        ...     y_test=y_test,
        ...     metric="mean_absolute_error",
        ...     tolerance=0.5,
        ...     return_bool=True,
        ... )
        True
        >>> print(g.comment)
        Your model's mean absolute error is `1.141`. Very impressive.

        """
        if not isinstance(X_test, pd.DataFrame):
            raise AttributeError(
                f"X_test must be a DataFrame, not {type(X_test)}."
            )

        if not isinstance(y_test, pd.Series):
            raise AttributeError(
                f"y_test must be a Series, not {type(y_test)}."
            )
        # Whether submission model outperforms answer
        sub_beats_ans = False

        # Grouping metrics into scores (higher is better) or errors (lower is better)
        score_metrics = [
            "accuracy_score",
            "precision_score",
            "recall_score",
            "f1_score",
            "r2_score",
        ]
        error_metrics = ["mean_absolute_error", "mean_squared_error"]

        if metric not in score_metrics + error_metrics:
            raise ValueError(
                f"'{metric}' is not a valid argument for `metric`. "
                f"Your options are {score_metrics + error_metrics}."
            )

        try:
            check_is_fitted(self.submission)
        except NotFittedError:
            self.update_comment(
                "In order to evaluate your model, it needs to be fitted to the "
                "training data first."
            )
            if return_bool:
                return False
            else:
                return None

        # Check that you use regression metrics for regression models
        if (metric in error_metrics + ["r2_score"]) and not is_regressor(
            self.submission
        ):
            raise ValueError(
                f"The metric {metric} can only be used to evaluate regression models."
            )

        # Check that you use classification metrics for classification models
        if metric in score_metrics[:-1] and not is_classifier(self.submission):
            raise ValueError(
                f"The metric {metric} can only be used to evaluate classification "
                "models."
            )

        # Generate submission and answer model predictions
        y_pred_sub = self.submission.predict(X_test)
        y_pred_ans = self.answer.predict(X_test)

        # Calculate training metrics for submission and answer
        if metric == "mean_absolute_error":
            sub_score = round(
                mean_absolute_error(y_test, y_pred_sub), round_to
            )
            ans_score = round(
                mean_absolute_error(y_test, y_pred_ans), round_to
            )

        if metric == "mean_squared_error":
            sub_score = round(mean_squared_error(y_test, y_pred_sub), round_to)
            ans_score = round(mean_squared_error(y_test, y_pred_ans), round_to)

        if metric == "r2_score":
            sub_score = round(r2_score(y_test, y_pred_sub), round_to)
            ans_score = round(r2_score(y_test, y_pred_ans), round_to)

        if metric == "accuracy_score":
            sub_score = round(accuracy_score(y_test, y_pred_sub), round_to)
            ans_score = round(accuracy_score(y_test, y_pred_ans), round_to)

        if metric == "precision_score":
            sub_score = round(precision_score(y_test, y_pred_sub), round_to)
            ans_score = round(precision_score(y_test, y_pred_ans), round_to)

        if metric == "recall_score":
            sub_score = round(recall_score(y_test, y_pred_sub), round_to)
            ans_score = round(recall_score(y_test, y_pred_ans), round_to)

        if metric == "f1_score":
            sub_score = round(f1_score(y_test, y_pred_sub), round_to)
            ans_score = round(f1_score(y_test, y_pred_ans), round_to)

        # Determine if submission beats answer
        if metric in error_metrics:
            # With error, smaller is better
            sub_beats_ans = (sub_score < ans_score) or isclose(
                sub_score, ans_score, rel_tol=tolerance
            )

        if metric in score_metrics:
            # With score, bigger is better
            sub_beats_ans = (sub_score > ans_score) or isclose(
                sub_score, ans_score, rel_tol=tolerance
            )

        # Update grader score and comment
        metric_verbose = metric.replace("_", " ")
        if sub_beats_ans:
            self.add_to_score()
            self.comment = (
                f"Your model's {metric_verbose} is `{sub_score}`. "
                + self.comment
            )

        else:
            self.update_comment(
                f"Your model's {metric_verbose} is `{sub_score}`. You can do better. "
                f"Try to beat `{ans_score}`."
            )

        if return_bool:
            return self.passed
        else:
            return None

    def grade_model_predictions(
        self,
        metric: str,
        threshold: float,
        round_to=3,
        tolerance=0.0,
        return_bool=False,
    ):
        """Evaluate model's performance using the predicitons itself.

        Submission and answer must be ``pd.Series``.

        Parameters
        ----------
        metric: str {"accuracy_score", "precision_score", "recall_score", "f1_score", "r2_score", "mean_absolute_error", "mean_squared_error"}
            Metric to use when evaluating predictions.

        threshold: float
            Score that predictions must beat.

        round_to: int, default=3
            Number of decimal places to round metric to before comparing
            predictions score to threshold.

        tolerance : int or float, default=0.0
            What is the maximum allowed difference between prediction score
            and threshold. If ``tolerance=0.0``, predictions must beat threshold.
            If ``tolerance=0.1``, values must be within 10% of each other (relative to
            the larger absolute value of the two).

        return_bool : bool, default=False
            Whether to return ``self.passed`` once grading is complete. You'll need this if
            you want to design your own grading workflow beyond the default.

        Examples
        --------
        Let's start by creating a dataset.

        >>> from sklearn.datasets import fetch_california_housing
        >>> from sklearn.dummy import DummyRegressor
        >>> from sklearn.model_selection import train_test_split
        >>> import pandas as pd
        >>> from grading_tools.graders import SklearnGrader
        >>> X, y = fetch_california_housing(return_X_y=True, as_frame=True)
        >>> X_train, X_test, y_train, y_test = train_test_split(
        ...     X, y, test_size=0.2, random_state=42
        ... )

        Next, we'll train a model and generate a Series of predictions

        >>> model = DummyRegressor(strategy="constant", constant=2).fit(X_train, y_train)
        >>> y_pred = pd.Series(model.predict(X_test))

        Finally, we'll put the predictions and true values in a grader.

        >>> g = SklearnGrader(y_pred, y_test)

        The submission will pass as long as the metric score beats the threshold.

        >>> g.grade_model_predictions(
        ...     metric="mean_absolute_error",
        ...     threshold=0.9,
        ...     return_bool=True,
        ... )
        True
        >>> print(g.comment)
        Your model's mean absolute error is `0.893`. Boom! You got it.

        """
        # Whether submission model outperforms answer
        sub_beats_threshold = False

        # Grouping metrics into scores (higher is better) or errors (lower is better)
        score_metrics = [
            "accuracy_score",
            "precision_score",
            "recall_score",
            "f1_score",
            "r2_score",
        ]
        error_metrics = ["mean_absolute_error", "mean_squared_error"]

        if not isinstance(self.submission, pd.Series):
            raise TypeError(
                f"grade_model_predictions can only be used if submission is Series, "
                f"not {type(self.submission).__name__}."
            )

        if metric not in score_metrics + error_metrics:
            raise ValueError(
                f"'{metric}' is not a valid argument for `metric`. "
                f"Your options are {score_metrics + error_metrics}."
            )

        # Check whether the submission has the wrong number of predictions
        if len(self.submission) != len(self.answer):
            self.update_comment(
                f"Your submission should have length {len(self.answer)},"
                f"not {len(self.submission)}."
            )
            if return_bool:
                return False
            else:
                return None

        # Calculate training metrics for submission and answer
        if metric == "mean_absolute_error":
            sub_score = round(
                mean_absolute_error(self.answer, self.submission), round_to
            )

        if metric == "mean_squared_error":
            sub_score = round(
                mean_squared_error(self.answer, self.submission), round_to
            )

        if metric == "r2_score":
            sub_score = round(r2_score(self.answer, self.submission), round_to)

        if metric == "accuracy_score":
            sub_score = round(
                accuracy_score(self.answer, self.submission), round_to
            )

        if metric == "precision_score":
            sub_score = round(
                precision_score(self.answer, self.submission), round_to
            )

        if metric == "recall_score":
            sub_score = round(
                recall_score(self.answer, self.submission), round_to
            )

        if metric == "f1_score":
            sub_score = round(f1_score(self.answer, self.submission), round_to)

        # Determine if submission beats answer
        if metric in error_metrics:
            # With error, smaller is better
            sub_beats_threshold = (sub_score < threshold) or isclose(
                sub_score, threshold, rel_tol=tolerance
            )

        if metric in score_metrics:
            # With score, bigger is better
            sub_beats_threshold = (sub_score > threshold) or isclose(
                sub_score, threshold, rel_tol=tolerance
            )

        # Update grader score and comment
        metric_verbose = metric.replace("_", " ")
        if sub_beats_threshold:
            self.add_to_score()
            self.comment = (
                f"Your model's {metric_verbose} is `{sub_score}`. "
                + self.comment
            )

        else:
            self.update_comment(
                f"Your model's {metric_verbose} is `{sub_score}`. You can do better. "
                f"Try to beat `{threshold}`."
            )

        if return_bool:
            return self.passed
        else:
            return None

grade_model_params(match_steps=False, match_hyperparameters=False, prune_hyperparameters=False, match_fitted=True, tolerance=0.0, return_bool=False)

Evaluate model parameters.

Parameters:

    match_steps (bool, default False): For models of type sklearn.pipeline.Pipeline (https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html). Whether to consider the steps of a Pipeline when evaluating the submission.
    match_hyperparameters (bool, default False): Whether to consider the hyperparameter values when evaluating the submission.
    prune_hyperparameters (bool, default False): When you pull hyperparameters out of a model, it can be a seriously messy dictionary, especially if there's some sort of encoder mapping. So when you set match_hyperparameters to True, save yourself a headache and set this to True, too (see the sketch after the examples below).
    match_fitted (bool, default True): Whether to consider if the submission has or has not been fitted to training data.
    tolerance (int or float, default 0.0): For numerical hyperparameter values, the maximum allowed difference between submission and answer. If tolerance=0.0, values must be identical. If tolerance=0.1, values must be within 10% of each other (relative to the larger absolute value of the two).
    return_bool (bool, default False): Whether to return self.passed once grading is complete. You'll need this if you want to design your own grading workflow beyond the default.

Examples

Let's create two linear models that use different scalers. We'll then fit only the answer model to the California housing dataset.

>>> from sklearn.datasets import fetch_california_housing
>>> from sklearn.linear_model import LinearRegression
>>> from sklearn.pipeline import make_pipeline
>>> from sklearn.preprocessing import MinMaxScaler, StandardScaler
>>> X, y = fetch_california_housing(return_X_y=True, as_frame=True)
>>> sub_model = make_pipeline(MinMaxScaler(), LinearRegression())
>>> ans_model = make_pipeline(StandardScaler(), LinearRegression())
>>> ans_model.fit(X, y)
Pipeline(steps=[('standardscaler', StandardScaler()),
                ('linearregression', LinearRegression())])

Next, we'll grade the submission.

>>> from grading_tools.graders import SklearnGrader
>>> g = SklearnGrader(sub_model, ans_model)
>>> g.grade_model_params(return_bool=True)
False
>>> g.comment
"Your model hasn't been trained. Fit it to the training data and resubmit it."

If we train and re-grade the model, it passes.

>>> sub_model.fit(X, y)
Pipeline(steps=[('minmaxscaler', MinMaxScaler()),
                ('linearregression', LinearRegression())])
>>> g.grade_model_params(return_bool=True)
True
>>> g.comment
'Good work!'

Finally, if we re-grade the model, requiring that the steps match, the submission fails.

>>> g.grade_model_params(match_steps=True, return_bool=True)
False
>>> g.comment
"Step 1 in your model Pipeline doesn't match the expected result.
Expected: `StandardScaler`. Received: `MinMaxScaler`."
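
To complement the prune_hyperparameters note above, here is a hedged sketch (not taken from the library's own examples) that compares hyperparameters on two plain LinearRegression estimators. It assumes that PythonGrader.grade_dict, which grade_model_params delegates to, reports a mismatch for the differing fit_intercept value; under that assumption the expected result is False. Pruning drops non-scalar hyperparameters such as n_jobs=None before the comparison.

>>> from sklearn.linear_model import LinearRegression
>>> from grading_tools.graders import SklearnGrader
>>> sub = LinearRegression(fit_intercept=False)
>>> ans = LinearRegression()
>>> g = SklearnGrader(sub, ans)
>>> g.grade_model_params(
...     match_fitted=False,
...     match_hyperparameters=True,
...     prune_hyperparameters=True,
...     return_bool=True,
... )
False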

Source code in grading_tools/graders.py
def grade_model_params(
    self,
    match_steps=False,
    match_hyperparameters=False,
    prune_hyperparameters=False,
    match_fitted=True,
    tolerance=0.0,
    return_bool=False,
):
    """Evaluate model parameters.

    Parameters
    ----------
    match_steps : bool, default=False
        For models that are type `sklearn.pipeline.Pipeline <https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html>`_.
        Whether to consider the steps of a Pipeline when evaluating submission.

    match_hyperparameters : bool, default=False
        Whether to consider the hyperparameter values when evaluating submission.

    prune_hyperparameters : bool, default=False
        When you pull hyperparameters out of a model, it can be a seriously messy
        dictionary, especially if there's some sort of encoder mapping. So when you
        set ``match_hyperparameters`` to ``True``, save yourself a headache and set
        this to ``True``, too.

    match_fitted : bool, default=True
        Whether to consider if the submission has or has not been fitted to
        training data.

    tolerance : int or float, default=0.0
        For numerical hyperparameter values, what is the maximum allowed
        difference between ``submission`` and ``answer``? If ``tolerance=0.0``,
        values must be identical. If ``tolerance=0.1``, values must be within 10%
        of each other (relative to the larger absolute value of the two).

    return_bool : bool, default=False
        Whether to return ``self.passed`` once grading is complete. You'll need this if
        you want to design your own grading workflow beyond the default.

    Examples
    --------
    Let's create two linear models that use different scalers. We'll then fit only
    the answer model to the California housing dataset.

    >>> from sklearn.datasets import fetch_california_housing
    >>> from sklearn.linear_model import LinearRegression
    >>> from sklearn.pipeline import make_pipeline
    >>> from sklearn.preprocessing import MinMaxScaler, StandardScaler
    >>> X, y = fetch_california_housing(return_X_y=True, as_frame=True)
    >>> sub_model = make_pipeline(MinMaxScaler(), LinearRegression())
    >>> ans_model = make_pipeline(StandardScaler(), LinearRegression())
    >>> ans_model.fit(X, y)
    Pipeline(steps=[('standardscaler', StandardScaler()),
                    ('linearregression', LinearRegression())])

    Next, we'll grade the submission.

    >>> from grading_tools.graders import SklearnGrader
    >>> g = SklearnGrader(sub_model, ans_model)
    >>> g.grade_model_params(return_bool=True)
    False
    >>> g.comment
    "Your model hasn't been trained. Fit it to the training data and resubmit it."

    If we train and re-grade the model, it passes.

    >>> sub_model.fit(X, y)
    Pipeline(steps=[('minmaxscaler', MinMaxScaler()),
                    ('linearregression', LinearRegression())])
    >>> g.grade_model_params(return_bool=True)
    True
    >>> g.comment
    'Good work!'

    Finally, if we re-grade the model, requiring that the steps match,
    the submission fails.

    >>> g.grade_model_params(match_steps=True, return_bool=True)
    False
    >>> g.comment
    "Step 1 in your model Pipeline doesn't match the expected result.
    Expected: `StandardScaler`. Received: `MinMaxScaler`."
    """
    # Is the answer model fitted?
    try:
        check_is_fitted(self.answer)
        ans_fitted = True
    except NotFittedError:
        ans_fitted = False

    if match_fitted and not ans_fitted:
        raise NotFittedError(
            "`match_fitted` cannot be set to `True` if answer model is not fitted."
        )

    # Do we need to check if the submission is fitted?
    if match_fitted and ans_fitted:
        try:
            check_is_fitted(self.submission)
        except NotFittedError:
            self.update_comment(
                "Your model hasn't been trained. Fit it to the training data and resubmit it."
            )
            if return_bool:
                return False
            else:
                return

    # Is the model a pipeline (rather than just an estimator)?
    if isinstance(self.answer, Pipeline):
        is_pipeline = True
    else:
        is_pipeline = False

    if match_steps and not is_pipeline:
        raise ValueError(
            f"`match_steps` can only be `True` when answer Pipeline, not {type(self.answer).__name__}."
        )
    # Checking steps in pipeline models
    if match_steps and is_pipeline:
        sub_steps = [s for s in self.submission]
        ans_steps = [s for s in self.answer]

        # Wrong number of steps
        if len(sub_steps) != len(ans_steps):
            self.update_comment(
                f"Your model Pipeline should have {len(ans_steps)} steps,",
                f"not {len(sub_steps)}.",
            )
            if return_bool:
                return False
            else:
                return None

        # Wrong type of steps
        for idx, (sub, ans) in enumerate(
            zip(sub_steps, ans_steps), start=1
        ):
            if not isinstance(sub, type(ans)):
                self.update_comment(
                    f"Step {idx} in your model Pipeline doesn't match the expected",
                    f"result. Expected: `{type(ans).__name__}`. Received:",
                    f"`{type(sub).__name__}`.",
                )
                if return_bool:
                    return False
                else:
                    return None

    if match_hyperparameters:
        sub_params = self.submission.get_params()
        ans_params = self.answer.get_params()

        if prune_hyperparameters:
            sub_params = {
                k: v
                for k, v in sub_params.items()
                if isinstance(v, (str, bool, float, int))
            }
            ans_params = {
                k: v
                for k, v in ans_params.items()
                if isinstance(v, (str, bool, float, int))
            }

        g = PythonGrader(submission=sub_params, answer=ans_params)
        if not g.grade_dict(
            tolerance=tolerance,
            return_bool=True,
        ):
            self.update_comment(g.comment.replace("key", "hyperparameter"))
            if return_bool:
                return False
            else:
                return None

    self.add_to_score()
    if return_bool:
        return True
    else:
        return None

grade_model_performance(X_test, y_test, metric, round_to=3, tolerance=0.0, return_bool=False)

Evaluate model's performance using the model itself.

Parameters:

    X_test (pd.DataFrame, required): The test data feature matrix.
    y_test (pd.Series, required): The test data target vector.
    metric (str, required): One of "accuracy_score", "precision_score", "recall_score", "f1_score", "r2_score", "mean_absolute_error", "mean_squared_error". Metric to use when evaluating model performance (a classification-metric sketch follows the examples below).
    round_to (int, default 3): Number of decimal places to round the metric to before comparing submission and answer model performance.
    tolerance (int or float, default 0.0): The maximum allowed difference between submission and answer model performance. If tolerance=0.0, submission and answer metrics must be identical. If tolerance=0.1, values must be within 10% of each other (relative to the larger absolute value of the two).
    return_bool (bool, default False): Whether to return self.passed once grading is complete. You'll need this if you want to design your own grading workflow beyond the default.

Examples

Let's start by creating a dataset, and splitting it into train and test.

>>> from sklearn.datasets import fetch_california_housing
>>> from sklearn.dummy import DummyRegressor
>>> from sklearn.model_selection import train_test_split
>>> X, y = fetch_california_housing(return_X_y=True, as_frame=True)
>>> X_train, X_test, y_train, y_test = train_test_split(
...     X, y, test_size=0.2, random_state=42
... )

Next, we create an answer and submission model, and put them into a grader.

>>> ans = DummyRegressor(strategy="constant", constant=2).fit(X_train, y_train)
>>> sub = DummyRegressor(strategy="constant", constant=1).fit(X_train, y_train)
>>> g = SklearnGrader(sub, ans)

Finally, we grade model performance looking at mean absolute error (MAE).

>>> g.grade_model_performance(
...     X_test=X_test,
...     y_test=y_test,
...     metric="mean_absolute_error",
...     return_bool=True,
... )
False
>>> print(g.comment)
Your model's mean absolute error is `1.141`. You can do better. Try to
beat `0.893`.

If we allow for some tolerance, the model will pass.

>>> g.grade_model_performance(
...     X_test=X_test,
...     y_test=y_test,
...     metric="mean_absolute_error",
...     tolerance=0.5,
...     return_bool=True,
... )
True
>>> print(g.comment)
Your model's mean absolute error is `1.141`. Very impressive.
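
grade_model_performance also accepts the classification metrics listed above. Below is a hedged sketch, illustrative only, that grades a classifier with accuracy_score; the dataset and dummy models are assumptions, so no output is asserted. With return_bool left at its default the method returns None, and g.passed and g.comment hold the result afterwards.

>>> from sklearn.datasets import load_iris
>>> from sklearn.dummy import DummyClassifier
>>> from sklearn.model_selection import train_test_split
>>> from grading_tools.graders import SklearnGrader
>>> X, y = load_iris(return_X_y=True, as_frame=True)
>>> X_train, X_test, y_train, y_test = train_test_split(
...     X, y, test_size=0.2, random_state=42
... )
>>> ans = DummyClassifier(strategy="most_frequent").fit(X_train, y_train)
>>> sub = DummyClassifier(strategy="uniform", random_state=0).fit(X_train, y_train)
>>> g = SklearnGrader(sub, ans)
>>> g.grade_model_performance(
...     X_test=X_test,
...     y_test=y_test,
...     metric="accuracy_score",
... )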

Source code in grading_tools/graders.py
def grade_model_performance(
    self,
    X_test: pd.DataFrame,
    y_test: pd.Series,
    metric: str,
    round_to=3,
    tolerance=0.0,
    return_bool=False,
):
    """Evaluate model's performance using the model itself.

    Parameters
    ----------
    X_test: pd.DataFrame
        The test data feature matrix.

    y_test: pd.Series
        The test data target vector.

    metric: str {"accuracy_score", "precision_score", "recall_score", "f1_score", "r2_score", "mean_absolute_error", "mean_squared_error"}
        Metric to use when evaluating model performance.

    round_to: int, default=3
        Number of decimal places to round metric to before comparing
        submission and answer model performance.

    tolerance : int or float, default=0.0
        What is the maximum allowed difference between submission and answer
        model performance. If ``tolerance=0.0``, submission and answer metrics
        must be identical. If ``tolerance=0.1``, values must be within 10%
        of each other (relative to the larger absolute value of the two).

    return_bool : bool, default=False
        Whether to return ``self.passed`` once grading is complete. You'll need this if
        you want to design your own grading workflow beyond the default.

    Examples
    --------
    Let's start by creating a dataset, and splitting it into train and test.

    >>> from sklearn.datasets import fetch_california_housing
    >>> from sklearn.dummy import DummyRegressor
    >>> from sklearn.model_selection import train_test_split
    >>> X, y = fetch_california_housing(return_X_y=True, as_frame=True)
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, test_size=0.2, random_state=42
    ... )

    Next, we create an answer and submission model, and put them into a grader.

    >>> ans = DummyRegressor(strategy="constant", constant=2).fit(X_train, y_train)
    >>> sub = DummyRegressor(strategy="constant", constant=1).fit(X_train, y_train)
    >>> g = SklearnGrader(sub, ans)

    Finally, we grade model performance looking at mean absolute error (MAE).

    >>> g.grade_model_performance(
    ...     X_test=X_test,
    ...     y_test=y_test,
    ...     metric="mean_absolute_error",
    ...     return_bool=True,
    ... )
    False
    >>> print(g.comment)
    Your model's mean absolute error is `1.141`. You can do better. Try to
    beat `0.893`.

    If we allow for some tolerance, the model will pass.

    >>> g.grade_model_performance(
    ...     X_test=X_test,
    ...     y_test=y_test,
    ...     metric="mean_absolute_error",
    ...     tolerance=0.5,
    ...     return_bool=True,
    ... )
    True
    >>> print(g.comment)
    Your model's mean absolute error is `1.141`. Very impressive.

    """
    if not isinstance(X_test, pd.DataFrame):
        raise AttributeError(
            f"X_test must be a DataFrame, not {type(X_test)}."
        )

    if not isinstance(y_test, pd.Series):
        raise AttributeError(
            f"y_test must be a Series, not {type(y_test)}."
        )
    # Whether submission model outperforms answer
    sub_beats_ans = False

    # Grouping metrics into scores (higher is better) or errors (lower is better)
    score_metrics = [
        "accuracy_score",
        "precision_score",
        "recall_score",
        "f1_score",
        "r2_score",
    ]
    error_metrics = ["mean_absolute_error", "mean_squared_error"]

    if metric not in score_metrics + error_metrics:
        raise ValueError(
            f"'{metric}' is not a valid argument for `metric`. "
            f"Your options are {score_metrics + error_metrics}."
        )

    try:
        check_is_fitted(self.submission)
    except NotFittedError:
        self.update_comment(
            "In order to evaluate your model, it needs to be fitted to the "
            "training data first."
        )
        if return_bool:
            return False
        else:
            return None

    # Check that you use regression metrics for regression models
    if (metric in error_metrics + ["r2_score"]) and not is_regressor(
        self.submission
    ):
        raise ValueError(
            f"The metric {metric} can only be used to evaluate regression models."
        )

    # Check that you use classification metrics for classification models
    if metric in score_metrics[:-1] and not is_classifier(self.submission):
        raise ValueError(
            f"The metric {metric} can only be used to evaluate classification "
            "models."
        )

    # Generate submission and answer model predictions
    y_pred_sub = self.submission.predict(X_test)
    y_pred_ans = self.answer.predict(X_test)

    # Calculate training metrics for submission and answer
    if metric == "mean_absolute_error":
        sub_score = round(
            mean_absolute_error(y_test, y_pred_sub), round_to
        )
        ans_score = round(
            mean_absolute_error(y_test, y_pred_ans), round_to
        )

    if metric == "mean_squared_error":
        sub_score = round(mean_squared_error(y_test, y_pred_sub), round_to)
        ans_score = round(mean_squared_error(y_test, y_pred_ans), round_to)

    if metric == "r2_score":
        sub_score = round(r2_score(y_test, y_pred_sub), round_to)
        ans_score = round(r2_score(y_test, y_pred_ans), round_to)

    if metric == "accuracy_score":
        sub_score = round(accuracy_score(y_test, y_pred_sub), round_to)
        ans_score = round(accuracy_score(y_test, y_pred_ans), round_to)

    if metric == "precision_score":
        sub_score = round(precision_score(y_test, y_pred_sub), round_to)
        ans_score = round(precision_score(y_test, y_pred_ans), round_to)

    if metric == "recall_score":
        sub_score = round(recall_score(y_test, y_pred_sub), round_to)
        ans_score = round(recall_score(y_test, y_pred_ans), round_to)

    if metric == "f1_score":
        sub_score = round(f1_score(y_test, y_pred_sub), round_to)
        ans_score = round(f1_score(y_test, y_pred_ans), round_to)

    # Determine if submission beats answer
    if metric in error_metrics:
        # With error, smaller is better
        sub_beats_ans = (sub_score < ans_score) or isclose(
            sub_score, ans_score, rel_tol=tolerance
        )

    if metric in score_metrics:
        # With score, bigger is better
        sub_beats_ans = (sub_score > ans_score) or isclose(
            sub_score, ans_score, rel_tol=tolerance
        )

    # Update grader score and comment
    metric_verbose = metric.replace("_", " ")
    if sub_beats_ans:
        self.add_to_score()
        self.comment = (
            f"Your model's {metric_verbose} is `{sub_score}`. "
            + self.comment
        )

    else:
        self.update_comment(
            f"Your model's {metric_verbose} is `{sub_score}`. You can do better. "
            f"Try to beat `{ans_score}`."
        )

    if return_bool:
        return self.passed
    else:
        return None

grade_model_predictions(metric, threshold, round_to=3, tolerance=0.0, return_bool=False)

Evaluate the model's performance using the predictions themselves.

Submission and answer must be pd.Series.

Parameters:

    metric (str, required): One of "accuracy_score", "precision_score", "recall_score", "f1_score", "r2_score", "mean_absolute_error", "mean_squared_error". Metric to use when evaluating the predictions.
    threshold (float, required): Score that the predictions must beat.
    round_to (int, default 3): Number of decimal places to round the metric to before comparing the prediction score to the threshold.
    tolerance (int or float, default 0.0): The maximum allowed difference between the prediction score and the threshold. If tolerance=0.0, predictions must beat the threshold. If tolerance=0.1, values must be within 10% of each other (relative to the larger absolute value of the two).
    return_bool (bool, default False): Whether to return self.passed once grading is complete. You'll need this if you want to design your own grading workflow beyond the default.

Examples

Let's start by creating a dataset.

>>> from sklearn.datasets import fetch_california_housing
>>> from sklearn.dummy import DummyRegressor
>>> from sklearn.model_selection import train_test_split
>>> import pandas as pd
>>> from grading_tools.graders import SklearnGrader
>>> X, y = fetch_california_housing(return_X_y=True, as_frame=True)
>>> X_train, X_test, y_train, y_test = train_test_split(
...     X, y, test_size=0.2, random_state=42
... )

Next, we'll train a model and generate a Series of predictions.

>>> model = DummyRegressor(strategy="constant", constant=2).fit(X_train, y_train)
>>> y_pred = pd.Series(model.predict(X_test))

Finally, we'll put the predictions and true values in a grader.

>>> g = SklearnGrader(y_pred, y_test)

The submission will pass as long as the metric score beats the threshold.

>>> g.grade_model_predictions(
...     metric="mean_absolute_error",
...     threshold=0.9,
...     return_bool=True,
... )
True
>>> print(g.comment)
Your model's mean absolute error is `0.893`. Boom! You got it.
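
A second hedged sketch (not from the library docs) using a score-type metric, where bigger is better. The hand-made Series below are assumptions for illustration; accuracy is 5/6 ≈ 0.833, which beats the 0.8 threshold, so the expected return value is True (assuming, as in the grade_model_performance examples, that a passing grade sets self.passed).

>>> import pandas as pd
>>> from grading_tools.graders import SklearnGrader
>>> y_true = pd.Series([0, 1, 1, 0, 1, 0])
>>> y_pred = pd.Series([0, 1, 1, 0, 0, 0])
>>> g = SklearnGrader(y_pred, y_true)
>>> g.grade_model_predictions(
...     metric="accuracy_score",
...     threshold=0.8,
...     return_bool=True,
... )
True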

Source code in grading_tools/graders.py
def grade_model_predictions(
    self,
    metric: str,
    threshold: float,
    round_to=3,
    tolerance=0.0,
    return_bool=False,
):
    """Evaluate model's performance using the predicitons itself.

    Submission and answer must be ``pd.Series``.

    Parameters
    ----------
    metric: str {"accuracy_score", "precision_score", "recall_score", "f1_score", "r2_score", "mean_absolute_error", "mean_squared_error"}
        Metric to use when evaluating predictions.

    threshold: float
        Score that predictions must beat.

    round_to: int, default=3
        Number of decimal places to round metric to before comparing
        predictions score to threshold.

    tolerance : int or float, default=0.0
        What is the maximum allowed difference between prediction score
        and threshold. If ``tolerance=0.0``, predictions must beat threshold.
        If ``tolerance=0.1``, values must be within 10% of each other (relative to
        the larger absolute value of the two).

    return_bool : bool, default=False
        Whether to return ``self.passed`` once grading is complete. You'll need this if
        you want to design your own grading workflow beyond the default.

    Examples
    --------
    Let's start by creating a dataset.

    >>> from sklearn.datasets import fetch_california_housing
    >>> from sklearn.dummy import DummyRegressor
    >>> from sklearn.model_selection import train_test_split
    >>> import pandas as pd
    >>> from grading_tools.graders import SklearnGrader
    >>> X, y = fetch_california_housing(return_X_y=True, as_frame=True)
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, test_size=0.2, random_state=42
    ... )

    Next, we'll train a model and generate a Series of predictions

    >>> model = DummyRegressor(strategy="constant", constant=2).fit(X_train, y_train)
    >>> y_pred = pd.Series(model.predict(X_test))

    Finally, we'll put the predictions and true values in a grader.

    >>> g = SklearnGrader(y_pred, y_test)

    The submission will pass as long as the metric score beats the threshold.

    >>> g.grade_model_predictions(
    ...     metric="mean_absolute_error",
    ...     threshold=0.9,
    ...     return_bool=True,
    ... )
    True
    >>> print(g.comment)
    Your model's mean absolute error is `0.893`. Boom! You got it.

    """
    # Whether submission model outperforms answer
    sub_beats_threshold = False

    # Grouping metrics into scores (higher is better) or errors (lower is better)
    score_metrics = [
        "accuracy_score",
        "precision_score",
        "recall_score",
        "f1_score",
        "r2_score",
    ]
    error_metrics = ["mean_absolute_error", "mean_squared_error"]

    if not isinstance(self.submission, pd.Series):
        raise TypeError(
            f"grade_model_predictions can only be used if submission is Series, "
            f"not {type(self.submission).__name__}."
        )

    if metric not in score_metrics + error_metrics:
        raise ValueError(
            f"'{metric}' is not a valid argument for `metric`. "
            f"Your options are {score_metrics + error_metrics}."
        )

    # Check whether the submission has the wrong number of predictions
    if len(self.submission) != len(self.answer):
        self.update_comment(
            f"Your submission should have length {len(self.answer)},"
            f"not {len(self.submission)}."
        )
        if return_bool:
            return False
        else:
            return None

    # Calculate training metrics for submission and answer
    if metric == "mean_absolute_error":
        sub_score = round(
            mean_absolute_error(self.answer, self.submission), round_to
        )

    if metric == "mean_squared_error":
        sub_score = round(
            mean_squared_error(self.answer, self.submission), round_to
        )

    if metric == "r2_score":
        sub_score = round(r2_score(self.answer, self.submission), round_to)

    if metric == "accuracy_score":
        sub_score = round(
            accuracy_score(self.answer, self.submission), round_to
        )

    if metric == "precision_score":
        sub_score = round(
            precision_score(self.answer, self.submission), round_to
        )

    if metric == "recall_score":
        sub_score = round(
            recall_score(self.answer, self.submission), round_to
        )

    if metric == "f1_score":
        sub_score = round(f1_score(self.answer, self.submission), round_to)

    # Determine if submission beats answer
    if metric in error_metrics:
        # With error, smaller is better
        sub_beats_threshold = (sub_score < threshold) or isclose(
            sub_score, threshold, rel_tol=tolerance
        )

    if metric in score_metrics:
        # With score, bigger is better
        sub_beats_threshold = (sub_score > threshold) or isclose(
            sub_score, threshold, rel_tol=tolerance
        )

    # Update grader score and comment
    metric_verbose = metric.replace("_", " ")
    if sub_beats_threshold:
        self.add_to_score()
        self.comment = (
            f"Your model's {metric_verbose} is `{sub_score}`. "
            + self.comment
        )

    else:
        self.update_comment(
            f"Your model's {metric_verbose} is `{sub_score}`. You can do better. "
            f"Try to beat `{threshold}`."
        )

    if return_bool:
        return self.passed
    else:
        return None