How to use nbgrader - 10 common examples

To help you get started, we’ve selected a few nbgrader examples based on popular ways it is used in public projects.

Secure your code as it’s written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.

github jupyter / nbgrader / nbgrader / preprocessors / clearsolutions.py View on Github external
import re

from traitlets import Dict, Unicode, Bool, observe
from traitlets.config.loader import Config
from textwrap import dedent

from .. import utils
from . import NbGraderPreprocessor
from typing import Any, Tuple
from nbformat.notebooknode import NotebookNode
from nbconvert.exporters.exporter import ResourcesDict


class ClearSolutions(NbGraderPreprocessor):

    code_stub = Dict(
        dict(python="# YOUR CODE HERE\nraise NotImplementedError()",
             matlab="% YOUR CODE HERE\nerror('No Answer Given!')",
             octave="% YOUR CODE HERE\nerror('No Answer Given!')",
             java="// YOUR CODE HERE"),
        help="The code snippet that will replace code solutions"
    ).tag(config=True)

    text_stub = Unicode(
        "YOUR ANSWER HERE",
        help="The text snippet that will replace written solutions"
    ).tag(config=True)

    begin_solution_delimeter = Unicode(
        "BEGIN SOLUTION",
github jupyter / nbgrader / testSub.py View on Github external
# NOTE(review): this snippet assumes earlier imports of `coursedir`,
# `NbGraderAPI`, sqlalchemy's `func`, and the nbgrader ORM models
# (SubmittedNotebook, SubmittedAssignment, Notebook, Assignment, Student,
# Grade, GradeCell) -- none of them are defined in this fragment.

# Open the local course directory and build an nbgrader API around it.
cd=coursedir.CourseDirectory(root='/home/daniel/Teaching/L2python')

api=NbGraderAPI(cd)

# Point the API at the on-disk exchange directory used for release/collect.
api.exchange='/home/daniel/Teaching/L2python/exchange'
#print (api.get_submissions('a_a'))

notebook_id='lessEmpty'
assignment_id='lessEmpty'

# Per-submitted-notebook totals over *code* grade cells: the summed
# autograded score and the summed maximum possible score.
res = api.gradebook.db.query(
            SubmittedNotebook.id,
            func.sum(Grade.score).label("code_score"),
            func.sum(GradeCell.max_score).label("max_code_score"),
        ).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
         .filter(GradeCell.cell_type == "code")\
         .group_by(SubmittedNotebook.id)\
         .all()

print (res)

import sys
#sys.exit()

# Same aggregation, restricted to *markdown* (written) grade cells.
# NOTE(review): this statement is truncated in the scrape -- the closing
# `.all()` call is cut off after the final line-continuation backslash.
res = api.gradebook.db.query(
            SubmittedNotebook.id,
            func.sum(Grade.score).label("written_score"),
            func.sum(GradeCell.max_score).label("max_written_score"),
        ).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
         .filter(GradeCell.cell_type == "markdown")\
         .group_by(SubmittedNotebook.id)\
github jupyter / nbgrader / testSub.py View on Github external
from sqlalchemy.orm import aliased
from sqlalchemy.sql import  and_

# NOTE(review): this snippet also depends on earlier imports of `coursedir`,
# `NbGraderAPI`, `func`, and the nbgrader ORM models; only the two sqlalchemy
# imports above are visible here.

# Open the local course directory and build an nbgrader API around it.
cd=coursedir.CourseDirectory(root='/home/daniel/Teaching/L2python')

api=NbGraderAPI(cd)

# Point the API at the on-disk exchange directory used for release/collect.
api.exchange='/home/daniel/Teaching/L2python/exchange'
#print (api.get_submissions('a_a'))

notebook_id='lessEmpty'
assignment_id='lessEmpty'

# Per-submitted-notebook totals over *code* grade cells: the summed
# autograded score and the summed maximum possible score.
res = api.gradebook.db.query(
            SubmittedNotebook.id,
            func.sum(Grade.score).label("code_score"),
            func.sum(GradeCell.max_score).label("max_code_score"),
        ).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
         .filter(GradeCell.cell_type == "code")\
         .group_by(SubmittedNotebook.id)\
         .all()

print (res)

import sys
#sys.exit()

# Same aggregation for written (markdown) grade cells.
# NOTE(review): truncated in the scrape -- the .filter/.group_by/.all tail
# of this query is cut off after the final line-continuation backslash.
res = api.gradebook.db.query(
            SubmittedNotebook.id,
            func.sum(Grade.score).label("written_score"),
            func.sum(GradeCell.max_score).label("max_written_score"),
        ).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
github jupyter / nbgrader / testSub.py View on Github external
# NOTE(review): this snippet assumes earlier imports of `coursedir`,
# `NbGraderAPI`, sqlalchemy's `func`, and the nbgrader ORM models
# (SubmittedNotebook, SubmittedAssignment, Notebook, Assignment, Student,
# Grade, GradeCell) -- none of them are defined in this fragment.

# Open the local course directory and build an nbgrader API around it.
cd=coursedir.CourseDirectory(root='/home/daniel/Teaching/L2python')

api=NbGraderAPI(cd)

# Point the API at the on-disk exchange directory used for release/collect.
api.exchange='/home/daniel/Teaching/L2python/exchange'
#print (api.get_submissions('a_a'))

notebook_id='lessEmpty'
assignment_id='lessEmpty'

# Per-submitted-notebook totals over *code* grade cells: the summed
# autograded score and the summed maximum possible score.
res = api.gradebook.db.query(
            SubmittedNotebook.id,
            func.sum(Grade.score).label("code_score"),
            func.sum(GradeCell.max_score).label("max_code_score"),
        ).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
         .filter(GradeCell.cell_type == "code")\
         .group_by(SubmittedNotebook.id)\
         .all()

print (res)

import sys
#sys.exit()

# Same aggregation, restricted to *markdown* (written) grade cells.
res = api.gradebook.db.query(
            SubmittedNotebook.id,
            func.sum(Grade.score).label("written_score"),
            func.sum(GradeCell.max_score).label("max_written_score"),
        ).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
         .filter(GradeCell.cell_type == "markdown")\
         .group_by(SubmittedNotebook.id)\
         .all()
github jupyter / nbgrader / testSub.py View on Github external
from sqlalchemy.sql import  and_

# NOTE(review): this snippet also depends on earlier imports of `coursedir`,
# `NbGraderAPI`, `func`, and the nbgrader ORM models; only the sqlalchemy
# import above is visible here.

# Open the local course directory and build an nbgrader API around it.
cd=coursedir.CourseDirectory(root='/home/daniel/Teaching/L2python')

api=NbGraderAPI(cd)

# Point the API at the on-disk exchange directory used for release/collect.
api.exchange='/home/daniel/Teaching/L2python/exchange'
#print (api.get_submissions('a_a'))

notebook_id='lessEmpty'
assignment_id='lessEmpty'

# Per-submitted-notebook totals over *code* grade cells: the summed
# autograded score and the summed maximum possible score.
res = api.gradebook.db.query(
            SubmittedNotebook.id,
            func.sum(Grade.score).label("code_score"),
            func.sum(GradeCell.max_score).label("max_code_score"),
        ).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
         .filter(GradeCell.cell_type == "code")\
         .group_by(SubmittedNotebook.id)\
         .all()

print (res)

import sys
#sys.exit()

# Same aggregation for written (markdown) grade cells.
# NOTE(review): truncated in the scrape -- the .group_by/.all tail of this
# query is cut off after the final line-continuation backslash.
res = api.gradebook.db.query(
            SubmittedNotebook.id,
            func.sum(Grade.score).label("written_score"),
            func.sum(GradeCell.max_score).label("max_written_score"),
        ).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
         .filter(GradeCell.cell_type == "markdown")\
github jupyter / nbgrader / nbgrader / preprocessors / clearhiddentests.py View on Github external
def preprocess_cell(self,
                        cell: NotebookNode,
                        resources: ResourcesDict,
                        cell_index: int
                        ) -> Tuple[NotebookNode, ResourcesDict]:
        # remove hidden test regions
        removed_test = self._remove_hidden_test_region(cell)

        # determine whether the cell is a grade cell
        is_grade = utils.is_grade(cell)

        # check that it is marked as a grade cell if we remove a test
        # region -- if it's not, then this is a problem, because the cell needs
        # to be given an id
        if not is_grade and removed_test:
            if self.enforce_metadata:
                raise RuntimeError(
                    "Hidden test region detected in a non-grade cell; "
                    "please make sure all solution regions are within "
                    "'Autograder tests' cells."
                )

        return cell, resources
github jupyter / nbgrader / testSub.py View on Github external
# subquery for the written scores
# NOTE(review): depends on `api`, `func`, `exists`, and the nbgrader ORM
# models having been set up earlier -- they are not defined in this fragment.
# Per-submitted-notebook sums over markdown grade cells (written work).
written_scores = api.gradebook.db.query(
            SubmittedNotebook.id,
            func.sum(Grade.score).label("score"),
            func.sum(GradeCell.max_score).label("max_written_score"),
        ).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
         .filter(GradeCell.cell_type == "markdown")\
         .group_by(SubmittedNotebook.id)\
         .subquery()

# Per-submitted-assignment sums over markdown *task* cells.
task_scores = api.gradebook.db.query(
            SubmittedAssignment.id,
            func.sum(Grade.score).label("score"),
            func.sum(TaskCell.max_score).label("max_task_score"),
        ).join(SubmittedNotebook, Notebook, Assignment, Student, Grade, TaskCell)\
         .filter(TaskCell.cell_type == "markdown")\
         .group_by(SubmittedAssignment.id)\
         .subquery()

        # subquery for needing manual grading
# One row per submitted notebook with a flag set when any of its grades
# still needs a manual grade.
manual_grade = api.gradebook.db.query(
            SubmittedNotebook.id,
            exists().where(Grade.needs_manual_grade).label("needs_manual_grade")
        ).join(SubmittedAssignment, Assignment, Notebook)\
         .filter(
             Grade.notebook_id == SubmittedNotebook.id,
             Grade.needs_manual_grade)\
         .group_by(SubmittedNotebook.id)\
         .subquery()

        # subquery for failed tests
github jupyter / nbgrader / testSub.py View on Github external
written_scores.c.score, written_scores.c.max_written_score,
            task_scores.c.score, task_scores.c.max_task_score,
            _manual_grade, _failed_tests, SubmittedNotebook.flagged
        ).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, BaseCell)\
         .outerjoin(code_scores, SubmittedNotebook.id == code_scores.c.id)\
         .outerjoin(written_scores, SubmittedNotebook.id == written_scores.c.id)\
         .outerjoin(task_scores, SubmittedNotebook.id == task_scores.c.id)\
         .outerjoin(manual_grade, SubmittedNotebook.id == manual_grade.c.id)\
         .outerjoin(failed_tests, SubmittedNotebook.id == failed_tests.c.id)\
         .filter(and_(
             Notebook.name == notebook_id,
             Assignment.name == assignment_id,
             Student.id == SubmittedAssignment.student_id,
             SubmittedAssignment.id == SubmittedNotebook.assignment_id,
             SubmittedNotebook.id == Grade.notebook_id,
             GradeCell.id == Grade.cell_id,
             TaskCell.id == Grade.cell_id))\
         .group_by(
             SubmittedNotebook.id, Notebook.name,
             Student.id, Student.first_name, Student.last_name,
             code_scores.c.score, code_scores.c.max_code_score,
             written_scores.c.score, written_scores.c.max_written_score,
             task_scores.c.score, task_scores.c.max_task_score,
             _manual_grade, _failed_tests, SubmittedNotebook.flagged)\
         .all()
print (submissions)

#print (api.gradebook.notebook_submission_dicts(notebook_id, assignment_id))

#submissions = api.gradebook.db.query(
#            SubmittedNotebook.id, Notebook.name,
#            func.sum(Grade.score), func.sum(GradeCell.max_score),
github jupyter / nbgrader / nbgrader / preprocessors / saveautogrades.py View on Github external
manually grade written solutions anyway. This function adds
        score information to the database if it doesn't exist. It does
        NOT override the 'score' field, as this is the manual score
        that might have been provided by a grader.

        """
        # these are the fields by which we will identify the score
        # information
        grade = self.gradebook.find_grade(
            cell.metadata['nbgrader']['grade_id'],
            self.notebook_id,
            self.assignment_id,
            self.student_id)

        # determine what the grade is
        auto_score, _ = utils.determine_grade(cell, self.log)
        grade.auto_score = auto_score

        # if there was previously a manual grade, or if there is no autograder
        # score, then we should mark this as needing review
        if (grade.manual_score is not None) or (grade.auto_score is None):
            grade.needs_manual_grade = True
        else:
            grade.needs_manual_grade = False

        self.gradebook.db.commit()
github jupyter / nbgrader / nbgrader / preprocessors / saveautogrades.py View on Github external
def _add_comment(self, cell: NotebookNode, resources: ResourcesDict) -> None:
        comment = self.gradebook.find_comment(
            cell.metadata['nbgrader']['grade_id'],
            self.notebook_id,
            self.assignment_id,
            self.student_id)
        if cell.metadata.nbgrader.get("checksum", None) == utils.compute_checksum(cell) and not utils.is_task(cell):
            comment.auto_comment = "No response."
        else:
            comment.auto_comment = None

        self.gradebook.db.commit()