How to use the nbformat.v4.new_notebook function in nbformat

To help you get started, we’ve selected a few nbformat examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

Example from uw-cmg/MAST-ML — tests/tests.py (view on GitHub)
main = textwrap.dedent(f"""\
            import pandas as pd
            from IPython.core.display import Image as image

            df = pd.read_csv('{csv_file}')
            y_true = df['Enorm DFT (eV)'].values
            y_pred = df['Enorm Predicted (eV)'].values
            savepath = './foobar.png'
            stats = {stats}

            {plot_func.__name__}(y_true, y_pred, savepath, stats, title='some plot of some data')
            image(filename='foobar.png')
        """)

        nb = nbformat.v4.new_notebook()
        text_cells = [header, func_strings, plot_func_string, main]
        cells = [nbformat.v4.new_code_cell(cell_text)
                 for cell_text in text_cells]
        nb['cells'] = cells
        nbformat.write(nb, 'test.ipynb')
Example from ipython-contrib/jupyter_contrib_nbextensions — tests/test_exporters.py (view on GitHub)
def test_html_collapsible_headings(self):
    """Test exporter for inlining collapsible_headings"""
    # Build the notebook programmatically: a markdown heading for each of
    # levels 1-3, each followed by an identical throwaway code cell.
    cells = []
    for level in (1, 2, 3):
        heading = '{} level {} heading'.format('#' * level, level)
        cells.append(v4.new_markdown_cell(source=heading))
        cells.append(v4.new_code_cell(source='a = range(1,10)'))
    nb = v4.new_notebook(cells=cells)
    self.check_stuff_gets_embedded(
        nb, 'html_ch', to_be_included=['collapsible_headings'])
Example from dataflownb/dfkernel — dfkernel/nbtests/test_files.py (view on GitHub)
def test_contents_manager(self):
    """make sure ContentsManager returns right files (ipynb, bin, txt)."""

    nbdir = self.notebook_dir

    # Minimal v4 notebook: a markdown cell (with a non-ASCII char to
    # exercise encoding) and a code cell carrying a stream output.
    nb = new_notebook(
        cells=[
            new_markdown_cell(u'Created by test ³'),
            new_code_cell("print(2*6)", outputs=[
                new_output("stream", text="12"),
            ])
        ]
    )

    # Write the notebook as UTF-8 text.
    with io.open(pjoin(nbdir, 'testnb.ipynb'), 'w',
                 encoding='utf-8') as f:
        write(nb, f, version=4)

    # Write a small binary file; the leading 0xff byte guarantees the
    # content is not valid UTF-8, so it must be served as binary.
    # NOTE: the original called f.close() inside the `with` block — the
    # context manager already closes the file, so that call was dead code
    # and has been removed.
    with io.open(pjoin(nbdir, 'test.bin'), 'wb') as f:
        f.write(b'\xff' + os.urandom(5))
Example from opencor/opencor — src/plugins/thirdParty/Python/scripts/run_python.py (view on GitHub)
try:
        with open(script_file) as f:
          source = f.read()
    except IOError:
        sys.exit('Cannot read file: {}'.format(script_file))

    # Create a new notebook with a single cell containing the
    # contents of the Python file

    args = ['import sys', 'args = []']
    for arg in sys.argv[1:]:
        args.append('args.append("{}")'.format(arg.replace('"', '\\"')))
    args.append('sys.argv = args')

    nb = nbformat.v4.new_notebook()
    nb.cells.append(nbformat.v4.new_code_cell(source='\n'.join(args)))
    nb.cells.append(nbformat.v4.new_code_cell(source=source))

    # Create an execution process that runs an OpenCOR kernel
    # and use it to run the notebook

    try:
        ep = ExecutePreprocessor(timeout=-1, kernel_name='opencor')
        ep.preprocess(nb, {'metadata': {'path': script_path + '/'}})
    except CellExecutionError as err:
        pass

    # Tracebacks may contain ANSI escape codes which Windows
    # by default doesn't recognise

    if platform.system() == 'Windows':
Example from GoogleCloudPlatform/cloudml-samples — tools/to_ipynb.py (view on GitHub)
for filename in style_dict['tpu_precells']:
            add_cell(cells, filename)

        cs2 = 'main(args)'
        cells.append(new_code_cell(cs2))

    else:
        cells.append(code_cell(cell_source))

    # Add tpu postcell
    if 'tpu' in py_filepath and style_dict['tpu_postcells'] is not None:
        for filename in style_dict['tpu_postcells']:
            add_cell(cells, filename)

    notebook = new_notebook(cells=cells)

    # output
    outpath, _ = os.path.split(ipynb_filepath)
    if not os.path.exists(outpath):
        os.makedirs(outpath)
    with open(ipynb_filepath, 'w') as ipynb_file:
        nbformat.write(notebook, ipynb_file)
Example from vatlab/sos-notebook — src/sos_notebook/converter.py (view on GitHub)
"display_name": "SoS",
                "language": "sos",
                "name": "sos"
            },
            "language_info": {
                "file_extension": ".sos",
                "mimetype": "text/x-sos",
                "name": "sos",
                "pygments_lexer": "python",
                'nbconvert_exporter': 'sos_notebook.converter.SoS_Exporter',
            },
            'sos': {
                'kernels': kernels
            }
        }
        return new_notebook(cells=cells, metadata=metadata)
Example from att/rcloud — rcloud.packages/rcloud.jupyter/inst/jupyter/jupyter_adapter.py (view on GitHub)
def run_cmd(self, cmd, kernel_name = None):
        """
        Runs python command string.
        """
        
        if _debugging: logging.info('Running command: ' + cmd + ' using kernel: ' + kernel_name)
        notebook = nbformat.v4.new_notebook()
        my_cell = nbformat.v4.new_code_cell(source=cmd)
        notebook.cells = [my_cell]
        if kernel_name:
          notebook.metadata['kernelspec'] = {'name' : kernel_name}
          
        try:
          self.executePreprocessor.preprocess(notebook, {'metadata': {'path': '.' }})
          if _debugging: logging.info('Result notebook: ' + nbformat.v4.writes_json(notebook))
          if len(notebook.cells) < 1 or len(notebook.cells[0].outputs) < 1:
            return None
          return self.postprocess_output(notebook.cells[0].outputs)
        except:
          exc_type, exc_obj, exc_tb = sys.exc_info()
          
          msg = None
          if _debugging:
Example from ipython/ipython — IPython/core/magics/basic.py (view on GitHub)
removed in the future.
        """
        args = magic_arguments.parse_argstring(self.notebook, s)

        from nbformat import write, v4

        cells = []
        hist = list(self.shell.history_manager.get_range())
        if(len(hist)<=1):
            raise ValueError('History is empty, cannot export')
        for session, execution_count, source in hist[:-1]:
            cells.append(v4.new_code_cell(
                execution_count=execution_count,
                source=source
            ))
        nb = v4.new_notebook(cells=cells)
        with io.open(args.filename, 'w', encoding='utf-8') as f:
            write(nb, f, version=4)
Example from shivam5992/aster — maggle.py (view on GitHub)
def __init__(self, config):

		# initialize important variables
		self.nb = nbformat.v4.new_notebook()
		self.nb['cells'] = []

		## add the intro text cell
		text = """	## Baseline Model Pipeline   

					This is the baseline kernel (automatically generated by my bot: Maggle). In this kernel, an end to end classification pipeline is implemented.

					### Contents 

					1. Prepare Environment  
					2. Preparation and Exploration   
					     2.1 Dataset Snapshot and Summary    
					     2.2 Target Variable Distribution    
					     2.3 Missing Values    
					     2.4 Variable Correlations
					3. Preprocessing  
Example from takluyver/bookbook — bookbook/latex.py (view on GitHub)
def combine_notebooks(notebook_files: Sequence[Path]) -> NotebookNode:
    combined_nb = new_notebook()

    count = 0
    for filename in notebook_files:
        count += 1
        log.debug('Adding notebook: %s', filename)
        nbname = filename.stem
        nb = nbformat.read(str(filename), as_version=4)

        try:
            combined_nb.cells.extend(add_sec_label(nb.cells[0], nbname))
        except NoHeader:
            raise NoHeader("Failed to find header in " + filename)

        combined_nb.cells.extend(nb.cells[1:])

        if not combined_nb.metadata: