How to use the pandas.set_option function in pandas

To help you get started, we’ve selected a few pandas.set_option examples based on popular ways it is used in public projects.
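Before the project snippets, here is a minimal sketch of the basic workflow: pandas.set_option changes an option, pandas.get_option reads it back, and pandas.reset_option restores defaults. The option names below are real pandas display options; the small DataFrame and its column names are only illustrative.

import pandas as pd

# Raise the display limits and control float formatting.
pd.set_option("display.max_rows", 100)
pd.set_option("display.max_columns", 50)
pd.set_option("display.width", 1000)
pd.set_option("display.float_format", lambda x: "%.4f" % x)

print(pd.get_option("display.max_rows"))  # 100

df = pd.DataFrame({"price": [1.23456, 7.89012], "qty": [3.0, 4.0]})
print(df)  # floats rendered with the format set above

# Reset a single option, or every "display.*" option, to its default.
pd.reset_option("display.max_rows")
pd.reset_option("^display")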

github AugurProject / augur / tests / consensus / runtests.py
try:
    from colorama import Fore, Style, init
except ImportError:
    pass
from ethereum import tester as t
from pyconsensus import Oracle

ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                    os.pardir, os.pardir, "consensus")

np.set_printoptions(linewidth=225,
                    suppress=True,
                    formatter={"float": "{: 0.6f}".format})

pd.set_option("display.max_rows", 25)
pd.set_option("display.width", 1000)
pd.set_option('display.float_format', lambda x: '%.8f' % x)

# max_iterations: number of blocks required to complete PCA
verbose = False
max_iterations = 5
tolerance = 0.05
variance_threshold = 0.85
max_components = 5
init()

YES = 2.0
NO = 1.0
BAD = 1.5
NA = 0.0

def BR(string): # bright red
github RamonYeung / torchlight / utils.py
def personal_display_settings():
    """
    Pandas Doc
    https://pandas.pydata.org/pandas-docs/stable/generated/pandas.set_option.html
    NumPy Doc
        -
    """
    from pandas import set_option
    set_option('display.max_rows', 500)
    set_option('display.max_columns', 500)
    set_option('display.width', 2000)
    set_option('display.max_colwidth', 1000)
    from numpy import set_printoptions
    set_printoptions(suppress=True)
github TTyb / TTyb.github.io / code / lianjia.py
def getHotHouse(allList, top):
    df = pd.DataFrame(allList)
    # Sort by down payment in descending order
    pd.set_option('display.max_rows', 1000)
    pd.set_option('display.max_columns', 1000)
    pd.set_option('display.width', 1000)
    pd.set_option('max_colwidth', 1000)
    df["rank"] = df['price_f'].rank(ascending=1, method='dense')
    # Select the 10 lowest-ranked entries
    df_rank = df[df["rank"] <= top]

    return df_rank
github hellobiek / smart_deal_tool / datamanager / ticks.py
import ctypes
import struct
import zipfile
import datetime
import requests
import const as ct
import numpy as np
import pandas as pd
from base.clog import getLogger
from base.cdate import get_day_nday_ago
from datetime import datetime, timedelta
from common import get_security_exchange_name
from datamanager.tick_models import TickTradeDetail, TickDetailModel
logger = getLogger(__name__)
pd.options.mode.chained_assignment = None #default='warn'
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)

def unsigned2signed(value):
    return ctypes.c_int32(value).value

def signed2unsigned(value, b = 32):
    if 32 == b:
        return ctypes.c_uint32(value).value
    elif 8 == b:
        return ctypes.c_uint8(value).value
    elif 16 == b:
        return ctypes.c_uint16(value).value

def int_overflow(val):
    maxint = 2147483647
    if not -maxint - 1 <= val <= maxint:
github rd11490 / NBA_Tutorials / analyze_play_by_play / analyze_pbp.py
import pandas as pd
import os


pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)


dirname = os.path.dirname(__file__)
input_file = os.path.join(dirname, 'data/2017-18_pbp.csv')
output_file = os.path.join(dirname, 'data/unique_pbp.csv')

play_by_play = pd.read_csv(input_file)
play_by_play_for_analysis = play_by_play[['EVENTMSGTYPE', 'EVENTMSGACTIONTYPE', 'HOMEDESCRIPTION', 'NEUTRALDESCRIPTION',
                                          'VISITORDESCRIPTION','PLAYER1_ID', 'PLAYER1_NAME', 'PLAYER1_TEAM_ID',
                                          'PLAYER1_TEAM_NICKNAME', 'PLAYER2_ID', 'PLAYER2_NAME', 'PLAYER2_TEAM_ID',
                                          'PLAYER2_TEAM_NICKNAME', 'PLAYER3_ID', 'PLAYER3_NAME', 'PLAYER3_TEAM_ID',
                                          'PLAYER3_TEAM_NICKNAME']]
play_by_play_for_analysis = play_by_play_for_analysis.fillna('')

play_by_play_for_analysis['DESCRIPTION'] = play_by_play_for_analysis['HOMEDESCRIPTION'] + ' ' + \
                                           play_by_play_for_analysis['NEUTRALDESCRIPTION'] + ' ' + \
github googledatalab / pydatalab / google / datalab / contrib / mlworkbench / commands / _ml.py
    return (text[:37] + '...') if isinstance(text, six.string_types) and len(text) > 40 else text

  # Truncate text explicitly here because we will set display.max_colwidth to -1.
  # This applies to images too, but images will be overridden with "_show_img()" later.
  formatters = {x: _truncate_text for x in df.columns if df[x].dtype == object}  # the np.object alias was removed in newer NumPy
  if not args['no_show_image'] and img_cols:
    formatters.update({x + '_image': _show_img for x in img_cols})

  # Set display.max_colwidth to -1 so we can display images.
  old_width = pd.get_option('display.max_colwidth')
  pd.set_option('display.max_colwidth', -1)
  try:
    IPython.display.display(IPython.display.HTML(
        df.to_html(formatters=formatters, escape=False, index=False)))
  finally:
    pd.set_option('display.max_colwidth', old_width)
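The snippet above saves the old value and restores it in a finally block. The same temporary override can be written more compactly with pandas' option_context context manager, which undoes the change automatically when the block exits. A minimal sketch follows; df stands in for the DataFrame being rendered, and newer pandas versions expect None rather than -1 to mean "no column-width limit".

import pandas as pd

df = pd.DataFrame({"text": ["some very long cell content ..."]})

# Temporarily lift column-width truncation only inside this block.
with pd.option_context("display.max_colwidth", None):
    html = df.to_html(escape=False, index=False)
# Here display.max_colwidth is back to its previous value.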
github chen0040 / keras-recommender / demo / vgg16_content_based_filtering.py
def main():
    data_dir_path = './data/ml-latest-small'
    poster_dir_path = './data/posters'
    output_dir_path = './data/models'

    np.set_printoptions(threshold=np.inf)  # np.nan is no longer a valid threshold; np.inf disables truncation
    # pd.set_option('display.height', 1000) was removed from pandas; 'display.max_rows' below covers it
    pd.set_option('display.max_rows', 500)
    pd.set_option('display.max_columns', 500)
    pd.set_option('display.width', 1000)

    df = pd.read_csv(data_dir_path + '/ratings.csv', sep=',')
    df_id = pd.read_csv(data_dir_path + '/links.csv', sep=',')
    df_movie_names = pd.read_csv(data_dir_path + '/movies.csv', sep=',')
    df = pd.merge(pd.merge(df, df_id, on='movieId'), df_movie_names, on='movieId')

    print(df.head())

    data_file = data_dir_path + '/imdb_id_to_image_dict.data'
    if not os.path.exists(data_file):
        imdb_id_to_image_dict = dict()
        for poster_file in glob(poster_dir_path + '/*.jpg'):  # debug here
            print('Loading img at {}'.format(poster_file))
            img = kimage.load_img(poster_file, target_size=(224, 224))
github mgckind / easyaccess / easyaccess / easyaccess.py
    global load_bar, colored
    conf = config_mod.get_config(config_file)

    if readline_present:
        try:
            readline.read_history_file(history_file)
            readline.set_history_length(conf.getint('easyaccess', 'histcache'))
        except:
            print(colored('readline might have problems accessing history', 'red'))

    args = eaparser.get_args(config_file)  # Reads command line arguments

    # PANDAS DISPLAY SET UP
    pd.set_option('display.max_rows', conf.getint('display', 'max_rows'))
    pd.set_option('display.width', conf.getint('display', 'width'))
    pd.set_option('display.max_columns', conf.getint('display', 'max_columns'))
    pd.set_option('display.max_colwidth', conf.getint('display', 'max_colwidth'))
    load_bar = conf.getboolean('display', 'loading_bar')
    if args.quiet:
        conf.set('display', 'loading_bar', 'no')

    if args.db is not None:
        db = args.db
        if db[:3] == 'db-':
            db = db[3:]
    else:
        db = conf.get('easyaccess', 'database')

    if args.user is not None:
        print('Bypassing .desservices file with user : %s' % args.user)
        if args.password is None:
            print('Must include password')
github ddos-clearing-house / ddos_dissector / src / ddos_dissector / dataframe_analysis.py
import math
from datetime import datetime
import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', -1)  # newer pandas expects None instead of -1 for "no limit"
pd.set_option('display.max_columns', 100)
import hashlib
from ddos_dissector.exceptions.UnsupportedFileTypeError import UnsupportedFileTypeError
from ddos_dissector.portnumber2name import portnumber2name
from ddos_dissector.protocolnumber2name import protocolnumber2name
from ddos_dissector.tcpflagletters2names import tcpflagletters2names

from datetime import datetime


def analyze_dataframe(df, dst_ip, file_type):
    """
    Analyze a dataframe, and return the fingerprints
    :param df: The Pandas dataframe
    :param dst_ip: The destination IP (if entered) or False
    :param file_type: The file type string
    :return: The fingerprints