How to use the traitlets.Float trait type (a class, not a function) in traitlets

To help you get started, we’ve selected a few traitlets examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github bloomberg / bqplot / bqplot / marks.py View on Github external
# Other attributes
    # Pie position: a number, a date, or a string (union trait), synced to the frontend.
    x = (Float(0.5) | Date() | Unicode()).tag(sync=True)
    y = (Float(0.5) | Date() | Unicode()).tag(sync=True)

    scales_metadata = Dict({'color': {'dimension': 'color'}}).tag(sync=True)
    # Whether slices are sorted -- presumably by value; confirm in bqplot docs.
    sort = Bool().tag(sync=True)
    # Slice colors; defaults to the module-level CATEGORY10 palette.
    colors = List(trait=Color(default_value=None, allow_none=True),
                  default_value=CATEGORY10).tag(sync=True,
                                                display_name='Colors')
    stroke = Color(None, allow_none=True).tag(sync=True)
    # Per-slice opacity, each value constrained to [0, 1] (None allowed).
    opacities = List(trait=Float(1.0, min=0, max=1, allow_none=True))\
        .tag(sync=True, display_name='Opacities')
    # Outer and inner radii; non-negative, unbounded above.
    radius = Float(180.0, min=0.0, max=float('inf')).tag(sync=True)
    inner_radius = Float(0.1, min=0.0, max=float('inf')).tag(sync=True)
    # Angular extent of the pie (units not visible here -- presumably degrees,
    # given the 360.0 default; confirm).
    start_angle = Float().tag(sync=True)
    end_angle = Float(360.0).tag(sync=True)
    # Label placement and value-display options.
    display_labels = Enum(['none', 'inside', 'outside'],
                          default_value='inside').tag(sync=True)
    display_values = Bool(False).tag(sync=True)
    values_format = Unicode(default_value='.1f').tag(sync=True)  # presumably a d3-format string -- confirm
    label_color = Color(None, allow_none=True).tag(sync=True)
    font_size = Unicode(default_value='12px').tag(sync=True)
    font_weight = Enum(['bold', 'normal', 'bolder'],
                       default_value='normal').tag(sync=True)

    # Frontend widget wiring.
    _view_name = Unicode('Pie').tag(sync=True)
    _model_name = Unicode('PieModel').tag(sync=True)


def topo_load(name):
    with open(os.path.join(os.path.split(os.path.realpath(__file__))[0],
                           name)) as data_file:
github sinhrks / cesiumpy / cesiumpy / entities / model.py View on Github external
For debugging only. Draws the model in wireframe.
    """

    # NOTE(review): presumably the option names serialized out for the Cesium
    # Model entity -- confirm against cesiumpy's base entity class.
    _props = ['url', 'basePath', 'show', 'modelMatrix', 'scale',
              'minimumPixelSize', 'maximumScale', 'id', 'allowPicking',
              'incrementallyLoadTextures', 'asynchronous',
              'debugShowBoundingVolume', 'debugWireframe']

    # Required: model location and placement transform.
    url = URITrait()
    modelMatrix = traitlets.Instance(klass=Transforms)

    # Optional display settings (allow_none=True; None presumably defers to
    # Cesium's own defaults -- confirm).
    basePath = traitlets.Unicode(allow_none=True)
    show = traitlets.Bool(allow_none=True)
    scale = traitlets.Float(allow_none=True)
    minimumPixelSize = traitlets.Float(allow_none=True)
    maximumScale = traitlets.Float(allow_none=True)

    # Optional picking / texture-loading / debug flags.
    allowPicking = traitlets.Bool(allow_none=True)
    incrementallyLoadTextures = traitlets.Bool(allow_none=True)
    asynchronous = traitlets.Bool(allow_none=True)
    debugShowBoundingVolume = traitlets.Bool(allow_none=True)
    debugWireframe = traitlets.Bool(allow_none=True)

    def __init__(self, url, modelMatrix, basePath=None, show=None,
                 scale=None, minimumPixelSize=None, maximumScale=None,
                 id=None, allowPicking=None, incrementallyLoadTextures=None,
                 asynchronous=None, debugShowBoundingVolume=None,
                 debugWireframe=None):

        self.url = url

        self.modelMatrix = Transforms.eastNorthUpToFixedFrame(modelMatrix)
github bloomberg / bqplot / bqplot / scales.py View on Github external
scale_types: dict (class-level attribute)
        A registry of existing scale types.
    domain_class: type (default: Float)
        traitlet type used to validate values in of the domain of the scale.
    reverse: bool (default: False)
        whether the scale should be reversed.
    allow_padding: bool (default: True)
        indicates whether figures are allowed to add data padding to this scale
        or not.
    precedence: int (class-level attribute)
        attribute used to determine which scale takes precedence in cases when
        two or more scales have the same rtype and dtype.
    """
    scale_types = {}  # class-level registry of existing scale types (see docstring above)
    precedence = 1    # class-level tie-breaker among scales sharing rtype/dtype
    domain_class = Type(Float)  # traitlet type validating domain values
    reverse = Bool().tag(sync=True)
    allow_padding = Bool(True).tag(sync=True)

    # ipywidgets wiring: frontend view/model names, module, and version.
    _view_name = Unicode('Scale').tag(sync=True)
    _model_name = Unicode('ScaleModel').tag(sync=True)
    _view_module = Unicode('bqplot').tag(sync=True)
    _model_module = Unicode('bqplot').tag(sync=True)
    _view_module_version = Unicode(__frontend_version__).tag(sync=True)
    _model_module_version = Unicode(__frontend_version__).tag(sync=True)
    _ipython_display_ = None  # We cannot display a scale outside of a figure


class GeoScale(Scale):

    """The base projection scale class for Map marks.
github dask / dask-gateway / dask-gateway-server / dask_gateway_server / managers / base.py View on Github external
from traitlets.config import LoggingConfigurable

from ..utils import MemoryLimit, TaskPool


class ClusterManager(LoggingConfigurable):
    """Base class for dask cluster managers"""

    # Extra environment variables applied to both scheduler and worker
    # processes; exposed to operators via the traitlets config system.
    environment = Dict(
        help="""
        Environment variables to set for both the worker and scheduler processes.
        """,
        config=True,
    )

    # Seconds to wait for a starting cluster before giving up (default: 60).
    cluster_start_timeout = Float(
        60,
        help="""
        Timeout (in seconds) before giving up on a starting dask cluster.
        """,
        config=True,
    )

    cluster_status_period = Float(
        30,
        help="""
        Time (in seconds) between cluster status checks.

        A smaller period will detect failed clusters sooner, but will use more
        resources. A larger period will provide slower feedback in the presence
        of failures.
        """,
github martinRenou / ipycanvas / ipycanvas / canvas.py View on Github external
#: (str) Global composite operation, possible values are listed below:
    #: https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API/Tutorial/Compositing#globalCompositeOperation
    global_composite_operation = Enum(
        ['source-over', 'source-in', 'source-out', 'source-atop',
         'destination-over', 'destination-in', 'destination-out',
         'destination-atop', 'lighter', 'copy', 'xor', 'multiply',
         'screen', 'overlay', 'darken', 'lighten', 'color-dodge',
         'color-burn', 'hard-light', 'soft-light', 'difference',
         'exclusion', 'hue', 'saturation', 'color', 'luminosity'],
        default_value='source-over'
    )

    # -- Shadow attributes; names mirror the Canvas 2D API shadow* properties
    #    referenced in the MDN link above.

    #: (float) Indicates the horizontal distance the shadow should extend from the object.
    #: This value isn't affected by the transformation matrix. The default is 0.
    shadow_offset_x = Float(0.0)

    #: (float) Indicates the vertical distance the shadow should extend from the object.
    #: This value isn't affected by the transformation matrix. The default is 0.
    shadow_offset_y = Float(0.0)

    #: (float) Indicates the size of the blurring effect; this value doesn't correspond to a number of pixels
    #: and is not affected by the current transformation matrix. The default value is 0.
    shadow_blur = Float(0.0)

    #: (valid HTML color) A standard CSS color value indicating the color of the shadow effect; by default,
    #: it is fully-transparent black.
    shadow_color = Color('rgba(0, 0, 0, 0)')

    #: (float) Sets the width of lines drawn in the future, must be a positive number. Default to ``1.0``.
    line_width = Float(1.0)
github jupyterhub / batchspawner / batchspawner / batchspawner.py View on Github external
async def poll(self):
        """Poll the batch job backing this spawner.

        Returns None while the job is running or queued (the JupyterHub
        convention for "still alive"), and 1 once it is gone; state is
        cleared before reporting the job as stopped.
        """
        # Guard clause: no job id means nothing was ever submitted.
        if not self.job_id:
            self.clear_state()
            return 1

        await self.read_job_state()
        if self.state_isrunning() or self.state_ispending():
            return None

        # Job has finished or failed: forget it and report "stopped".
        self.clear_state()
        return 1

    # Twice-a-second polling by default while waiting for the job to come up.
    startup_poll_interval = Float(0.5,
        help="Polling interval (seconds) to check job state during startup"
        ).tag(config=True)

    async def start(self):
        """Start the process"""
        self.ip = self.traits()['ip'].default_value
        self.port = self.traits()['port'].default_value

        if jupyterhub.version_info >= (0,8) and self.server:
            self.server.port = self.port

        job = await self.submit_batch_script()

        # We are called with a timeout, and if the timeout expires this function will
        # be interrupted at the next yield, and self.stop() will be called.
        # So this function should not return unless successful, and if unsuccessful
github allenai / citeomatic / citeomatic / models / options.py View on Github external
# Input-signal toggles: which feature groups the model consumes.
use_dense = Bool(default_value=True)
    use_citations = Bool(default_value=True)
    use_sparse = Bool(default_value=True)
    use_src_tgt_embeddings = Bool(default_value=False)
    use_metadata = Bool(default_value=True)
    use_authors = Bool(default_value=False)
    use_venue = Bool(default_value=False)
    use_keyphrases = Bool(default_value=False)

    # training and feature params
    optimizer = Unicode(default_value='tfopt')
    lr = Float(default_value=0.0001)  # learning rate
    use_nn_negatives = Bool(default_value=True)
    margin_multiplier = Float(default_value=1)
    use_variable_margin = Bool(default_value=True)
    train_frac = Float(default_value=0.8) # the rest will be divided 50/50 val/test
    max_features = Int(default_value=200000)
    max_title_len = Int(default_value=50)
    max_abstract_len = Int(default_value=500)
    neg_to_pos_ratio = Int(default_value=6) # ideally divisible by 2 and 3
    batch_size = Int(default_value=512)
    samples_per_epoch = Int(default_value=1000000)
    total_samples = Int(default_value=5000000)
    reduce_lr_flag = Bool(default_value=False)

    # regularization params for embedding layer: l1 for mag/sparse, l2 for dir
    l2_lambda = Float(default_value=0.00001)
    l1_lambda = Float(default_value=0.0000001)
    dropout_p = Float(default_value=0)  # dropout probability
    use_magdir = Bool(default_value=True)
    # params for TextEmbeddingConv
github jaantollander / crowddynamics / crowddynamics / examples / collective_motion.py View on Github external
agent_type = Enum(
        default_value=Circular,
        values=(Circular, ThreeCircle))
    body_type = Enum(
        default_value='adult',
        values=('adult',))
    # Field width/height; non-negative. Units not visible in this chunk --
    # presumably metres; confirm against crowddynamics docs.
    width = Float(
        default_value=10.0,
        min=0)
    height = Float(
        default_value=10.0,
        min=0)
    # Exit width, constrained to [0, 10].
    exit_width = Float(
        default_value=1.25,
        min=0, max=10)
    # Ratio in [0, 1]; exact meaning ("obstacle ratio"?) not visible here.
    ratio_obs = Float(
        default_value=0.6,
        min=0, max=1)

    def attributes(self, has_target: bool=True, is_follower: bool=False):
        def wrapper():
            rand_target = np.random.randint(0, len(self.field.targets))
            target = rand_target if has_target else NO_TARGET
            orientation = np.random.uniform(-np.pi, np.pi)
            d = dict(
                target=target,
                is_leader=not is_follower,
                is_follower=is_follower,
                body_type=self.body_type,
                orientation=orientation,
                velocity=np.zeros(2),
                angular_velocity=0.0,
github jupyter-widgets / ipyleaflet / ipyleaflet / leaflet.py View on Github external
super(GeoData, self).__init__(**kwargs)
        self.data = self._get_data()

    @observe('geo_dataframe')
    def _update_data(self, change):
        # Regenerate the GeoJSON payload whenever the source dataframe trait
        # is replaced (traitlets observer; `change` is unused here).
        self.data = self._get_data()

    def _get_data(self):
        """Serialize ``geo_dataframe`` to GeoJSON text and parse it to a dict."""
        geojson_text = self.geo_dataframe.to_json()
        return json.loads(geojson_text)


class Choropleth(GeoJSON):
    """GeoJSON layer carrying a numeric value per feature key.

    NOTE(review): semantics inferred from the trait names and the observers
    below; ``choro_data`` values are reduced with min()/max(), so they must
    be mutually comparable.
    """

    geo_data = Dict()
    # Mapping whose values drive value_min/value_max (see _update_bounds).
    choro_data = Dict()
    # Bounds over choro_data values; None until data is assigned.
    value_min = Float(None, allow_none=True)
    value_max = Float(None, allow_none=True)
    colormap = Instance(ColorMap)
    border_color = Color('black')

    @observe('choro_data')
    def _update_bounds(self, change):
        """Track the extremes of ``choro_data`` in value_min/value_max."""
        values = list(self.choro_data.values())
        self.value_min = min(values)
        self.value_max = max(values)

    @observe('value_min', 'value_max', 'geo_data', 'choro_data', 'colormap', 'border_color')
    def _update_data(self, change):
        # Any data or styling trait change invalidates the rendered GeoJSON.
        self.data = self._get_data()

    @default('colormap')
    def _default_colormap(self):
        # `linear` comes from a module-level import not visible in this chunk;
        # OrRd_06 is presumably a sequential orange-red colormap -- confirm.
        return linear.OrRd_06
github allenai / citeomatic / citeomatic / models / options.py View on Github external
margin_multiplier = Float(default_value=1)
    use_variable_margin = Bool(default_value=True)
    train_frac = Float(default_value=0.8) # the rest will be divided 50/50 val/test
    max_features = Int(default_value=200000)
    max_title_len = Int(default_value=50)
    max_abstract_len = Int(default_value=500)
    neg_to_pos_ratio = Int(default_value=6) # ideally divisible by 2 and 3
    batch_size = Int(default_value=512)
    samples_per_epoch = Int(default_value=1000000)
    total_samples = Int(default_value=5000000)
    reduce_lr_flag = Bool(default_value=False)

    # regularization params for embedding layer: l1 for mag/sparse, l2 for dir
    l2_lambda = Float(default_value=0.00001)
    l1_lambda = Float(default_value=0.0000001)
    dropout_p = Float(default_value=0)  # dropout probability
    use_magdir = Bool(default_value=True)

    # params for TextEmbeddingConv
    kernel_width = Int(default_value=5)
    stride = Int(default_value=2)

    # params for TextEmbeddingConv2
    filters = Int(default_value=100) # default in the paper
    max_kernel_size = Int(default_value=5) # we use 2, 3, 4, 5. paper uses 3, 4, 5

    # dense layers
    dense_config = Unicode(default_value='20,20')  # presumably comma-separated layer sizes -- confirm

    # ANN retrieval / reranking sizes used at evaluation time.
    num_ann_nbrs_to_fetch = Int(default_value=100)
    num_candidates_to_rank = Int(default_value=100) # No. of candidates to fetch from ANN at eval time
    extend_candidate_citations = Bool(default_value=True) # Whether to include citations of ANN