How to use the neo.lib.logging module in neo

To help you get started, we’ve selected a few neo examples, based on popular ways neo.lib.logging is used in public projects.
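
Every example below gets its logger the same way, with from neo.lib import logging: NEO's wrapper around the standard logging module, which adds helpers such as setup() and packet() on top of the usual debug/info/exception calls. As a rough sketch only (the log file path and messages are invented for illustration, and only calls that appear in the excerpts below are used), direct usage looks like this:

from neo.lib import logging

# Direct log output to a file, as neostorage.py does with its 'logfile' option.
logging.setup('/tmp/neo.log')  # hypothetical path, for illustration only

# Messages use %-style formatting, like the stdlib logging module.
logging.info('cluster state is now %s', 'RECOVERING')
logging.debug('connected to %r', 'backend')

try:
    1 / 0
except Exception:
    # logging.exception records the message together with the current traceback.
    logging.exception('unexpected failure')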

github Nexedi / neoppod / neo/storage/database/mysqldb.py

        else:
            timeout_at = time.time() + self._wait
        last = None
        while True:
            try:
                self.conn = MySQLdb.connect(**kwd)
                break
            except Exception as e:
                if None is not timeout_at <= time.time():
                    raise
                e = str(e)
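                # Switch to the quieter debug level when the same error message
                # repeats; a new message is logged with its full traceback.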
                if last == e:
                    log = logging.debug
                else:
                    last = e
                    log = logging.exception
                log('Connection to MySQL failed, retrying.')
                time.sleep(1)
        self._config = {}
        conn = self.conn
        conn.autocommit(False)
        conn.query("SET SESSION group_concat_max_len = %u" % (2**32-1))
        conn.set_sql_mode("TRADITIONAL,NO_ENGINE_SUBSTITUTION")
        def query(sql):
            conn.query(sql)
            r = conn.store_result()
            return r.fetch_row(r.num_rows())
        if self.LOCK:
            (locked,), = query("SELECT GET_LOCK('%s.%s', 0)"
                % (self.db, self.LOCK))
            if not locked:
                sys.exit(self.LOCKED)

github Nexedi / neoppod / neo/storage/handlers/initialization.py

        pt.load(ptid, num_replicas, row_list, app.nm)
        if not pt.filled():
            raise ProtocolError('Partial partition table received')
        # Install the partition table into the database for persistence.
        cell_list = []
        unassigned = range(pt.getPartitions())
        for offset in reversed(unassigned):
            for cell in pt.getCellList(offset):
                cell_list.append((offset, cell.getUUID(), cell.getState()))
                if cell.getUUID() == app.uuid:
                    unassigned.remove(offset)
        # delete objects database
        dm = app.dm
        if unassigned:
          if app.disable_drop_partitions:
            logging.info('partitions %r are discarded but actual deletion'
                         ' of data is disabled', unassigned)
          else:
            logging.debug('drop data for partitions %r', unassigned)
            dm.dropPartitions(unassigned)

        dm.changePartitionTable(ptid, num_replicas, cell_list, reset=True)
        dm.commit()

github Nexedi / neoppod / neo/lib/connector.py

def _error(self, op, exc=None):
        if exc is None:
            logging.debug('%r closed in %s', self, op)
        else:
            logging.debug("%s failed for %s: %s (%s)",
                op, self, errno.errorcode[exc.errno], exc.strerror)
        raise ConnectorException

github Nexedi / neoppod / neo/master/recovery.py

def run(self):
        """
        Recover the status about the cluster. Obtain the last OID, the last
        TID, and the last Partition Table ID from storage nodes, then get
        back the latest partition table or make a new table from scratch,
        if this is the first time.
        A new primary master may also arise during this phase.
        """
        logging.info('begin the recovery of the status')
        app = self.app
        pt = app.pt = app.newPartitionTable()
        app.changeClusterState(ClusterStates.RECOVERING)

        self.try_secondary = True

        # collect the last partition table available
        poll = app.em.poll
        while 1:
            if self.try_secondary:
                # Keep trying to connect to all other known masters,
                # to make sure there is a challenge between each pair
                # of masters in the cluster. If we win, all connections
                # opened here will be closed.
                self.try_secondary = False
                node_list = []

github Nexedi / neoppod / neo/lib/connection.py

def _handle(self, connection, packet):
        pending = self._pending
        assert len(pending) == 1 or pending[0][0], pending
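        # Trace every handled packet through NEO's dedicated packet logging.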
        logging.packet(connection, packet, False)
        if connection.isClosed() and (connection.isAborted() or
                                      packet.ignoreOnClosedConnection()):
            logging.debug('Ignoring packet %r on closed connection %r',
                packet, connection)
            return
        if not packet.isResponse(): # notification
            # XXX: If there are several handlers, which one to use ?
            pending[0][1].packetReceived(connection, packet)
            return
        msg_id = packet.getId()
        request_dict, handler = pending[0]
        # checkout the expected answer class
        try:
            klass, kw = request_dict.pop(msg_id)
        except KeyError:
            klass = None

github Nexedi / neoppod / neo/scripts/neostorage.py

def main(args=None):
    from neo.storage.app import Application
    config = Application.option_parser.parse(args)

    # setup custom logging
    logging.setup(config.get('logfile'))

    # and then, load and run the application
    app = Application(config)
    if not config.get('reset'):
        app.run()

github Nexedi / neoppod / neo/lib/threaded_app.py

def close(self):
        # Clear all connections
        self.master_conn = None
        if self.poll_thread.is_alive():
            for conn in self.em.getConnectionList():
                conn.close()
            # Stop polling thread
            logging.debug('Stopping %s', self.poll_thread)
            self.em.wakeup(thread.exit)
        else:
            super(ThreadedApplication, self).close()

github Nexedi / neoppod / neo/client/app.py

        tid_set = set()
        for storage_node in self.pt.getNodeSet(True):
            conn = self.getStorageConnection(storage_node)
            if conn is None:
                continue
            conn.ask(packet, queue=queue, tid_set=tid_set)

        # Wait for answers from all storages.
        # TODO: Results are incomplete when readable cells move concurrently
        #       from one storage to another. We detect when this happens and
        #       retry.
        self.waitResponses(queue)

        # Reorder tids
        ordered_tids = sorted(tid_set, reverse=True)
        logging.debug("UndoLog tids %s", map(dump, ordered_tids))
        # For each transaction, get info
        undo_info = []
        append = undo_info.append
        for tid in ordered_tids:
            (txn_info, txn_ext) = self._getTransactionInformation(tid)
            if filter is None or filter(txn_info):
                txn_info.pop('packed')
                txn_info.pop("oids")
                if txn_ext:
                    txn_info.update(loads(txn_ext))
                append(txn_info)
                if len(undo_info) >= last - first:
                    break
        # Check we return at least one element, otherwise call
        # again but extend offset
        if len(undo_info) == 0 and not block:

github Nexedi / neoppod / neo/client/Storage.py

def deleteObject(self, oid, serial, transaction):
        try:
            self.app.store(oid, serial, None, None, transaction)
        except Exception:
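            # Record the failing arguments together with the traceback,
            # then re-raise so the caller still sees the original error.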
            logging.exception('oid=%r, serial=%r, transaction=%r',
                              oid, serial, transaction)
            raise

github Nexedi / neoppod / neo/master/recovery.py

                if pt.filled():
                    if app.truncate_tid:
                        node_list = app.nm.getIdentifiedList(pool_set={uuid
                            for uuid, tid in self.truncate_dict.iteritems()
                            if not tid or app.truncate_tid < tid})
                        if node_list:
                            truncate = Packets.Truncate(app.truncate_tid)
                            for node in node_list:
                                conn = node.getConnection()
                                conn.send(truncate)
                                self.handlerSwitched(conn, False)
                            continue
                    node_list = pt.getConnectedNodeList()
                break

        logging.info('startup allowed')

        for node in node_list:
            assert node.isPending(), node
            node.setRunning()

        for node in app.nm.getMasterList():
            if not (node is app._node or node.isIdentified()):
                if node.isConnected(True):
                    node.getConnection().close()
                    assert node.isDown(), node
                elif not node.isDown():
                    assert self.try_secondary, node
                    node.setDown()
                    node_list.append(node)

        app.broadcastNodesInformation(node_list)