How to use the sqlalchemy.and_ function in SQLAlchemy

To help you get started, we've selected a few sqlalchemy.and_ examples based on popular ways it is used in public projects.
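
and_() combines several column expressions into a single SQL AND condition that can be passed to filter() or where(). Before the project examples, here is a minimal, self-contained sketch of the pattern; the User model, its columns, and the in-memory SQLite database are hypothetical, invented purely for illustration.

from sqlalchemy import Column, Integer, String, and_, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

# Hypothetical model, defined only to make the sketch self-contained.
class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    age = Column(Integer)

engine = create_engine("sqlite://")  # throwaway in-memory database
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([User(name="alice", age=30), User(name="bob", age=17)])
    session.commit()

    # and_() joins the two comparisons into one WHERE ... AND ... clause.
    adults_named_alice = (
        session.query(User)
        .filter(and_(User.name == "alice", User.age >= 18))
        .all()
    )

Note that filter() already ANDs multiple positional conditions together, so and_() is most useful when you want to build the conjunction explicitly, for example to store it in a variable or nest it inside or_().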

github EI-CoreBioinformatics/mikado/test/test_serialisation.py (view on GitHub)
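In this Mikado test, and_() combines chromosome and start-coordinate conditions in a filter() so the assertions can count the junctions that match both.
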
        # excerpt begins mid-test; this count() call was part of a
        # preceding, truncated assertion
        session.query(Mikado.serializers.junction.Junction).count()

        self.assertEqual(
            session.query(Mikado.serializers.junction.Junction).filter(
                Mikado.serializers.junction.Junction.chrom != "Chr5"
            ).count(), 1,
            [_.chrom for _ in
                session.query(Mikado.serializers.junction.Junction).filter(
                    Mikado.serializers.junction.Junction.chrom != "Chr5"
            )])

        # It's a BED file translated into 1-based, so add 1 to starts
        self.assertEqual(
            session.query(Mikado.serializers.junction.Junction).filter(
                and_(
                    Mikado.serializers.junction.Junction.chrom == "Chr5",
                    Mikado.serializers.junction.Junction.start == 26510619,
                )
            ).count(), 1,
            [str(_) for _ in
                session.query(Mikado.serializers.junction.Junction).filter(
                    and_(
                        Mikado.serializers.junction.Junction.name == "portcullis_junc_0",
                    )
            )])
github cea-sec/ivre/ivre/db/sql/__init__.py (view on GitHub)
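This IVRE migration helper uses and_() in the where() clause of two UPDATEs: one matching script rows by port and name, the other matching scan rows by schema version while excluding failed IDs.
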
                try:
                    data = xmlnmap.change_http_git(rec.data[rec.name])
                except Exception:
                    utils.LOGGER.warning("Cannot migrate host %r", rec.id,
                                         exc_info=True)
                    failed.add(rec.id)
                else:
                    if data:
                        self.db.execute(
                            update(self.tables.script)
                            .where(and_(self.tables.script.port == rec.port,
                                        self.tables.script.name == rec.name))
                            .values(data={rec.name: data})
                        )
        self.db.execute(
            update(self.tables.scan)
            .where(and_(self.tables.scan.schema_version == 14,
                        self.tables.scan.id.notin_(failed)))
            .values(schema_version=15)
        )
        return len(failed)
github okfn/vdm/vdm/elixir/complex.py (view on GitHub)
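Here and_() pairs a history-table predicate with a timestamp upper bound to fetch the most recent Version at or before a given datetime.
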
    def get_as_of(self, dt):
        # if our current version's timestamp predates the passed-in
        # timestamp, then the most recent version is the one in effect
        if self.timestamp < dt:
            return self

        # otherwise, we need to look to the history table to get our
        # older version
        query = object_session(self).query(Version)
        query = query.filter(and_(get_history_where(self),
                                  Version.c.timestamp <= dt))
        query = query.order_by(desc(Version.c.timestamp)).limit(1)
        return query.first()
github wylok/sparrow/operation/examine.py (view on GitHub)
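This Flask view normally lists the latest 500 work orders, but when a specific work number is queried it narrows the filter with and_() on both source and work_number.
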
        users = db_sso.query.with_entities(db_sso.dingunionid, db_sso.realName).all()
        users = {info[0]: info[1:] for info in users}
        sql_executes = db_sql_execute.query.with_entities(
            db_sql_execute.work_number, db_sql_execute.date,
            db_sql_execute.host, db_sql_execute.port,
            db_sql_execute.database, db_sql_execute.describe,
            db_sql_execute.dingid).all()
        sql_executes = {info[0]: info[1:] for info in sql_executes}
        work_orders = db_work_order.query.with_entities(
            db_work_order.work_number,
            db_work_order.dingid,
            db_work_order.status).filter(
                db_work_order.source == source
            ).order_by(desc(db_work_order.id)).limit(500).all()
        if action and work_number:
            if action == 'query':
                work_orders = db_work_order.query.with_entities(
                    db_work_order.work_number,
                    db_work_order.dingid,
                    db_work_order.status).filter(and_(
                        db_work_order.source == source,
                        db_work_order.work_number == work_number)).all()
        if work_orders:
            work_orders = [list(info) for info in work_orders]
            for info in work_orders:
                info.extend(sql_executes[info[0]][:-1])
                info.extend(users[sql_executes[info[0]][-1]])
        new_work_number = Redis.smembers(Key)
    return render_template('ensure_sql_execute.html', tables=tables,
                           work_orders=work_orders, msg=msg,
                           new_work_number=new_work_number,
                           # the title below means "online SQL execution ticket management"
                           total='线上SQL执行工单管理')
github rucio/rucio/lib/rucio/core/did.py (view on GitHub)
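Rucio builds one and_() clause per file, pairing scope and name (and, for archive contents, child scope and child name), collecting them into condition lists for bulk lookups.
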
    """
    :param account: The account owner.
    :param ignore_duplicate: If True, ignore duplicate entries.
    :param session: The database session in use.
    """
    # lookup for existing files
    files_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name,
                                models.DataIdentifier.bytes, models.DataIdentifier.guid,
                                models.DataIdentifier.events,
                                models.DataIdentifier.availability,
                                models.DataIdentifier.adler32, models.DataIdentifier.md5).\
        filter(models.DataIdentifier.did_type == DIDType.FILE).\
        with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')

    file_condition = []
    for file in files:
        file_condition.append(and_(models.DataIdentifier.scope == file['scope'],
                                   models.DataIdentifier.name == file['name']))

    existing_content, existing_files = [], {}
    if ignore_duplicate:
        # lookup for existing content
        content_query = session.query(models.ConstituentAssociation.scope,
                                      models.ConstituentAssociation.name,
                                      models.ConstituentAssociation.child_scope,
                                      models.ConstituentAssociation.child_name).\
            with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle')
        content_condition = []
        for file in files:
            content_condition.append(and_(models.ConstituentAssociation.scope == scope,
                                          models.ConstituentAssociation.name == name,
                                          models.ConstituentAssociation.child_scope == file['scope'],
                                          models.ConstituentAssociation.child_name == file['name']))
github pycogent/pycogent/cogent/db/ensembl/genome.py (view on GitHub)
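This PyCogent helper only wraps the biotype and description filters in sql.and_() when both are supplied; otherwise it returns whichever single condition exists.
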
        btype, descr = None, None

        if BioType:
            if like:
                btype = gene_table.c.biotype.like('%' + BioType + '%')
            else:
                btype = gene_table.c.biotype == BioType
        if Description:
            if like:
                descr = gene_table.c.description.like('%' + Description + '%')
            else:
                descr = gene_table.c.description.op('regexp')(
                                    '[[:<:]]%s[[:>:]]' % Description)

        if btype is not None and descr is not None:
            condition = sql.and_(btype, descr)
        elif btype is not None:
            condition = btype
        elif descr is not None:
            condition = descr
        else:
            # neither filter was supplied; without this branch the
            # return below would raise NameError
            condition = None

        return condition
github amitt001/pygmy/pygmy/model/link.py (view on GitHub)
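pygmy chains three predicates inside and_() (is_(False), isnot(None), and a non-empty check) to fetch the newest auto-generated short link.
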
    def latest_default_link(self, db):
        """Return the latest non-custom link."""
        return db.query(Link).filter(and_(
            Link.is_custom.is_(False),
            Link.short_code.isnot(None),
            Link.short_code != ''
        )).order_by(Link.id.desc()).first()
github strinking/futaba/futaba/sql/models/settings.py (view on GitHub)
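Futaba keys its optional-cog settings on two columns, so the select() uses and_() to match both guild_id and cog_name, inserting a default row when none exists.
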
    def fetch_optional_cog_settings(self, guild, cog_name, default=None):
        logger.info(
            "Fetching or inserting settings for optional cog '%s' in guild '%s' (%d)",
            cog_name,
            guild.name,
            guild.id,
        )

        sel = select([self.tb_optional_cog_settings.c.settings]).where(
            and_(
                self.tb_optional_cog_settings.c.guild_id == guild.id,
                self.tb_optional_cog_settings.c.cog_name == cog_name,
            )
        )
        result = self.sql.execute(sel)

        if result.rowcount:
            (settings,) = result.fetchone()
        else:
            logger.info("Settings didn't exist, creating...")
            ins = self.tb_optional_cog_settings.insert().values(
                guild_id=guild.id, cog_name=cog_name, settings={}
            )
            self.sql.execute(ins)
            settings = if_not_null(default, {})
github galaxyproject/galaxy/scripts/cleanup_datasets/cleanup_datasets.py (view on GitHub)
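Galaxy's cleanup script uses and_() to combine deleted, purged, and update_time predicates, with force_retry deciding whether already-purged libraries are revisited.
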
def purge_libraries(app, cutoff_time, remove_from_disk, info_only=False, force_retry=False):
    # Purges deleted libraries whose update_time is older than the cutoff_time.
    # The dataset associations of each library are also marked as deleted.
    # The Purge Dataset method will purge each Dataset as necessary
    # library.purged == True simply means that it can no longer be undeleted
    # i.e. all associated LibraryDatasets/folders are marked as deleted
    library_count = 0
    start = time.time()
    if force_retry:
        libraries = app.sa_session.query(app.model.Library) \
                                  .filter(and_(app.model.Library.table.c.deleted == true(),
                                               app.model.Library.table.c.update_time < cutoff_time))
    else:
        libraries = app.sa_session.query(app.model.Library) \
                                  .filter(and_(app.model.Library.table.c.deleted == true(),
                                               app.model.Library.table.c.purged == false(),
                                               app.model.Library.table.c.update_time < cutoff_time))
    for library in libraries:
        _purge_folder(library.root_folder, app, remove_from_disk, info_only=info_only)
        if not info_only:
            print("Purging library id ", library.id)
            library.purged = True
            app.sa_session.add(library)
            app.sa_session.flush()
        library_count += 1
    stop = time.time()
    print('# Purged %d libraries.' % library_count)
github apache/allura/ForgeClassic/sfx/model/mailman.py (view on GitHub)
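Allura targets a single mailing-list row by combining group_id and group_list_id with sa.and_() in the whereclause of an UPDATE.
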
    def update(self, is_public, description):
        action_logger.info('UpdateML')
        if self.is_public == is_public and self.description == description:
            return

        stmt = T.mail_group_list.update(
            whereclause=sa.and_(
                T.mail_group_list.c.group_id == self.group_id,
                T.mail_group_list.c.group_list_id == self.group_list_id))
        stmt.execute(is_public=is_public, description=description)
        stmt = T.lists.update(whereclause=T.lists.c.name == self.name)
        stmt.execute(is_public=is_public)
        tg.flash('The mailing list was successfully updated')