How to use the nhentai.logger.logger.warn function in nhentai

To help you get started, we’ve selected a few nhentai examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github RicterZ / nhentai / nhentai / downloader.py View on Github external
def download(self, queue, folder=''):
    """Download every URL in *queue* into *folder* using a thread pool.

    Args:
        queue: iterable of URLs to download.
        folder: destination sub-directory; joined onto ``self.path`` when set.

    Side effects:
        Creates *folder* if missing (exits the process on failure) and
        blocks until all pool workers have finished.
    """
    # Coerce non-string folder values (e.g. numeric doujinshi IDs) to str.
    if not isinstance(folder, text):
        folder = str(folder)

    if self.path:
        folder = os.path.join(self.path, folder)

    if not os.path.exists(folder):
        logger.warn('Path \'{0}\' does not exist, creating.'.format(folder))
        try:
            os.makedirs(folder)
        except EnvironmentError as e:
            # Cannot create the target directory: nothing sensible to do.
            logger.critical('{0}'.format(str(e)))
            exit(1)
    else:
        logger.warn('Path \'{0}\' already exist.'.format(folder))

    # Each work item is (positional args, keyword args) for self._download.
    queue = [([url], {'folder': folder}) for url in queue]

    self.thread_pool = threadpool.ThreadPool(self.thread_count)
    requests_ = threadpool.makeRequests(self._download, queue, self._download_callback)
    # Plain loop instead of a throwaway list comprehension: we only want
    # the putRequest side effect, not a list of its return values.
    for req in requests_:
        self.thread_pool.putRequest(req)

    self.thread_pool.wait()
github RicterZ / nhentai / nhentai / parser.py View on Github external
def __api_suspended_search_parser(keyword, sorting, page):
    """Query the search API for *keyword*, retrying up to five times.

    Returns a list of ``{'id', 'title'}`` dicts; titles are truncated to
    85 characters. Exits the process if all five attempts fail.
    """
    logger.debug('Searching doujinshis using keywords {0}'.format(keyword))
    result = []

    search_params = {'query': keyword, 'page': page, 'sort': sorting}
    for attempt in range(5):
        try:
            response = request('get', url=constant.SEARCH_URL, params=search_params).json()
        except Exception as e:
            # Last attempt exhausted: report the error and bail out.
            if attempt == 4:
                logger.critical(str(e))
                logger.warn('If you are in China, please configure the proxy to fu*k GFW.')
                exit(1)
            continue
        break

    if 'result' not in response:
        raise Exception('No result in response')

    for row in response['result']:
        english_title = row['title']['english']
        if len(english_title) > 85:
            english_title = english_title[:85] + '..'
        result.append({'id': row['id'], 'title': english_title})

    if not result:
        logger.warn('No results for keywords {}'.format(keyword))

    return result
github RicterZ / nhentai / nhentai / parser.py View on Github external
logger.critical(str(e))
                logger.warn('If you are in China, please configure the proxy to fu*k GFW.')
                exit(1)
            continue
        break

    if 'result' not in response:
        raise Exception('No result in response')

    for row in response['result']:
        title = row['title']['english']
        title = title[:85] + '..' if len(title) > 85 else title
        result.append({'id': row['id'], 'title': title})

    if not result:
        logger.warn('No results for keywords {}'.format(keyword))

    return result
github RicterZ / nhentai / nhentai / parser.py View on Github external
def search_parser(keyword, sorting='date', page=1):
    """Fetch one page of HTML search results for *keyword* and parse them.

    Returns whatever ``_get_title_and_id`` extracts; logs a warning when
    the page yields nothing.
    """
    logger.debug('Searching doujinshis of keyword {0}'.format(keyword))
    search_params = {'q': keyword, 'page': page, 'sort': sorting}
    html = request('get', url=constant.SEARCH_URL, params=search_params).content

    result = _get_title_and_id(html)
    if not result:
        logger.warn('Not found anything of keyword {}'.format(keyword))

    return result
github RicterZ / nhentai / nhentai / parser.py View on Github external
logger.debug('Fetching page {0} for doujinshi with tag \'{1}\''.format(p, tag_name))
            response = request('get', url='%s/%s/%s?page=%d' % (constant.TAG_URL[index], tag_name, sorting, p)).content
            result += _get_title_and_id(response)
        else:
            for i in tag_name:
                logger.debug('Fetching page {0} for doujinshi with tag \'{1}\''.format(p, i))
                response = request('get',
                                   url='%s/%s/%s?page=%d' % (constant.TAG_URL[index], i, sorting, p)).content
                result += _get_title_and_id(response)

        if not result:
            logger.error('Cannot find doujinshi id of tag \'{0}\''.format(tag_name))
            return

    if not result:
        logger.warn('No results for tag \'{}\''.format(tag_name))

    return result
github RicterZ / nhentai / nhentai / parser.py View on Github external
response = request('get', url=constant.TAG_API_URL, params={'sort': sorting, 'tag_id': tag_id}).json()
    page = max_page if max_page <= response['num_pages'] else int(response['num_pages'])

    for i in range(1, page + 1):
        logger.info('Getting page {} ...'.format(i))

        if page != 1:
            response = request('get', url=constant.TAG_API_URL,
                               params={'sort': sorting, 'tag_id': tag_id}).json()
    for row in response['result']:
        title = row['title']['english']
        title = title[:85] + '..' if len(title) > 85 else title
        result.append({'id': row['id'], 'title': title})

    if not result:
        logger.warn('No results for tag id {}'.format(tag_id))

    return result
github RicterZ / nhentai / nhentai / parser.py View on Github external
logger.log(15, 'Fetching doujinshi information of id {0}'.format(id_))
    doujinshi = dict()
    doujinshi['id'] = id_
    url = '{0}/{1}/'.format(constant.DETAIL_URL, id_)

    try:
        response = request('get', url)
        if response.status_code in (200, ):
            response = response.content
        else:
            logger.debug('Slow down and retry ({}) ...'.format(id_))
            time.sleep(1)
            return doujinshi_parser(str(id_))

    except Exception as e:
        logger.warn('Error: {}, ignored'.format(str(e)))
        return None

    html = BeautifulSoup(response, 'html.parser')
    doujinshi_info = html.find('div', attrs={'id': 'info'})

    title = doujinshi_info.find('h1').text
    subtitle = doujinshi_info.find('h2')

    doujinshi['name'] = title
    doujinshi['subtitle'] = subtitle.text if subtitle else ''

    doujinshi_cover = html.find('div', attrs={'id': 'cover'})
    img_id = re.search('/galleries/([\d]+)/cover\.(jpg|png|gif)$', doujinshi_cover.a.img.attrs['data-src'])

    ext = []
    for i in html.find_all('div', attrs={'class': 'thumb-container'}):