# Python异步爬虫示例1 (Python async crawler example 1)

import asyncio
import time
import aiohttp


def main():
    """Crawl every URL in the list concurrently and print the elapsed time.

    Builds one task per URL, runs them all on a single event loop, and
    reports total wall-clock time. URL results are discarded; this demo
    only measures concurrent fetch time.
    """
    start = time.time()
    url_list = ['网址1', '网址2', '网址3', '网址4']
    # Obtain the loop BEFORE creating tasks: asyncio.ensure_future() with no
    # running loop is deprecated since 3.10 and raises on newer versions.
    loop = asyncio.get_event_loop()
    tasks = [loop.create_task(get_html_info(url)) for url in url_list]
    # gather(return_exceptions=True) mirrors asyncio.wait(): a failing
    # fetch is recorded, not raised, so the timing printout still runs.
    loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
    loop.close()
    end = time.time()
    print(f'所需时间:{end-start}')


# 协程任务,获取网页数据
async def get_html_info(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            if resp.status == 200:
                print("爬取数据中:" + str(url))
                res = await resp.text()
                await session.close()
 


# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()