One thing to watch out for: when fetching the thread IDs, Tieba has an anti-scraping measure that comments the data out with <!-- -->, so the comment markers have to be stripped before the HTML is parsed.
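To see why the stripping step is needed, here is a minimal sketch using made-up markup shaped like Tieba's response (the /p/123456 link is hypothetical):

```python
from bs4 import BeautifulSoup

html = '<div><!-- <a href="/p/123456">thread</a> --></div>'

# Parsed as-is, the link is invisible: it lives inside a comment node.
print(BeautifulSoup(html, "html.parser").select("a"))       # []

# After stripping the comment markers, the link is ordinary markup again.
stripped = html.replace('<!--', '').replace('-->', '')
print(BeautifulSoup(stripped, "html.parser").select("a"))   # [<a href="/p/123456">thread</a>]
```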
Note that the request has to be disguised as a browser, otherwise no data comes back.
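A quick way to verify this is to compare a bare request with a disguised one; a rough sketch (network access required, and the exact response Baidu serves to undisguised clients may vary over time):

```python
import requests

url = "https://tieba.baidu.com/f?kw=%E5%B8%9D&ie=utf-8"
ua = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
      '(KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36')

bare = requests.get(url)                                    # no browser headers
disguised = requests.get(url, headers={'User-Agent': ua})   # looks like Chrome

# The disguised response should contain the (commented-out) thread list;
# the bare one is typically a much shorter anti-crawler page.
print(len(bare.text), len(disguised.text))
```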
The full code is as follows:
```python
import queue
import time

import requests
from bs4 import BeautifulSoup

# Browser-like headers; without them Tieba serves an anti-crawler page
# instead of the real thread list.
header = {
    'Host': 'tieba.baidu.com',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Cookie': 'xxxxxxxxxxxxxxxx',  # fill in your own cookie
    'Pragma': 'no-cache',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
    'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"'
}

baseUrl = "https://tieba.baidu.com/f?kw=%E5%B8%9D&ie=utf-8"
urlQueue = queue.Queue(10000)


def getRequest(url):
    response = requests.get(url, headers=header)
    return response.text


if __name__ == '__main__':
    urlQueue.put(baseUrl)
    for i in range(100):
        url = urlQueue.get()
        content = getRequest(url)
        # The thread list is hidden inside <!-- --> comments; strip the
        # markers so BeautifulSoup can see the links.
        content = content.replace('-->', '').replace('<!--', '')
        contentSoup = BeautifulSoup(content, "html.parser")
        for urlTmp in contentSoup.select("a"):
            if 'href' in urlTmp.attrs:
                urlString = urlTmp['href']
                if '/p/' in urlString:
                    # /p/<id> marks a thread link; record the ID and
                    # queue the full thread URL for later crawling.
                    postId = urlString.split('/p/')[1]
                    print(postId)
                    urlQueue.put('https://tieba.baidu.com/p/' + postId)
        time.sleep(1)  # be polite between requests
    print('over')
```
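As a follow-up note: instead of blanket string replacement, the hidden blocks can also be extracted with BeautifulSoup's Comment class, which only touches actual comment nodes. A sketch (the helper name linksFromComments is my own, not part of the original code):

```python
from bs4 import BeautifulSoup, Comment


def linksFromComments(html):
    """Collect <a href> values hidden inside HTML comments."""
    soup = BeautifulSoup(html, "html.parser")
    hrefs = []
    for node in soup.find_all(string=lambda text: isinstance(text, Comment)):
        # Re-parse the comment body as HTML and pull out its links.
        inner = BeautifulSoup(node, "html.parser")
        hrefs.extend(a['href'] for a in inner.select("a[href]"))
    return hrefs


# Usage with the code above: hrefs = linksFromComments(getRequest(baseUrl))
```

The trade-off: string replacement is simpler and also un-hides markup in nested or malformed comments, while the Comment-based version cannot accidentally mangle <!-- or --> sequences that appear inside legitimate text.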