I'm writing a crawler in Python to list all the links on a website, but I'm getting an error and I can't see what is causing it:

Traceback (most recent call last):
  File "vul_scanner.py", line 8, in <module>
    vuln_scanner.crawl(target_url)
  File "C:\Users\Lenovo x240\Documents\website\website\spiders\scanner.py", line 18, in crawl
    href_links = self.extract_links_from(url)
  File "C:\Users\Lenovo x240\Documents\website\website\spiders\scanner.py", line 15, in extract_links_from
    return re.findall('(?:href=")(.*?)"', response.content)
  File "C:\Users\Lenovo x240\AppData\Local\Programs\Python\Python38\lib\re.py", line 241, in findall
    return _compile(pattern, flags).findall(string)
TypeError: cannot use a string pattern on a bytes-like object

My code is as follows. In scanner.py:

# To ignore numpy errors:
# pylint: disable=E1101
import urllib
import requests
import re
from urllib.parse import urljoin

class Scanner:

    def __init__(self, url):
        self.target_url = url
        self.target_links = []

    def extract_links_from(self, url):
        response = requests.get(url)
        return re.findall('(?:href=")(.*?)"', response.content)

    def crawl(self, url):
        href_links = self.extract_links_from(url)
        for link in href_links:
            link = urljoin(url, link)

            if "#" in link:
                link = link.split("#")[0]

            if self.target_url in link and link not in self.target_links:
                self.target_links.append(link)
                print(link)
                self.crawl(link)

In vul_scanner.py:

import scanner
# To ignore numpy errors:
# pylint: disable=E1101

target_url = "https://www.amazon.com"
vuln_scanner = scanner.Scanner(target_url)
vuln_scanner.crawl(target_url)

The command I run is: python vul_scanner.py
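(For reference: the mismatch is between the str pattern given to re.findall and the bytes object returned by response.content. A minimal sketch of one way the TypeError could be avoided, assuming the goal is simply to pull href values out of the decoded HTML, is to match against response.text instead:

# Sketch only, not the original Scanner class: requests' response.text is the
# response body decoded to str, so a str regex pattern can be applied to it;
# response.content is raw bytes and triggers the TypeError in the traceback.
import re
import requests

def extract_links_from(url):
    response = requests.get(url)
    return re.findall('(?:href=")(.*?)"', response.text)

print(extract_links_from("https://www.amazon.com"))

Equivalently, a bytes pattern such as b'(?:href=")(.*?)"' could be matched against response.content; either way, the pattern and the searched object must be the same type.)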
Cannot use a string pattern on a bytes-like object (Python)
慕桂英3389331
2024-01-04 16:39:00