本文主要是介绍python解析html (bs4 lxml),希望对大家解决编程问题提供一定的参考价值,需要的开发者们随着小编来一起学习吧!
1. bs4
# Demonstrate common BeautifulSoup (bs4) operations on a local test.html
# (a saved Django admin login page — see the sample markup below).
import bs4

# Context manager closes the file handle deterministically
# (the original left it open).
with open("test.html") as fp:
    soup = bs4.BeautifulSoup(fp.read(), "lxml")

# --- Element text ---
ele1 = soup.select("#site-name")
print(ele1[0].getText())
print(ele1[0].string)

# Convert an element back to its markup string.
print(str(ele1[0]))
# <h1 id="site-name"><a href="/admin/">Django administration</a></h1>
# Django administration

# Pretty-print the whole document.
print(soup.prettify())

# --- Element attributes ---
ele = soup.select("[name='csrfmiddlewaretoken']")
print(ele[0].attrs)
# {'type': 'hidden', 'name': 'csrfmiddlewaretoken', 'value': 'fQ144OxQZmYo0nzYF3YcpeR5PKCOrdiJat8wdmOmoLuuijUxrcEAzx3rc7ZQmaHn'}

# A single attribute, two equivalent ways.
print(ele[0].get("type"))
# hidden
print(ele[0].attrs["name"])
# csrfmiddlewaretoken

# --- Tag shortcuts ---
print(soup.title)
# <title>Log in | Django site admin</title>
print(soup.title.name)
# title
print(soup.string)
# None — the document has more than one child string

print(soup.h1)
# <h1 id="site-name"><a href="/admin/">Django administration</a></h1>
print(soup.h1["id"])
# site-name

# find_all returns every matching element; soup.find returns only the first.
print(soup.find_all("h1"))
# Several tag names at once:
print(soup.find_all(["title", "input"]))
# Restrict matches by attribute values:
ele = soup.find_all("input", attrs={"name": "password"})
print(ele)

# --- Comments inside a tag ---
string = '<p><!--这是注释 --></p>'
# Name the parser explicitly — omitting it triggers GuessedAtParserWarning
# and makes the parser choice environment-dependent.
sp = bs4.BeautifulSoup(string, "lxml")
print(sp.p.string)  # prints the comment text: 这是注释

# --- Navigating the tree ---
print(soup.body)            # the node itself
print(soup.body.contents)   # list of direct children
print(soup.body.children)   # iterator over direct children
for child in soup.body.children:
    print(child)

print(soup.title.parent.name)   # head
print(soup.title.parents)       # generator over all ancestors

# Siblings: next/previous of the header <div>, then all siblings of <title>.
r = soup.find_all("div", attrs={"id": "header"})[0]
print(r.next_sibling)
print(r.previous_sibling)
for sib in soup.title.next_siblings:
    print(sib)
示例 HTML(即上面代码读取的 test.html 的内容):
<!-- Sample test.html: a saved Django admin login page used by the bs4 examples -->
<!DOCTYPE html><html lang="en-us" >
<head>
<title>Log in | Django site admin</title>
<link rel="stylesheet" type="text/css" href="/static/admin/css/base.css">
<link rel="stylesheet" type="text/css" href="/static/admin/css/login.css"><meta name="viewport" content="user-scalable=no, width=device-width, initial-scale=1.0, maximum-scale=1.0"><link rel="stylesheet" type="text/css" href="/static/admin/css/responsive.css"><meta name="robots" content="NONE,NOARCHIVE">
</head><body class=" login"data-admin-utc-offset="0"><!-- Container -->
<div id="container"><!-- Header --><div id="header"><div id="branding"><h1 id="site-name"><a href="/admin/">Django administration</a></h1></div></div><!-- END Header --><!-- Content --><div id="content" class="colM"><div id="content-main"><form action="/admin/login/" method="post" id="login-form"><input type="hidden" name="csrfmiddlewaretoken" value="fQ144OxQZmYo0nzYF3YcpeR5PKCOrdiJat8wdmOmoLuuijUxrcEAzx3rc7ZQmaHn"><div class="form-row"><label class="required" for="id_username">Username:</label> <input type="text" name="username" autofocus autocapitalize="none" autocomplete="username" maxlength="150" required id="id_username"></div><div class="form-row"><label class="required" for="id_password">Password:</label> <input type="password" name="password" autocomplete="current-password" required id="id_password"><input type="hidden" name="next" value="/admin/"></div><div class="submit-row"><label>&nbsp;</label><input type="submit" value="Log in"></div>
</form></div><br class="clear"></div><!-- END Content --><div id="footer"></div>
</div>
<!-- END Container --></body>
</html>
2. lxml
获取所有栏目的博客
# Fetch a CSDN blog homepage and extract every category name + URL via XPath.
import requests
from lxml import html

headers = {"User-Agent": "Python-urllib/2.6"}
res = requests.get("https://blog.csdn.net/shitou987", headers=headers)
dom = html.document_fromstring(res.text)

# href of each category link, and the text of its title span.
category_urls = dom.xpath('//*[@id="asideCategory"]//*[@class="clearfix"]/@href')
category_names = dom.xpath('//*[@id="asideCategory"]//*[@class="title oneline"]/span/text()')
# The text nodes could also be read element-wise via category_names[0].text.

# zip pairs names with urls positionally; unlike indexing with enumerate,
# it cannot raise IndexError if the two lists differ in length.
category_list = [{"name": name, "url": url}
                 for name, url in zip(category_names, category_urls)]
print(category_list)
这篇关于python解析html (bs4 lxml)的文章就介绍到这儿,希望我们推荐的文章对程序员们有所帮助!