Python image crawler code (crawling images with Python and BeautifulSoup)
How do I write a Python crawler on Linux to fetch images?
This has little to do with Linux: Python is cross-platform. Code that crawls images looks like this:
import urllib.request
import os
import random

def url_open(url):
    req = urllib.request.Request(url)
    # Set a User-Agent so the request looks more like a human browser
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0')
    # Optional: rotate proxy IPs so requests come from different addresses,
    # which makes the crawler harder for the server to detect
    '''
    iplist = ['1.193.162.123:8000', '1.193.162.91:8000', '1.193.163.32:8000']
    proxy_support = urllib.request.ProxyHandler({'http': random.choice(iplist)})
    opener = urllib.request.build_opener(proxy_support)
    opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.154 Safari/537.36 LBBROWSER')]
    urllib.request.install_opener(opener)
    '''
    response = urllib.request.urlopen(req)
    html = response.read()
    return html

def get_page(url):
    # The page embeds its current page number as 'current-comment-page: [N]'
    html = url_open(url).decode('utf-8')
    a = html.find('current-comment-page') + 23
    b = html.find(']', a)
    # print(html[a:b])
    return html[a:b]

def find_imgs(url):
    # Scan the raw HTML for 'img src=' and collect every .jpg address
    html = url_open(url).decode('utf-8')
    img_addrs = []
    a = html.find('img src=')
    while a != -1:
        b = html.find('.jpg', a, a + 140)
        if b != -1:
            if html[a + 9] != 'h':
                # protocol-relative address such as //..., prepend a scheme
                img_addrs.append('http:' + html[a + 9:b + 4])
            else:
                img_addrs.append(html[a + 9:b + 4])
        else:
            b = a + 9
        a = html.find('img src=', b)
    for each in img_addrs:
        print(each)  # debug print of every collected address
    return img_addrs

def save_imgs(folder, img_addrs):
    for each in img_addrs:
        # print('one was saved')
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            img = url_open(each)
            f.write(img)

def download_mm(folder='ooxx', pages=10):
    os.mkdir(folder)
    os.chdir(folder)
    url = ""  # the target site's URL was stripped from the original post
    page_num = int(get_page(url))
    for i in range(pages):
        page_num = page_num - 1
        page_url = url + 'page-' + str(page_num) + '#comments'
        img_addrs = find_imgs(page_url)
        save_imgs(folder, img_addrs)

if __name__ == '__main__':
    download_mm()
Done.
Run result: the crawled .jpg files end up in the ooxx folder.
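Since the question's title mentions BeautifulSoup, here is a minimal sketch of the same image extraction done with BeautifulSoup instead of raw string .find() calls. The tag layout and the .jpg filter are assumptions carried over from the code above, not part of the original answer:

import urllib.request
from bs4 import BeautifulSoup

def find_imgs_bs(url):
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0')
    html = urllib.request.urlopen(req).read().decode('utf-8')
    soup = BeautifulSoup(html, 'html.parser')
    img_addrs = []
    for img in soup.find_all('img'):
        src = img.get('src', '')
        if src.endswith('.jpg'):
            # protocol-relative links like //example.com/a.jpg need a scheme
            if src.startswith('//'):
                src = 'http:' + src
            img_addrs.append(src)
    return img_addrs

Parsing the tags this way survives attribute reordering and extra whitespace, which the fixed-offset html[a + 9] trick above does not.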
Python image crawler: if image 1 already exists in the directory, how do I skip it and go on to write image 2?
Here is some code you can borrow from:
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import requests
import os

Header = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.104 Safari/537.36"}

def picture_get(picture_url):
    try:
        root = "E:/pic/"
        path = root + picture_url.split('/')[-1]
        if not os.path.exists(root):  # create the directory if it does not exist
            os.mkdir(root)
        if not os.path.exists(path):  # download only when the file is not there yet
            r = requests.get(picture_url, headers=Header)
            f = open(path, "wb")
            f.write(r.content)
            f.close()
            print("File downloaded")
        else:
            print("File already exists")
    except Exception:
        print("Download failed")
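Hypothetical usage, with placeholder URLs: calling picture_get() in a loop gives exactly the skip-then-continue behavior asked about, because the os.path.exists(path) check routes files already on disk into the "already exists" branch while new ones are written.

urls = [
    "http://example.com/img/1.jpg",  # placeholder URLs for illustration
    "http://example.com/img/2.jpg",
]
for u in urls:
    picture_get(u)  # image 1 is skipped if already saved; image 2 is written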
Python crawler: replace the image links in an online HTML page with local links and save the HTML file locally
import os
import re
from bs4 import BeautifulSoup

def check_flag(flag):
    # Match only relative links that start with 'images/'
    regex = re.compile(r'images\/')
    result = True if regex.match(flag) else False
    return result

# soup = BeautifulSoup(open('index.html'))
html_content = '''
<a href="">Test 01</a>
<a href="">Test 02</a>
<a href="">Test 01</a>
<a href="">Test 01</a>
'''

file = open(r'favour-en.html', 'r', encoding="UTF-8")
soup = BeautifulSoup(file, 'html.parser')
for element in soup.find_all('img'):
    if 'src' in element.attrs:
        print(element.attrs['src'])
        if check_flag(element.attrs['src']):
            # if element.attrs['src'].find("png"):
            element.attrs['src'] = "michenxxxxxxxxxxxx" + '/' + element.attrs['src']
            print("##################################")

with open('index.html', 'w', encoding="UTF-8") as fp:
    fp.write(soup.prettify())  # prettify() formats the soup so the output is readable
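The snippet above only rewrites the src attributes; the question also asks to download the images themselves. A minimal sketch that fetches each matched file before rewriting, under assumed names (localize_images, base_url, and the images/ output folder are illustrative, not from the original answer):

import os
import requests
from bs4 import BeautifulSoup

def localize_images(html_path, base_url, out_html='local.html'):
    # Download every <img> into a local images/ folder, then point the
    # tag at the downloaded copy and save the rewritten HTML.
    os.makedirs('images', exist_ok=True)
    with open(html_path, 'r', encoding='UTF-8') as f:
        soup = BeautifulSoup(f, 'html.parser')
    for img in soup.find_all('img'):
        src = img.attrs.get('src')
        if not src:
            continue
        # absolute URLs are used as-is; relative ones get the page's base URL
        full = src if src.startswith('http') else base_url.rstrip('/') + '/' + src
        local = os.path.join('images', src.split('/')[-1])
        if not os.path.exists(local):  # skip files that were already saved
            r = requests.get(full)
            with open(local, 'wb') as out:
                out.write(r.content)
        img.attrs['src'] = local  # rewrite the link to the local copy
    with open(out_html, 'w', encoding='UTF-8') as fp:
        fp.write(soup.prettify())

The existence check reuses the skip-if-present idea from the previous answer, so re-running the script does not re-download images.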
