Three ways to batch download images with Python

jopen 8 years ago

The first is to use the win32com extension library (part of the pywin32 package, wrapping Microsoft's COM interfaces) to drive IE:

win32com gives you an object similar to the document object in JavaScript, but it appears to be read-only (I could not find any documentation for it).

The second is to use selenium's webdriver:

selenium provides drivers for Chrome, IE, Firefox and other browsers. Each driver exposes execute_script and find_element_by_xx methods, which make it easy to run JavaScript in the page (including modifying elements) and to read elements out of the HTML. The drawback, at the time of writing, was that selenium only supported Python 2.6 and 2.7.

The third is to parse the page with Python's built-in HTMLParser:

With HTMLParser you write your own class that inherits from the base class and overrides the methods that handle the parsed elements.

1. win32com

# Scroll the page to the bottom (at most 20000 pixels) so more images load,
# then simulate the right-arrow key to step through further images.
import sys
import win32com.client
import win32api
import urllib.request
import time
import os

def main():
    # read the page URL from the command line
    url = sys.argv[1]
    # drive IE through its COM interface
    ie = win32com.client.Dispatch("InternetExplorer.Application")
    ie.Navigate(url)
    ie.Visible = True
    last_url = ''
    dir_name = ''
    while last_url != url:
        print('\nThe URL is:', url, '\n')
        while ie.ReadyState != 4:
            time.sleep(1)
        while ie.Document.readyState != "complete":
            time.sleep(1)
        # scroll down step by step until the page stops moving
        win = ie.Document.parentWindow
        lastY = -1
        for i in range(40):
            win.scrollTo(0, 500 * i)
            nowY = win.pageYOffset
            if nowY == lastY:
                break
            lastY = nowY
            time.sleep(0.4)
        print('Document load state:', ie.Document.readyState)
        doc = ie.Document
        # create the output directory on the first pass
        if dir_name == '':
            root_dir = 'E:\\img'
            dir_name = root_dir + '\\' + doc.title
            dir_name = dir_name.replace('|', '-')
            if not os.path.exists(root_dir):
                os.mkdir(root_dir)
            if not os.path.exists(dir_name):
                os.mkdir(dir_name)
        all_image = doc.images
        print('Found', all_image.length, 'images')
        count = 0
        for img in all_image:
            if img.id == 'b_img':
                count += 1
                print(count, img.src)
                time.sleep(1)
                img_file = urllib.request.urlopen(img.src)
                byte = img_file.read()
                print(count, 'download complete!', '-' * 10, 'size:', '{:.3}'.format(len(byte) / 1024), 'KB')
                if len(byte) > 7000:
                    # build a file name from the URL: replace separators, cut off '!'/'?' suffixes
                    file_name = img.src.replace('/', '_')
                    file_name = file_name.replace(':', '_')
                    end = len(file_name)
                    if file_name.rfind('!') != -1:
                        end = file_name.rfind('!')
                    if file_name.rfind('?') != -1:
                        end = file_name.rfind('?')
                    file_name = file_name[:end]
                    write_file = open(dir_name + '\\' + file_name, 'wb')
                    write_file.write(byte)
                    write_file.close()
                    print(count, file_name, 'complete!')
        # simulate the right-arrow key to move to the next image page
        last_url = url
        win32api.keybd_event(39, 0)
        time.sleep(1)
        url = ie.Document.url
        print(last_url, url)
    # ie.Quit()

if __name__ == '__main__':
    main()
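If all you need is the read-only DOM access described in the introduction, the win32com part of the script boils down to a handful of calls. A minimal sketch (example.com is only a placeholder; IE and the pywin32 package are assumed to be installed):

import time
import win32com.client

ie = win32com.client.Dispatch("InternetExplorer.Application")
ie.Visible = True
ie.Navigate("http://www.example.com")         # placeholder URL
while ie.ReadyState != 4:                     # wait until IE reports the page is ready
    time.sleep(1)
while ie.Document.readyState != "complete":   # and until the DOM itself is complete
    time.sleep(1)
doc = ie.Document                             # DOM object, similar to document in JS
print(doc.title)
for img in doc.images:                        # doc.images lists every <img> on the page
    print(img.src)
ie.Quit()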

2. selenium

# -*- coding: cp936 -*-
import sys
import urllib
import time
import os
from selenium import webdriver

def main():
    # read the page URL from the command line
    url = sys.argv[1]
    # drive the browser (Chrome here) through selenium
    driver = webdriver.Chrome()
    driver.get(url)
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    # create the output directory, named after the page title
    dir_name = driver.find_element_by_tag_name('title').text
    print dir_name
    root_dir = 'E:\\img'
    dir_name = root_dir + '\\' + dir_name
    dir_name = dir_name.replace('|', '-')
    if not os.path.exists(root_dir):
        os.mkdir(root_dir)
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)
    images = driver.find_elements_by_tag_name('img')
    count = 0
    for image in images:
        count += 1
        image_url = str(image.get_attribute('src'))
        img_file = urllib.urlopen(image_url)
        byte = img_file.read()
        print count, 'download complete!', '-' * 10, 'size:', len(byte) / 1024, 'KB'
        if len(byte) > 7000:
            # build a file name from the URL: replace separators, cut off '!'/'?' suffixes
            file_name = image_url.replace('/', '_')
            file_name = file_name.replace(':', '_')
            end = len(file_name)
            if file_name.rfind('!') != -1:
                end = file_name.rfind('!')
            if file_name.rfind('?') != -1:
                end = file_name.rfind('?')
            file_name = file_name[:end]
            write_file = open(dir_name + '\\' + file_name, 'wb')
            write_file.write(byte)
            write_file.close()
            print count, file_name, 'complete!'
    driver.quit()

if __name__ == '__main__':
    main()
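The script above targets Python 2 (urllib.urlopen, print statements). The selenium calls mentioned in the introduction, execute_script and find_elements_by_tag_name, work the same way under Python 3; only the download and print lines change. A minimal Python 3 sketch, using the same selenium method names as the script above and assuming chromedriver is on the PATH (the URL is a placeholder):

import urllib.request
from selenium import webdriver

driver = webdriver.Chrome()                       # chromedriver must be on PATH
driver.get('http://www.example.com')              # placeholder URL
# run JavaScript in the page: scroll to the bottom so lazy-loaded images appear
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
for image in driver.find_elements_by_tag_name('img'):
    image_url = image.get_attribute('src')
    if image_url:                                 # some <img> tags have no src
        data = urllib.request.urlopen(image_url).read()
        print(image_url, len(data), 'bytes')
driver.quit()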

3. HTMLParser

# import modules used here -- sys is a very standard one
import sys
import urllib.request
from html.parser import HTMLParser

class MyHTMLParser(HTMLParser):
    def handle_starttag(self, tag, attrs):
        if tag == 'img':
            for attr in attrs:
                if attr[0] == 'src':
                    img_file = urllib.request.urlopen(attr[1])
                    byte = img_file.read()
                    # only write files larger than 1000 bytes
                    # (TODO: add a download counter and show the HTML source)
                    if len(byte) > 1000:
                        # build a file name from the URL: replace separators, cut off '!'/'?' suffixes
                        file_name = attr[1].replace('/', '_')
                        file_name = file_name.replace(':', '_')
                        end = len(file_name)
                        if file_name.rfind('!') != -1:
                            end = file_name.rfind('!')
                        if file_name.rfind('?') != -1:
                            end = file_name.rfind('?')
                        file_name = file_name[:end]
                        # print(file_name)
                        write_file = open('E:\\img\\' + file_name, 'wb')
                        write_file.write(byte)
                        write_file.close()

# Gather our code in a main() function
def main():
    # read the page URL from the command line
    url = sys.argv[1]
    print('\nThe URL is:', url, '\n')
    # fetch the resource the URL points to
    html_file = urllib.request.urlopen(url)
    byte_content = html_file.read()
    # save the raw HTML as well (the directory E:\img\html must already exist)
    url_file = open('E:\\img\\html\\result.htm', 'wb')
    url_file.write(byte_content)
    url_file.close()
    # decode the bytes into a string
    s = str(byte_content, encoding="utf-8")
    # print(s)
    parser = MyHTMLParser(strict=False)
    parser.feed(s)

# Standard boilerplate to call the main() function to begin
# the program.
if __name__ == '__main__':
    main()
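All three scripts repeat the same steps for turning an image URL into a file name and writing the bytes to disk. Pulled out into a standalone helper it would look roughly like this (a sketch only; the helper name save_image is mine, while the 7000-byte threshold and the file-name cleanup follow the scripts above):

import os
import urllib.request

def save_image(img_url, dir_name, min_size=7000):
    # download the image and skip anything smaller than min_size bytes (icons, spacers)
    data = urllib.request.urlopen(img_url).read()
    if len(data) <= min_size:
        return None
    # build a file name from the URL: replace separators, cut off '!'/'?' suffixes
    file_name = img_url.replace('/', '_').replace(':', '_')
    end = len(file_name)
    if file_name.rfind('!') != -1:
        end = file_name.rfind('!')
    if file_name.rfind('?') != -1:
        end = file_name.rfind('?')
    file_name = file_name[:end]
    path = os.path.join(dir_name, file_name)
    with open(path, 'wb') as f:
        f.write(data)
    return path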

Source: http://www.cnblogs.com/liu-ke/p/5092391.html