Anti-Crawler Measures

Generally speaking, these are the anti-crawler strategies you will run into on websites:

Limiting how often a single IP may access the site, and dropping the connection once the threshold is exceeded. (The workaround is either to slow the crawler down by putting time.sleep in front of every request, or to keep rotating proxy IPs; either way the check is bypassed. A minimal throttling sketch follows this list.)
Keeping per-User-Agent request statistics on the server and blocking any User-Agent that exceeds a threshold. (This works surprisingly well, but the collateral damage is also huge, so ordinary sites rarely use it; we will account for it anyway.)
Cookie-based checks. (These are even easier to handle, and most sites don't use them.)
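The simplest answer to a per-IP rate limit is just to slow down. A minimal throttling sketch (the 1-3 second delay and the polite_get name are placeholders I'm assuming here, not something from the original post):

import random
import time

import requests

def polite_get(url):
    time.sleep(random.uniform(1, 3))  ## pause 1-3 seconds before every request so we stay under the rate limit
    return requests.get(url)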

Deciding whether to use a proxy:

import requests
import re
import random

class download:

    def __init__(self):
        self.iplist = []  ## initialize a list to hold the proxy IPs we fetch
        html = requests.get("http://haoip.cc/tiqu.htm")  ## grab the free proxy list page
        iplistn = re.findall(r'r/>(.*?)<b', html.text, re.S)  ## pull everything between r/> and <b out of html.text; re.S lets . match newlines too; findall returns a list
        for ip in iplistn:
            i = re.sub('\n', '', ip)  ## re.sub is the re module's replace method; here it strips the newlines
            self.iplist.append(i.strip())  ## append the cleaned IP to the list we initialized above

        self.user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]

    def get(self, url, proxy=None):  ## proxy defaults to None
        UA = random.choice(self.user_agent_list)  ## pick a random string from self.user_agent_list
        headers = {'User-Agent': UA}  ## build the User-Agent header from it

        if proxy == None:  ## no proxy passed in: fetch the response directly
            response = requests.get(url, headers=headers)  ## with the header set, the server takes us for a real browser
            return response  ## return the response

        else:  ## a proxy was passed in
            IP = ''.join(str(random.choice(self.iplist)).strip())  ## pick a random entry from self.iplist and strip it into the plain string we need
            proxy = {'http': IP}  ## build the proxies dict
            response = requests.get(url, headers=headers, proxies=proxy)  ## fetch the response through the proxy
            return response

Xz = download()  ## instantiate
print(Xz.get("http://www.mzitu.com").headers)  ## print the response headers
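A quick way to confirm the proxy branch actually takes effect (not from the original post, and assuming httpbin.org/ip is reachable and the haoip.cc proxies still work) is to ask an IP-echo service who we are, with and without the proxy argument:

direct = Xz.get("http://httpbin.org/ip")              ## no proxy: the service echoes our real outbound IP
proxied = Xz.get("http://httpbin.org/ip", proxy=True)  ## any non-None value makes get() pick a random IP from self.iplist
print(direct.text)
print(proxied.text)  ## should report a different IP if the proxy took effect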

Now we need to put the proxies to work, and also decide after how many failed attempts to switch over to proxy crawling, and after how many more to drop the proxy again!

import requests
import re
import random
import time

class download:

    def __init__(self):
        self.iplist = []  ## initialize a list to hold the proxy IPs we fetch
        html = requests.get("http://haoip.cc/tiqu.htm")  ## grab the free proxy list page
        iplistn = re.findall(r'r/>(.*?)<b', html.text, re.S)  ## pull everything between r/> and <b out of html.text; re.S lets . match newlines too; findall returns a list
        for ip in iplistn:
            i = re.sub('\n', '', ip)  ## re.sub is the re module's replace method; here it strips the newlines
            self.iplist.append(i.strip())  ## append the cleaned IP to the list we initialized above

        self.user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]

    def get(self, url, timeout, proxy=None, num_retries=6):  ## proxy defaults to None, num_retries to 6
        UA = random.choice(self.user_agent_list)  ## pick a random string from self.user_agent_list
        headers = {'User-Agent': UA}  ## build the User-Agent header from it

        if proxy == None:  ## no proxy passed in: fetch the response directly
            try:
                return requests.get(url, headers=headers, timeout=timeout)  ## with the header set, the server takes us for a real browser
            except:  ## if the request above raises, run the code below
                if num_retries > 0:  ## num_retries is our retry budget
                    time.sleep(10)  ## wait ten seconds
                    print(u'Error fetching the page; retrying in 10s,', num_retries, u'attempts left')
                    return self.get(url, timeout, num_retries=num_retries - 1)  ## call ourselves with the budget reduced by one
                else:
                    print(u'Switching to a proxy')
                    time.sleep(10)
                    IP = ''.join(str(random.choice(self.iplist)).strip())  ## pick a random proxy IP, same processing as below
                    proxy = {'http': IP}
                    return self.get(url, timeout, proxy)  ## call ourselves again so the proxy branch runs

        else:  ## a proxy was passed in
            IP = ''.join(str(random.choice(self.iplist)).strip())  ## pick a random entry from self.iplist and strip it into the plain string we need
            proxy = {'http': IP}  ## build the proxies dict
            return requests.get(url, headers=headers, proxies=proxy, timeout=timeout)  ## fetch the response through the proxy

Xz = download()  ## instantiate
print(Xz.get("http://www.mzitu.com", 3))  ## print the response

And once the proxy itself has failed 6 times, drop the proxy:

import requests
import re
import random
import time

class download:

    def __init__(self):
        self.iplist = []  ## initialize a list to hold the proxy IPs we fetch
        html = requests.get("http://haoip.cc/tiqu.htm")  ## grab the free proxy list page
        iplistn = re.findall(r'r/>(.*?)<b', html.text, re.S)  ## pull everything between r/> and <b out of html.text; re.S lets . match newlines too; findall returns a list
        for ip in iplistn:
            i = re.sub('\n', '', ip)  ## re.sub is the re module's replace method; here it strips the newlines
            self.iplist.append(i.strip())  ## append the cleaned IP to the list we initialized above

        self.user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]

    def get(self, url, timeout, proxy=None, num_retries=6):  ## proxy defaults to None, num_retries to 6
        print(u'Fetching:', url)
        UA = random.choice(self.user_agent_list)  ## pick a random string from self.user_agent_list
        headers = {'User-Agent': UA}  ## build the User-Agent header from it

        if proxy == None:  ## no proxy passed in: fetch the response directly
            try:
                return requests.get(url, headers=headers, timeout=timeout)  ## with the header set, the server takes us for a real browser
            except:  ## if the request above raises, run the code below
                if num_retries > 0:  ## num_retries is our retry budget
                    time.sleep(10)  ## wait ten seconds
                    print(u'Error fetching the page; retrying in 10s,', num_retries, u'attempts left')
                    return self.get(url, timeout, num_retries=num_retries - 1)  ## call ourselves with the budget reduced by one
                else:
                    print(u'Switching to a proxy')
                    time.sleep(10)
                    IP = ''.join(str(random.choice(self.iplist)).strip())  ## pick a random proxy IP, same processing as below
                    proxy = {'http': IP}
                    return self.get(url, timeout, proxy)  ## call ourselves again so the proxy branch runs

        else:  ## a proxy was passed in
            try:
                IP = ''.join(str(random.choice(self.iplist)).strip())  ## pick a random entry from self.iplist and strip it into the plain string we need
                proxy = {'http': IP}  ## build the proxies dict
                return requests.get(url, headers=headers, proxies=proxy, timeout=timeout)  ## fetch the response through the proxy
            except:
                if num_retries > 0:
                    time.sleep(10)
                    IP = ''.join(str(random.choice(self.iplist)).strip())
                    proxy = {'http': IP}
                    print(u'Switching proxies; retrying in 10s,', num_retries, u'attempts left')
                    print(u'Current proxy:', proxy)
                    return self.get(url, timeout, proxy, num_retries - 1)
                else:
                    print(u'The proxies are no good either! Dropping the proxy')
                    return self.get(url, 3)  ## fall back to a direct request with a 3-second timeout

The complete crawler code:

from bs4 import BeautifulSoup
import os
from Download import download

class mzitu(download):

    def all_url(self, url):
        html = download.get(self, url, 3)  ## swapped in our downloader here, with the timeout argument added
        all_a = BeautifulSoup(html.text, 'lxml').find('div', class_='all').find_all('a')
        for a in all_a:
            title = a.get_text()
            print(u'Saving:', title)
            path = str(title).replace("?", '_')
            self.mkdir(path)
            os.chdir("D:\\mzitu\\" + path)
            href = a['href']
            self.html(href)

    def html(self, href):
        html = download.get(self, href, 3)
        max_span = BeautifulSoup(html.text, 'lxml').find_all('span')[10].get_text()
        for page in range(1, int(max_span) + 1):
            page_url = href + '/' + str(page)
            self.img(page_url)

    def img(self, page_url):
        img_html = download.get(self, page_url, 3)  ## swapped in our downloader here
        img_url = BeautifulSoup(img_html.text, 'lxml').find('div', class_='main-image').find('img')['src']
        self.save(img_url)

    def save(self, img_url):
        name = img_url[-9:-4]
        print(u'Saving:', img_url)
        img = download.get(self, img_url, 3)  ## swapped in our downloader here, with the timeout argument added
        f = open(name + '.jpg', 'ab')
        f.write(img.content)
        f.close()

    def mkdir(self, path):  ## this function creates the folder
        path = path.strip()
        isExists = os.path.exists(os.path.join("D:\\mzitu", path))
        if not isExists:
            print(u'Created a folder named', path, u'!')
            os.makedirs(os.path.join("D:\\mzitu", path))
            return True
        else:
            print(u'A folder named', path, u'already exists!')
            return False

Mzitu = mzitu()  ## instantiate
Mzitu.all_url('http://www.mzitu.com/all')  ## pass the start URL to all_url; think of it as the crawler's entry point
Done! Compare this with the crawler we wrote last time and you will see exactly what changed.

Update, 2016/11/4: while preparing today's tutorial I noticed a problem I had overlooked. In the version above the spider subclasses the downloader, and written that way the subclass can't use __init__ of its own; so I changed the layout (everything else stays the same, don't worry) and am pasting the code directly.
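(For reference, inheritance does not rule out __init__ entirely: a subclass can chain to the parent's initializer with super(). The sketch below is not the approach this post takes, just the alternative for comparison; saved_pages is a made-up attribute and the download class from above is assumed to be in scope:)

class mzitu(download):

    def __init__(self):
        super().__init__()    ## run download.__init__ first so self.iplist and self.user_agent_list exist
        self.saved_pages = 0  ## hypothetical extra state the subclass keeps for itself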

First, the download module (Download.py):


import requests
import re
import random
import time

class download():

    def __init__(self):
        self.iplist = []  ## initialize a list to hold the proxy IPs we fetch
        html = requests.get("http://haoip.cc/tiqu.htm")  ## grab the free proxy list page
        iplistn = re.findall(r'r/>(.*?)<b', html.text, re.S)  ## pull everything between r/> and <b out of html.text; re.S lets . match newlines too; findall returns a list
        for ip in iplistn:
            i = re.sub('\n', '', ip)  ## re.sub is the re module's replace method; here it strips the newlines
            self.iplist.append(i.strip())  ## append the cleaned IP to the list we initialized above

        self.user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]

    def get(self, url, timeout, proxy=None, num_retries=6):  ## proxy defaults to None, num_retries to 6
        UA = random.choice(self.user_agent_list)  ## pick a random string from self.user_agent_list
        headers = {'User-Agent': UA}  ## build the User-Agent header from it

        if proxy == None:  ## no proxy passed in: fetch the response directly
            try:
                return requests.get(url, headers=headers, timeout=timeout)  ## with the header set, the server takes us for a real browser
            except:  ## if the request above raises, run the code below
                if num_retries > 0:  ## num_retries is our retry budget
                    time.sleep(10)  ## wait ten seconds
                    print(u'Error fetching the page; retrying in 10s,', num_retries, u'attempts left')
                    return self.get(url, timeout, num_retries=num_retries - 1)  ## call ourselves with the budget reduced by one
                else:
                    print(u'Switching to a proxy')
                    time.sleep(10)
                    IP = ''.join(str(random.choice(self.iplist)).strip())  ## pick a random proxy IP, same processing as below
                    proxy = {'http': IP}
                    return self.get(url, timeout, proxy)  ## call ourselves again so the proxy branch runs

        else:  ## a proxy was passed in
            try:
                IP = ''.join(str(random.choice(self.iplist)).strip())  ## pick a random entry from self.iplist and strip it into the plain string we need
                proxy = {'http': IP}  ## build the proxies dict
                return requests.get(url, headers=headers, proxies=proxy, timeout=timeout)  ## fetch the response through the proxy
            except:
                if num_retries > 0:
                    time.sleep(10)
                    IP = ''.join(str(random.choice(self.iplist)).strip())
                    proxy = {'http': IP}
                    print(u'Switching proxies; retrying in 10s,', num_retries, u'attempts left')
                    print(u'Current proxy:', proxy)
                    return self.get(url, timeout, proxy, num_retries - 1)
                else:
                    print(u'The proxies are no good either! Dropping the proxy')
                    return self.get(url, 3)  ## fall back to a direct request with a 3-second timeout

request = download()  ## a single shared instance for other modules to import

The only change in this module is the request = download() line added at the end.
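Because Python caches imported modules, every module that does from Download import request gets that same object back, so the proxy list is fetched from haoip.cc only once per run. A tiny check, not in the original post:

from Download import request as r1
from Download import request as r2

print(r1 is r2)  ## True: both names refer to the one shared download() instance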

Second, the spider module (mzitu.py):


from bs4 import BeautifulSoup
import os
from Download import request  ## the import changed: we now pull in the shared instance
from pymongo import MongoClient

class mzitu():

    def all_url(self, url):
        html = request.get(url, 3)  ## changed here (notice that self is gone?)
        all_a = BeautifulSoup(html.text, 'lxml').find('div', class_='all').find_all('a')
        for a in all_a:
            title = a.get_text()
            print(u'Saving:', title)
            path = str(title).replace("?", '_')
            self.mkdir(path)
            os.chdir("D:\\mzitu\\" + path)
            href = a['href']
            self.html(href)

    def html(self, href):
        html = request.get(href, 3)  ## changed here (notice that self is gone?)
        max_span = BeautifulSoup(html.text, 'lxml').find_all('span')[10].get_text()
        for page in range(1, int(max_span) + 1):
            page_url = href + '/' + str(page)
            self.img(page_url)

    def img(self, page_url):
        img_html = request.get(page_url, 3)  ## changed here (notice that self is gone?)
        img_url = BeautifulSoup(img_html.text, 'lxml').find('div', class_='main-image').find('img')['src']
        self.save(img_url)

    def save(self, img_url):
        name = img_url[-9:-4]
        print(u'Saving:', img_url)
        img = request.get(img_url, 3)  ## changed here (notice that self is gone?)
        f = open(name + '.jpg', 'ab')
        f.write(img.content)
        f.close()

    def mkdir(self, path):  ## this function creates the folder
        path = path.strip()
        isExists = os.path.exists(os.path.join("D:\\mzitu", path))
        if not isExists:
            print(u'Created a folder named', path, u'!')
            os.makedirs(os.path.join("D:\\mzitu", path))
            return True
        else:
            print(u'A folder named', path, u'already exists!')
            return False

Mzitu = mzitu()  ## instantiate
Mzitu.all_url('http://www.mzitu.com/all')  ## pass the start URL to all_url; think of it as the crawler's entry point


Further reading

https://cuiqingcai.com/3256.html
