• Elasticsearch: automatically deleting old indices


    Introduction

    The solution has two scripts: the first finds the index names that match the age criterion, and the second imports the first and performs the delete (or close) operations.
    

    Scripts

    Find the matching indices; by default, anything older than 30 days.

    # coding:utf-8
    
    __author__ = 'Jipu FANG'
    
    from elasticsearch import Elasticsearch
    import re
    import time
    import datetime
    
    # Midnight of the current day, used as the reference point for index age.
    now = time.localtime()
    data1 = datetime.datetime(now[0], now[1], now[2])
    
    es = Elasticsearch("http://192.168.30.135:9200")
    
    # _cat/indices returns one plain-text line per index; splitting on
    # whitespace yields every column, and the date-suffix regex below keeps
    # only the tokens that are index names ending in YYYY.MM.DD.
    res = es.cat.indices()
    l = res.strip().split()
    
    def dindex(day=30):
        """Return the indices whose YYYY.MM.DD suffix is more than `day` days old."""
        index = []
        for i in l:
            if re.search(r'\d+\.\d+\.\d+$', i):
                itime = time.strptime(re.findall(r'\d+\.\d+\.\d+$', i)[0], "%Y.%m.%d")
                data2 = datetime.datetime(itime[0], itime[1], itime[2])
                d = (data1 - data2).days
                if int(d) > int(day):
                    index.append(i)
        return index
    
    if __name__ == '__main__':
        print(dindex())
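
    To sanity-check the date filter without touching a live cluster, here is a minimal sketch that applies the same regex and age arithmetic to a few made-up index names (the names and the reference date are illustrative, not from the original post):

    # coding:utf-8
    # Minimal sketch: exercise the same filtering logic on hypothetical names.
    import re
    import datetime
    
    today = datetime.datetime(2017, 9, 1)   # hypothetical "today"
    names = ["app-2017.05.16", "app-2017.08.30", ".kibana"]
    
    for name in names:
        m = re.search(r'\d+\.\d+\.\d+$', name)
        if not m:
            continue                         # skip indices without a date suffix
        indexed = datetime.datetime.strptime(m.group(0), "%Y.%m.%d")
        age = (today - indexed).days
        print(name, age, "delete" if age > 30 else "keep")

    Running this prints that app-2017.05.16 (108 days old) would be deleted, app-2017.08.30 (2 days old) kept, and .kibana skipped because it has no date suffix.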
    
    

    Delete the matching indices

    # coding:utf-8
    
    __author__ = 'Jipu FANG'
    
    import requests
    import json
    import time
    import indexs  # the first script, saved as indexs.py in the same directory
    
    '''
    delete index  url: "http://192.168.30.135:9200/app-2017.05.16"  headers: 'Content-Type: application/json'  data: {"query": {"match_all": {}}}
    search logs   url: "http://192.168.30.135:9200/_search"  headers: 'Content-Type: application/json'  data: {"query": {"match": {"message": {"query": "ERROR|77"}}}}
    '''
    
    # Thin wrapper around the Elasticsearch REST API.
    class ES_API:
        def __init__(self, url, data, headers):
            self.url = url
            self.data = data
            self.headers = headers
    
        def delete(self):
            r = requests.delete(url=self.url, data=json.dumps(self.data), headers=self.headers)
            print(r.text)
    
        def post(self):
            r = requests.post(url=self.url, data=json.dumps(self.data), headers=self.headers)
            print(r.text)
    
    # Delete every index older than `day` days (i.e. retain `day` days of data).
    def delete_index(day):
        for i in indexs.dindex(day):
            url = "http://192.168.30.135:9200/%s" % (i)
            headers = {'Content-Type': 'application/json'}
            data = {"query": {"match_all": {}}}
            C = ES_API(url, data, headers)
            C.delete()
            time.sleep(3)
        return "Delete indexes OK!"
    
    # Close every index older than `day` days; a closed index consumes far
    # fewer resources than an open one.
    def close_index(day):
        for i in indexs.dindex(day):
            url = "http://192.168.30.135:9200/%s/_close?pretty" % (i)
            headers = {'Content-Type': 'application/json'}
            data = {}
            C = ES_API(url, data, headers)
            C.post()
            time.sleep(3)
        return "index status close ok!"
    
    
    delete_index(30)
    time.sleep(60)
    close_index(15)
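
    As a side note, the same delete and close operations are exposed directly by the elasticsearch Python client that the first script already uses, which avoids hand-building URLs. A minimal sketch (the 15/30-day retention values are illustrative; closing first avoids touching names that a prior delete has already removed):

    # coding:utf-8
    # Minimal sketch: the same operations via the client's indices API
    # instead of raw requests.
    from elasticsearch import Elasticsearch
    import indexs
    
    es = Elasticsearch("http://192.168.30.135:9200")
    
    for name in indexs.dindex(15):
        es.indices.close(index=name)    # close anything older than 15 days
    
    for name in indexs.dindex(30):
        es.indices.delete(index=name)   # then drop anything older than 30 days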
    
  • Original post: https://www.cnblogs.com/GXLo/p/7405631.html