• clickhouse 配置文件


    users.xml

    <?xml version="1.0"?>
    <yandex>
        <!-- See also the files in users.d directory where the settings can be overridden. -->
    
        <!-- Profiles of settings. -->
        <profiles>
            <!-- Default settings. -->
            <default>
                <!-- Maximum memory usage for processing single query, in bytes. -->
                <max_memory_usage>10000000000</max_memory_usage>
    
                <!-- How to choose between replicas during distributed query processing.
                     random - choose random replica from set of replicas with minimum number of errors
                     nearest_hostname - from set of replicas with minimum number of errors, choose replica
                      with minimum number of different symbols between replica's hostname and local hostname
                      (Hamming distance).
                     in_order - first live replica is chosen in specified order.
                 first_or_random - if the first replica has a higher number of errors, pick a random one from the replicas with the minimum number of errors.
                -->
                <load_balancing>random</load_balancing>
            </default>
    
            <!-- Profile that allows only read queries. -->
        <readonly>
            <readonly>1</readonly>
        </readonly>
        </profiles>
    
        <!-- Users and ACL. -->
        <users>
            <!-- If user name was not specified, 'default' user is used. -->
            <default>
                <password_sha256_hex>5e8ffa57ce30c94e534b2813d9166f2acf8f8c8784efc91675b480021ecfc339</password_sha256_hex>
    
               
                <networks>
                    <ip>::/0</ip>
                </networks>
    
                <!-- Settings profile for user. -->
                <profile>default</profile>
    
                <!-- Quota for user. -->
                <quota>default</quota>
    
                <!-- User can create other users and grant rights to them. -->
                <access_management>1</access_management>
            </default>
            <test>
                <password_sha256_hex>5e8ffa57ce30c94e534b2813d9166f2acf8f8c8784efc91675b480021ecfc339</password_sha256_hex>
    
               
                <networks>
                    <ip>::/0</ip>
                </networks>
    
                <!-- Settings profile for user. -->
                <profile>test</profile>
    
                <!-- Quota for user. -->
                <quota>test</quota>
    
                <!-- User can create other users and grant rights to them. -->
                <access_management>1</access_management>
            </test>
        </users>
    
        <!-- Quotas. -->
        <quotas>
            <!-- Name of quota. -->
            <default>
                <!-- Limits for time interval. You could specify many intervals with different limits. -->
                <interval>
                    <!-- Length of interval. -->
                    <duration>3600</duration>
    
                    <!-- No limits. Just calculate resource usage for time interval. -->
                    <queries>0</queries>
                    <errors>0</errors>
                    <result_rows>0</result_rows>
                    <read_rows>0</read_rows>
                    <execution_time>0</execution_time>
                </interval>
            </default>
        </quotas>
    </yandex>
    

    config.xml

    <?xml version="1.0"?>
    <!--
      NOTE: User and query level settings are set up in "users.xml" file.
      If you have accidentally specified user-level settings here, server won't start.
      You can either move the settings to the right place inside "users.xml" file
       or add <skip_check_for_incorrect_settings>1</skip_check_for_incorrect_settings> here.
    -->
    <yandex>
        <logger>
            <level>warning</level>
            <log>/var/log/clickhouse-server/clickhouse-server.log</log>
            <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
    
            <size>500M</size>
            <count>2</count>
    
        </logger>
    
    
        <http_port>8123</http_port>
    
        <tcp_port>9000</tcp_port>
    
    
        <mysql_port>9004</mysql_port>
    
    
        <postgresql_port>9005</postgresql_port>
    
     
        <interserver_http_port>9009</interserver_http_port>
    
    
        <!-- '::' listens on all IPv4/IPv6 interfaces. 'host_ip' below is a placeholder:
             replace it with this server's actual IP address, or remove the line since '::' already covers all addresses. -->
        <listen_host>::</listen_host>
        <listen_host>host_ip</listen_host>
    
    
    
        <max_connections>4096</max_connections>
    
        <!-- For 'Connection: keep-alive' in HTTP 1.1 -->
        <keep_alive_timeout>3</keep_alive_timeout>
    
        <!-- gRPC protocol (see src/Server/grpc_protos/clickhouse_grpc.proto for the API) -->
        <!-- <grpc_port>9100</grpc_port> -->
        <grpc>
            <enable_ssl>false</enable_ssl>
    
            <!-- The following two files are used only if enable_ssl=1 -->
            <ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file>
            <ssl_key_file>/path/to/ssl_key_file</ssl_key_file>
    
            <!-- Whether server will request client for a certificate -->
            <ssl_require_client_auth>false</ssl_require_client_auth>
    
            <!-- The following file is used only if ssl_require_client_auth=1 -->
            <ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file>
    
            <!-- Default compression algorithm (applied if client doesn't specify another algorithm).
                 Supported algorithms: none, deflate, gzip, stream_gzip -->
            <compression>deflate</compression>
    
            <!-- Default compression level (applied if client doesn't specify another level).
                 Supported levels: none, low, medium, high -->
            <compression_level>medium</compression_level>
    
            <!-- Send/receive message size limits in bytes. -1 means unlimited -->
            <max_send_message_size>-1</max_send_message_size>
            <max_receive_message_size>-1</max_receive_message_size>
    
            <!-- Enable if you want very detailed logs -->
            <verbose_logs>false</verbose_logs>
        </grpc>
    
        <!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
        <openSSL>
            <server> <!-- Used for https server AND secure tcp port -->
                <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
                <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
                <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
    
                <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
                <verificationMode>none</verificationMode>
                <loadDefaultCAFile>true</loadDefaultCAFile>
                <cacheSessions>true</cacheSessions>
                <disableProtocols>sslv2,sslv3</disableProtocols>
                <preferServerCiphers>true</preferServerCiphers>
            </server>
    
            <client> <!-- Used for connecting to https dictionary source and secured Zookeeper communication -->
                <loadDefaultCAFile>true</loadDefaultCAFile>
                <cacheSessions>true</cacheSessions>
                <disableProtocols>sslv2,sslv3</disableProtocols>
                <preferServerCiphers>true</preferServerCiphers>
                <!-- Use for self-signed: <verificationMode>none</verificationMode> -->
                <invalidCertificateHandler>
                    <!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
                    <name>RejectCertificateHandler</name>
                </invalidCertificateHandler>
            </client>
        </openSSL>
    
        <max_concurrent_queries>100</max_concurrent_queries>
    
        <max_server_memory_usage>0</max_server_memory_usage>
    
    
        <max_thread_pool_size>10000</max_thread_pool_size>
    
        <!-- On memory constrained environments you may have to set this to value larger than 1.
          -->
        <max_server_memory_usage_to_ram_ratio>0.9</max_server_memory_usage_to_ram_ratio>
    
        <total_memory_profiler_step>4194304</total_memory_profiler_step>
    
        <total_memory_tracker_sample_probability>0</total_memory_tracker_sample_probability>
    
        <uncompressed_cache_size>8589934592</uncompressed_cache_size>
    
        <mark_cache_size>5368709120</mark_cache_size>
    
        <mmap_cache_size>1000</mmap_cache_size>
    
        <!-- Cache size in bytes for compiled expressions.-->
        <compiled_expression_cache_size>134217728</compiled_expression_cache_size>
    
        <!-- Cache size in elements for compiled expressions.-->
        <compiled_expression_cache_elements_size>10000</compiled_expression_cache_elements_size>
    
        <!-- Path to data directory, with trailing slash. -->
        <path>/data/clickhouse/</path>
    
        <!-- Path to temporary data for processing hard queries. -->
        <tmp_path>/data/clickhouse/tmp/</tmp_path>
    
        <user_files_path>/data/clickhouse/user_files/</user_files_path>
    
        <!-- LDAP server definitions. -->
        <ldap_servers>
    
        </ldap_servers>
    
        <user_directories>
            <users_xml>
                <!-- Path to configuration file with predefined users. -->
                <path>users.xml</path>
            </users_xml>
            <local_directory>
                <!-- Path to folder where users created by SQL commands are stored. -->
                <path>/data/clickhouse/access/</path>
            </local_directory>
    
        </user_directories>
    
        <!-- Default profile of settings. -->
        <default_profile>default</default_profile>
    
        <!-- Comma-separated list of prefixes for user-defined settings. -->
        <custom_settings_prefixes></custom_settings_prefixes>
    
        <default_database>default</default_database>
    
    	<timezone>Asia/Shanghai</timezone>
    
        <mlock_executable>true</mlock_executable>
    
        <!-- Reallocate memory for machine code ("text") using huge pages. Highly experimental. -->
        <remap_executable>false</remap_executable>
    
        <remote_servers>
            <!-- Test only shard config for testing distributed storage -->
            <cluster_5_shards_2_replica>
    	     <shard>
                     <internal_replication>true</internal_replication>
                     <replica>
                         <host>192.168.10.101</host>
                         <port>9000</port>
    		     <user>default</user>
                         <password>123456</password>
                     </replica>
                     <replica>
                         <host>192.168.10.102</host>
                         <port>9000</port>
    		     <user>default</user>
                         <password>123456</password>
                     </replica>
                 </shard>
                 <shard>
                     <internal_replication>true</internal_replication>
                     <replica>
                         <host>192.168.10.103</host>
                         <port>9000</port>
    		     <user>default</user>
                         <password>123456</password>
                     </replica>
                     <replica>
                         <host>192.168.10.104</host>
                         <port>9000</port>
    		     <user>default</user>
                         <password>123456</password>
                     </replica>
                 </shard>
                 <shard>
                     <internal_replication>true</internal_replication>
                     <replica>
                         <host>192.168.10.105</host>
                         <port>9000</port>
    		     <user>default</user>
                         <password>123456</password>
                     </replica>
                     <replica>
                         <host>192.168.10.106</host>
                         <port>9000</port>
    		     <user>default</user>
                         <password>123456</password>
                     </replica>
                 </shard>
                 <shard>
                     <internal_replication>true</internal_replication>
                     <replica>
                         <host>192.168.10.107</host>
                         <port>9000</port>
    		     <user>default</user>
                         <password>123456</password>
                     </replica>
                     <replica>
                         <host>192.168.10.108</host>
                         <port>9000</port>
    		     <user>default</user>
                         <password>123456</password>
                     </replica>
                 </shard>
                 <shard>
                     <internal_replication>true</internal_replication>
                     <replica>
                         <host>192.168.10.109</host>
                         <port>9000</port>
    		     <user>default</user>
                         <password>123456</password>
                     </replica>
                     <replica>
                         <host>192.168.10.110</host>
                         <port>9000</port>
    		     <user>default</user>
                         <password>123456</password>
                     </replica>
                 </shard>
            </cluster_5_shards_2_replica>
        </remote_servers>
    
        <zookeeper>
            <node>
                <host>192.168.10.101</host>
                <port>2181</port>
            </node>
            <node>
                <host>192.168.10.102</host>
                <port>2181</port>
            </node>
            <node>
                <host>192.168.10.103</host>
                <port>2181</port>
            </node>
        </zookeeper>
    
    
    
        <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
        <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
    
    
        <!-- Maximum session timeout, in seconds. Default: 3600. -->
        <max_session_timeout>3600</max_session_timeout>
    
        <!-- Default session timeout, in seconds. Default: 60. -->
        <default_session_timeout>60</default_session_timeout>
    
        <query_log>
    
            <database>system</database>
            <table>query_log</table>
    
            <partition_by>toYYYYMM(event_date)</partition_by>
    
            <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        </query_log>
    
        <trace_log>
            <database>system</database>
            <table>trace_log</table>
    
            <partition_by>toYYYYMM(event_date)</partition_by>
            <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        </trace_log>
    
        <!-- Query thread log. Has information about all threads participated in query execution.
             Used only for queries with setting log_query_threads = 1. -->
        <query_thread_log>
            <database>system</database>
            <table>query_thread_log</table>
            <partition_by>toYYYYMM(event_date)</partition_by>
            <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        </query_thread_log>
    
        <!-- Query views log. Has information about all dependent views associated with a query.
             Used only for queries with setting log_query_views = 1. -->
        <query_views_log>
            <database>system</database>
            <table>query_views_log</table>
            <partition_by>toYYYYMM(event_date)</partition_by>
            <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        </query_views_log>
    
        <!-- Uncomment if use part log.
             Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads).-->
        <part_log>
            <database>system</database>
            <table>part_log</table>
            <partition_by>toYYYYMM(event_date)</partition_by>
            <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        </part_log>
    
        <metric_log>
            <database>system</database>
            <table>metric_log</table>
            <flush_interval_milliseconds>7500</flush_interval_milliseconds>
            <collect_interval_milliseconds>1000</collect_interval_milliseconds>
        </metric_log>
    
        <asynchronous_metric_log>
            <database>system</database>
            <table>asynchronous_metric_log</table>
    
            <flush_interval_milliseconds>7000</flush_interval_milliseconds>
        </asynchronous_metric_log>
    
        <opentelemetry_span_log>
    
            <engine>
                engine MergeTree
                partition by toYYYYMM(finish_date)
                order by (finish_date, finish_time_us, trace_id)
            </engine>
            <database>system</database>
            <table>opentelemetry_span_log</table>
            <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        </opentelemetry_span_log>
    
    
        <!-- Crash log. Stores stack traces for fatal errors.
             This table is normally empty. -->
        <crash_log>
            <database>system</database>
            <table>crash_log</table>
    
            <partition_by />
            <flush_interval_milliseconds>1000</flush_interval_milliseconds>
        </crash_log>
    
        <top_level_domains_lists>
    
        </top_level_domains_lists>
    
        <dictionaries_config>*_dictionary.xml</dictionaries_config>
    
        <encryption>
            
        </encryption>
    
        <distributed_ddl>
            <!-- Path in ZooKeeper to queue with DDL queries -->
            <path>/clickhouse/task_queue/ddl</path>
    
        </distributed_ddl>
    
        <graphite_rollup_example>
            <pattern>
                <regexp>click_cost</regexp>
                <function>any</function>
                <retention>
                    <age>0</age>
                    <precision>3600</precision>
                </retention>
                <retention>
                    <age>86400</age>
                    <precision>60</precision>
                </retention>
            </pattern>
            <default>
                <function>max</function>
                <retention>
                    <age>0</age>
                    <precision>60</precision>
                </retention>
                <retention>
                    <age>3600</age>
                    <precision>300</precision>
                </retention>
                <retention>
                    <age>86400</age>
                    <precision>3600</precision>
                </retention>
            </default>
        </graphite_rollup_example>
    
        <format_schema_path>/data/clickhouse/format_schemas/</format_schema_path>
    
        <query_masking_rules>
            <rule>
                <name>hide encrypt/decrypt arguments</name>
                <regexp>((?:aes_)?(?:encrypt|decrypt)(?:_mysql)?)\s*\(\s*(?:'(?:\\'|.)+'|.*?)\s*\)</regexp>
                <!-- or more secure, but also more invasive:
                    (aes_\w+)\s*\(.*\)
                -->
                <replace>\1(???)</replace>
            </rule>
        </query_masking_rules>
    
        <send_crash_reports>
    
            <enabled>false</enabled>
            <!-- Change <anonymize> to true if you don't feel comfortable attaching the server hostname to the crash report -->
            <anonymize>false</anonymize>
            <!-- Default endpoint should be changed to different Sentry DSN only if you have -->
            <!-- some in-house engineers or hired consultants who're going to debug ClickHouse issues for you -->
            <endpoint>https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277</endpoint>
        </send_crash_reports>
    
        <!-- Substitution values used in Replicated* table ZooKeeper paths.
             'sdnum' and 'rpnum' are placeholders: set them to this node's actual shard
             and replica numbers; each server in the cluster needs its own unique values. -->
        <macros>
            <shard>sdnum</shard>
            <replica>rpnum</replica>
        </macros>
    	
    </yandex>
    
    
  • 相关阅读:
    都说学编程容易秃头,程序员到底有多忙?看看这四位网友的回答
    给你的C/C++学习路线建议
    使C语言实现面向对象的三个要素,你掌握了吗?
    digital-image-processing-image-segmentation
    TensorFlow Image Segmentation: Two Quick Tutorials
    Image Segmentation
    Links for tensorflow-gpu
    Remove image background and shadows
    How to do Semantic Segmentation using Deep learning
    TensorFlow Image Segmentation: Two Quick Tutorials
  • 原文地址:https://www.cnblogs.com/whiteY/p/16636914.html
Copyright © 2020-2023  润新知