• GlusterFS memory management


    How does GlusterFS manage memory?

      First, let's look at its memory-pool management structure:

     1 struct mem_pool {
     2         struct list_head  list;
     3         int               hot_count;
     4         int               cold_count;
     5         gf_lock_t         lock;
     6         unsigned long     padded_sizeof_type;
     7         void             *pool;
     8         void             *pool_end;
     9         int               real_sizeof_type;
    10         uint64_t          alloc_count;
    11         uint64_t          pool_misses;
    12         int               max_alloc;
    13         int               curr_stdalloc;
    14         int               max_stdalloc;
    15         char             *name;
    16         struct list_head  global_list;
    17 };

    The management structure carries little state. The core field is list: every free chunk is threaded onto this doubly linked list. hot_count and cold_count track how many chunks are in use versus free, pool and pool_end record the slab's address range, and alloc_count, pool_misses, and the *_stdalloc fields are statistics, including counts of heap allocations made when the pool runs dry.
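
    To make padded_sizeof_type concrete, here is a sketch of how one chunk is laid out. It follows from the padding macros in mem-pool.h (GF_MEM_POOL_PAD_BOUNDARY covers a list head, a pool back-pointer, and an in-use flag); the struct itself is illustrative and does not appear in the source:

      /* Illustrative only -- the real code computes these offsets with
       * macros rather than declaring a struct. */
      struct chunk_layout {
              struct list_head  list;    /* links a free chunk into mem_pool->list  */
              struct mem_pool  *pool;    /* back-pointer to the owning pool         */
              int               in_use;  /* 1 while handed out by mem_get           */
              char              data[];  /* caller's object: real_sizeof_type bytes */
      };
      /* padded_sizeof_type == real_sizeof_type + GF_MEM_POOL_PAD_BOUNDARY */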

      Next, the interface that creates a memory pool:

     1 struct mem_pool *
     2 mem_pool_new_fn (unsigned long sizeof_type,
     3                  unsigned long count, char *name)
     4 {
     5         struct mem_pool  *mem_pool = NULL;
     6         unsigned long     padded_sizeof_type = 0;
     7         void             *pool = NULL;
     8         int               i = 0;
     9         int               ret = 0;
    10         struct list_head *list = NULL;
    11         glusterfs_ctx_t  *ctx = NULL;
    12 
    13         if (!sizeof_type || !count) {
    14                 gf_log_callingfn ("mem-pool", GF_LOG_ERROR, "invalid argument");
    15                 return NULL;
    16         }
    17         padded_sizeof_type = sizeof_type + GF_MEM_POOL_PAD_BOUNDARY;
    18 
    19         mem_pool = GF_CALLOC (sizeof (*mem_pool), 1, gf_common_mt_mem_pool);
    20         if (!mem_pool)
    21                 return NULL;
    22 
    23         ret = gf_asprintf (&mem_pool->name, "%s:%s", THIS->name, name);
    24         if (ret < 0)
    25                 return NULL;
    26 
    27         if (!mem_pool->name) {
    28                 GF_FREE (mem_pool);
    29                 return NULL;
    30         }
    31 
    32         LOCK_INIT (&mem_pool->lock);
    33         INIT_LIST_HEAD (&mem_pool->list);
    34         INIT_LIST_HEAD (&mem_pool->global_list);
    35 
    36         mem_pool->padded_sizeof_type = padded_sizeof_type;
    37         mem_pool->cold_count = count;
    38         mem_pool->real_sizeof_type = sizeof_type;
    39 
    40         pool = GF_CALLOC (count, padded_sizeof_type, gf_common_mt_long);
    41         if (!pool) {
    42                 GF_FREE (mem_pool->name);
    43                 GF_FREE (mem_pool);
    44                 return NULL;
    45         }
    46 
    47         for (i = 0; i < count; i++) {
    48                 list = pool + (i * (padded_sizeof_type));
    49                 INIT_LIST_HEAD (list);
    50                 list_add_tail (list, &mem_pool->list);
    51         }
    52 
    53         mem_pool->pool = pool;
    54         mem_pool->pool_end = pool + (count * (padded_sizeof_type));
    55 
    56         /* add this pool to the global list */
    57         ctx = THIS->ctx;
    58         if (!ctx)
    59                 goto out;
    60 
    61         list_add (&mem_pool->global_list, &ctx->mempool_list);
    62 
    63 out:
    64         return mem_pool;
    65 }

    Line 19 allocates the mem_pool management structure. After initializing it, line 40 allocates the memory that will actually be handed out, and lines 47-51 thread each padded chunk onto the mem_pool->list free list. The pool's start and end addresses are recorded next (lines 53-54), and finally the structure is added to the global mempool_list. (Note a small leak in this version: if gf_asprintf fails on line 23, the mem_pool allocated on line 19 is never freed.)
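
    As a quick usage sketch, assuming the mem_pool_new convenience macro from mem-pool.h, which wraps mem_pool_new_fn with sizeof and the stringified type name; the type and count here are illustrative:

      /* Hypothetical caller: pre-allocate 4096 chunks sized for my_req_t.
       * my_req_t is an illustrative type, not from the GlusterFS source. */
      struct mem_pool *pool = mem_pool_new (my_req_t, 4096);
      if (!pool) {
              gf_log ("example", GF_LOG_ERROR, "failed to create mem pool");
              return -1;
      }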

    How is memory obtained from the pool once it has been created?

     1 void *
     2 mem_get (struct mem_pool *mem_pool)
     3 {
     4         struct list_head *list = NULL;
     5         void             *ptr = NULL;
     6         int             *in_use = NULL;
     7         struct mem_pool **pool_ptr = NULL;
     8 
     9         if (!mem_pool) {
    10                 gf_log_callingfn ("mem-pool", GF_LOG_ERROR, "invalid argument");
    11                 return NULL;
    12         }
    13 
    14         LOCK (&mem_pool->lock);
    15         {
    16                 mem_pool->alloc_count++;
    17                 if (mem_pool->cold_count) {
    18                         list = mem_pool->list.next;
    19                         list_del (list);
    20 
    21                         mem_pool->hot_count++;
    22                         mem_pool->cold_count--;
    23 
    24                         if (mem_pool->max_alloc < mem_pool->hot_count)
    25                                 mem_pool->max_alloc = mem_pool->hot_count;
    26 
    27                         ptr = list;
    28                         in_use = (ptr + GF_MEM_POOL_LIST_BOUNDARY +
    29                                   GF_MEM_POOL_PTR);
    30                         *in_use = 1;
    31 
    32                         goto fwd_addr_out;
    33                 }
    34 
    35                 /* This is a problem area. If we've run out of
    36                  * chunks in our slab above, we need to allocate
    37                  * enough memory to service this request.
    38                  * The problem is, these individual chunks will fail
    39                  * the first address range check in __is_member. Now, since
    40                  * we're not allocating a full second slab, we wont have
    41                  * enough info perform the range check in __is_member.
    42                  *
    43                  * I am working around this by performing a regular allocation
    44                  * , just the way the caller would've done when not using the
    45                  * mem-pool. That also means, we're not padding the size with
    46                  * the list_head structure because, this will not be added to
    47                  * the list of chunks that belong to the mem-pool allocated
    48                  * initially.
    49                  *
    50                  * This is the best we can do without adding functionality for
    51                  * managing multiple slabs. That does not interest us at present
    52                  * because it is too much work knowing that a better slab
    53                  * allocator is coming RSN.
    54                  */
    55                 mem_pool->pool_misses++;
    56                 mem_pool->curr_stdalloc++;
    57                 if (mem_pool->max_stdalloc < mem_pool->curr_stdalloc)
    58                         mem_pool->max_stdalloc = mem_pool->curr_stdalloc;
    59                 ptr = GF_CALLOC (1, mem_pool->padded_sizeof_type,
    60                                  gf_common_mt_mem_pool);
    61                 gf_log_callingfn ("mem-pool", GF_LOG_DEBUG, "Mem pool is full. "
    62                                   "Callocing mem");
    63 
    64                 /* Memory coming from the heap need not be transformed from a
    65                  * chunkhead to a usable pointer since it is not coming from
    66                  * the pool.
    67                  */
    68         }
    69 fwd_addr_out:
    70         pool_ptr = mem_pool_from_ptr (ptr);
    71         *pool_ptr = (struct mem_pool *)mem_pool; // record the owning pool's address
    72         ptr = mem_pool_chunkhead2ptr (ptr);
    73         UNLOCK (&mem_pool->lock);
    74 
    75         return ptr;
    76 }

    Lines 17-33 show the fast path: when a chunk is requested and cold chunks remain, GlusterFS pops the first entry off mem_pool->list, marks it in use, and returns it. The interesting question is what happens when the pool is exhausted. Lines 55-63 handle that case: the pool counts a miss, falls back to a plain heap allocation of the same padded size, stores the owning-pool pointer in the chunk header just like the pooled path does (lines 70-71), and hands the memory straight back to the caller.
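
    The pointer arithmetic on lines 70-72 relies on a few helper macros from mem-pool.h. Roughly as follows, sketched from the same era of the source (verify against the header for the exact definitions):

      /* Per-chunk header layout: [list_head][mem_pool *][int in_use][payload] */
      #define GF_MEM_POOL_LIST_BOUNDARY  (sizeof (struct list_head))
      #define GF_MEM_POOL_PTR            (sizeof (struct mem_pool *))
      #define GF_MEM_POOL_PAD_BOUNDARY   (GF_MEM_POOL_LIST_BOUNDARY + \
                                          GF_MEM_POOL_PTR + sizeof (int))

      /* Skip the list head to reach the stored owning-pool pointer. */
      #define mem_pool_from_ptr(ptr)  ((ptr) + GF_MEM_POOL_LIST_BOUNDARY)

      /* Step over the whole header to convert a chunk head into the pointer
       * the caller sees, and back again. */
      #define mem_pool_chunkhead2ptr(head)  ((head) + GF_MEM_POOL_PAD_BOUNDARY)
      #define mem_pool_ptr2chunkhead(ptr)   ((ptr) - GF_MEM_POOL_PAD_BOUNDARY)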

    Finally, let's look at how memory is released:

     1 void
     2 mem_put (void *ptr)
     3 {
     4         struct list_head *list = NULL;
     5         int    *in_use = NULL;
     6         void   *head = NULL;
     7         struct mem_pool **tmp = NULL;
     8         struct mem_pool *pool = NULL;
     9 
    10         if (!ptr) {
    11                 gf_log_callingfn ("mem-pool", GF_LOG_ERROR, "invalid argument");
    12                 return;
    13         }
    14 
    15         list = head = mem_pool_ptr2chunkhead (ptr);
    16         tmp = mem_pool_from_ptr (head); // read back the owning pool's address
    17         if (!tmp) {
    18                 gf_log_callingfn ("mem-pool", GF_LOG_ERROR,
    19                                   "ptr header is corrupted");
    20                 return;
    21         }
    22 
    23         pool = *tmp;
    24         if (!pool) {
    25                 gf_log_callingfn ("mem-pool", GF_LOG_ERROR,
    26                                   "mem-pool ptr is NULL");
    27                 return;
    28         }
    29         LOCK (&pool->lock);
    30         {
    31 
    32                 switch (__is_member (pool, ptr))
    33                 {
    34                 case 1:
    35                         in_use = (head + GF_MEM_POOL_LIST_BOUNDARY +
    36                                   GF_MEM_POOL_PTR);
    37                         if (!is_mem_chunk_in_use(in_use)) {
    38                                 gf_log_callingfn ("mem-pool", GF_LOG_CRITICAL,
    39                                                   "mem_put called on freed ptr %p of mem "
    40                                                   "pool %p", ptr, pool);
    41                                 break;
    42                         }
    43                         pool->hot_count--;
    44                         pool->cold_count++;
    45                         *in_use = 0;
    46                         list_add (list, &pool->list);
    47                         break;
    48                 case -1:
    49                         /* For some reason, the address given is within
    50                          * the address range of the mem-pool but does not align
    51                          * with the expected start of a chunk that includes
    52                          * the list headers also. Sounds like a problem in
    53                          * layers of clouds up above us. ;)
    54                          */
    55                         abort ();
    56                         break;
    57                 case 0:
    58                         /* The address is outside the range of the mem-pool. We
    59                          * assume here that this address was allocated at a
    60                          * point when the mem-pool was out of chunks in mem_get
    61                          * or the programmer has made a mistake by calling the
    62                          * wrong de-allocation interface. We do
    63                          * not have enough info to distinguish between the two
    64                          * situations.
    65                          */
    66                         pool->curr_stdalloc--;
    67                         GF_FREE (list);
    68                         break;
    69                 default:
    70                         /* log error */
    71                         break;
    72                 }
    73         }
    74         UNLOCK (&pool->lock);
    75 }

    In the switch statement, case 1 covers chunks that belong to the pool: after guarding against a double mem_put, the in_use flag is cleared and the chunk is linked back onto the free list. Case 0 covers memory that falls outside the pool's address range, i.e. the heap-fallback allocations from mem_get: GlusterFS simply releases them with GF_FREE, mirroring exactly how they were allocated.
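
    Putting it together, a caller pairs mem_get with mem_put and never needs to care whether the chunk came from the slab or from the heap fallback. A minimal sketch, reusing the hypothetical my_req_t pool from the earlier example:

      /* One request cycle against the pool created earlier. */
      my_req_t *req = mem_get (pool);   /* from the free list, or the heap on a miss */
      if (!req)
              return -1;

      /* ... use req ... */

      mem_put (req);   /* back onto the free list, or GF_FREE for a heap chunk */

      /* When the pool itself is no longer needed: */
      mem_pool_destroy (pool);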


