Changes of Revision 3
Changed: memcached.spec

 %define groupname memcached

 Name: memcached
-Version: 1.4.0
+Version: 1.2.8
 Release: 1
 Summary: High Performance, Distributed Memory Object Cache
Changed: memcached-1.2.8.tar.gz/ChangeLog

@@ -1,3 +1,15 @@
+2009-04-10 [Version 1.2.8 released]
+
+    * make -b command actually work
+
+    * *critical bugfix*. In 1.2.7 under multithreaded mode, memcached would
+      never restart accepting connections after hitting the maximum connection
+      limit.
+
+    * remove 'stats maps' command, as it is a potential information leak,
+      usable if versions prior to 1.2.8 ever have buffer overflows
+      discovered.
+
 2009-04-02 [Version 1.2.7 released]

     * reset new stats with 'stats reset'
Changed: memcached-1.2.8.tar.gz/configure

@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.63 for memcached 1.2.7.
+# Generated by GNU Autoconf 2.63 for memcached 1.2.8.
 #
 # Report bugs to <brad@danga.com>.
 #
@@ -596,8 +596,8 @@
 # Identity of this package.
 PACKAGE_NAME='memcached'
 PACKAGE_TARNAME='memcached'
-PACKAGE_VERSION='1.2.7'
-PACKAGE_STRING='memcached 1.2.7'
+PACKAGE_VERSION='1.2.8'
+PACKAGE_STRING='memcached 1.2.8'
 PACKAGE_BUGREPORT='brad@danga.com'

 ac_unique_file="memcached.c"
@@ -1305,7 +1305,7 @@
 # Omit some internal or obsolete options to make the list less imposing.
 # This message is too long to be a string in the A/UX 3.1 sh.
 cat <<_ACEOF
-\`configure' configures memcached 1.2.7 to adapt to many kinds of systems.
+\`configure' configures memcached 1.2.8 to adapt to many kinds of systems.

 Usage: $0 [OPTION]... [VAR=VALUE]...

@@ -1376,7 +1376,7 @@

 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of memcached 1.2.7:";;
+     short | recursive ) echo "Configuration of memcached 1.2.8:";;
   esac
   cat <<\_ACEOF

@@ -1471,7 +1471,7 @@
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-memcached configure 1.2.7
+memcached configure 1.2.8
 generated by GNU Autoconf 2.63

 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
@@ -1485,7 +1485,7 @@
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.

-It was created by memcached $as_me 1.2.7, which was
+It was created by memcached $as_me 1.2.8, which was
 generated by GNU Autoconf 2.63.  Invocation command line was

   $ $0 $@

@@ -2328,7 +2328,7 @@

 # Define the identity of the package.
  PACKAGE=memcached
- VERSION=1.2.7
+ VERSION=1.2.8


 cat >>confdefs.h <<_ACEOF
@@ -6748,7 +6748,7 @@
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by memcached $as_me 1.2.7, which was
+This file was extended by memcached $as_me 1.2.8, which was
 generated by GNU Autoconf 2.63.  Invocation command line was

   CONFIG_FILES    = $CONFIG_FILES
@@ -6811,7 +6811,7 @@
 _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_version="\\
-memcached config.status 1.2.7
+memcached config.status 1.2.8
 configured by $0, generated by GNU Autoconf 2.63,
   with options \\"`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\"
Changed: memcached-1.2.8.tar.gz/configure.ac

@@ -1,5 +1,5 @@
 AC_PREREQ(2.52)
-AC_INIT(memcached, 1.2.7, brad@danga.com)
+AC_INIT(memcached, 1.2.8, brad@danga.com)
 AC_CANONICAL_SYSTEM
 AC_CONFIG_SRCDIR(memcached.c)
 AM_INIT_AUTOMAKE(AC_PACKAGE_NAME, AC_PACKAGE_VERSION)
Changed: memcached-1.2.8.tar.gz/doc/Makefile

@@ -88,14 +88,14 @@
 PACKAGE = memcached
 PACKAGE_BUGREPORT = brad@danga.com
 PACKAGE_NAME = memcached
-PACKAGE_STRING = memcached 1.2.7
+PACKAGE_STRING = memcached 1.2.8
 PACKAGE_TARNAME = memcached
-PACKAGE_VERSION = 1.2.7
+PACKAGE_VERSION = 1.2.8
 PATH_SEPARATOR = :
 SET_MAKE =
 SHELL = /bin/sh
 STRIP =
-VERSION = 1.2.7
+VERSION = 1.2.8
 abs_builddir = /home/dormando/p/danga/memcached_new/doc
 abs_srcdir = /home/dormando/p/danga/memcached_new/doc
 abs_top_builddir = /home/dormando/p/danga/memcached_new
Changed: memcached-1.2.8.tar.gz/memcached.c

@@ -80,7 +80,6 @@
 static void event_handler(const int fd, const short which, void *arg);
 static void conn_close(conn *c);
 static void conn_init(void);
-static void accept_new_conns(const bool do_accept);
 static bool update_event(conn *c, const int new_flags);
 static void complete_nread(conn *c);
 static void process_command(conn *c, char *command);
@@ -1138,43 +1137,6 @@
 #endif /* HAVE_STRUCT_MALLINFO */
 #endif /* HAVE_MALLOC_H */

-#if !defined(WIN32) || !defined(__APPLE__)
-    if (strcmp(subcommand, "maps") == 0) {
-        char *wbuf;
-        int wsize = 8192; /* should be enough */
-        int fd;
-        int res;
-
-        if ((wbuf = (char *)malloc(wsize)) == NULL) {
-            out_string(c, "SERVER_ERROR out of memory writing stats maps");
-            return;
-        }
-
-        fd = open("/proc/self/maps", O_RDONLY);
-        if (fd == -1) {
-            out_string(c, "SERVER_ERROR cannot open the maps file");
-            free(wbuf);
-            return;
-        }
-
-        res = read(fd, wbuf, wsize - 6); /* 6 = END\r\n\0 */
-        if (res == wsize - 6) {
-            out_string(c, "SERVER_ERROR buffer overflow");
-            free(wbuf); close(fd);
-            return;
-        }
-        if (res == 0 || res == -1) {
-            out_string(c, "SERVER_ERROR can't read the maps file");
-            free(wbuf); close(fd);
-            return;
-        }
-        memcpy(wbuf + res, "END\r\n", 5);
-        write_and_free(c, wbuf, res + 5);
-        close(fd);
-        return;
-    }
-#endif
-
     if (strcmp(subcommand, "cachedump") == 0) {
         char *buf;

@@ -1968,12 +1930,9 @@
 /*
  * Sets whether we are listening for new connections or not.
  */
-void accept_new_conns(const bool do_accept) {
+void do_accept_new_conns(const bool do_accept) {
     conn *next;

-    if (! is_listen_thread())
-        return;
-
     for (next = listen_conn; next; next = next->next) {
         if (do_accept) {
             update_event(next, EV_READ | EV_PERSIST);
@@ -2861,7 +2820,7 @@
     setbuf(stderr, NULL);

     /* process arguments */
-    while ((c = getopt(argc, argv, "a:p:s:U:m:Mc:khirvdl:u:P:f:s:n:t:D:LR:b")) != -1) {
+    while ((c = getopt(argc, argv, "a:p:s:U:m:Mc:khirvdl:u:P:f:s:n:t:D:LR:b:")) != -1) {
        switch (c) {
        case 'a':
            /* access for unix domain socket, as octal mask (like chmod)*/
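The final hunk's one-character change (optstring "b" to "b:") is the whole of the ChangeLog's "make -b command actually work" fix: in getopt(3), a trailing colon marks an option as taking an argument, so without it -b was parsed as a bare flag and its value was never consumed. A minimal standalone sketch (hypothetical program, not memcached source) of the difference:

    /* With "b:" getopt consumes the next argv word into optarg;
     * with plain "b" it would not, and optarg would stay NULL. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(int argc, char **argv) {
        int c, backlog = 1024;                 /* hypothetical default */
        while ((c = getopt(argc, argv, "b:")) != -1) {
            if (c == 'b')
                backlog = atoi(optarg);        /* valid only because of the ':' */
        }
        printf("listen backlog: %d\n", backlog);  /* ./a.out -b 2048 prints 2048 */
        return 0;
    }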
Changed: memcached-1.2.8.tar.gz/memcached.h

@@ -233,6 +233,7 @@
  * Functions
  */

+void do_accept_new_conns(const bool do_accept);
 conn *do_conn_from_freelist();
 bool do_conn_add_to_freelist(conn *c);
 char *do_suffix_from_freelist();
@@ -274,6 +275,7 @@
 char *mt_add_delta(conn *c, item *item, const int incr, const int64_t delta, char *buf);
 void mt_assoc_move_next_bucket(void);
+void mt_accept_new_conns(const bool do_accept);
 conn *mt_conn_from_freelist(void);
 bool mt_conn_add_to_freelist(conn *c);
 char *mt_suffix_from_freelist(void);
@@ -303,6 +305,7 @@

 # define add_delta(c,x,y,z,a) mt_add_delta(c,x,y,z,a)
 # define assoc_move_next_bucket() mt_assoc_move_next_bucket()
+# define accept_new_conns(x) mt_accept_new_conns(x)
 # define conn_from_freelist() mt_conn_from_freelist()
 # define conn_add_to_freelist(x) mt_conn_add_to_freelist(x)
 # define suffix_from_freelist() mt_suffix_from_freelist()
@@ -334,6 +337,7 @@

 # define add_delta(c,x,y,z,a) do_add_delta(c,x,y,z,a)
 # define assoc_move_next_bucket() do_assoc_move_next_bucket()
+# define accept_new_conns(x) do_accept_new_conns(x)
 # define conn_from_freelist() do_conn_from_freelist()
 # define conn_add_to_freelist(x) do_conn_add_to_freelist(x)
 # define suffix_from_freelist() do_suffix_from_freelist()
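These declarations and macros follow the header's existing do_*/mt_* convention: each operation has a lock-free do_* core, threaded builds add an mt_* wrapper that takes the relevant mutex, and a macro gives callers a single name either way. A condensed sketch of the pattern, reduced to the one entry added here (the real header dispatches many more functions, and conn_lock lives in thread.c):

    #include <stdbool.h>

    void do_accept_new_conns(const bool do_accept);  /* core; caller handles locking */

    #ifdef USE_THREADS
    void mt_accept_new_conns(const bool do_accept);  /* takes conn_lock, calls do_* */
    # define accept_new_conns(x) mt_accept_new_conns(x)
    #else
    # define accept_new_conns(x) do_accept_new_conns(x)
    #endif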
Changed: memcached-1.2.8.tar.gz/memcached.spec

@@ -1,5 +1,5 @@
 Name: memcached
-Version: 1.2.7
+Version: 1.2.8
 Release: 1%{?dist}
 Summary: High Performance, Distributed Memory Object Cache
Changed: memcached-1.2.8.tar.gz/slabs.c

@@ -23,7 +23,7 @@
 #include <assert.h>

 #define POWER_SMALLEST 1
-#define POWER_LARGEST 200
+#define POWER_LARGEST 22
 #define POWER_BLOCK 1048576
 #define CHUNK_ALIGN_BYTES 8
 #define DONT_PREALLOC_SLABS
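POWER_LARGEST caps how many slab size classes slabs_init() will build before the final class is forced to a full 1MB slab (see the sizing loop in the file below). A standalone sketch of that loop, using assumed inputs (an 80-byte starting chunk standing in for sizeof(item) + settings.chunk_size, and growth factor 1.25, the -f default), shows what the class table looks like with the cap at 22:

    #include <stdio.h>

    #define POWER_SMALLEST 1
    #define POWER_LARGEST 22
    #define POWER_BLOCK 1048576
    #define CHUNK_ALIGN_BYTES 8

    int main(void) {
        double factor = 1.25;      /* assumed: memcached's -f default */
        unsigned int size = 80;    /* assumed stand-in for sizeof(item) + chunk_size */
        int i = POWER_SMALLEST - 1;

        while (++i < POWER_LARGEST && size <= POWER_BLOCK / 2) {
            /* keep chunks 8-byte aligned, as slabs_init does */
            if (size % CHUNK_ALIGN_BYTES)
                size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
            printf("slab class %3d: chunk size %7u\n", i, size);
            size *= factor;
        }
        /* with the cap at 22 the loop exits on the class-count bound, not the
         * size bound; the last class is still pinned to one full slab */
        printf("slab class %3d: chunk size %7u\n", i, POWER_BLOCK);
        return 0;
    }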
Added: memcached-1.2.8.tar.gz/slabs.c~

@@ -0,0 +1,437 @@
+/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+/*
+ * Slabs memory allocation, based on powers-of-N. Slabs are up to 1MB in size
+ * and are divided into chunks. The chunk sizes start off at the size of the
+ * "item" structure plus space for a small key and value. They increase by
+ * a multiplier factor from there, up to half the maximum slab size. The last
+ * slab size is always 1MB, since that's the maximum item size allowed by the
+ * memcached protocol.
+ *
+ * $Id$
+ */
+#include "memcached.h"
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/signal.h>
+#include <sys/resource.h>
+#include <fcntl.h>
+#include <netinet/in.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#define POWER_SMALLEST 1
+#define POWER_LARGEST 200
+#define POWER_BLOCK 1048576
+#define CHUNK_ALIGN_BYTES 8
+#define DONT_PREALLOC_SLABS
+
+/* powers-of-N allocation structures */
+
+typedef struct {
+    unsigned int size;      /* sizes of items */
+    unsigned int perslab;   /* how many items per slab */
+
+    void **slots;           /* list of item ptrs */
+    unsigned int sl_total;  /* size of previous array */
+    unsigned int sl_curr;   /* first free slot */
+
+    void *end_page_ptr;         /* pointer to next free item at end of page, or 0 */
+    unsigned int end_page_free; /* number of items remaining at end of last alloced page */
+
+    unsigned int slabs;     /* how many slabs were allocated for this class */
+
+    void **slab_list;       /* array of slab pointers */
+    unsigned int list_size; /* size of prev array */
+
+    unsigned int killing;   /* index+1 of dying slab, or zero if none */
+} slabclass_t;
+
+static slabclass_t slabclass[POWER_LARGEST + 1];
+static size_t mem_limit = 0;
+static size_t mem_malloced = 0;
+static int power_largest;
+
+static void *mem_base = NULL;
+static void *mem_current = NULL;
+static size_t mem_avail = 0;
+
+/*
+ * Forward Declarations
+ */
+static int do_slabs_newslab(const unsigned int id);
+static void *memory_allocate(size_t size);
+
+#ifndef DONT_PREALLOC_SLABS
+/* Preallocate as many slab pages as possible (called from slabs_init)
+   on start-up, so users don't get confused out-of-memory errors when
+   they do have free (in-slab) space, but no space to make new slabs.
+   if maxslabs is 18 (POWER_LARGEST - POWER_SMALLEST + 1), then all
+   slab types can be made. if max memory is less than 18 MB, only the
+   smaller ones will be made. */
+static void slabs_preallocate (const unsigned int maxslabs);
+#endif
+
+/*
+ * Figures out which slab class (chunk size) is required to store an item of
+ * a given size.
+ *
+ * Given object size, return id to use when allocating/freeing memory for object
+ * 0 means error: can't store such a large object
+ */
+
+unsigned int slabs_clsid(const size_t size) {
+    int res = POWER_SMALLEST;
+
+    if (size == 0)
+        return 0;
+    while (size > slabclass[res].size)
+        if (res++ == power_largest)     /* won't fit in the biggest slab */
+            return 0;
+    return res;
+}
+
+/**
+ * Determines the chunk sizes and initializes the slab class descriptors
+ * accordingly.
+ */
+void slabs_init(const size_t limit, const double factor, const bool prealloc) {
+    int i = POWER_SMALLEST - 1;
+    unsigned int size = sizeof(item) + settings.chunk_size;
+
+    /* Factor of 2.0 means use the default memcached behavior */
+    if (factor == 2.0 && size < 128)
+        size = 128;
+
+    mem_limit = limit;
+
+    if (prealloc) {
+        /* Allocate everything in a big chunk with malloc */
+        mem_base = malloc(mem_limit);
+        if (mem_base != NULL) {
+            mem_current = mem_base;
+            mem_avail = mem_limit;
+        } else {
+            fprintf(stderr, "Warning: Failed to allocate requested memory in"
+                    " one large chunk.\nWill allocate in smaller chunks\n");
+        }
+    }
+
+    memset(slabclass, 0, sizeof(slabclass));
+
+    while (++i < POWER_LARGEST && size <= POWER_BLOCK / 2) {
+        /* Make sure items are always n-byte aligned */
+        if (size % CHUNK_ALIGN_BYTES)
+            size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
+
+        slabclass[i].size = size;
+        slabclass[i].perslab = POWER_BLOCK / slabclass[i].size;
+        size *= factor;
+        if (settings.verbose > 1) {
+            fprintf(stderr, "slab class %3d: chunk size %6u perslab %5u\n",
+                    i, slabclass[i].size, slabclass[i].perslab);
+        }
+    }
+
+    power_largest = i;
+    slabclass[power_largest].size = POWER_BLOCK;
+    slabclass[power_largest].perslab = 1;
+
+    /* for the test suite: faking of how much we've already malloc'd */
+    {
+        char *t_initial_malloc = getenv("T_MEMD_INITIAL_MALLOC");
+        if (t_initial_malloc) {
+            mem_malloced = (size_t)atol(t_initial_malloc);
+        }
+
+    }
+
+#ifndef DONT_PREALLOC_SLABS
+    {
+        char *pre_alloc = getenv("T_MEMD_SLABS_ALLOC");
+
+        if (pre_alloc == NULL || atoi(pre_alloc) != 0) {
+            slabs_preallocate(power_largest);
+        }
+    }
+#endif
+}
+
+#ifndef DONT_PREALLOC_SLABS
+static void slabs_preallocate (const unsigned int maxslabs) {
+    int i;
+    unsigned int prealloc = 0;
+
+    /* pre-allocate a 1MB slab in every size class so people don't get
+       confused by non-intuitive "SERVER_ERROR out of memory"
+       messages. this is the most common question on the mailing
+       list. if you really don't want this, you can rebuild without
+       these three lines. */
+
+    for (i = POWER_SMALLEST; i <= POWER_LARGEST; i++) {
+        if (++prealloc > maxslabs)
+            return;
+        do_slabs_newslab(i);
+    }
+
+}
+#endif
+
+static int grow_slab_list (const unsigned int id) {
+    slabclass_t *p = &slabclass[id];
+    if (p->slabs == p->list_size) {
+        size_t new_size = (p->list_size != 0) ? p->list_size * 2 : 16;
+        void *new_list = realloc(p->slab_list, new_size * sizeof(void *));
+        if (new_list == 0) return 0;
+        p->list_size = new_size;
+        p->slab_list = new_list;
+    }
+    return 1;
+}
+
+static int do_slabs_newslab(const unsigned int id) {
+    slabclass_t *p = &slabclass[id];
+#ifdef ALLOW_SLABS_REASSIGN
+    int len = POWER_BLOCK;
+#else
+    int len = p->size * p->perslab;
+#endif
+    char *ptr;
+
+    if ((mem_limit && mem_malloced + len > mem_limit && p->slabs > 0) ||
+        (grow_slab_list(id) == 0) ||
+        ((ptr = memory_allocate((size_t)len)) == 0)) {
+
+        MEMCACHED_SLABS_SLABCLASS_ALLOCATE_FAILED(id);
+        return 0;
+    }
+
+    memset(ptr, 0, (size_t)len);
+    p->end_page_ptr = ptr;
+    p->end_page_free = p->perslab;
+
+    p->slab_list[p->slabs++] = ptr;
+    mem_malloced += len;
+
+    MEMCACHED_SLABS_SLABCLASS_ALLOCATE(id);
+    return 1;
+}
+
+/*@null@*/
+void *do_slabs_alloc(const size_t size, unsigned int id) {
+    slabclass_t *p;
+    void *ret = NULL;
+
+    if (id < POWER_SMALLEST || id > power_largest) {
+        MEMCACHED_SLABS_ALLOCATE_FAILED(size, 0);
+        return NULL;
+    }
+
+    p = &slabclass[id];
+    assert(p->sl_curr == 0 || ((item *)p->slots[p->sl_curr - 1])->slabs_clsid == 0);
+
+#ifdef USE_SYSTEM_MALLOC
+    if (mem_limit && mem_malloced + size > mem_limit) {
+        MEMCACHED_SLABS_ALLOCATE_FAILED(size, id);
+        return 0;
+    }
+    mem_malloced += size;
+    ret = malloc(size);
+    MEMCACHED_SLABS_ALLOCATE(size, id, 0, ret);
+    return ret;
+#endif
+
+    /* fail unless we have space at the end of a recently allocated page,
+       we have something on our freelist, or we could allocate a new page */
+    if (! (p->end_page_ptr != 0 || p->sl_curr != 0 ||
+           do_slabs_newslab(id) != 0)) {
+        /* We don't have more memory available */
+        ret = NULL;
+    } else if (p->sl_curr != 0) {
+        /* return off our freelist */
+        ret = p->slots[--p->sl_curr];
+    } else {
+        /* if we recently allocated a whole page, return from that */
+        assert(p->end_page_ptr != NULL);
+        ret = p->end_page_ptr;
+        if (--p->end_page_free != 0) {
+            p->end_page_ptr += p->size;
+        } else {
+            p->end_page_ptr = 0;
+        }
+    }
+
+    if (ret) {
+        MEMCACHED_SLABS_ALLOCATE(size, id, p->size, ret);
+    } else {
+        MEMCACHED_SLABS_ALLOCATE_FAILED(size, id);
+    }
+
+    return ret;
+}
+
+void do_slabs_free(void *ptr, const size_t size, unsigned int id) {
+    slabclass_t *p;
+
+    assert(((item *)ptr)->slabs_clsid == 0);
+    assert(id >= POWER_SMALLEST && id <= power_largest);
+    if (id < POWER_SMALLEST || id > power_largest)
+        return;
+
+    MEMCACHED_SLABS_FREE(size, id, ptr);
+    p = &slabclass[id];
+
+#ifdef USE_SYSTEM_MALLOC
+    mem_malloced -= size;
+    free(ptr);
+    return;
+#endif
+
+    if (p->sl_curr == p->sl_total) { /* need more space on the free list */
+        int new_size = (p->sl_total != 0) ? p->sl_total * 2 : 16;  /* 16 is arbitrary */
+        void **new_slots = realloc(p->slots, new_size * sizeof(void *));
+        if (new_slots == 0)
+            return;
+        p->slots = new_slots;
+        p->sl_total = new_size;
+    }
+    p->slots[p->sl_curr++] = ptr;
+    return;
+}
+
+/*@null@*/
+char* do_slabs_stats(int *buflen) {
+    int i, total;
+    char *buf = (char *)malloc(power_largest * 200 + 100);
+    char *bufcurr = buf;
+
+    *buflen = 0;
+    if (buf == NULL) return NULL;
+
+    total = 0;
+    for(i = POWER_SMALLEST; i <= power_largest; i++) {
+        slabclass_t *p = &slabclass[i];
+        if (p->slabs != 0) {
+            unsigned int perslab, slabs;
+
+            slabs = p->slabs;
+            perslab = p->perslab;
+
+            bufcurr += sprintf(bufcurr, "STAT %d:chunk_size %u\r\n", i, p->size);
+            bufcurr += sprintf(bufcurr, "STAT %d:chunks_per_page %u\r\n", i, perslab);
+            bufcurr += sprintf(bufcurr, "STAT %d:total_pages %u\r\n", i, slabs);
+            bufcurr += sprintf(bufcurr, "STAT %d:total_chunks %u\r\n", i, slabs*perslab);
+            bufcurr += sprintf(bufcurr, "STAT %d:used_chunks %u\r\n", i, slabs*perslab - p->sl_curr - p->end_page_free);
+            bufcurr += sprintf(bufcurr, "STAT %d:free_chunks %u\r\n", i, p->sl_curr);
+            bufcurr += sprintf(bufcurr, "STAT %d:free_chunks_end %u\r\n", i, p->end_page_free);
+            total++;
+        }
+    }
+    bufcurr += sprintf(bufcurr, "STAT active_slabs %d\r\nSTAT total_malloced %llu\r\n", total, (unsigned long long)mem_malloced);
+    bufcurr += sprintf(bufcurr, "END\r\n");
+    *buflen = bufcurr - buf;
+    return buf;
+}
+
+#ifdef ALLOW_SLABS_REASSIGN
+/* Blows away all the items in a slab class and moves its slabs to another
+   class. This is only used by the "slabs reassign" command, for manual tweaking
+   of memory allocation. It's disabled by default since it requires that all
+   slabs be the same size (which can waste space for chunk size mantissas of
+   other than 2.0).
+   1 = success
+   0 = fail
+   -1 = tried. busy. send again shortly. */
+int do_slabs_reassign(unsigned char srcid, unsigned char dstid) {
+    void *slab, *slab_end;
+    slabclass_t *p, *dp;
+    void *iter;
+    bool was_busy = false;
+
+    if (srcid < POWER_SMALLEST || srcid > power_largest ||
+        dstid < POWER_SMALLEST || dstid > power_largest)
+        return 0;
+
+    p = &slabclass[srcid];
+    dp = &slabclass[dstid];
+
+    /* fail if src still populating, or no slab to give up in src */
+    if (p->end_page_ptr || ! p->slabs)
+        return 0;
+
+    /* fail if dst is still growing or we can't make room to hold its new one */
+    if (dp->end_page_ptr || ! grow_slab_list(dstid))
+        return 0;
+
+    if (p->killing == 0) p->killing = 1;
+
+    slab = p->slab_list[p->killing - 1];
+    slab_end = (char*)slab + POWER_BLOCK;
+
+    for (iter = slab; iter < slab_end; (char*)iter += p->size) {
+        item *it = (item *)iter;
+        if (it->slabs_clsid) {
+            if (it->refcount) was_busy = true;
+            item_unlink(it);
+        }
+    }
+
+    /* go through free list and discard items that are no longer part of this slab */
+    {
+        int fi;
+        for (fi = p->sl_curr - 1; fi >= 0; fi--) {
+            if (p->slots[fi] >= slab && p->slots[fi] < slab_end) {
+                p->sl_curr--;
+                if (p->sl_curr > fi) p->slots[fi] = p->slots[p->sl_curr];
+            }
+        }
+    }
+
+    if (was_busy) return -1;
+
+    /* if good, now move it to the dst slab class */
+    p->slab_list[p->killing - 1] = p->slab_list[p->slabs - 1];
+    p->slabs--;
+    p->killing = 0;
+    dp->slab_list[dp->slabs++] = slab;
+    dp->end_page_ptr = slab;
+    dp->end_page_free = dp->perslab;
+    /* this isn't too critical, but other parts of the code do asserts to
+       make sure this field is always 0. */
+    for (iter = slab; iter < slab_end; (char*)iter += dp->size) {
+        ((item *)iter)->slabs_clsid = 0;
+    }
+    return 1;
+}
+#endif
+
+static void *memory_allocate(size_t size) {
+    void *ret;
+
+    if (mem_base == NULL) {
+        /* We are not using a preallocated large memory chunk */
+        ret = malloc(size);
+    } else {
+        ret = mem_current;
+
+        if (size > mem_avail) {
+            return NULL;
+        }
+
+        /* mem_current pointer _must_ be aligned!!! */
+        if (size % CHUNK_ALIGN_BYTES) {
+            size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
+        }
+
+        mem_current += size;
+        if (size < mem_avail) {
+            mem_avail -= size;
+        } else {
+            mem_avail = 0;
+        }
+    }
+
+    return ret;
+}
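A small worked example of the CHUNK_ALIGN_BYTES arithmetic that appears in both slabs_init() and memory_allocate() above: the expression rounds a size up to the next multiple of 8 so the bump pointer mem_current stays aligned (standalone sketch, not memcached source):

    #include <stdio.h>

    #define CHUNK_ALIGN_BYTES 8

    /* Round size up to the next multiple of CHUNK_ALIGN_BYTES. */
    static size_t align_up(size_t size) {
        if (size % CHUNK_ALIGN_BYTES)
            size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
        return size;
    }

    int main(void) {
        printf("%zu -> %zu\n", (size_t)100, align_up(100)); /* 100 -> 104 */
        printf("%zu -> %zu\n", (size_t)104, align_up(104)); /* already aligned */
        return 0;
    }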
Added: memcached-1.2.8.tar.gz/slabs.c.orig

@@ -0,0 +1,437 @@
(437 lines, byte-for-byte identical to slabs.c~ above)
Changed: memcached-1.2.8.tar.gz/thread.c

@@ -219,6 +219,14 @@
     }
 }

+/*
+ * Sets whether or not we accept new connections.
+ */
+void mt_accept_new_conns(const bool do_accept) {
+    pthread_mutex_lock(&conn_lock);
+    do_accept_new_conns(do_accept);
+    pthread_mutex_unlock(&conn_lock);
+}

 /*
  * Pulls a conn structure from the freelist, if one is available.
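Together with the memcached.c change above, this wrapper is the critical multithreading fix from the ChangeLog: the old accept_new_conns() returned early unless called from the listener thread (the removed is_listen_thread() guard), so when a worker thread disabled accepts at the connection limit, the later call to re-enable them was a silent no-op and the daemon never accepted again. Dispatching through mt_accept_new_conns() via the new memcached.h macro makes the toggle work from any thread, serialized by conn_lock.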
Deleted: memcached-1.4.0.tar.gz