Rework flow of multi_get.

This should fix some corner-case bugs and make the code read a lot
better.
lericson 2009-09-06 01:34:15 +02:00
parent c3d46783d3
commit c023d18a20
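
Before the diff itself: the rework replaces the old "everything nested inside the
success branch" structure with early returns for argument errors and a single
cleanup label for every later failure, and it moves the scratch arrays from
malloc()/free() to PyMem_New()/PyMem_Free(). As orientation, here is a minimal,
self-contained sketch of that goto-cleanup shape, with stand-in allocations
instead of the real libmemcached and Python calls; it is not the literal
function body.

#include <stdio.h>
#include <stdlib.h>

/* Minimal sketch of the goto-cleanup pattern the commit adopts: allocate
 * everything up front, bail out through one label on any failure, and
 * release resources in exactly one place. Stand-in logic only. */
static int fetch_many(size_t nkeys)
{
    char **keys = NULL;
    size_t *key_lens = NULL;
    int ok = -1;

    keys = malloc(nkeys * sizeof *keys);
    key_lens = malloc(nkeys * sizeof *key_lens);
    if (keys == NULL || key_lens == NULL)
        goto cleanup;

    /* ... populate keys, issue the request, drain the results ... */

    ok = 0;                 /* success: fall through to the shared cleanup */

cleanup:
    free(key_lens);
    free(keys);
    return ok;
}

int main(void)
{
    return fetch_many(4) == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}

Having one exit path is what lets the later hunks drop the repeated
free()/Py_DECREF sequences that were previously duplicated in each error branch.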


@@ -352,36 +352,43 @@ static PyObject *PylibMC_Client_get_multi(PylibMC_Client *self, PyObject *args,
     PyObject *key_seq, **key_objs, *retval = NULL;
     char **keys, *prefix = NULL;
     unsigned int prefix_len = 0;
+    Py_ssize_t i;
+    PyObject *key_it, *ckey;
     size_t *key_lens;
     size_t nkeys;
-    unsigned int valid_keys_len = 0;
     memcached_return rc;
     char curr_key[MEMCACHED_MAX_KEY];
     size_t curr_key_len, curr_val_len;
     uint32_t curr_flags;
+    char *curr_val;
     static char *kws[] = { "keys", "key_prefix", NULL };

-    if (PyArg_ParseTupleAndKeywords(args, kwds, "O|s#", kws,
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|s#", kws,
             &key_seq, &prefix, &prefix_len)) {
-        PyObject *key_it, *ckey;
-        Py_ssize_t i;
-
-        /* First clear any potential earlier mishap because we rely on it in
-         * our iteration over keys. */
-        PyErr_Clear();
+        return NULL;
+    }
+
+    if ((nkeys = PySequence_Length(key_seq)) == -1) {
+        return NULL;
+    }

     /* Populate keys and key_lens. */
-    nkeys = PySequence_Length(key_seq);
-    keys = malloc(sizeof(char *) * nkeys);
-    key_lens = malloc(sizeof(size_t) * nkeys);
-    key_objs = malloc(sizeof(PyObject *) * nkeys);
+    keys = PyMem_New(char *, nkeys);
+    key_lens = PyMem_New(size_t, nkeys);
+    key_objs = PyMem_New(PyObject *, nkeys);
     if (keys == NULL || key_lens == NULL || key_objs == NULL) {
-        free(keys);
-        free(key_lens);
-        free(key_objs);
+        PyMem_Free(keys);
+        PyMem_Free(key_lens);
+        PyMem_Free(key_objs);
         return PyErr_NoMemory();
     }

+    /* Clear potential previous exception, because we explicitly check for
+     * exceptions as a loop predicate. */
+    PyErr_Clear();
+
     /* Iterate through all keys and set lengths etc. */
     i = 0;
     key_it = PyObject_GetIter(key_seq);
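
The key-collection loop itself sits mostly between these hunks. The "exceptions
as a loop predicate" comment refers to an iteration pattern along the following
lines; this is a sketch under that assumption, not the exact pylibmc loop, and
collect_keys is a hypothetical helper name.

#include <Python.h>

/* Sketch of iterating a key sequence while using the absence of a pending
 * exception as part of the loop condition. The caller must supply a
 * key_objs buffer at least as long as the sequence and release the
 * collected references when done. */
static Py_ssize_t collect_keys(PyObject *key_seq, PyObject **key_objs)
{
    PyObject *key_it, *ckey;
    Py_ssize_t i = 0;

    key_it = PyObject_GetIter(key_seq);
    while (!PyErr_Occurred()
            && key_it != NULL
            && (ckey = PyIter_Next(key_it)) != NULL) {
        /* Keep a new reference for the caller. */
        key_objs[i++] = ckey;
    }
    Py_XDECREF(key_it);

    return PyErr_Occurred() ? -1 : i;
}

Checking PyErr_Occurred() in the condition is why the function clears any
pre-existing exception first: a stale exception would otherwise end the loop
before a single key was read.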
@@ -403,33 +410,32 @@ static PyObject *PylibMC_Client_get_multi(PylibMC_Client *self, PyObject *args,
             }
             keys[i] = PyString_AS_STRING(rkey);
             key_objs[i++] = rkey;
-            valid_keys_len++;
         }
     }
-    Py_DECREF(key_it);
+    Py_XDECREF(key_it);

-    if (PyErr_Occurred()) {
-        free(key_lens);
-        free(keys);
-        nkeys = i;
-        for (i = 0; i < nkeys; i++)
-            Py_DECREF(key_objs[i]);
-        free(key_objs);
-        return NULL;
+    if (i == 0) {
+        /* No usable keys to fetch. */
+        nkeys = 0;
+        goto cleanup;
+    } else if (PyErr_Occurred()) {
+        nkeys--;
+        goto cleanup;
     }

-    /* TODO Change this interface or at least provide an alternative that
-     * returns some kind of iterable which fetches keys sequentially
-     * instead of doing it all at once. The current way is grossly
-     * inefficient for larger datasets, as a dict has to be allocated that
-     * is large enough to hold all the data at once.
+    /* TODO Make an iterator interface for getting each key separately.
+     *
+     * This would help large retrievals, as a single dictionary containing all
+     * the data at once isn't needed. (Should probably look into if it's even
+     * worth it.)
      */
     retval = PyDict_New();

-    if(valid_keys_len > 0) {
     if ((rc = memcached_mget(self->mc, keys, key_lens, nkeys))
-            == MEMCACHED_SUCCESS) {
-        char *curr_val;
+            != MEMCACHED_SUCCESS) {
+        PylibMC_ErrFromMemcached(self, "memcached_mget", rc);
+        goto cleanup;
+    }

     while ((curr_val = memcached_fetch(
             self->mc, curr_key, &curr_key_len, &curr_val_len,
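
The fetch loop modified by the remaining hunks follows libmemcached's multi-get
protocol: memcached_mget() only queues the request, and memcached_fetch() is
then called repeatedly until it returns NULL with rc == MEMCACHED_END; each
returned value is malloc()'d by libmemcached and released with free(). A bare
sketch of that usage without the Python glue follows; the server address is
made up for the example, and exact prototypes vary slightly between
libmemcached versions.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <libmemcached/memcached.h>

int main(void)
{
    char *keys[] = { "spam", "eggs" };
    size_t key_lens[] = { 4, 4 };
    memcached_st *mc = memcached_create(NULL);

    memcached_server_add(mc, "127.0.0.1", 11211);

    if (memcached_mget(mc, keys, key_lens, 2) == MEMCACHED_SUCCESS) {
        char key[MEMCACHED_MAX_KEY];
        size_t key_len, val_len;
        uint32_t flags;
        memcached_return rc;
        char *val;

        /* memcached_fetch() yields one value per call until it returns
         * NULL with rc == MEMCACHED_END; the value buffer is malloc()'d
         * by libmemcached, so it is released with free(). */
        while ((val = memcached_fetch(mc, key, &key_len,
                                      &val_len, &flags, &rc)) != NULL) {
            printf("%.*s -> %zu bytes\n", (int)key_len, key, val_len);
            free(val);
        }
    }

    memcached_free(mc);
    return 0;
}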
@@ -441,9 +447,9 @@ static PyObject *PylibMC_Client_get_multi(PylibMC_Client *self, PyObject *args,
                 || rc == MEMCACHED_NO_KEY_PROVIDED) {
             /* Do nothing at all. :-) */
         } else if (rc != MEMCACHED_SUCCESS) {
-            Py_DECREF(retval);
-            retval = PylibMC_ErrFromMemcached(
-                self, "memcached_fetch", rc);
+            PylibMC_ErrFromMemcached(self, "memcached_fetch", rc);
+            memcached_quit(self->mc);
+            goto cleanup;
         } else {
             PyObject *val;
@@ -452,32 +458,37 @@ static PyObject *PylibMC_Client_get_multi(PylibMC_Client *self, PyObject *args,
             curr_key[curr_key_len] = 0;
             val = _PylibMC_parse_memcached_value(
                     curr_val, curr_val_len, curr_flags);
+            if (val == NULL) {
+                memcached_quit(self->mc);
+                goto cleanup;
+            }
             PyDict_SetItemString(retval, curr_key + prefix_len, val);
             Py_DECREF(val);
         }
+        /* Although Python prohibits you from using the libc memory allocation
+         * interface, we have to since libmemcached goes around doing
+         * malloc()... */
         free(curr_val);
     }

-        /* Need to cleanup. */
-        if (PyErr_Occurred()) {
-            /* Not checking rc because an exception already occured, and
-             * we wouldn't want to mask it. */
-            memcached_quit(self->mc);
-        }
-    } else {
-        retval = PylibMC_ErrFromMemcached(self, "memcached_mget", rc);
-    }
-    }

-    free(key_lens);
-    free(keys);
-    for (i = 0; i < nkeys; i++)
+    PyMem_Free(key_lens);
+    PyMem_Free(keys);
+    for (i = 0; i < nkeys; i++) {
         Py_DECREF(key_objs[i]);
-    free(key_objs);
+    }
+    PyMem_Free(key_objs);

     /* Not INCREFing because the only two outcomes are NULL and a new dict.
      * We're the owner of that dict already, so. */
     return retval;
+
+cleanup:
+    Py_XDECREF(retval);
+    PyMem_Free(key_lens);
+    PyMem_Free(keys);
+    for (i = 0; i < nkeys; i++)
+        Py_DECREF(key_objs[i]);
+    PyMem_Free(key_objs);
+    return NULL;
 }

 /**
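
For reference, PyMem_New(type, n), which the commit switches the scratch arrays
to, allocates n * sizeof(type) bytes from Python's allocator and pairs with
PyMem_Free(); PyMem_Free(NULL) is a no-op, which is what makes the "free
everything on partial allocation failure" branch above safe. A tiny
illustration of the pairing, with alloc_key_arrays as a hypothetical helper:

#include <Python.h>

/* Allocate two parallel arrays and release both on any failure;
 * PyMem_Free() tolerates NULL, so no per-pointer checks are needed. */
static int alloc_key_arrays(size_t nkeys)
{
    char **keys = PyMem_New(char *, nkeys);
    size_t *key_lens = PyMem_New(size_t, nkeys);

    if (keys == NULL || key_lens == NULL) {
        PyMem_Free(keys);
        PyMem_Free(key_lens);
        PyErr_NoMemory();
        return -1;
    }

    /* ... use the arrays ... */

    PyMem_Free(key_lens);
    PyMem_Free(keys);
    return 0;
}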