--- ext/nmatrix/storage/list/list.cpp (nmatrix-0.1.0.rc4, old)
+++ ext/nmatrix/storage/list/list.cpp (nmatrix-0.1.0.rc5, new)
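Every hunk below is the same mechanical change: the GC-registration calls
(nm_register_value, nm_unregister_value, nm_completely_unregister_value, and
their NM_CONSERVATIVE-wrapped forms) now take the address of a VALUE instead
of the VALUE itself, so x becomes &x and *p becomes &*p (which is just p).
A minimal sketch of the assumed signature change -- the real declarations
live in nmatrix's memory-management layer, not in this file:

    // Sketch only; assumed rc4 -> rc5 prototypes.
    void nm_register_value(VALUE val);     // rc4: registry stores a snapshot of the object;
    void nm_unregister_value(VALUE val);   //      reassigning the variable strands the entry
    void nm_register_value(VALUE* val);    // rc5: registry stores the location, so GC marking
    void nm_unregister_value(VALUE* val);  //      always sees the variable's current contents

Registering a location rather than a snapshot means a later assignment to the
registered variable is picked up automatically at mark time, and an entry can
be removed by address even after the value it guards has changed.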
@@ -85,15 +85,15 @@
actual_shape_ = actual->shape;
if (init_obj_ == Qnil) {
init_obj_ = s->dtype == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s->default_val) : rubyobj_from_cval(s->default_val, s->dtype).rval;
}
- nm_register_value(init_obj_);
+ nm_register_value(&init_obj_);
}
~RecurseData() {
- nm_unregister_value(init_obj_);
+ nm_unregister_value(&init_obj_);
nm_list_storage_unregister(ref);
nm_list_storage_unregister(actual);
}
dtype_t dtype() const { return ref->dtype; }
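The hunk above shows the RAII variant of the change: RecurseData pins
init_obj_ for the object's whole lifetime, so what gets registered is the
member's address, which stays stable until the destructor runs. Reduced to a
sketch (hypothetical type, assuming the rc5 prototypes above):

    #include <ruby.h>  // for VALUE

    // A member's address is valid from constructor to destructor, which is
    // exactly the window the registration needs to cover.
    struct PinnedValue {
      VALUE v;
      explicit PinnedValue(VALUE val) : v(val) { nm_register_value(&v); }
      ~PinnedValue()                            { nm_unregister_value(&v); }
    };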
@@ -196,18 +196,18 @@
else s_val = rubyobj_from_cval(curr->val, s.dtype()).rval;
if (rev) val = rb_yield_values(2, t_init, s_val);
else val = rb_yield_values(2, s_val, t_init);
- nm_register_value(val);
+ nm_register_value(&val);
if (rb_funcall(val, rb_intern("!="), 1, result.init_obj()) == Qtrue) {
xcurr = nm::list::insert_helper(x, xcurr, curr->key - offset, val);
temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));
- nm_register_value(*reinterpret_cast<VALUE*>(xcurr->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(xcurr->val));
}
- nm_unregister_value(val);
+ nm_unregister_value(&val);
curr = curr->next;
if (curr && curr->key - offset >= x_shape) curr = NULL;
}
__nm_list_storage_unregister_temp_value_list(temp_vals);
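This hunk also shows the pinning pattern repeated throughout the file: a
fresh VALUE produced by rb_yield_values is registered by address before
rb_funcall (which can allocate and therefore trigger GC) and released right
after. A hypothetical reduction of those call sites, again assuming the rc5
prototypes sketched at the top:

    // Hypothetical helper; mirrors the loop body above.
    static VALUE yield_and_keep_if_nondefault(VALUE s_val, VALUE t_init, VALUE init_obj) {
      VALUE val = rb_yield_values(2, s_val, t_init); // fresh object, unreachable from C structures
      nm_register_value(&val);                       // pin: the rb_funcall below may run the GC
      VALUE differs = rb_funcall(val, rb_intern("!="), 1, init_obj);
      nm_unregister_value(&val);                     // release the same address
      return differs == Qtrue ? val : Qnil;
    }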
@@ -270,11 +270,11 @@
lcurr = lcurr->next;
if (!rb_equal(val, result.init_obj())) {
xcurr = nm::list::insert_helper(x, xcurr, key, val);
temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));
- nm_register_value(*reinterpret_cast<VALUE*>(xcurr->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(xcurr->val));
}
if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
}
__nm_list_storage_unregister_temp_value_list(temp_vals);
@@ -367,19 +367,19 @@
key = lcurr->key - left.offset(rec);
lcurr = lcurr->next;
rcurr = rcurr->next;
}
- nm_register_value(val);
+ nm_register_value(&val);
if (rb_funcall(val, rb_intern("!="), 1, result.init_obj()) == Qtrue) {
xcurr = nm::list::insert_helper(x, xcurr, key, val);
temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));
- nm_register_value(*reinterpret_cast<VALUE*>(xcurr->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(xcurr->val));
}
- nm_unregister_value(val);
+ nm_unregister_value(&val);
if (rcurr && rcurr->key - right.offset(rec) >= result.ref_shape(rec)) rcurr = NULL;
if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
}
__nm_list_storage_unregister_temp_value_list(temp_vals);
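The three hunks above share a second idiom: each VALUE copied into the result
list is registered by the address of its slot in the list node, that address
is also pushed onto temp_vals, and the whole batch is released at once by
__nm_list_storage_unregister_temp_value_list (defined further down in this
diff). A hypothetical, self-contained version of the idiom:

    #include <list>
    #include <ruby.h>

    // Pin n heap slots while later work may allocate, then release them in one pass.
    static void pin_then_release(VALUE* slots, size_t n) {
      std::list<VALUE*> temp_vals;
      for (size_t i = 0; i < n; ++i) {
        temp_vals.push_front(&slots[i]);
        nm_register_value(&slots[i]);   // same address as the temp_vals entry
      }
      // ... work that may trigger GC happens here ...
      for (std::list<VALUE*>::iterator it = temp_vals.begin(); it != temp_vals.end(); ++it)
        nm_unregister_value(&**it);     // &**it == *it; kept to match the file's style
    }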
@@ -493,11 +493,11 @@
node = node->next ? node->next : NULL;
}
} else if (node->key > key) {
D* nv = NM_ALLOC(D); *nv = v[v_offset++];
if (dest->dtype == nm::RUBYOBJ) {
- nm_register_value(*reinterpret_cast<VALUE*>(nv));
+ nm_register_value(&*reinterpret_cast<VALUE*>(nv));
temp_vals.push_front(reinterpret_cast<VALUE*>(nv));
}
if (prev) node = insert_after(prev, key, nv);
else node = insert_first_node(l, key, nv, sizeof(D));
@@ -506,11 +506,11 @@
node = prev->next ? prev->next : NULL;
}
} else { // no node -- insert a new one
D* nv = NM_ALLOC(D); *nv = v[v_offset++];
if (dest->dtype == nm::RUBYOBJ) {
- nm_register_value(*reinterpret_cast<VALUE*>(nv));
+ nm_register_value(&*reinterpret_cast<VALUE*>(nv));
temp_vals.push_front(reinterpret_cast<VALUE*>(nv));
}
if (prev) node = insert_after(prev, key, nv);
else node = insert_first_node(l, key, nv, sizeof(D));
@@ -533,12 +533,12 @@
}
template <typename D>
void set(VALUE left, SLICE* slice, VALUE right) {
- NM_CONSERVATIVE(nm_register_value(left));
- NM_CONSERVATIVE(nm_register_value(right));
+ NM_CONSERVATIVE(nm_register_value(&left));
+ NM_CONSERVATIVE(nm_register_value(&right));
LIST_STORAGE* s = NM_STORAGE_LIST(left);
std::pair<NMATRIX*,bool> nm_and_free =
interpret_arg_as_dense_nmatrix(right, NM_DTYPE(left));
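The set() hunk above is the first appearance of NM_CONSERVATIVE in this diff.
The macro seems to mark registrations of VALUEs that already live on the C
stack (function arguments like left and right), which Ruby's conservative
stack scan normally protects anyway. A typical way such a macro might be
defined -- an assumption here, the real definition is elsewhere in nmatrix:

    // Hypothetical definition; the guard name is illustrative only.
    #ifdef NM_EXTRA_CONSERVATIVE          /* made-up flag */
      #define NM_CONSERVATIVE(statement) statement
    #else
      #define NM_CONSERVATIVE(statement)
    #endif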
@@ -588,12 +588,12 @@
}
} else {
NM_FREE(v);
nm_unregister_nmatrix(nm_and_free.first);
}
- NM_CONSERVATIVE(nm_unregister_value(left));
- NM_CONSERVATIVE(nm_unregister_value(right));
+ NM_CONSERVATIVE(nm_unregister_value(&left));
+ NM_CONSERVATIVE(nm_unregister_value(&right));
}
/*
* Used only to set a default initial value.
*/
@@ -691,37 +691,37 @@
}
}
static void __nm_list_storage_unregister_temp_value_list(std::list<VALUE*>& temp_vals) {
for (std::list<VALUE*>::iterator it = temp_vals.begin(); it != temp_vals.end(); ++it) {
- nm_unregister_value(**it);
+ nm_unregister_value(&**it);
}
}
static void __nm_list_storage_unregister_temp_list_list(std::list<LIST*>& temp_vals, size_t recursions) {
for (std::list<LIST*>::iterator it = temp_vals.begin(); it != temp_vals.end(); ++it) {
nm_list_storage_unregister_list(*it, recursions);
}
}
void nm_list_storage_register_node(const NODE* curr) {
- nm_register_value(*reinterpret_cast<VALUE*>(curr->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(curr->val));
}
void nm_list_storage_unregister_node(const NODE* curr) {
- nm_unregister_value(*reinterpret_cast<VALUE*>(curr->val));
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(curr->val));
}
/**
* Gets rid of all instances of a given node in the registration list.
* Sometimes a node will get deleted and replaced deep in a recursion, but
* further up it will still get registered. This leads to a potential read
* after free during the GC marking. This function completely clears out a
* node so that this won't happen.
*/
void nm_list_storage_completely_unregister_node(const NODE* curr) {
- nm_completely_unregister_value(*reinterpret_cast<VALUE*>(curr->val));
+ nm_completely_unregister_value(&*reinterpret_cast<VALUE*>(curr->val));
}
void nm_list_storage_register_list(const LIST* list, size_t recursions) {
NODE* next;
if (!list) return;
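The read-after-free comment above explains the "completely" variant: a node
can be registered more than once by nested recursions, deleted deep inside
one of them, and still have stale entries left behind when the stack unwinds.
A minimal sketch of a registry with those semantics -- not the actual nmatrix
implementation:

    #include <set>
    #include <ruby.h>

    // Duplicates allowed: one address may be registered at several recursion depths.
    static std::multiset<VALUE*> registry;

    void nm_register_value(VALUE* val)   { registry.insert(val); }
    void nm_unregister_value(VALUE* val) {
      std::multiset<VALUE*>::iterator it = registry.find(val);
      if (it != registry.end()) registry.erase(it);   // remove one registration
    }
    void nm_completely_unregister_value(VALUE* val) {
      registry.erase(val);                            // remove every registration of this address
    }

    // At mark time each entry is dereferenced, so reassigned variables are
    // marked correctly -- and a freed node must never remain in the registry.
    static void mark_registry() {
      for (std::multiset<VALUE*>::iterator it = registry.begin(); it != registry.end(); ++it)
        rb_gc_mark(**it);
    }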
@@ -755,19 +755,19 @@
}
void nm_list_storage_register(const STORAGE* s) {
const LIST_STORAGE* storage = reinterpret_cast<const LIST_STORAGE*>(s);
if (storage && storage->dtype == nm::RUBYOBJ) {
- nm_register_value(*reinterpret_cast<VALUE*>(storage->default_val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(storage->default_val));
nm_list_storage_register_list(storage->rows, storage->dim - 1);
}
}
void nm_list_storage_unregister(const STORAGE* s) {
const LIST_STORAGE* storage = reinterpret_cast<const LIST_STORAGE*>(s);
if (storage && storage->dtype == nm::RUBYOBJ) {
- nm_unregister_value(*reinterpret_cast<VALUE*>(storage->default_val));
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(storage->default_val));
nm_list_storage_unregister_list(storage->rows, storage->dim - 1);
}
}
///////////////
@@ -796,11 +796,11 @@
* Recursive helper function for each_with_indices, based on nm_list_storage_count_elements_r.
* Handles empty/non-existent sublists.
*/
static void each_empty_with_indices_r(nm::list_storage::RecurseData& s, size_t rec, VALUE& stack) {
VALUE empty = s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s.init()) : s.init_obj();
- NM_CONSERVATIVE(nm_register_value(stack));
+ NM_CONSERVATIVE(nm_register_value(&stack));
if (rec) {
for (unsigned long index = 0; index < s.ref_shape(rec); ++index) {
// Don't do an unshift/shift here -- we'll let that be handled in the lowest-level iteration (recursions == 0)
rb_ary_push(stack, LONG2NUM(index));
@@ -814,20 +814,20 @@
rb_yield_splat(stack);
rb_ary_pop(stack);
}
rb_ary_shift(stack);
}
- NM_CONSERVATIVE(nm_unregister_value(stack));
+ NM_CONSERVATIVE(nm_unregister_value(&stack));
}
/*
* Recursive helper function for each_with_indices, based on nm_list_storage_count_elements_r.
*/
static void each_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l, size_t rec, VALUE& stack) {
if (s.dtype() == nm::RUBYOBJ)
nm_list_storage_register_list(l, rec);
- NM_CONSERVATIVE(nm_register_value(stack));
+ NM_CONSERVATIVE(nm_register_value(&stack));
NODE* curr = l->first;
size_t offset = s.offset(rec);
size_t shape = s.ref_shape(rec);
@@ -863,11 +863,11 @@
rb_ary_shift(stack);
rb_ary_pop(stack);
}
}
- NM_CONSERVATIVE(nm_unregister_value(stack));
+ NM_CONSERVATIVE(nm_unregister_value(&stack));
if (s.dtype() == nm::RUBYOBJ)
nm_list_storage_unregister_list(l, rec);
}
@@ -875,11 +875,11 @@
* Recursive helper function for each_stored_with_indices, based on nm_list_storage_count_elements_r.
*/
static void each_stored_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l, size_t rec, VALUE& stack) {
if (s.dtype() == nm::RUBYOBJ)
nm_list_storage_register_list(l, rec);
- NM_CONSERVATIVE(nm_register_value(stack));
+ NM_CONSERVATIVE(nm_register_value(&stack));
NODE* curr = l->first;
size_t offset = s.offset(rec);
size_t shape = s.ref_shape(rec);
@@ -914,47 +914,47 @@
curr = curr->next;
if (curr && curr->key - offset >= shape) curr = NULL;
}
}
- NM_CONSERVATIVE(nm_unregister_value(stack));
+ NM_CONSERVATIVE(nm_unregister_value(&stack));
if (s.dtype() == nm::RUBYOBJ)
nm_list_storage_unregister_list(l, rec);
}
/*
* Each/each-stored iterator, brings along the indices.
*/
VALUE nm_list_each_with_indices(VALUE nmatrix, bool stored) {
- NM_CONSERVATIVE(nm_register_value(nmatrix));
+ NM_CONSERVATIVE(nm_register_value(&nmatrix));
// If we don't have a block, return an enumerator.
RETURN_SIZED_ENUMERATOR_PRE
- NM_CONSERVATIVE(nm_unregister_value(nmatrix));
+ NM_CONSERVATIVE(nm_unregister_value(&nmatrix));
RETURN_SIZED_ENUMERATOR(nmatrix, 0, 0, 0);
nm::list_storage::RecurseData sdata(NM_STORAGE_LIST(nmatrix));
VALUE stack = rb_ary_new();
if (stored) each_stored_with_indices_r(sdata, sdata.top_level_list(), sdata.dim() - 1, stack);
else each_with_indices_r(sdata, sdata.top_level_list(), sdata.dim() - 1, stack);
- NM_CONSERVATIVE(nm_unregister_value(nmatrix));
+ NM_CONSERVATIVE(nm_unregister_value(&nmatrix));
return nmatrix;
}
/*
* map merged stored iterator. Always returns a matrix containing RubyObjects
* which probably needs to be casted.
*/
VALUE nm_list_map_stored(VALUE left, VALUE init) {
- NM_CONSERVATIVE(nm_register_value(left));
- NM_CONSERVATIVE(nm_register_value(init));
+ NM_CONSERVATIVE(nm_register_value(&left));
+ NM_CONSERVATIVE(nm_register_value(&init));
LIST_STORAGE *s = NM_STORAGE_LIST(left);
// For each matrix, if it's a reference, we want to deal directly with the
// original (with appropriate offsetting)
@@ -963,49 +963,49 @@
//if (!rb_block_given_p()) {
// rb_raise(rb_eNotImpError, "RETURN_SIZED_ENUMERATOR probably won't work for a map_merged since no merged object is created");
//}
// If we don't have a block, return an enumerator.
RETURN_SIZED_ENUMERATOR_PRE
- NM_CONSERVATIVE(nm_unregister_value(left));
- NM_CONSERVATIVE(nm_unregister_value(init));
+ NM_CONSERVATIVE(nm_unregister_value(&left));
+ NM_CONSERVATIVE(nm_unregister_value(&init));
RETURN_SIZED_ENUMERATOR(left, 0, 0, 0); // FIXME: Test this. Probably won't work. Enable above code instead.
// Figure out default value if none provided by the user
if (init == Qnil) {
- nm_unregister_value(init);
+ nm_unregister_value(&init);
init = rb_yield_values(1, sdata.init_obj());
- nm_register_value(init);
+ nm_register_value(&init);
}
// Allocate a new shape array for the resulting matrix.
void* init_val = NM_ALLOC(VALUE);
memcpy(init_val, &init, sizeof(VALUE));
- nm_register_value(*reinterpret_cast<VALUE*>(init_val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(init_val));
NMATRIX* result = nm_create(nm::LIST_STORE, nm_list_storage_create(nm::RUBYOBJ, sdata.copy_alloc_shape(), s->dim, init_val));
LIST_STORAGE* r = reinterpret_cast<LIST_STORAGE*>(result->storage);
nm::list_storage::RecurseData rdata(r, init);
nm_register_nmatrix(result);
map_stored_r(rdata, sdata, rdata.top_level_list(), sdata.top_level_list(), sdata.dim() - 1);
VALUE to_return = Data_Wrap_Struct(CLASS_OF(left), nm_mark, nm_delete, result);
nm_unregister_nmatrix(result);
- nm_unregister_value(*reinterpret_cast<VALUE*>(init_val));
- NM_CONSERVATIVE(nm_unregister_value(init));
- NM_CONSERVATIVE(nm_unregister_value(left));
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(init_val));
+ NM_CONSERVATIVE(nm_unregister_value(&init));
+ NM_CONSERVATIVE(nm_unregister_value(&left));
return to_return;
}
/*
* map merged stored iterator. Always returns a matrix containing RubyObjects which probably needs to be casted.
*/
VALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init) {
- NM_CONSERVATIVE(nm_register_value(left));
- NM_CONSERVATIVE(nm_register_value(right));
- NM_CONSERVATIVE(nm_register_value(init));
+ NM_CONSERVATIVE(nm_register_value(&left));
+ NM_CONSERVATIVE(nm_register_value(&right));
+ NM_CONSERVATIVE(nm_register_value(&init));
bool scalar = false;
LIST_STORAGE *s = NM_STORAGE_LIST(left),
*t;
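A second detail in nm_list_map_stored (and nm_list_map_merged_stored below):
init is copied into NM_ALLOC'd memory before being handed to the new storage,
and the copy is registered on its own. Presumably that is because the copy
lives on the native heap, which Ruby never scans, while init itself sits on
the scanned C stack. A hypothetical reduction of the copy step:

    #include <cstdlib>
    #include <ruby.h>

    // Plain malloc stands in for NM_ALLOC in this sketch.
    static VALUE* heap_copy_and_pin(VALUE init) {
      VALUE* init_val = static_cast<VALUE*>(std::malloc(sizeof(VALUE)));
      *init_val = init;              // heap copy: invisible to Ruby's stack scan
      nm_register_value(init_val);   // so it needs its own registry entry
      return init_val;               // caller must nm_unregister_value(init_val) later
    }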
@@ -1029,27 +1029,27 @@
//if (!rb_block_given_p()) {
// rb_raise(rb_eNotImpError, "RETURN_SIZED_ENUMERATOR probably won't work for a map_merged since no merged object is created");
//}
// If we don't have a block, return an enumerator.
RETURN_SIZED_ENUMERATOR_PRE
- NM_CONSERVATIVE(nm_unregister_value(left));
- NM_CONSERVATIVE(nm_unregister_value(right));
- NM_CONSERVATIVE(nm_unregister_value(init));
+ NM_CONSERVATIVE(nm_unregister_value(&left));
+ NM_CONSERVATIVE(nm_unregister_value(&right));
+ NM_CONSERVATIVE(nm_unregister_value(&init));
RETURN_SIZED_ENUMERATOR(left, 0, 0, 0); // FIXME: Test this. Probably won't work. Enable above code instead.
// Figure out default value if none provided by the user
nm::list_storage::RecurseData& tdata = *(new nm::list_storage::RecurseData(t)); //FIXME: this is a hack to make sure that we can run the destructor before nm_list_storage_delete(t) below.
if (init == Qnil) {
- nm_unregister_value(init);
+ nm_unregister_value(&init);
init = rb_yield_values(2, sdata.init_obj(), tdata.init_obj());
- nm_register_value(init);
+ nm_register_value(&init);
}
// Allocate a new shape array for the resulting matrix.
void* init_val = NM_ALLOC(VALUE);
memcpy(init_val, &init, sizeof(VALUE));
- nm_register_value(*reinterpret_cast<VALUE*>(init_val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(init_val));
NMATRIX* result = nm_create(nm::LIST_STORE, nm_list_storage_create(nm::RUBYOBJ, sdata.copy_alloc_shape(), s->dim, init_val));
LIST_STORAGE* r = reinterpret_cast<LIST_STORAGE*>(result->storage);
nm::list_storage::RecurseData rdata(r, init);
map_merged_stored_r(rdata, sdata, tdata, rdata.top_level_list(), sdata.top_level_list(), tdata.top_level_list(), sdata.dim() - 1);
@@ -1058,15 +1058,15 @@
// If we are working with a scalar operation
if (scalar) nm_list_storage_delete(t);
VALUE to_return = Data_Wrap_Struct(CLASS_OF(left), nm_mark, nm_delete, result);
- nm_unregister_value(*reinterpret_cast<VALUE*>(init_val));
- NM_CONSERVATIVE(nm_unregister_value(init));
- NM_CONSERVATIVE(nm_unregister_value(right));
- NM_CONSERVATIVE(nm_unregister_value(left));
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(init_val));
+ NM_CONSERVATIVE(nm_unregister_value(&init));
+ NM_CONSERVATIVE(nm_unregister_value(&right));
+ NM_CONSERVATIVE(nm_unregister_value(&left));
return to_return;
}
@@ -1099,11 +1099,11 @@
}
nm::list::insert_copy(dst_rows, false, key, val, sizeof(LIST));
}
} else { // matches src->dim - n > 1
if (src->dtype == nm::RUBYOBJ) {
- nm_register_value(*reinterpret_cast<VALUE*>(src_node->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(src_node->val));
temp_vals.push_front(reinterpret_cast<VALUE*>(src_node->val));
}
nm::list::insert_copy(dst_rows, false, key, src_node->val, DTYPE_SIZES[src->dtype]);
}
}
@@ -1132,21 +1132,21 @@
return (n ? n->val : s->default_val);
} else {
void *init_val = NM_ALLOC_N(char, DTYPE_SIZES[s->dtype]);
memcpy(init_val, s->default_val, DTYPE_SIZES[s->dtype]);
if (s->dtype == nm::RUBYOBJ)
- nm_register_value(*reinterpret_cast<VALUE*>(init_val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(init_val));
size_t *shape = NM_ALLOC_N(size_t, s->dim);
memcpy(shape, slice->lengths, sizeof(size_t) * s->dim);
ns = nm_list_storage_create(s->dtype, shape, s->dim, init_val);
ns->rows = slice_copy(s, s->rows, slice->coords, slice->lengths, 0);
if (s->dtype == nm::RUBYOBJ) {
- nm_unregister_value(*reinterpret_cast<VALUE*>(init_val));
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(init_val));
}
nm_list_storage_unregister(s);
return ns;
@@ -1195,11 +1195,11 @@
* Recursive function, sets multiple values in a matrix from a single source value.
*/
static void slice_set_single(LIST_STORAGE* dest, LIST* l, void* val, size_t* coords, size_t* lengths, size_t n) {
nm_list_storage_register(dest);
if (dest->dtype == nm::RUBYOBJ) {
- nm_register_value(*reinterpret_cast<VALUE*>(val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(val));
nm_list_storage_register_list(l, dest->dim - n - 1);
}
// drill down into the structure
NODE* node = NULL;
@@ -1238,19 +1238,19 @@
} else {
node = nm::list::replace_insert_after(node, key, val, true, DTYPE_SIZES[dest->dtype]);
}
if (dest->dtype == nm::RUBYOBJ) {
temp_vals.push_front(reinterpret_cast<VALUE*>(node->val));
- nm_register_value(*reinterpret_cast<VALUE*>(node->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(node->val));
}
}
__nm_list_storage_unregister_temp_value_list(temp_vals);
}
nm_list_storage_unregister(dest);
if (dest->dtype == nm::RUBYOBJ) {
- nm_unregister_value(*reinterpret_cast<VALUE*>(val));
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(val));
nm_list_storage_unregister_list(l, dest->dim - n - 1);
}
}
@@ -1273,11 +1273,11 @@
*/
NODE* nm_list_storage_insert(STORAGE* storage, SLICE* slice, void* val) {
LIST_STORAGE* s = (LIST_STORAGE*)storage;
nm_list_storage_register(s);
if (s->dtype == nm::RUBYOBJ)
- nm_register_value(*reinterpret_cast<VALUE*>(val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(val));
// Pretend dims = 2
// Then coords is going to be size 2
// So we need to find out if some key already exists
size_t r;
NODE* n;
@@ -1289,11 +1289,11 @@
l = reinterpret_cast<LIST*>(n->val);
}
nm_list_storage_unregister(s);
if (s->dtype == nm::RUBYOBJ)
- nm_unregister_value(*reinterpret_cast<VALUE*>(val));
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(val));
return nm::list::insert(l, true, s->offset[r] + slice->coords[r], val);
}
/*
@@ -1618,11 +1618,11 @@
* __list_default_value__ -> ...
*
* Get the default_value property from a list matrix.
*/
VALUE nm_list_default_value(VALUE self) {
- NM_CONSERVATIVE(nm_register_value(self));
+ NM_CONSERVATIVE(nm_register_value(&self));
VALUE to_return = (NM_DTYPE(self) == nm::RUBYOBJ) ? *reinterpret_cast<VALUE*>(NM_DEFAULT_VAL(self)) : rubyobj_from_cval(NM_DEFAULT_VAL(self), NM_DTYPE(self)).rval;
- NM_CONSERVATIVE(nm_unregister_value(self));
+ NM_CONSERVATIVE(nm_unregister_value(&self));
return to_return;
}
} // end of extern "C" block