Feature #9614 » 0002-ihash-initial-implementation-method-table-conversion.patch

commit message here has more info - normalperson (Eric Wong), 03/09/2014 02:22 AM


class.c
#define id_attached id__attached__
static void
RCLASS_M_TBL_INIT(VALUE c)
{
struct method_table_wrapper *wrapper;
wrapper = ALLOC(struct method_table_wrapper);
wrapper->tbl = rb_ihash_new(&rb_ihash_method_entry, 0);
wrapper->serial = 0;
RCLASS_M_TBL_WRAPPER(c) = wrapper;
}
void
rb_class_subclass_add(VALUE super, VALUE klass)
{
......
}
}
static int
clone_method_i(st_data_t key, st_data_t value, st_data_t data)
static enum rb_ihash_next
clone_method_i(struct rb_ihash_node *node, void *arg)
{
clone_method((VALUE)data, (ID)key, (const rb_method_entry_t *)value);
return ST_CONTINUE;
const rb_method_entry_t *me = rb_method_entry_of(node);
clone_method((VALUE)arg, me->called_id, me);
return RB_IHASH_CONTINUE;
}
struct clone_const_arg {
......
rb_free_m_tbl_wrapper(RCLASS_M_TBL_WRAPPER(clone));
}
RCLASS_M_TBL_INIT(clone);
st_foreach(RCLASS_M_TBL(orig), clone_method_i, (st_data_t)clone);
rb_ihash_foreach(RCLASS_M_TBLP(orig), clone_method_i, (void *)clone);
}
return clone;
......
rb_singleton_class_attached(clone, attach);
}
RCLASS_M_TBL_INIT(clone);
st_foreach(RCLASS_M_TBL(klass), clone_method_i, (st_data_t)clone);
rb_ihash_foreach(RCLASS_M_TBLP(klass), clone_method_i, (void *)clone);
rb_singleton_class_attached(RBASIC(clone)->klass, clone);
FL_SET(clone, FL_SINGLETON);
......
rb_raise(rb_eArgError, "cyclic include detected");
}
static int
add_refined_method_entry_i(st_data_t key, st_data_t value, st_data_t data)
static enum rb_ihash_next
add_refined_method_entry_i(struct rb_ihash_node *node, void *arg)
{
rb_add_refined_method_entry((VALUE) data, (ID) key);
return ST_CONTINUE;
const rb_method_entry_t *me = rb_method_entry_of(node);
rb_add_refined_method_entry((VALUE) arg, me->called_id);
return RB_IHASH_CONTINUE;
}
static int
......
{
VALUE p, iclass;
int method_changed = 0, constant_changed = 0;
const st_table *const klass_m_tbl = RCLASS_M_TBL(RCLASS_ORIGIN(klass));
const struct rb_ihash_tbl *const klass_m_tbl = RCLASS_M_TBL(RCLASS_ORIGIN(klass));
while (module) {
int superclass_seen = FALSE;
......
VALUE refined_class =
rb_refinement_module_get_refined_class(klass);
st_foreach(RMODULE_M_TBL(module), add_refined_method_entry_i,
(st_data_t) refined_class);
rb_ihash_foreach(RMODULE_M_TBLP(module), add_refined_method_entry_i,
(void *)refined_class);
FL_SET(c, RMODULE_INCLUDED_INTO_REFINEMENT);
}
if (RMODULE_M_TBL(module) && RMODULE_M_TBL(module)->num_entries)
......
return method_changed;
}
static int
move_refined_method(st_data_t key, st_data_t value, st_data_t data)
static enum rb_ihash_next
move_refined_method(struct rb_ihash_node *node, void *arg)
{
rb_method_entry_t *me = (rb_method_entry_t *) value;
st_table *tbl = (st_table *) data;
rb_method_entry_t *me = rb_method_entry_of(node);
struct rb_ihash_tbl **tblp = arg;
if (me->def->type == VM_METHOD_TYPE_REFINED) {
if (me->def->body.orig_me) {
......
me->def->body.orig_me = NULL;
new_me = ALLOC(rb_method_entry_t);
*new_me = *me;
st_add_direct(tbl, key, (st_data_t) new_me);
orig_me->mtbl_node = me->mtbl_node; /* careful */
rb_ihash_add_direct(tblp, &new_me->mtbl_node);
*me = *orig_me;
xfree(orig_me);
return ST_CONTINUE;
return RB_IHASH_CONTINUE;
}
else {
st_add_direct(tbl, key, (st_data_t) me);
return ST_DELETE;
rb_ihash_add_direct(tblp, &me->mtbl_node);
return RB_IHASH_UNLINKED;
}
}
else {
return ST_CONTINUE;
return RB_IHASH_CONTINUE;
}
}
......
RCLASS_ORIGIN(klass) = origin;
RCLASS_M_TBL_WRAPPER(origin) = RCLASS_M_TBL_WRAPPER(klass);
RCLASS_M_TBL_INIT(klass);
st_foreach(RCLASS_M_TBL(origin), move_refined_method,
(st_data_t) RCLASS_M_TBL(klass));
rb_ihash_foreach(RCLASS_M_TBLP(origin), move_refined_method,
(void *)RCLASS_M_TBLP(klass));
}
changed = include_modules_at(klass, klass, module);
if (changed < 0)
......
return ins_methods_push((ID)name, (long)type, (VALUE)ary, NOEX_PUBLIC);
}
static int
method_entry_i(st_data_t key, st_data_t value, st_data_t data)
static enum rb_ihash_next
method_entry_i(struct rb_ihash_node *node, void *arg)
{
const rb_method_entry_t *me = (const rb_method_entry_t *)value;
st_table *list = (st_table *)data;
const rb_method_entry_t *me = rb_method_entry_of(node);
st_table *list = arg;
long type;
if (me && me->def->type == VM_METHOD_TYPE_REFINED) {
me = rb_resolve_refined_method(Qnil, me, NULL);
if (!me) return ST_CONTINUE;
if (!me) return RB_IHASH_CONTINUE;
}
if (!st_lookup(list, key, 0)) {
if (!st_lookup(list, me->called_id, 0)) {
if (UNDEFINED_METHOD_ENTRY_P(me)) {
type = -1; /* none */
}
else {
type = VISI(me->flag);
}
st_add_direct(list, key, type);
st_add_direct(list, me->called_id, type);
}
return ST_CONTINUE;
return RB_IHASH_CONTINUE;
}
static VALUE
......
list = st_init_numtable();
for (; mod; mod = RCLASS_SUPER(mod)) {
if (RCLASS_M_TBL(mod)) st_foreach(RCLASS_M_TBL(mod), method_entry_i, (st_data_t)list);
if (RCLASS_M_TBL(mod)) rb_ihash_foreach(RCLASS_M_TBLP(mod), method_entry_i, list);
if (BUILTIN_TYPE(mod) == T_ICLASS && !prepended) continue;
if (obj && FL_TEST(mod, FL_SINGLETON)) continue;
if (!recur) break;
......
rb_obj_singleton_methods(int argc, VALUE *argv, VALUE obj)
{
VALUE recur, ary, klass, origin;
st_table *list, *mtbl;
st_table *list;
if (argc == 0) {
recur = Qtrue;
......
origin = RCLASS_ORIGIN(klass);
list = st_init_numtable();
if (klass && FL_TEST(klass, FL_SINGLETON)) {
if ((mtbl = RCLASS_M_TBL(origin)) != 0)
st_foreach(mtbl, method_entry_i, (st_data_t)list);
if (RCLASS_M_TBL(origin))
rb_ihash_foreach(RCLASS_M_TBLP(origin), method_entry_i, list);
klass = RCLASS_SUPER(klass);
}
if (RTEST(recur)) {
while (klass && (FL_TEST(klass, FL_SINGLETON) || RB_TYPE_P(klass, T_ICLASS))) {
if (klass != origin && (mtbl = RCLASS_M_TBL(klass)) != 0)
st_foreach(mtbl, method_entry_i, (st_data_t)list);
if (klass != origin && RCLASS_M_TBL(klass))
rb_ihash_foreach(RCLASS_M_TBLP(klass), method_entry_i, list);
klass = RCLASS_SUPER(klass);
}
}
common.mk
enumerator.$(OBJEXT) \
error.$(OBJEXT) \
eval.$(OBJEXT) \
ihash.$(OBJEXT) \
load.$(OBJEXT) \
proc.$(OBJEXT) \
file.$(OBJEXT) \
......
PROBES_H_INCLUDES = {$(VPATH)}probes.h
VM_CORE_H_INCLUDES = {$(VPATH)}vm_core.h {$(VPATH)}thread_$(THREAD_MODEL).h \
{$(VPATH)}node.h {$(VPATH)}method.h {$(VPATH)}ruby_atomic.h \
{$(VPATH)}vm_debug.h {$(VPATH)}id.h {$(VPATH)}thread_native.h
{$(VPATH)}vm_debug.h {$(VPATH)}id.h {$(VPATH)}thread_native.h \
{$(VPATH)}ihash.h
###
......
$(RUBY_H_INCLUDES) $(VM_CORE_H_INCLUDES) {$(VPATH)}eval_error.c \
{$(VPATH)}eval_jump.c {$(VPATH)}gc.h {$(VPATH)}iseq.h \
$(ENCODING_H_INCLUDES) {$(VPATH)}internal.h $(PROBES_H_INCLUDES) {$(VPATH)}vm_opts.h {$(VPATH)}probes_helper.h
ihash.$(OBJEXT): {$(VPATH)}ihash.c $(RUBY_H_INCLUDES) {$(VPATH)}ihash.h
load.$(OBJEXT): {$(VPATH)}load.c {$(VPATH)}eval_intern.h \
{$(VPATH)}util.h $(RUBY_H_INCLUDES) $(VM_CORE_H_INCLUDES) \
{$(VPATH)}dln.h {$(VPATH)}internal.h $(PROBES_H_INCLUDES) {$(VPATH)}vm_opts.h
gc.c
return FALSE;
}
static int
free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
static enum rb_ihash_next
free_method_entry_i(struct rb_ihash_node *node, void *unused)
{
rb_method_entry_t *me = rb_method_entry_of(node);
if (!me->mark) {
rb_free_method_entry(me);
}
return ST_CONTINUE;
return RB_IHASH_UNLINKED;
}
void
rb_free_m_tbl(st_table *tbl)
rb_free_m_tbl(struct rb_ihash_tbl **tblp)
{
st_foreach(tbl, free_method_entry_i, 0);
st_free_table(tbl);
rb_ihash_foreach(tblp, free_method_entry_i, 0);
rb_ihash_free(*tblp);
}
void
rb_free_m_tbl_wrapper(struct method_table_wrapper *wrapper)
{
if (wrapper->tbl) {
rb_free_m_tbl(wrapper->tbl);
rb_free_m_tbl(&wrapper->tbl);
}
xfree(wrapper);
}
......
size += sizeof(struct method_table_wrapper);
}
if (RCLASS_M_TBL(obj)) {
size += st_memsize(RCLASS_M_TBL(obj));
size += rb_ihash_memsize(RCLASS_M_TBL(obj));
}
if (RCLASS_EXT(obj)) {
if (RCLASS_IV_TBL(obj)) {
......
mark_method_entry(&rb_objspace, me);
}
static int
mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
static enum rb_ihash_next
mark_method_entry_i(struct rb_ihash_node *node, void *mta)
{
struct mark_tbl_arg *arg = (void*)data;
const rb_method_entry_t *me = rb_method_entry_of(node);
struct mark_tbl_arg *arg = mta;
mark_method_entry(arg->objspace, me);
return ST_CONTINUE;
}
......
wrapper->serial = serial;
}
arg.objspace = objspace;
st_foreach(wrapper->tbl, mark_method_entry_i, (st_data_t)&arg);
rb_ihash_foreach(&wrapper->tbl, mark_method_entry_i, &arg);
}
static int
hash.c
#include "internal.h"
#include <errno.h>
#include "probes.h"
#include "ihash.h"
#ifdef __APPLE__
# ifdef HAVE_CRT_EXTERNS_H
......
const VALUE base = rb_cHash;
VALUE c = klass;
while (c != base) {
st_table *mtbl = RCLASS_M_TBL(c);
struct rb_ihash_tbl *mtbl = RCLASS_M_TBL(c);
if (mtbl && mtbl->num_entries) return klass;
c = RCLASS_SUPER(c);
}
ihash.c
#include "ihash.h"
#include "internal.h"
/* accounts for the compiler-agnostic flexible array byte */
#define SIZEOF_IHASH_TBL (sizeof(struct rb_ihash_tbl) - 1)
static inline size_t
rb_ihash_nbins(const struct rb_ihash_tbl *tbl)
{
return tbl->hash_mask + 1;
}
static inline size_t
rb_ihash_binpos(const struct rb_ihash_tbl *tbl, st_index_t hash)
{
return ((size_t)hash & (size_t)tbl->hash_mask);
}
static inline struct rb_ihash_node **
rb_ihash_bins(const struct rb_ihash_tbl *tbl)
{
return (struct rb_ihash_node **)tbl->flexible_array;
}
size_t
rb_ihash_memsize(const struct rb_ihash_tbl *tbl)
{
size_t nbins = rb_ihash_nbins(tbl);
size_t n = SIZEOF_IHASH_TBL + nbins * sizeof(struct rb_ihash_node *);
return n + sizeof(struct rb_ihash_node) * tbl->num_entries;
}
static struct rb_ihash_tbl *
rb_ihash_alloc(const struct rb_ihash_type *type, size_t nbins)
{
size_t nbytes = SIZEOF_IHASH_TBL + nbins * sizeof(struct rb_ihash_node *);
struct rb_ihash_tbl *tbl = xcalloc(1, nbytes);
tbl->type = type;
tbl->hash_mask = nbins - 1;
if ((size_t)tbl->hash_mask != (nbins - 1)) {
rb_bug("ihash table too big");
}
return tbl;
}
struct rb_ihash_tbl *
rb_ihash_new(const struct rb_ihash_type *type, size_t power)
{
return rb_ihash_alloc(type, 1 << power);
}
int
rb_ihash_foreach(struct rb_ihash_tbl **tblp, rb_ihash_iterate fn, void *arg)
{
struct rb_ihash_tbl *tbl = *tblp;
size_t nbins = rb_ihash_nbins(tbl);
struct rb_ihash_node **bins = rb_ihash_bins(tbl);
size_t i;
for (i = 0; i < nbins; i++) {
struct rb_ihash_node *cur = bins[i];
struct rb_ihash_node *last = 0;
while (cur) {
struct rb_ihash_node *next = cur->ihash_next;
struct rb_ihash_node *tmp;
switch (fn(cur, arg)) {
case RB_IHASH_UNLINKED:
if (last) last->ihash_next = next;
else bins[i] = next;
cur = next;
tbl->num_entries--;
break;
case RB_IHASH_CHECK:
/* check if hash is modified during iteration */
tmp = 0;
tbl = *tblp;
nbins = rb_ihash_nbins(tbl);
bins = rb_ihash_bins(tbl);
if (i < nbins) {
for (tmp = bins[i]; tmp; tmp = tmp->ihash_next) {
if (tmp == cur) break;
}
}
if (!tmp) return 1;
last = cur;
cur = cur->ihash_next;
break;
case RB_IHASH_CONTINUE:
last = cur;
cur = next;
break;
case RB_IHASH_STOP:
return 0;
}
}
}
return 0;
}
struct rb_ihash_node *
rb_ihash_lookup(const struct rb_ihash_tbl *tbl, const struct rb_ihash_node *key)
{
st_index_t hval = tbl->type->hash(key);
struct rb_ihash_node **bins = rb_ihash_bins(tbl);
size_t pos = rb_ihash_binpos(tbl, hval);
struct rb_ihash_node *cur = bins[pos];
for (; cur; cur = cur->ihash_next) {
if (tbl->type->cmp(key, cur) == 0) {
return cur;
}
}
return 0;
}
static inline void
rb_ihash_add_pos(struct rb_ihash_tbl *tbl,
struct rb_ihash_node *ins, size_t pos)
{
struct rb_ihash_node **bins = rb_ihash_bins(tbl);
ins->ihash_next = bins[pos];
bins[pos] = ins;
}
static enum rb_ihash_next
relink_i(struct rb_ihash_node *node, void *ptr)
{
struct rb_ihash_tbl *new_tbl = ptr;
st_index_t hval = new_tbl->type->hash(node);
size_t pos = rb_ihash_binpos(new_tbl, hval);
rb_ihash_add_pos(new_tbl, node, pos);
return RB_IHASH_UNLINKED;
}
static struct rb_ihash_tbl *
rb_ihash_realloc(struct rb_ihash_tbl **oldp, size_t nbins)
{
struct rb_ihash_tbl *old_tbl = *oldp;
struct rb_ihash_tbl *new_tbl = rb_ihash_alloc(old_tbl->type, nbins);
rb_ihash_foreach(oldp, relink_i, new_tbl);
rb_ihash_free(old_tbl);
return new_tbl;
}
static void
rb_ihash_added(struct rb_ihash_tbl **tblp)
{
struct rb_ihash_tbl *tbl = *tblp;
size_t nbins = rb_ihash_nbins(tbl);
static const size_t max_density = 6;
tbl->num_entries++;
if (tbl->num_entries > (max_density * nbins)) {
*tblp = rb_ihash_realloc(tblp, nbins << 1);
}
}
void
rb_ihash_add_direct(struct rb_ihash_tbl **tblp, struct rb_ihash_node *ins)
{
struct rb_ihash_tbl *tbl = *tblp;
st_index_t hval = tbl->type->hash(ins);
size_t pos = rb_ihash_binpos(tbl, hval);
rb_ihash_add_pos(tbl, ins, pos);
rb_ihash_added(tblp);
}
/* returns pointer to replaced node, 0 if nothing was replaced */
struct rb_ihash_node *
rb_ihash_insert(struct rb_ihash_tbl **tblp, struct rb_ihash_node *ins)
{
struct rb_ihash_tbl *tbl = *tblp;
st_index_t hval = tbl->type->hash(ins);
struct rb_ihash_node **bins = rb_ihash_bins(tbl);
size_t i = rb_ihash_binpos(tbl, hval);
struct rb_ihash_node *last = NULL;
struct rb_ihash_node *cur = bins[i];
while (cur) {
if (tbl->type->cmp(ins, cur) == 0) {
/* replace and return existing entry */
ins->ihash_next = cur->ihash_next;
if (last) last->ihash_next = ins;
else bins[i] = ins;
return cur;
}
last = cur;
cur = cur->ihash_next;
}
rb_ihash_add_pos(tbl, ins, i);
rb_ihash_added(tblp);
return 0;
}
/* returns pointer to unlinked node, 0 if nothing was unlinked */
struct rb_ihash_node *
rb_ihash_unlink(struct rb_ihash_tbl *tbl, const struct rb_ihash_node *key)
{
st_index_t hval = tbl->type->hash(key);
struct rb_ihash_node **bins = rb_ihash_bins(tbl);
size_t i = rb_ihash_binpos(tbl, hval);
struct rb_ihash_node *last = 0;
struct rb_ihash_node *cur = bins[i];
while (cur) {
if (tbl->type->cmp(key, cur) == 0) {
if (last) last->ihash_next = cur->ihash_next;
else bins[i] = cur->ihash_next;
tbl->num_entries--;
return cur;
}
last = cur;
cur = cur->ihash_next;
}
return 0;
}
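
A note on the deletion contract visible in rb_ihash_foreach above: returning RB_IHASH_UNLINKED only detaches the node from its bin and decrements num_entries; the table never frees the containing struct. A minimal teardown sketch under that assumption (not part of the patch; my_entry, my_entry_of and my_tbl_destroy are illustrative names), using only the API declared in ihash.h:

#include "ihash.h"
#include "internal.h"   /* RB_CONTAINER_OF; xfree comes in via ruby.h */

/* hypothetical user struct with the intrusive node embedded */
struct my_entry {
    ID key;
    struct rb_ihash_node node;
};

static inline struct my_entry *
my_entry_of(const struct rb_ihash_node *n)
{
    return RB_CONTAINER_OF(n, struct my_entry, node);
}

/* rb_ihash_foreach reads cur->ihash_next before invoking the callback,
 * so freeing the container here is safe */
static enum rb_ihash_next
my_entry_free_i(struct rb_ihash_node *n, void *unused)
{
    xfree(my_entry_of(n));
    return RB_IHASH_UNLINKED;   /* unlink; the xfree above was our job */
}

static void
my_tbl_destroy(struct rb_ihash_tbl **tblp)
{
    rb_ihash_foreach(tblp, my_entry_free_i, 0);
    rb_ihash_free(*tblp);       /* table is empty now, release its bins */
}
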
ihash.h
/*
* Hash table implementation for internal use inside Ruby.
*
* Notes:
*
* - Users are expected to embed the rb_ihash_node struct and st_data_t key
* inside a main struct stored by users. This reduces pointer chasing
* and allows cache-warming even on missed lookups.
* Wrap the RB_CONTAINER_OF macro in a static inline to get the
* stored struct pointer from an st_data_t-compatible key pointer.
*
* - does no allocation for entries; the user is responsible for it
*
* - this should be safe to use outside of GVL. Of course users must manage
* their own locking.
*/
#ifndef RUBY_IHASH_H
#define RUBY_IHASH_H 1
#if defined(__cplusplus)
extern "C" {
#if 0
} /* satisfy cc-mode */
#endif
#endif
#include "ruby/ruby.h"
#include "ruby/st.h" /* we use st_data_t and st_index_t data types */
/*
* each struct must have its own rb_ihash_node field inside it,
* use RB_CONTAINER_OF to get the original struct ptr
*/
struct rb_ihash_node;
struct rb_ihash_node {
struct rb_ihash_node *ihash_next;
};
enum rb_ihash_next {
RB_IHASH_CONTINUE = 0,
RB_IHASH_CHECK,
RB_IHASH_STOP,
RB_IHASH_UNLINKED /* user must deallocate this manually */
};
typedef int (*rb_ihash_compare)(const struct rb_ihash_node *,
const struct rb_ihash_node *);
typedef st_index_t (*rb_ihash_compute)(const struct rb_ihash_node *);
typedef enum rb_ihash_next (*rb_ihash_iterate)(struct rb_ihash_node *, void *);
struct rb_ihash_type {
rb_ihash_compare cmp;
rb_ihash_compute hash;
};
struct rb_ihash_tbl {
const struct rb_ihash_type *type;
uint32_t num_entries;
uint32_t hash_mask;
char flexible_array[1];
};
struct rb_ihash_tbl *rb_ihash_new(const struct rb_ihash_type *, size_t);
struct rb_ihash_node *
rb_ihash_lookup(const struct rb_ihash_tbl *, const struct rb_ihash_node *);
struct rb_ihash_node *
rb_ihash_insert(struct rb_ihash_tbl **, struct rb_ihash_node *);
void rb_ihash_add_direct(struct rb_ihash_tbl **, struct rb_ihash_node *);
struct rb_ihash_node *
rb_ihash_unlink(struct rb_ihash_tbl *, const struct rb_ihash_node *);
int rb_ihash_foreach(struct rb_ihash_tbl **, rb_ihash_iterate, void *);
static inline void
rb_ihash_free(struct rb_ihash_tbl *tbl)
{
/*
* user must call rb_ihash_foreach with an iterator which frees
* and returns RB_IHASH_UNLINKED for each entry before calling this
*/
xfree(tbl);
}
size_t rb_ihash_memsize(const struct rb_ihash_tbl *);
#if defined(__cplusplus)
#if 0
{ /* satisfy cc-mode */
#endif
} /* extern "C" { */
#endif
#endif /* RUBY_IHASH_H */
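
To make the embedding rules from the header comment concrete, here is a minimal, hypothetical usage sketch built only on the API declared above (struct word_count and the word_count_* helpers are illustrative names, not part of the patch); it mirrors the finder-on-the-stack pattern vm_method.c uses for lookups:

#include "ihash.h"
#include "internal.h"   /* RB_CONTAINER_OF */

/* hypothetical entry keyed by an ID; key and node live inside the struct,
 * so no separate per-entry allocation is done by the table */
struct word_count {
    ID word;                    /* key */
    struct rb_ihash_node node;  /* embedded intrusive node */
    long count;
};

static inline struct word_count *
word_count_of(const struct rb_ihash_node *n)
{
    return RB_CONTAINER_OF(n, struct word_count, node);
}

static int
word_count_cmp(const struct rb_ihash_node *a, const struct rb_ihash_node *b)
{
    return word_count_of(a)->word != word_count_of(b)->word; /* 0 == match */
}

static st_index_t
word_count_hash(const struct rb_ihash_node *n)
{
    return (st_index_t)word_count_of(n)->word;
}

static const struct rb_ihash_type word_count_type = {
    word_count_cmp,
    word_count_hash
};

/* insert-or-bump; the "finder" only needs its key field set because
 * cmp and hash above never read anything else */
static void
word_count_bump(struct rb_ihash_tbl **tblp, ID word)
{
    struct word_count finder;
    struct rb_ihash_node *hit;

    finder.word = word;
    hit = rb_ihash_lookup(*tblp, &finder.node);
    if (hit) {
        word_count_of(hit)->count++;
    }
    else {
        struct word_count *wc = ALLOC(struct word_count);
        wc->word = word;
        wc->count = 1;
        /* known absent, so skip the duplicate scan rb_ihash_insert does */
        rb_ihash_add_direct(tblp, &wc->node);
    }
}

/* typical setup: struct rb_ihash_tbl *tbl = rb_ihash_new(&word_count_type, 0); */
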
include/ruby/ruby.h
#define RMODULE_IV_TBL(m) RCLASS_IV_TBL(m)
#define RMODULE_CONST_TBL(m) RCLASS_CONST_TBL(m)
#define RMODULE_M_TBL(m) RCLASS_M_TBL(m)
#define RMODULE_M_TBLP(m) RCLASS_M_TBLP(m)
#define RMODULE_SUPER(m) RCLASS_SUPER(m)
#define RMODULE_IS_OVERLAID FL_USER2
#define RMODULE_IS_REFINEMENT FL_USER3
internal.h
#define numberof(array) ((int)(sizeof(array) / sizeof((array)[0])))
#define RB_CONTAINER_OF(ptr,type,field) \
((type *)((uint8_t *)(ptr) - offsetof(type,field)))
#define STATIC_ASSERT(name, expr) typedef int static_assert_##name##_check[1 - 2*!(expr)]
#define GCC_VERSION_SINCE(major, minor, patchlevel) \
......
};
struct method_table_wrapper {
st_table *tbl;
struct rb_ihash_tbl *tbl;
size_t serial;
};
......
#define RCLASS_CONST_TBL(c) (RCLASS_EXT(c)->const_tbl)
#define RCLASS_M_TBL_WRAPPER(c) (RCLASS(c)->m_tbl_wrapper)
#define RCLASS_M_TBL(c) (RCLASS_M_TBL_WRAPPER(c) ? RCLASS_M_TBL_WRAPPER(c)->tbl : 0)
#define RCLASS_M_TBLP(c) (&RCLASS_M_TBL_WRAPPER(c)->tbl)
#define RCLASS_IV_INDEX_TBL(c) (RCLASS_EXT(c)->iv_index_tbl)
#define RCLASS_ORIGIN(c) (RCLASS_EXT(c)->origin)
#define RCLASS_REFINED_CLASS(c) (RCLASS_EXT(c)->refined_class)
#define RCLASS_SERIAL(c) (RCLASS_EXT(c)->class_serial)
static inline void
RCLASS_M_TBL_INIT(VALUE c)
{
struct method_table_wrapper *wrapper;
wrapper = ALLOC(struct method_table_wrapper);
wrapper->tbl = st_init_numtable();
wrapper->serial = 0;
RCLASS_M_TBL_WRAPPER(c) = wrapper;
}
#undef RCLASS_SUPER
static inline VALUE
RCLASS_SUPER(VALUE klass)
marshal.c
#include "ruby/util.h"
#include "ruby/encoding.h"
#include "internal.h"
#include "ihash.h"
#include <math.h>
#ifdef HAVE_FLOAT_H
method.h
#define METHOD_H
#include "internal.h"
#include "ihash.h"
#ifndef END_OF_ENUMERATION
# if defined(__GNUC__) &&! defined(__STRICT_ANSI__)
......
} rb_method_definition_t;
typedef struct rb_method_entry_struct {
ID called_id; /* key for ihash */
struct rb_ihash_node mtbl_node; /* TODO: union for unlinked entries */
rb_method_flag_t flag;
char mark;
rb_method_definition_t *def;
ID called_id;
VALUE klass; /* should be mark */
} rb_method_entry_t;
static inline rb_method_entry_t *
rb_method_entry_of(const struct rb_ihash_node *node)
{
return RB_CONTAINER_OF(node, rb_method_entry_t, mtbl_node);
}
struct unlinked_method_entry_list_entry {
struct unlinked_method_entry_list_entry *next;
rb_method_entry_t *me;
......
void rb_mark_method_entry(const rb_method_entry_t *me);
void rb_free_method_entry(rb_method_entry_t *me);
void rb_sweep_method_entry(void *vm);
void rb_free_m_tbl(st_table *tbl);
void rb_free_m_tbl(struct rb_ihash_tbl **);
void rb_free_m_tbl_wrapper(struct method_table_wrapper *wrapper);
extern struct rb_ihash_type rb_ihash_method_entry;
#endif /* METHOD_H */
vm.c
}
}
static int
check_redefined_method(st_data_t key, st_data_t value, st_data_t data)
static enum rb_ihash_next
check_redefined_method(struct rb_ihash_node *node, void *arg)
{
ID mid = (ID)key;
rb_method_entry_t *me = (rb_method_entry_t *)value;
VALUE klass = (VALUE)data;
rb_method_entry_t *me = rb_method_entry_of(node);
ID mid = me->called_id;
VALUE klass = (VALUE)arg;
rb_method_entry_t *newme = rb_method_entry(klass, mid, NULL);
if (newme != me)
rb_vm_check_redefinition_opt_method(me, me->klass);
return ST_CONTINUE;
return RB_IHASH_CONTINUE;
}
void
rb_vm_check_redefinition_by_prepend(VALUE klass)
{
if (!vm_redefinition_check_flag(klass)) return;
st_foreach(RCLASS_M_TBL(RCLASS_ORIGIN(klass)), check_redefined_method,
(st_data_t)klass);
rb_ihash_foreach(RCLASS_M_TBLP(RCLASS_ORIGIN(klass)), check_redefined_method,
(void *)klass);
}
static void
vm_insnhelper.c
const int max = (iseq->arg_rest == -1) ? m + opts + iseq->arg_post_len : UNLIMITED_ARGUMENTS;
const int orig_argc = ci->argc;
int argc = orig_argc;
VALUE *new_argv;
VALUE *argv = orig_argv;
VALUE keyword_hash = Qnil;
rb_num_t opt_pc = 0;
......
/* post arguments */
if (iseq->arg_post_len) {
if (!(orig_argc < iseq->arg_post_start)) {
VALUE *new_argv = ALLOCA_N(VALUE, argc);
new_argv = ALLOCA_N(VALUE, argc);
MEMCPY(new_argv, argv, VALUE, argc);
argv = new_argv;
}
vm_method.c
static int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
/*
* use small struct here to avoid errors from tiny stacks in fiber tests,
* TODO: unify this struct into rb_method_entry_t in the future,
* this struct only exists to minimize code churn from introducing ihash
* into method tables
*/
struct rb_method_entry_finder {
ID called_id;
union {
struct rb_ihash_node mtbl_node; /* lookup does not look into this */
struct rb_ihash_node *retval;
};
};
STATIC_ASSERT(method_entry_finder_offsets,
offsetof(struct rb_method_entry_finder, called_id) ==
offsetof(rb_method_entry_t, called_id) &&
offsetof(struct rb_method_entry_finder, mtbl_node) ==
offsetof(rb_method_entry_t, mtbl_node));
static inline rb_method_entry_t *
lookup_method_table(VALUE klass, ID id)
{
st_data_t body;
st_table *m_tbl = RCLASS_M_TBL(klass);
if (st_lookup(m_tbl, id, &body)) {
return (rb_method_entry_t *) body;
}
else {
return 0;
}
struct rb_method_entry_finder finder;
finder.called_id = id;
finder.retval = rb_ihash_lookup(RCLASS_M_TBL(klass), &finder.mtbl_node);
return finder.retval ? rb_method_entry_of(finder.retval) : 0;
}
static void
......
rb_method_definition_t *def, rb_method_flag_t noex,
VALUE defined_class)
{
rb_method_entry_t *me;
rb_method_entry_t *me, *old_me;
struct rb_ihash_node *old;
#if NOEX_NOREDEF
VALUE rklass;
#endif
st_table *mtbl;
st_data_t data;
int make_refined = 0;
if (NIL_P(klass)) {
......
rb_add_refined_method_entry(refined_class, mid);
}
if (type == VM_METHOD_TYPE_REFINED) {
rb_method_entry_t *old_me =
lookup_method_table(RCLASS_ORIGIN(klass), mid);
old_me = lookup_method_table(RCLASS_ORIGIN(klass), mid);
if (old_me) rb_vm_check_redefinition_opt_method(old_me, klass);
}
else {
klass = RCLASS_ORIGIN(klass);
}
mtbl = RCLASS_M_TBL(klass);
/* check re-definition */
if (st_lookup(mtbl, mid, &data)) {
rb_method_entry_t *old_me = (rb_method_entry_t *)data;
old_me = lookup_method_table(klass, mid);
if (old_me) {
rb_method_definition_t *old_def = old_me->def;
if (rb_method_definition_eq(old_def, def)) return old_me;
......
make_method_entry_refined(me);
}
st_insert(mtbl, mid, (st_data_t) me);
old = rb_ihash_insert(RCLASS_M_TBLP(klass), &me->mtbl_node);
if (old_me && (old != &old_me->mtbl_node)) {
rb_bug("rb_method_entry_make: ihash race between lookup and insert");
}
return me;
}
......
static void
remove_method(VALUE klass, ID mid)
{
st_data_t key, data;
rb_method_entry_t *me = 0;
rb_method_entry_t *me;
struct rb_ihash_node *node;
VALUE self = klass;
klass = RCLASS_ORIGIN(klass);
......
rb_warn("removing `%s' may cause serious problems", rb_id2name(mid));
}
if (!st_lookup(RCLASS_M_TBL(klass), mid, &data) ||
!(me = (rb_method_entry_t *)data) ||
(!me->def || me->def->type == VM_METHOD_TYPE_UNDEF)) {
me = lookup_method_table(klass, mid);
if (!me || !me->def || (me->def->type == VM_METHOD_TYPE_UNDEF)) {
rb_name_error(mid, "method `%s' not defined in %s",
rb_id2name(mid), rb_class2name(klass));
}
key = (st_data_t)mid;
st_delete(RCLASS_M_TBL(klass), &key, &data);
node = rb_ihash_unlink(RCLASS_M_TBL(klass), &me->mtbl_node);
if (node != &me->mtbl_node) {
rb_bug("remove_method: ihash race between lookup and unlink");
}
rb_vm_check_redefinition_opt_method(me, klass);
rb_clear_method_cache_by_class(klass);
......
REPLICATE_METHOD(rb_eException, idRespond_to_missing, NOEX_PUBLIC);
}
}
static int
rb_ihash_me_cmp(const struct rb_ihash_node *a, const struct rb_ihash_node *b)
{
rb_method_entry_t *me1 = rb_method_entry_of(a);
rb_method_entry_t *me2 = rb_method_entry_of(b);
return me1->called_id != me2->called_id;
}
static st_index_t
rb_ihash_me_hash(const struct rb_ihash_node *node)
{
rb_method_entry_t *me = rb_method_entry_of(node);
ID id = me->called_id;
/* TODO: tuning; try generating the hash from the str associated with the sym */
return (st_index_t)((id >> 3) ^ (id << 3));
}
struct rb_ihash_type rb_ihash_method_entry = {
rb_ihash_me_cmp,
rb_ihash_me_hash
};