Update 2025-04-24_11:44:19

commit e748c737f4 by oib, 2025-04-24 11:44:23 +02:00
3408 changed files with 717481 additions and 0 deletions

View File

@@ -0,0 +1,157 @@
#ifndef COBJECTS_CPP
#define COBJECTS_CPP
/*****************************************************************************
* C interface
*
* These are exported using the CObject API
*/
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wunused-function"
#endif
#include "greenlet_exceptions.hpp"
#include "greenlet_internal.hpp"
#include "greenlet_refs.hpp"
#include "TThreadStateDestroy.cpp"
#include "PyGreenlet.hpp"
using greenlet::PyErrOccurred;
using greenlet::Require;
extern "C" {
static PyGreenlet*
PyGreenlet_GetCurrent(void)
{
return GET_THREAD_STATE().state().get_current().relinquish_ownership();
}
static int
PyGreenlet_SetParent(PyGreenlet* g, PyGreenlet* nparent)
{
return green_setparent((PyGreenlet*)g, (PyObject*)nparent, NULL);
}
static PyGreenlet*
PyGreenlet_New(PyObject* run, PyGreenlet* parent)
{
using greenlet::refs::NewDictReference;
// In the past, we didn't use green_new and green_init, but that
// was a maintenance issue because we duplicated code. This way is
// much safer, but slightly slower. If that's a problem, we could
// refactor green_init to separate argument parsing from initialization.
OwnedGreenlet g = OwnedGreenlet::consuming(green_new(&PyGreenlet_Type, nullptr, nullptr));
if (!g) {
return NULL;
}
try {
NewDictReference kwargs;
if (run) {
kwargs.SetItem(mod_globs->str_run, run);
}
if (parent) {
kwargs.SetItem("parent", (PyObject*)parent);
}
Require(green_init(g.borrow(), mod_globs->empty_tuple, kwargs.borrow()));
}
catch (const PyErrOccurred&) {
return nullptr;
}
return g.relinquish_ownership();
}
static PyObject*
PyGreenlet_Switch(PyGreenlet* self, PyObject* args, PyObject* kwargs)
{
if (!PyGreenlet_Check(self)) {
PyErr_BadArgument();
return NULL;
}
if (args == NULL) {
args = mod_globs->empty_tuple;
}
if (kwargs == NULL || !PyDict_Check(kwargs)) {
kwargs = NULL;
}
return green_switch(self, args, kwargs);
}
static PyObject*
PyGreenlet_Throw(PyGreenlet* self, PyObject* typ, PyObject* val, PyObject* tb)
{
if (!PyGreenlet_Check(self)) {
PyErr_BadArgument();
return nullptr;
}
try {
PyErrPieces err_pieces(typ, val, tb);
return internal_green_throw(self, err_pieces).relinquish_ownership();
}
catch (const PyErrOccurred&) {
return nullptr;
}
}
static int
Extern_PyGreenlet_MAIN(PyGreenlet* self)
{
if (!PyGreenlet_Check(self)) {
PyErr_BadArgument();
return -1;
}
return self->pimpl->main();
}
static int
Extern_PyGreenlet_ACTIVE(PyGreenlet* self)
{
if (!PyGreenlet_Check(self)) {
PyErr_BadArgument();
return -1;
}
return self->pimpl->active();
}
static int
Extern_PyGreenlet_STARTED(PyGreenlet* self)
{
if (!PyGreenlet_Check(self)) {
PyErr_BadArgument();
return -1;
}
return self->pimpl->started();
}
static PyGreenlet*
Extern_PyGreenlet_GET_PARENT(PyGreenlet* self)
{
if (!PyGreenlet_Check(self)) {
PyErr_BadArgument();
return NULL;
}
// This can return NULL even if there is no exception
return self->pimpl->parent().acquire();
}
} // extern C.
/** End C API ****************************************************************/
#ifdef __clang__
# pragma clang diagnostic pop
#endif
#endif
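For orientation: the functions above are what other C extensions reach after ``PyGreenlet_Import()`` has loaded greenlet's C API capsule (declared in ``greenlet.h``). Below is a minimal, hedged consumer sketch with error handling trimmed; the helper name is hypothetical:

#include <Python.h>
#include "greenlet.h"   /* public C API: PyGreenlet_Import, PyGreenlet_New, ... */

/* Hypothetical helper: create a greenlet running `run_callable` and switch
   to it once. Sketch only; real code needs more error handling. */
static PyObject*
spawn_and_switch(PyObject* run_callable)
{
    PyGreenlet_Import();                  /* load the capsule exported by this module */
    if (PyErr_Occurred()) {
        return NULL;
    }
    PyGreenlet* g = PyGreenlet_New(run_callable, NULL);  /* NULL parent -> current greenlet */
    if (!g) {
        return NULL;
    }
    /* NULL args is treated as an empty tuple (see PyGreenlet_Switch above). */
    PyObject* result = PyGreenlet_Switch(g, NULL, NULL);
    Py_DECREF(g);
    return result;                        /* NULL with an exception set on failure */
}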

View File

@@ -0,0 +1,738 @@
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
#ifndef PYGREENLET_CPP
#define PYGREENLET_CPP
/*****************
The Python slot functions for TGreenlet.
*/
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "structmember.h" // PyMemberDef
#include "greenlet_internal.hpp"
#include "TThreadStateDestroy.cpp"
#include "TGreenlet.hpp"
// #include "TUserGreenlet.cpp"
// #include "TMainGreenlet.cpp"
// #include "TBrokenGreenlet.cpp"
#include "greenlet_refs.hpp"
#include "greenlet_slp_switch.hpp"
#include "greenlet_thread_support.hpp"
#include "TGreenlet.hpp"
#include "TGreenletGlobals.cpp"
#include "TThreadStateDestroy.cpp"
#include "PyGreenlet.hpp"
// #include "TGreenlet.cpp"
// #include "TExceptionState.cpp"
// #include "TPythonState.cpp"
// #include "TStackState.cpp"
using greenlet::LockGuard;
using greenlet::LockInitError;
using greenlet::PyErrOccurred;
using greenlet::Require;
using greenlet::g_handle_exit;
using greenlet::single_result;
using greenlet::Greenlet;
using greenlet::UserGreenlet;
using greenlet::MainGreenlet;
using greenlet::BrokenGreenlet;
using greenlet::ThreadState;
using greenlet::PythonState;
static PyGreenlet*
green_new(PyTypeObject* type, PyObject* UNUSED(args), PyObject* UNUSED(kwds))
{
PyGreenlet* o =
(PyGreenlet*)PyBaseObject_Type.tp_new(type, mod_globs->empty_tuple, mod_globs->empty_dict);
if (o) {
new UserGreenlet(o, GET_THREAD_STATE().state().borrow_current());
assert(Py_REFCNT(o) == 1);
}
return o;
}
// green_init is used in the tp_init slot. So it's important that
// it can be called directly from CPython. Thus, we don't use
// BorrowedGreenlet and BorrowedObject --- although in theory
// these should be binary layout compatible, that may not be
// guaranteed to be the case (32-bit linux ppc possibly).
static int
green_init(PyGreenlet* self, PyObject* args, PyObject* kwargs)
{
PyArgParseParam run;
PyArgParseParam nparent;
static const char* kwlist[] = {
"run",
"parent",
NULL
};
// recall: The O specifier does NOT increase the reference count.
if (!PyArg_ParseTupleAndKeywords(
args, kwargs, "|OO:green", (char**)kwlist, &run, &nparent)) {
return -1;
}
if (run) {
if (green_setrun(self, run, NULL)) {
return -1;
}
}
if (nparent && !nparent.is_None()) {
return green_setparent(self, nparent, NULL);
}
return 0;
}
static int
green_traverse(PyGreenlet* self, visitproc visit, void* arg)
{
// We must only visit referenced objects, i.e. only objects
// Py_INCREF'ed by this greenlet (directly or indirectly):
//
// - stack_prev is not visited: holds previous stack pointer, but it's not
// referenced
// - frames are not visited as we don't strongly reference them;
// alive greenlets are not garbage collected
// anyway. This can be a problem, however, if this greenlet is
// never allowed to finish, and is referenced from the frame: we
// have an uncollectible cycle in that case. Note that the
// frame object itself is also frequently not even tracked by the GC
// starting with Python 3.7 (frames are allocated by the
// interpreter untracked, and only become tracked when their
// evaluation is finished if they have a refcount > 1). All of
// this is to say that we should probably strongly reference
// the frame object. Doing so, while always allowing GC on a
// greenlet, solves several leaks for us.
Py_VISIT(self->dict);
if (!self->pimpl) {
// Hmm. I have seen this at interpreter shutdown time,
// I think. That's very odd because this doesn't go away until
// we're in ``green_dealloc()``, at which point we shouldn't be
// traversed anymore.
return 0;
}
return self->pimpl->tp_traverse(visit, arg);
}
static int
green_is_gc(PyObject* _self)
{
BorrowedGreenlet self(_self);
int result = 0;
/* Main greenlet can be garbage collected since it can only
become unreachable if the underlying thread exited.
Active greenlets --- including those that are suspended ---
cannot be garbage collected, however.
*/
if (self->main() || !self->active()) {
result = 1;
}
// The main greenlet pointer will eventually go away after the thread dies.
if (self->was_running_in_dead_thread()) {
// Our thread is dead! We can never run again. Might as well
// GC us. Note that if a tuple containing only us and other
// immutable objects had been scanned before this, when we
// would have returned 0, the tuple will take itself out of GC
// tracking and never be investigated again. So that could
// result in both us and the tuple leaking due to an
// unreachable/uncollectible reference. The same goes for
// dictionaries.
//
// It's not a great idea to be changing our GC state on the
// fly.
result = 1;
}
return result;
}
static int
green_clear(PyGreenlet* self)
{
/* Greenlet is only cleared if it is about to be collected.
Since active greenlets are not garbage collectable, we can
be sure that, even if they are deallocated during clear,
nothing they reference is in the GC's unreachable or finalizers lists,
so even if it switches we are relatively safe. */
// XXX: Are we responsible for clearing weakrefs here?
Py_CLEAR(self->dict);
return self->pimpl->tp_clear();
}
/**
* Returns 0 on failure (the object was resurrected) or 1 on success.
**/
static int
_green_dealloc_kill_started_non_main_greenlet(BorrowedGreenlet self)
{
/* Hacks hacks hacks copied from instance_dealloc() */
/* Temporarily resurrect the greenlet. */
assert(self.REFCNT() == 0);
Py_SET_REFCNT(self.borrow(), 1);
/* Save the current exception, if any. */
PyErrPieces saved_err;
try {
// BY THE TIME WE GET HERE, the state may actually be going
// away
// if we're shutting down the interpreter and freeing thread
// entries,
// this could result in freeing greenlets that were leaked. So
// we can't try to read the state.
self->deallocing_greenlet_in_thread(
self->thread_state()
? static_cast<ThreadState*>(GET_THREAD_STATE())
: nullptr);
}
catch (const PyErrOccurred&) {
PyErr_WriteUnraisable(self.borrow_o());
/* XXX what else should we do? */
}
/* Check for no resurrection must be done while we keep
* our internal reference, otherwise PyFile_WriteObject
* causes recursion if using Py_INCREF/Py_DECREF
*/
if (self.REFCNT() == 1 && self->active()) {
/* Not resurrected, but still not dead!
XXX what else should we do? we complain. */
PyObject* f = PySys_GetObject("stderr");
Py_INCREF(self.borrow_o()); /* leak! */
if (f != NULL) {
PyFile_WriteString("GreenletExit did not kill ", f);
PyFile_WriteObject(self.borrow_o(), f, 0);
PyFile_WriteString("\n", f);
}
}
/* Restore the saved exception. */
saved_err.PyErrRestore();
/* Undo the temporary resurrection; can't use DECREF here,
* it would cause a recursive call.
*/
assert(self.REFCNT() > 0);
Py_ssize_t refcnt = self.REFCNT() - 1;
Py_SET_REFCNT(self.borrow_o(), refcnt);
if (refcnt != 0) {
/* Resurrected! */
_Py_NewReference(self.borrow_o());
Py_SET_REFCNT(self.borrow_o(), refcnt);
/* Better to use tp_finalizer slot (PEP 442)
* and call ``PyObject_CallFinalizerFromDealloc``,
* but that's only supported in Python 3.4+; see
* Modules/_io/iobase.c for an example.
*
* The following approach is copied from iobase.c in CPython 2.7.
* (along with much of this function in general). Here's their
* comment:
*
* When called from a heap type's dealloc, the type will be
* decref'ed on return (see e.g. subtype_dealloc in typeobject.c). */
if (PyType_HasFeature(self.TYPE(), Py_TPFLAGS_HEAPTYPE)) {
Py_INCREF(self.TYPE());
}
PyObject_GC_Track((PyObject*)self);
_Py_DEC_REFTOTAL;
#ifdef COUNT_ALLOCS
--Py_TYPE(self)->tp_frees;
--Py_TYPE(self)->tp_allocs;
#endif /* COUNT_ALLOCS */
return 0;
}
return 1;
}
static void
green_dealloc(PyGreenlet* self)
{
PyObject_GC_UnTrack(self);
BorrowedGreenlet me(self);
if (me->active()
&& me->started()
&& !me->main()) {
if (!_green_dealloc_kill_started_non_main_greenlet(me)) {
return;
}
}
if (self->weakreflist != NULL) {
PyObject_ClearWeakRefs((PyObject*)self);
}
Py_CLEAR(self->dict);
if (self->pimpl) {
// Clear the pointer before deleting, in case deleting this (which
// frees some memory) somehow winds up calling back into us. That's
// usually a bug in our code.
Greenlet* p = self->pimpl;
self->pimpl = nullptr;
delete p;
}
// and finally we're done. self is now invalid.
Py_TYPE(self)->tp_free((PyObject*)self);
}
static OwnedObject
internal_green_throw(BorrowedGreenlet self, PyErrPieces& err_pieces)
{
PyObject* result = nullptr;
err_pieces.PyErrRestore();
assert(PyErr_Occurred());
if (self->started() && !self->active()) {
/* dead greenlet: turn GreenletExit into a regular return */
result = g_handle_exit(OwnedObject()).relinquish_ownership();
}
self->args() <<= result;
return single_result(self->g_switch());
}
PyDoc_STRVAR(
green_switch_doc,
"switch(*args, **kwargs)\n"
"\n"
"Switch execution to this greenlet.\n"
"\n"
"If this greenlet has never been run, then this greenlet\n"
"will be switched to using the body of ``self.run(*args, **kwargs)``.\n"
"\n"
"If the greenlet is active (has been run, but was switch()'ed\n"
"out before leaving its run function), then this greenlet will\n"
"be resumed and the return value to its switch call will be\n"
"None if no arguments are given, the given argument if one\n"
"argument is given, or the args tuple and keyword args dict if\n"
"multiple arguments are given.\n"
"\n"
"If the greenlet is dead, or is the current greenlet then this\n"
"function will simply return the arguments using the same rules as\n"
"above.\n");
static PyObject*
green_switch(PyGreenlet* self, PyObject* args, PyObject* kwargs)
{
using greenlet::SwitchingArgs;
SwitchingArgs switch_args(OwnedObject::owning(args), OwnedObject::owning(kwargs));
self->pimpl->may_switch_away();
self->pimpl->args() <<= switch_args;
// If we're switching out of a greenlet, and that switch is the
// last thing the greenlet does, the greenlet ought to be able to
// go ahead and die at that point. Currently, someone else must
// manually switch back to the greenlet so that we "fall off the
// end" and can perform cleanup. You'd think we'd be able to
// figure out that this is happening using the frame's ``f_lasti``
// member, which is supposed to be an index into
// ``frame->f_code->co_code``, the bytecode string. However, in
// recent interpreters, ``f_lasti`` tends not to be updated thanks
// to things like the PREDICT() macros in ceval.c. So it doesn't
// really work to do that in many cases. For example, the Python
// code:
// def run():
// greenlet.getcurrent().parent.switch()
// produces bytecode of len 16, with the actual call to switch()
// being at index 10 (in Python 3.10). However, the reported
// ``f_lasti`` we actually see is...5! (Which happens to be the
// second byte of the CALL_METHOD op for ``getcurrent()``).
try {
//OwnedObject result = single_result(self->pimpl->g_switch());
OwnedObject result(single_result(self->pimpl->g_switch()));
#ifndef NDEBUG
// Note that the current greenlet isn't necessarily self. If self
// finished, we went to one of its parents.
assert(!self->pimpl->args());
const BorrowedGreenlet& current = GET_THREAD_STATE().state().borrow_current();
// It's possible it's never been switched to.
assert(!current->args());
#endif
PyObject* p = result.relinquish_ownership();
if (!p && !PyErr_Occurred()) {
// This shouldn't be happening anymore, so the asserts
// are there for debug builds. Non-debug builds
// crash "gracefully" in this case, although there is an
// argument to be made for killing the process in all
// cases --- for this to be the case, our switches
// probably nested in an incorrect way, so the state is
// suspicious. Nothing should be corrupt though, just
// confused at the Python level. Letting this propagate is
// probably good enough.
assert(p || PyErr_Occurred());
throw PyErrOccurred(
mod_globs->PyExc_GreenletError,
"Greenlet.switch() returned NULL without an exception set."
);
}
return p;
}
catch(const PyErrOccurred&) {
return nullptr;
}
}
PyDoc_STRVAR(
green_throw_doc,
"Switches execution to this greenlet, but immediately raises the\n"
"given exception in this greenlet. If no argument is provided, the "
"exception\n"
"defaults to `greenlet.GreenletExit`. The normal exception\n"
"propagation rules apply, as described for `switch`. Note that calling "
"this\n"
"method is almost equivalent to the following::\n"
"\n"
" def raiser():\n"
" raise typ, val, tb\n"
" g_raiser = greenlet(raiser, parent=g)\n"
" g_raiser.switch()\n"
"\n"
"except that this trick does not work for the\n"
"`greenlet.GreenletExit` exception, which would not propagate\n"
"from ``g_raiser`` to ``g``.\n");
static PyObject*
green_throw(PyGreenlet* self, PyObject* args)
{
PyArgParseParam typ(mod_globs->PyExc_GreenletExit);
PyArgParseParam val;
PyArgParseParam tb;
if (!PyArg_ParseTuple(args, "|OOO:throw", &typ, &val, &tb)) {
return nullptr;
}
assert(typ.borrow() || val.borrow());
self->pimpl->may_switch_away();
try {
// Both normalizing the error and the actual throw_greenlet
// could throw PyErrOccurred.
PyErrPieces err_pieces(typ.borrow(), val.borrow(), tb.borrow());
return internal_green_throw(self, err_pieces).relinquish_ownership();
}
catch (const PyErrOccurred&) {
return nullptr;
}
}
static int
green_bool(PyGreenlet* self)
{
return self->pimpl->active();
}
/**
* CAUTION: Allocates memory, may run GC and arbitrary Python code.
*/
static PyObject*
green_getdict(PyGreenlet* self, void* UNUSED(context))
{
if (self->dict == NULL) {
self->dict = PyDict_New();
if (self->dict == NULL) {
return NULL;
}
}
Py_INCREF(self->dict);
return self->dict;
}
static int
green_setdict(PyGreenlet* self, PyObject* val, void* UNUSED(context))
{
PyObject* tmp;
if (val == NULL) {
PyErr_SetString(PyExc_TypeError, "__dict__ may not be deleted");
return -1;
}
if (!PyDict_Check(val)) {
PyErr_SetString(PyExc_TypeError, "__dict__ must be a dictionary");
return -1;
}
tmp = self->dict;
Py_INCREF(val);
self->dict = val;
Py_XDECREF(tmp);
return 0;
}
static bool
_green_not_dead(BorrowedGreenlet self)
{
// XXX: Where else should we do this?
// Probably on entry to most Python-facing functions?
if (self->was_running_in_dead_thread()) {
self->deactivate_and_free();
return false;
}
return self->active() || !self->started();
}
static PyObject*
green_getdead(PyGreenlet* self, void* UNUSED(context))
{
if (_green_not_dead(self)) {
Py_RETURN_FALSE;
}
else {
Py_RETURN_TRUE;
}
}
static PyObject*
green_get_stack_saved(PyGreenlet* self, void* UNUSED(context))
{
return PyLong_FromSsize_t(self->pimpl->stack_saved());
}
static PyObject*
green_getrun(PyGreenlet* self, void* UNUSED(context))
{
try {
OwnedObject result(BorrowedGreenlet(self)->run());
return result.relinquish_ownership();
}
catch(const PyErrOccurred&) {
return nullptr;
}
}
static int
green_setrun(PyGreenlet* self, PyObject* nrun, void* UNUSED(context))
{
try {
BorrowedGreenlet(self)->run(nrun);
return 0;
}
catch(const PyErrOccurred&) {
return -1;
}
}
static PyObject*
green_getparent(PyGreenlet* self, void* UNUSED(context))
{
return BorrowedGreenlet(self)->parent().acquire_or_None();
}
static int
green_setparent(PyGreenlet* self, PyObject* nparent, void* UNUSED(context))
{
try {
BorrowedGreenlet(self)->parent(nparent);
}
catch(const PyErrOccurred&) {
return -1;
}
return 0;
}
static PyObject*
green_getcontext(const PyGreenlet* self, void* UNUSED(context))
{
const Greenlet *const g = self->pimpl;
try {
OwnedObject result(g->context());
return result.relinquish_ownership();
}
catch(const PyErrOccurred&) {
return nullptr;
}
}
static int
green_setcontext(PyGreenlet* self, PyObject* nctx, void* UNUSED(context))
{
try {
BorrowedGreenlet(self)->context(nctx);
return 0;
}
catch(const PyErrOccurred&) {
return -1;
}
}
static PyObject*
green_getframe(PyGreenlet* self, void* UNUSED(context))
{
const PythonState::OwnedFrame& top_frame = BorrowedGreenlet(self)->top_frame();
return top_frame.acquire_or_None();
}
static PyObject*
green_getstate(PyGreenlet* self)
{
PyErr_Format(PyExc_TypeError,
"cannot serialize '%s' object",
Py_TYPE(self)->tp_name);
return nullptr;
}
static PyObject*
green_repr(PyGreenlet* _self)
{
BorrowedGreenlet self(_self);
/*
Return a string like
<greenlet.greenlet at 0xdeadbeef [current][active started]|dead main>
The handling of greenlets across threads is not super good.
We mostly use the internal definitions of these terms, but they
generally should make sense to users as well.
*/
PyObject* result;
int never_started = !self->started() && !self->active();
const char* const tp_name = Py_TYPE(self)->tp_name;
if (_green_not_dead(self)) {
/* XXX: The otid= is almost useless because you can't correlate it to
any thread identifier exposed to Python. We could use
PyThreadState_GET()->thread_id, but we'd need to save that in the
greenlet, or save the whole PyThreadState object itself.
As it stands, it's only useful for identifying greenlets from the same thread.
*/
const char* state_in_thread;
if (self->was_running_in_dead_thread()) {
// The thread it was running in is dead!
// This can happen, especially at interpreter shut down.
// It complicates debugging output because it may be
// impossible to access the current thread state at that
// time. Thus, don't access the current thread state.
state_in_thread = " (thread exited)";
}
else {
state_in_thread = GET_THREAD_STATE().state().is_current(self)
? " current"
: (self->started() ? " suspended" : "");
}
result = PyUnicode_FromFormat(
"<%s object at %p (otid=%p)%s%s%s%s>",
tp_name,
self.borrow_o(),
self->thread_state(),
state_in_thread,
self->active() ? " active" : "",
never_started ? " pending" : " started",
self->main() ? " main" : ""
);
}
else {
result = PyUnicode_FromFormat(
"<%s object at %p (otid=%p) %sdead>",
tp_name,
self.borrow_o(),
self->thread_state(),
self->was_running_in_dead_thread()
? "(thread exited) "
: ""
);
}
return result;
}
static PyMethodDef green_methods[] = {
{
.ml_name="switch",
.ml_meth=reinterpret_cast<PyCFunction>(green_switch),
.ml_flags=METH_VARARGS | METH_KEYWORDS,
.ml_doc=green_switch_doc
},
{.ml_name="throw", .ml_meth=(PyCFunction)green_throw, .ml_flags=METH_VARARGS, .ml_doc=green_throw_doc},
{.ml_name="__getstate__", .ml_meth=(PyCFunction)green_getstate, .ml_flags=METH_NOARGS, .ml_doc=NULL},
{.ml_name=NULL, .ml_meth=NULL} /* sentinel */
};
static PyGetSetDef green_getsets[] = {
/* name, getter, setter, doc, context pointer */
{.name="__dict__", .get=(getter)green_getdict, .set=(setter)green_setdict},
{.name="run", .get=(getter)green_getrun, .set=(setter)green_setrun},
{.name="parent", .get=(getter)green_getparent, .set=(setter)green_setparent},
{.name="gr_frame", .get=(getter)green_getframe },
{
.name="gr_context",
.get=(getter)green_getcontext,
.set=(setter)green_setcontext
},
{.name="dead", .get=(getter)green_getdead},
{.name="_stack_saved", .get=(getter)green_get_stack_saved},
{.name=NULL}
};
static PyMemberDef green_members[] = {
{.name=NULL}
};
static PyNumberMethods green_as_number = {
.nb_bool=(inquiry)green_bool,
};
PyTypeObject PyGreenlet_Type = {
.ob_base=PyVarObject_HEAD_INIT(NULL, 0)
.tp_name="greenlet.greenlet", /* tp_name */
.tp_basicsize=sizeof(PyGreenlet), /* tp_basicsize */
/* methods */
.tp_dealloc=(destructor)green_dealloc, /* tp_dealloc */
.tp_repr=(reprfunc)green_repr, /* tp_repr */
.tp_as_number=&green_as_number, /* tp_as_number */
.tp_flags=G_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
.tp_doc="greenlet(run=None, parent=None) -> greenlet\n\n"
"Creates a new greenlet object (without running it).\n\n"
" - *run* -- The callable to invoke.\n"
" - *parent* -- The parent greenlet. The default is the current "
"greenlet.", /* tp_doc */
.tp_traverse=(traverseproc)green_traverse, /* tp_traverse */
.tp_clear=(inquiry)green_clear, /* tp_clear */
.tp_weaklistoffset=offsetof(PyGreenlet, weakreflist), /* tp_weaklistoffset */
.tp_methods=green_methods, /* tp_methods */
.tp_members=green_members, /* tp_members */
.tp_getset=green_getsets, /* tp_getset */
.tp_dictoffset=offsetof(PyGreenlet, dict), /* tp_dictoffset */
.tp_init=(initproc)green_init, /* tp_init */
.tp_alloc=PyType_GenericAlloc, /* tp_alloc */
.tp_new=(newfunc)green_new, /* tp_new */
.tp_free=PyObject_GC_Del, /* tp_free */
.tp_is_gc=(inquiry)green_is_gc, /* tp_is_gc */
};
#endif
// Local Variables:
// flycheck-clang-include-path: ("/opt/local/Library/Frameworks/Python.framework/Versions/3.8/include/python3.8")
// End:

View File

@@ -0,0 +1,35 @@
#ifndef PYGREENLET_HPP
#define PYGREENLET_HPP
#include "greenlet.h"
#include "greenlet_compiler_compat.hpp"
#include "greenlet_refs.hpp"
using greenlet::refs::OwnedGreenlet;
using greenlet::refs::BorrowedGreenlet;
using greenlet::refs::BorrowedObject;
using greenlet::refs::OwnedObject;
using greenlet::refs::PyErrPieces;
// XXX: This doesn't really belong here; it's not a Python slot.
static OwnedObject internal_green_throw(BorrowedGreenlet self, PyErrPieces& err_pieces);
static PyGreenlet* green_new(PyTypeObject* type, PyObject* UNUSED(args), PyObject* UNUSED(kwds));
static int green_clear(PyGreenlet* self);
static int green_init(PyGreenlet* self, PyObject* args, PyObject* kwargs);
static int green_setparent(PyGreenlet* self, PyObject* nparent, void* UNUSED(context));
static int green_setrun(PyGreenlet* self, PyObject* nrun, void* UNUSED(context));
static int green_traverse(PyGreenlet* self, visitproc visit, void* arg);
static void green_dealloc(PyGreenlet* self);
static PyObject* green_getparent(PyGreenlet* self, void* UNUSED(context));
static int green_is_gc(PyObject* self);
static PyObject* green_getdead(PyGreenlet* self, void* UNUSED(context));
static PyObject* green_getrun(PyGreenlet* self, void* UNUSED(context));
static int green_setcontext(PyGreenlet* self, PyObject* nctx, void* UNUSED(context));
static PyObject* green_getframe(PyGreenlet* self, void* UNUSED(context));
static PyObject* green_repr(PyGreenlet* self);
#endif

View File

@@ -0,0 +1,147 @@
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
/**
Implementation of the Python slots for PyGreenletUnswitchable_Type
*/
#ifndef PY_GREENLET_UNSWITCHABLE_CPP
#define PY_GREENLET_UNSWITCHABLE_CPP
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "structmember.h" // PyMemberDef
#include "greenlet_internal.hpp"
// Code after this point can assume access to things declared in stdint.h,
// including the fixed-width types. This goes for the platform-specific switch functions
// as well.
#include "greenlet_refs.hpp"
#include "greenlet_slp_switch.hpp"
#include "greenlet_thread_support.hpp"
#include "TGreenlet.hpp"
#include "TGreenlet.cpp"
#include "TGreenletGlobals.cpp"
#include "TThreadStateDestroy.cpp"
using greenlet::LockGuard;
using greenlet::LockInitError;
using greenlet::PyErrOccurred;
using greenlet::Require;
using greenlet::g_handle_exit;
using greenlet::single_result;
using greenlet::Greenlet;
using greenlet::UserGreenlet;
using greenlet::MainGreenlet;
using greenlet::BrokenGreenlet;
using greenlet::ThreadState;
using greenlet::PythonState;
#include "PyGreenlet.hpp"
static PyGreenlet*
green_unswitchable_new(PyTypeObject* type, PyObject* UNUSED(args), PyObject* UNUSED(kwds))
{
PyGreenlet* o =
(PyGreenlet*)PyBaseObject_Type.tp_new(type, mod_globs->empty_tuple, mod_globs->empty_dict);
if (o) {
new BrokenGreenlet(o, GET_THREAD_STATE().state().borrow_current());
assert(Py_REFCNT(o) == 1);
}
return o;
}
static PyObject*
green_unswitchable_getforce(PyGreenlet* self, void* UNUSED(context))
{
BrokenGreenlet* broken = dynamic_cast<BrokenGreenlet*>(self->pimpl);
return PyBool_FromLong(broken->_force_switch_error);
}
static int
green_unswitchable_setforce(PyGreenlet* self, PyObject* nforce, void* UNUSED(context))
{
if (!nforce) {
PyErr_SetString(
PyExc_AttributeError,
"Cannot delete force_switch_error"
);
return -1;
}
BrokenGreenlet* broken = dynamic_cast<BrokenGreenlet*>(self->pimpl);
int is_true = PyObject_IsTrue(nforce);
if (is_true == -1) {
return -1;
}
broken->_force_switch_error = is_true;
return 0;
}
static PyObject*
green_unswitchable_getforceslp(PyGreenlet* self, void* UNUSED(context))
{
BrokenGreenlet* broken = dynamic_cast<BrokenGreenlet*>(self->pimpl);
return PyBool_FromLong(broken->_force_slp_switch_error);
}
static int
green_unswitchable_setforceslp(PyGreenlet* self, PyObject* nforce, void* UNUSED(context))
{
if (!nforce) {
PyErr_SetString(
PyExc_AttributeError,
"Cannot delete force_slp_switch_error"
);
return -1;
}
BrokenGreenlet* broken = dynamic_cast<BrokenGreenlet*>(self->pimpl);
int is_true = PyObject_IsTrue(nforce);
if (is_true == -1) {
return -1;
}
broken->_force_slp_switch_error = is_true;
return 0;
}
static PyGetSetDef green_unswitchable_getsets[] = {
/* name, getter, setter, doc, closure (context pointer) */
{
.name="force_switch_error",
.get=(getter)green_unswitchable_getforce,
.set=(setter)green_unswitchable_setforce,
.doc=NULL
},
{
.name="force_slp_switch_error",
.get=(getter)green_unswitchable_getforceslp,
.set=(setter)green_unswitchable_setforceslp,
.doc=nullptr
},
{.name=nullptr}
};
PyTypeObject PyGreenletUnswitchable_Type = {
.ob_base=PyVarObject_HEAD_INIT(NULL, 0)
.tp_name="greenlet._greenlet.UnswitchableGreenlet",
.tp_dealloc= (destructor)green_dealloc, /* tp_dealloc */
.tp_flags=G_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
.tp_doc="Undocumented internal class", /* tp_doc */
.tp_traverse=(traverseproc)green_traverse, /* tp_traverse */
.tp_clear=(inquiry)green_clear, /* tp_clear */
.tp_getset=green_unswitchable_getsets, /* tp_getset */
.tp_base=&PyGreenlet_Type, /* tp_base */
.tp_init=(initproc)green_init, /* tp_init */
.tp_alloc=PyType_GenericAlloc, /* tp_alloc */
.tp_new=(newfunc)green_unswitchable_new, /* tp_new */
.tp_free=PyObject_GC_Del, /* tp_free */
.tp_is_gc=(inquiry)green_is_gc, /* tp_is_gc */
};
#endif

View File

@@ -0,0 +1,292 @@
#ifndef PY_MODULE_CPP
#define PY_MODULE_CPP
#include "greenlet_internal.hpp"
#include "TGreenletGlobals.cpp"
#include "TMainGreenlet.cpp"
#include "TThreadStateDestroy.cpp"
using greenlet::LockGuard;
using greenlet::ThreadState;
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wunused-function"
# pragma clang diagnostic ignored "-Wunused-variable"
#endif
PyDoc_STRVAR(mod_getcurrent_doc,
"getcurrent() -> greenlet\n"
"\n"
"Returns the current greenlet (i.e. the one which called this "
"function).\n");
static PyObject*
mod_getcurrent(PyObject* UNUSED(module))
{
return GET_THREAD_STATE().state().get_current().relinquish_ownership_o();
}
PyDoc_STRVAR(mod_settrace_doc,
"settrace(callback) -> object\n"
"\n"
"Sets a new tracing function and returns the previous one.\n");
static PyObject*
mod_settrace(PyObject* UNUSED(module), PyObject* args)
{
PyArgParseParam tracefunc;
if (!PyArg_ParseTuple(args, "O", &tracefunc)) {
return NULL;
}
ThreadState& state = GET_THREAD_STATE();
OwnedObject previous = state.get_tracefunc();
if (!previous) {
previous = Py_None;
}
state.set_tracefunc(tracefunc);
return previous.relinquish_ownership();
}
PyDoc_STRVAR(mod_gettrace_doc,
"gettrace() -> object\n"
"\n"
"Returns the currently set tracing function, or None.\n");
static PyObject*
mod_gettrace(PyObject* UNUSED(module))
{
OwnedObject tracefunc = GET_THREAD_STATE().state().get_tracefunc();
if (!tracefunc) {
tracefunc = Py_None;
}
return tracefunc.relinquish_ownership();
}
PyDoc_STRVAR(mod_set_thread_local_doc,
"set_thread_local(key, value) -> None\n"
"\n"
"Set a value in the current thread-local dictionary. Debugging only.\n");
static PyObject*
mod_set_thread_local(PyObject* UNUSED(module), PyObject* args)
{
PyArgParseParam key;
PyArgParseParam value;
PyObject* result = NULL;
if (PyArg_UnpackTuple(args, "set_thread_local", 2, 2, &key, &value)) {
if(PyDict_SetItem(
PyThreadState_GetDict(), // borrow
key,
value) == 0 ) {
// success
Py_INCREF(Py_None);
result = Py_None;
}
}
return result;
}
PyDoc_STRVAR(mod_get_pending_cleanup_count_doc,
"get_pending_cleanup_count() -> Integer\n"
"\n"
"Get the number of greenlet cleanup operations pending. Testing only.\n");
static PyObject*
mod_get_pending_cleanup_count(PyObject* UNUSED(module))
{
LockGuard cleanup_lock(*mod_globs->thread_states_to_destroy_lock);
return PyLong_FromSize_t(mod_globs->thread_states_to_destroy.size());
}
PyDoc_STRVAR(mod_get_total_main_greenlets_doc,
"get_total_main_greenlets() -> Integer\n"
"\n"
"Quickly return the number of main greenlets that exist. Testing only.\n");
static PyObject*
mod_get_total_main_greenlets(PyObject* UNUSED(module))
{
return PyLong_FromSize_t(G_TOTAL_MAIN_GREENLETS);
}
PyDoc_STRVAR(mod_get_clocks_used_doing_optional_cleanup_doc,
"get_clocks_used_doing_optional_cleanup() -> Integer\n"
"\n"
"Get the number of clock ticks the program has used doing optional "
"greenlet cleanup.\n"
"Beginning in greenlet 2.0, greenlet tries to find and dispose of greenlets\n"
"that leaked after a thread exited. This requires invoking Python's garbage collector,\n"
"which may have a performance cost proportional to the number of live objects.\n"
"This function returns the amount of processor time\n"
"greenlet has used to do this. In programs that run with very large amounts of live\n"
"objects, this metric can be used to decide whether the cost of doing this cleanup\n"
"is worth the memory leak being corrected. If not, you can disable the cleanup\n"
"using ``enable_optional_cleanup(False)``.\n"
"The units are arbitrary and can only be compared to themselves (similarly to ``time.clock()``);\n"
"for example, to see how it scales with your heap. You can attempt to convert them into seconds\n"
"by dividing by the value of CLOCKS_PER_SEC."
"If cleanup has been disabled, returns None."
"\n"
"This is an implementation specific, provisional API. It may be changed or removed\n"
"in the future.\n"
".. versionadded:: 2.0"
);
static PyObject*
mod_get_clocks_used_doing_optional_cleanup(PyObject* UNUSED(module))
{
std::clock_t& clocks = ThreadState::clocks_used_doing_gc();
if (clocks == std::clock_t(-1)) {
Py_RETURN_NONE;
}
// This might not actually work on some implementations; clock_t
// is an opaque type.
return PyLong_FromSsize_t(clocks);
}
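To make the conversion mentioned in the docstring concrete, here is a short sketch; the helper name is hypothetical and not part of the module, and it assumes ``<ctime>`` is available:

// Sketch: convert the opaque clock value into approximate seconds by
// dividing by CLOCKS_PER_SEC, exactly as the docstring suggests.
static double
clocks_used_doing_optional_cleanup_seconds()
{
    const std::clock_t clocks = ThreadState::clocks_used_doing_gc();
    if (clocks == std::clock_t(-1)) {
        return 0.0;   // optional cleanup is disabled; no time attributed
    }
    return static_cast<double>(clocks) / CLOCKS_PER_SEC;
}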
PyDoc_STRVAR(mod_enable_optional_cleanup_doc,
"mod_enable_optional_cleanup(bool) -> None\n"
"\n"
"Enable or disable optional cleanup operations.\n"
"See ``get_clocks_used_doing_optional_cleanup()`` for details.\n"
);
static PyObject*
mod_enable_optional_cleanup(PyObject* UNUSED(module), PyObject* flag)
{
int is_true = PyObject_IsTrue(flag);
if (is_true == -1) {
return nullptr;
}
std::clock_t& clocks = ThreadState::clocks_used_doing_gc();
if (is_true) {
// If we already have a value, we don't want to lose it.
if (clocks == std::clock_t(-1)) {
clocks = 0;
}
}
else {
clocks = std::clock_t(-1);
}
Py_RETURN_NONE;
}
#if !GREENLET_PY313
PyDoc_STRVAR(mod_get_tstate_trash_delete_nesting_doc,
"get_tstate_trash_delete_nesting() -> Integer\n"
"\n"
"Return the 'trash can' nesting level. Testing only.\n");
static PyObject*
mod_get_tstate_trash_delete_nesting(PyObject* UNUSED(module))
{
PyThreadState* tstate = PyThreadState_GET();
#if GREENLET_PY312
return PyLong_FromLong(tstate->trash.delete_nesting);
#else
return PyLong_FromLong(tstate->trash_delete_nesting);
#endif
}
#endif
static PyMethodDef GreenMethods[] = {
{
.ml_name="getcurrent",
.ml_meth=(PyCFunction)mod_getcurrent,
.ml_flags=METH_NOARGS,
.ml_doc=mod_getcurrent_doc
},
{
.ml_name="settrace",
.ml_meth=(PyCFunction)mod_settrace,
.ml_flags=METH_VARARGS,
.ml_doc=mod_settrace_doc
},
{
.ml_name="gettrace",
.ml_meth=(PyCFunction)mod_gettrace,
.ml_flags=METH_NOARGS,
.ml_doc=mod_gettrace_doc
},
{
.ml_name="set_thread_local",
.ml_meth=(PyCFunction)mod_set_thread_local,
.ml_flags=METH_VARARGS,
.ml_doc=mod_set_thread_local_doc
},
{
.ml_name="get_pending_cleanup_count",
.ml_meth=(PyCFunction)mod_get_pending_cleanup_count,
.ml_flags=METH_NOARGS,
.ml_doc=mod_get_pending_cleanup_count_doc
},
{
.ml_name="get_total_main_greenlets",
.ml_meth=(PyCFunction)mod_get_total_main_greenlets,
.ml_flags=METH_NOARGS,
.ml_doc=mod_get_total_main_greenlets_doc
},
{
.ml_name="get_clocks_used_doing_optional_cleanup",
.ml_meth=(PyCFunction)mod_get_clocks_used_doing_optional_cleanup,
.ml_flags=METH_NOARGS,
.ml_doc=mod_get_clocks_used_doing_optional_cleanup_doc
},
{
.ml_name="enable_optional_cleanup",
.ml_meth=(PyCFunction)mod_enable_optional_cleanup,
.ml_flags=METH_O,
.ml_doc=mod_enable_optional_cleanup_doc
},
#if !GREENLET_PY313
{
.ml_name="get_tstate_trash_delete_nesting",
.ml_meth=(PyCFunction)mod_get_tstate_trash_delete_nesting,
.ml_flags=METH_NOARGS,
.ml_doc=mod_get_tstate_trash_delete_nesting_doc
},
#endif
{.ml_name=NULL, .ml_meth=NULL} /* Sentinel */
};
static const char* const copy_on_greentype[] = {
"getcurrent",
"error",
"GreenletExit",
"settrace",
"gettrace",
NULL
};
static struct PyModuleDef greenlet_module_def = {
.m_base=PyModuleDef_HEAD_INIT,
.m_name="greenlet._greenlet",
.m_doc=NULL,
.m_size=-1,
.m_methods=GreenMethods,
};
#endif
#ifdef __clang__
# pragma clang diagnostic pop
#elif defined(__GNUC__)
# pragma GCC diagnostic pop
#endif
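The ``greenlet_module_def`` above is consumed by the extension's init function, which is defined elsewhere in this commit. A hedged sketch of the conventional shape of that entry point (the real one also readies types, creates globals, and exports the C API capsule):

PyMODINIT_FUNC
PyInit__greenlet(void)
{
    /* m_size is -1, so a single (non-reinitializable) module object is created. */
    PyObject* module = PyModule_Create(&greenlet_module_def);
    if (!module) {
        return NULL;
    }
    /* ... add types, exceptions, module globals, and the C API capsule here ... */
    return module;
}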

View File

@@ -0,0 +1,45 @@
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
/**
* Implementation of greenlet::BrokenGreenlet.
*
* Format with:
* clang-format -i --style=file src/greenlet/greenlet.c
*
*
* Fix missing braces with:
* clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
*/
#include "TGreenlet.hpp"
namespace greenlet {
void* BrokenGreenlet::operator new(size_t UNUSED(count))
{
return allocator.allocate(1);
}
void BrokenGreenlet::operator delete(void* ptr)
{
return allocator.deallocate(static_cast<BrokenGreenlet*>(ptr),
1);
}
greenlet::PythonAllocator<greenlet::BrokenGreenlet> greenlet::BrokenGreenlet::allocator;
bool
BrokenGreenlet::force_slp_switch_error() const noexcept
{
return this->_force_slp_switch_error;
}
UserGreenlet::switchstack_result_t BrokenGreenlet::g_switchstack(void)
{
if (this->_force_switch_error) {
return switchstack_result_t(-1);
}
return UserGreenlet::g_switchstack();
}
}; //namespace greenlet
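The ``allocator`` used by ``BrokenGreenlet::operator new``/``operator delete`` comes from ``greenlet_allocator.hpp``, which is not shown here. As a rough idea of what such an allocator looks like, here is a minimal std::allocator-style sketch backed by CPython's object allocator; this is an illustration, not the actual header:

#include <Python.h>
#include <cstddef>
#include <new>

// Sketch of a std::allocator-compatible allocator that hands out memory from
// CPython's small-object allocator. Must only be used while holding the GIL.
template <class T>
struct SketchPythonAllocator {
    using value_type = T;
    T* allocate(std::size_t n)
    {
        void* p = PyObject_Malloc(n * sizeof(T));
        if (!p) {
            throw std::bad_alloc();
        }
        return static_cast<T*>(p);
    }
    void deallocate(T* p, std::size_t) noexcept
    {
        PyObject_Free(p);
    }
};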

View File

@@ -0,0 +1,62 @@
#ifndef GREENLET_EXCEPTION_STATE_CPP
#define GREENLET_EXCEPTION_STATE_CPP
#include <Python.h>
#include "TGreenlet.hpp"
namespace greenlet {
ExceptionState::ExceptionState()
{
this->clear();
}
void ExceptionState::operator<<(const PyThreadState *const tstate) noexcept
{
this->exc_info = tstate->exc_info;
this->exc_state = tstate->exc_state;
}
void ExceptionState::operator>>(PyThreadState *const tstate) noexcept
{
tstate->exc_state = this->exc_state;
tstate->exc_info =
this->exc_info ? this->exc_info : &tstate->exc_state;
this->clear();
}
void ExceptionState::clear() noexcept
{
this->exc_info = nullptr;
this->exc_state.exc_value = nullptr;
#if !GREENLET_PY311
this->exc_state.exc_type = nullptr;
this->exc_state.exc_traceback = nullptr;
#endif
this->exc_state.previous_item = nullptr;
}
int ExceptionState::tp_traverse(visitproc visit, void* arg) noexcept
{
Py_VISIT(this->exc_state.exc_value);
#if !GREENLET_PY311
Py_VISIT(this->exc_state.exc_type);
Py_VISIT(this->exc_state.exc_traceback);
#endif
return 0;
}
void ExceptionState::tp_clear() noexcept
{
Py_CLEAR(this->exc_state.exc_value);
#if !GREENLET_PY311
Py_CLEAR(this->exc_state.exc_type);
Py_CLEAR(this->exc_state.exc_traceback);
#endif
}
}; // namespace greenlet
#endif // GREENLET_EXCEPTION_STATE_CPP

View File

@@ -0,0 +1,718 @@
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
/**
* Implementation of greenlet::Greenlet.
*
* Format with:
* clang-format -i --style=file src/greenlet/greenlet.c
*
*
* Fix missing braces with:
* clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
*/
#ifndef TGREENLET_CPP
#define TGREENLET_CPP
#include "greenlet_internal.hpp"
#include "TGreenlet.hpp"
#include "TGreenletGlobals.cpp"
#include "TThreadStateDestroy.cpp"
namespace greenlet {
Greenlet::Greenlet(PyGreenlet* p)
: Greenlet(p, StackState())
{
}
Greenlet::Greenlet(PyGreenlet* p, const StackState& initial_stack)
: _self(p), stack_state(initial_stack)
{
assert(p->pimpl == nullptr);
p->pimpl = this;
}
Greenlet::~Greenlet()
{
// XXX: Can't do this. tp_clear is a virtual function, and by the
// time we're here, we've sliced off our child classes.
//this->tp_clear();
this->_self->pimpl = nullptr;
}
bool
Greenlet::force_slp_switch_error() const noexcept
{
return false;
}
void
Greenlet::release_args()
{
this->switch_args.CLEAR();
}
/**
* CAUTION: This will allocate memory and may trigger garbage
* collection and arbitrary Python code.
*/
OwnedObject
Greenlet::throw_GreenletExit_during_dealloc(const ThreadState& UNUSED(current_thread_state))
{
// If we're killed because we lost all references in the
// middle of a switch, that's ok. Don't reset the args/kwargs,
// we still want to pass them to the parent.
PyErr_SetString(mod_globs->PyExc_GreenletExit,
"Killing the greenlet because all references have vanished.");
// To get here it had to have run before
return this->g_switch();
}
inline void
Greenlet::slp_restore_state() noexcept
{
#ifdef SLP_BEFORE_RESTORE_STATE
SLP_BEFORE_RESTORE_STATE();
#endif
this->stack_state.copy_heap_to_stack(
this->thread_state()->borrow_current()->stack_state);
}
inline int
Greenlet::slp_save_state(char *const stackref) noexcept
{
// XXX: This used to happen in the middle, before saving, but
// after finding the next owner. Does that matter? This is
// only defined for Sparc/GCC where it flushes register
// windows to the stack (I think)
#ifdef SLP_BEFORE_SAVE_STATE
SLP_BEFORE_SAVE_STATE();
#endif
return this->stack_state.copy_stack_to_heap(stackref,
this->thread_state()->borrow_current()->stack_state);
}
/**
* CAUTION: This will allocate memory and may trigger garbage
* collection and arbitrary Python code.
*/
OwnedObject
Greenlet::on_switchstack_or_initialstub_failure(
Greenlet* target,
const Greenlet::switchstack_result_t& err,
const bool target_was_me,
const bool was_initial_stub)
{
// If we get here, either g_initialstub()
// failed, or g_switchstack() failed. Either one of those
// cases SHOULD leave us in the original greenlet with a valid stack.
if (!PyErr_Occurred()) {
PyErr_SetString(
PyExc_SystemError,
was_initial_stub
? "Failed to switch stacks into a greenlet for the first time."
: "Failed to switch stacks into a running greenlet.");
}
this->release_args();
if (target && !target_was_me) {
target->murder_in_place();
}
assert(!err.the_new_current_greenlet);
assert(!err.origin_greenlet);
return OwnedObject();
}
OwnedGreenlet
Greenlet::g_switchstack_success() noexcept
{
PyThreadState* tstate = PyThreadState_GET();
// restore the saved state
this->python_state >> tstate;
this->exception_state >> tstate;
// The thread state hasn't been changed yet.
ThreadState* thread_state = this->thread_state();
OwnedGreenlet result(thread_state->get_current());
thread_state->set_current(this->self());
//assert(thread_state->borrow_current().borrow() == this->_self);
return result;
}
Greenlet::switchstack_result_t
Greenlet::g_switchstack(void)
{
// if any of these assertions fail, it's likely because we
// switched away and tried to switch back to us. Early stages of
// switching are not reentrant because we re-use ``this->args()``.
// Switching away would happen if we trigger a garbage collection
// (by just using some Python APIs that happen to allocate Python
// objects) and some garbage had weakref callbacks or __del__ that
// switches (people don't write code like that by hand, but with
// gevent it's possible without realizing it)
assert(this->args() || PyErr_Occurred());
{ /* save state */
if (this->thread_state()->is_current(this->self())) {
// Hmm, nothing to do.
// TODO: Does this bypass trace events that are
// important?
return switchstack_result_t(0,
this, this->thread_state()->borrow_current());
}
BorrowedGreenlet current = this->thread_state()->borrow_current();
PyThreadState* tstate = PyThreadState_GET();
current->python_state << tstate;
current->exception_state << tstate;
this->python_state.will_switch_from(tstate);
switching_thread_state = this;
current->expose_frames();
}
assert(this->args() || PyErr_Occurred());
// If this is the first switch into a greenlet, this will
// return twice, once with 1 in the new greenlet, once with 0
// in the origin.
int err;
if (this->force_slp_switch_error()) {
err = -1;
}
else {
err = slp_switch();
}
if (err < 0) { /* error */
// Tested by
// test_greenlet.TestBrokenGreenlets.test_failed_to_slp_switch_into_running
//
// It's not clear if it's worth trying to clean up and
// continue here. Failing to switch stacks is a big deal which
// may not be recoverable (who knows what state the stack is in).
// Also, we've stolen references in preparation for calling
// ``g_switchstack_success()`` and we don't have a clean
// mechanism for backing that all out.
Py_FatalError("greenlet: Failed low-level slp_switch(). The stack is probably corrupt.");
}
// No stack-based variables are valid anymore.
// But the global is volatile so we can reload it without the
// compiler caching it from earlier.
Greenlet* greenlet_that_switched_in = switching_thread_state; // aka this
switching_thread_state = nullptr;
// If stack-based variables were still valid, we would assert:
// assert(this == greenlet_that_switched_in);
// switchstack success is where we restore the exception state,
// etc. It returns the origin greenlet because it's convenient.
OwnedGreenlet origin = greenlet_that_switched_in->g_switchstack_success();
assert(greenlet_that_switched_in->args() || PyErr_Occurred());
return switchstack_result_t(err, greenlet_that_switched_in, origin);
}
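The "returns twice" behavior noted in the comments above has the same control-flow shape as ``setjmp``/``longjmp``. The tiny, self-contained C program below illustrates that shape only; greenlet's real mechanism is the assembly-level ``slp_switch()`` plus stack slicing, not ``setjmp``:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf ctx;

int main(void)
{
    if (setjmp(ctx) == 0) {          /* first return: 0, like the origin side */
        puts("origin side");
        longjmp(ctx, 1);             /* "switch": setjmp returns a second time */
    }
    else {                           /* second return: nonzero, like the side switched into */
        puts("switched-in side");
    }
    return 0;
}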
inline void
Greenlet::check_switch_allowed() const
{
// TODO: Make this take a parameter of the current greenlet,
// or current main greenlet, to make the check for
// cross-thread switching cheaper. Surely somewhere up the
// call stack we've already accessed the thread local variable.
// We expect to always have a main greenlet now; accessing the thread state
// created it. However, if we get here and cleanup has already
// begun because we're a greenlet that was running in a
// (now dead) thread, these invariants will not hold true. In
// fact, accessing `this->thread_state` may not even be possible.
// If the thread this greenlet was running in is dead,
// we'll still have a reference to a main greenlet, but the
// thread state pointer we have is bogus.
// TODO: Give the objects an API to determine if they belong
// to a dead thread.
const BorrowedMainGreenlet main_greenlet = this->find_main_greenlet_in_lineage();
if (!main_greenlet) {
throw PyErrOccurred(mod_globs->PyExc_GreenletError,
"cannot switch to a garbage collected greenlet");
}
if (!main_greenlet->thread_state()) {
throw PyErrOccurred(mod_globs->PyExc_GreenletError,
"cannot switch to a different thread (which happens to have exited)");
}
// The main greenlet we found was from the .parent lineage.
// That may or may not have any relationship to the main
// greenlet of the running thread. We can't actually access
// our this->thread_state members to try to check that,
// because it could be in the process of getting destroyed,
// but setting the main_greenlet->thread_state member to NULL
// may not be visible yet. So we need to check against the
// current thread state (once the cheaper checks are out of
// the way)
const BorrowedMainGreenlet current_main_greenlet = GET_THREAD_STATE().state().borrow_main_greenlet();
if (
// lineage main greenlet is not this thread's greenlet
current_main_greenlet != main_greenlet
|| (
// attached to some thread
this->main_greenlet()
// XXX: Same condition as above. Was this supposed to be
// this->main_greenlet()?
&& current_main_greenlet != main_greenlet)
// switching into a known dead thread (XXX: which, if we get here,
// is bad, because we just accessed the thread state, which is
// gone!)
|| (!current_main_greenlet->thread_state())) {
// CAUTION: This may trigger memory allocations, gc, and
// arbitrary Python code.
throw PyErrOccurred(
mod_globs->PyExc_GreenletError,
"Cannot switch to a different thread\n\tCurrent: %R\n\tExpected: %R",
current_main_greenlet, main_greenlet);
}
}
const OwnedObject
Greenlet::context() const
{
using greenlet::PythonStateContext;
OwnedObject result;
if (this->is_currently_running_in_some_thread()) {
/* Currently running greenlet: context is stored in the thread state,
not the greenlet object. */
if (GET_THREAD_STATE().state().is_current(this->self())) {
result = PythonStateContext::context(PyThreadState_GET());
}
else {
throw ValueError(
"cannot get context of a "
"greenlet that is running in a different thread");
}
}
else {
/* Greenlet is not running: just return context. */
result = this->python_state.context();
}
if (!result) {
result = OwnedObject::None();
}
return result;
}
void
Greenlet::context(BorrowedObject given)
{
using greenlet::PythonStateContext;
if (!given) {
throw AttributeError("can't delete context attribute");
}
if (given.is_None()) {
/* "Empty context" is stored as NULL, not None. */
given = nullptr;
}
//checks type, incrs refcnt
greenlet::refs::OwnedContext context(given);
PyThreadState* tstate = PyThreadState_GET();
if (this->is_currently_running_in_some_thread()) {
if (!GET_THREAD_STATE().state().is_current(this->self())) {
throw ValueError("cannot set context of a greenlet"
" that is running in a different thread");
}
/* Currently running greenlet: context is stored in the thread state,
not the greenlet object. */
OwnedObject octx = OwnedObject::consuming(PythonStateContext::context(tstate));
PythonStateContext::context(tstate, context.relinquish_ownership());
}
else {
/* Greenlet is not running: just set context. Note that the
greenlet may be dead.*/
this->python_state.context() = context;
}
}
/**
* CAUTION: May invoke arbitrary Python code.
*
* Figure out what the result of ``greenlet.switch(arg, kwargs)``
* should be and transfers ownership of it to the left-hand-side.
*
* If switch() was just passed an arg tuple, then we'll just return that.
* If only keyword arguments were passed, then we'll pass the keyword
* argument dict. Otherwise, we'll create a tuple of (args, kwargs) and
* return both.
*
* CAUTION: This may allocate a new tuple object, which may
* cause the Python garbage collector to run, which in turn may
* run arbitrary Python code that switches.
*/
OwnedObject& operator<<=(OwnedObject& lhs, greenlet::SwitchingArgs& rhs) noexcept
{
// Because this may invoke arbitrary Python code, which could
// result in switching back to us, we need to get the
// arguments locally on the stack.
assert(rhs);
OwnedObject args = rhs.args();
OwnedObject kwargs = rhs.kwargs();
rhs.CLEAR();
// We shouldn't be called twice for the same switch.
assert(args || kwargs);
assert(!rhs);
if (!kwargs) {
lhs = args;
}
else if (!PyDict_Size(kwargs.borrow())) {
lhs = args;
}
else if (!PySequence_Length(args.borrow())) {
lhs = kwargs;
}
else {
// PyTuple_Pack allocates memory, may GC, may run arbitrary
// Python code.
lhs = OwnedObject::consuming(PyTuple_Pack(2, args.borrow(), kwargs.borrow()));
}
return lhs;
}
static OwnedObject
g_handle_exit(const OwnedObject& greenlet_result)
{
if (!greenlet_result && mod_globs->PyExc_GreenletExit.PyExceptionMatches()) {
/* catch and ignore GreenletExit */
PyErrFetchParam val;
PyErr_Fetch(PyErrFetchParam(), val, PyErrFetchParam());
if (!val) {
return OwnedObject::None();
}
return OwnedObject(val);
}
if (greenlet_result) {
// package the result into a 1-tuple
// PyTuple_Pack increments the reference of its arguments,
// so we always need to decref the greenlet result;
// the owner will do that.
return OwnedObject::consuming(PyTuple_Pack(1, greenlet_result.borrow()));
}
return OwnedObject();
}
/**
* May run arbitrary Python code.
*/
OwnedObject
Greenlet::g_switch_finish(const switchstack_result_t& err)
{
assert(err.the_new_current_greenlet == this);
ThreadState& state = *this->thread_state();
// Because calling the trace function could do arbitrary things,
// including switching away from this greenlet and then maybe
// switching back, we need to capture the arguments now so that
// they don't change.
OwnedObject result;
if (this->args()) {
result <<= this->args();
}
else {
assert(PyErr_Occurred());
}
assert(!this->args());
try {
// Our only caller handles the bad error case
assert(err.status >= 0);
assert(state.borrow_current() == this->self());
if (OwnedObject tracefunc = state.get_tracefunc()) {
assert(result || PyErr_Occurred());
g_calltrace(tracefunc,
result ? mod_globs->event_switch : mod_globs->event_throw,
err.origin_greenlet,
this->self());
}
// The above could have invoked arbitrary Python code, but
// it couldn't switch back to this object and *also*
// throw an exception, so the args won't have changed.
if (PyErr_Occurred()) {
// We get here if we fell off the end of the run() function
// raising an exception. The switch itself was
// successful, but the function raised.
// valgrind reports that memory allocated here can still
// be reached after a test run.
throw PyErrOccurred::from_current();
}
return result;
}
catch (const PyErrOccurred&) {
/* Turn switch errors into switch throws */
/* Turn trace errors into switch throws */
this->release_args();
throw;
}
}
void
Greenlet::g_calltrace(const OwnedObject& tracefunc,
const greenlet::refs::ImmortalEventName& event,
const BorrowedGreenlet& origin,
const BorrowedGreenlet& target)
{
PyErrPieces saved_exc;
try {
TracingGuard tracing_guard;
// TODO: We have saved the active exception (if any) that's
// about to be raised. In the 'throw' case, we could provide
// the exception to the tracefunction, which seems very helpful.
tracing_guard.CallTraceFunction(tracefunc, event, origin, target);
}
catch (const PyErrOccurred&) {
// In case of exceptions trace function is removed,
// and any existing exception is replaced with the tracing
// exception.
GET_THREAD_STATE().state().set_tracefunc(Py_None);
throw;
}
saved_exc.PyErrRestore();
assert(
(event == mod_globs->event_throw && PyErr_Occurred())
|| (event == mod_globs->event_switch && !PyErr_Occurred())
);
}
void
Greenlet::murder_in_place()
{
if (this->active()) {
assert(!this->is_currently_running_in_some_thread());
this->deactivate_and_free();
}
}
inline void
Greenlet::deactivate_and_free()
{
if (!this->active()) {
return;
}
// Throw away any saved stack.
this->stack_state = StackState();
assert(!this->stack_state.active());
// Throw away any Python references.
// We're holding a borrowed reference to the last
// frame we executed. Since we borrowed it, the
// normal traversal, clear, and dealloc functions
// ignore it, meaning it leaks. (The thread state
// object can't find it to clear it when that's
// deallocated either, because by definition if we
// got an object on this list, it wasn't
// running and the thread state doesn't have
// this frame.)
// So here, we *do* clear it.
this->python_state.tp_clear(true);
}
bool
Greenlet::belongs_to_thread(const ThreadState* thread_state) const
{
if (!this->thread_state() // not running anywhere, or thread
// exited
|| !thread_state) { // same, or there is no thread state.
return false;
}
return true;
}
void
Greenlet::deallocing_greenlet_in_thread(const ThreadState* current_thread_state)
{
/* Cannot raise an exception to kill the greenlet if
it is not running in the same thread! */
if (this->belongs_to_thread(current_thread_state)) {
assert(current_thread_state);
// To get here it had to have run before
/* Send the greenlet a GreenletExit exception. */
// We don't care about the return value, only whether an
// exception happened.
this->throw_GreenletExit_during_dealloc(*current_thread_state);
return;
}
// Not the same thread! Temporarily save the greenlet
// into its thread's deleteme list, *if* it exists.
// If that thread has already exited, and processed its pending
// cleanup, we'll never be able to clean everything up: we won't
// be able to raise an exception.
// That's mostly OK! Since we can't add it to a list, our refcount
// won't increase, and we'll go ahead with the DECREFs later.
ThreadState *const thread_state = this->thread_state();
if (thread_state) {
thread_state->delete_when_thread_running(this->self());
}
else {
// The thread is dead, we can't raise an exception.
// We need to make it look non-active, though, so that dealloc
// finishes killing it.
this->deactivate_and_free();
}
return;
}
int
Greenlet::tp_traverse(visitproc visit, void* arg)
{
int result;
if ((result = this->exception_state.tp_traverse(visit, arg)) != 0) {
return result;
}
//XXX: This is ugly. But so is handling everything having to do
//with the top frame.
bool visit_top_frame = this->was_running_in_dead_thread();
// When true, the thread is dead. Our implicit weak reference to the
// frame is now all that's left; we consider ourselves to
// strongly own it now.
if ((result = this->python_state.tp_traverse(visit, arg, visit_top_frame)) != 0) {
return result;
}
return 0;
}
int
Greenlet::tp_clear()
{
bool own_top_frame = this->was_running_in_dead_thread();
this->exception_state.tp_clear();
this->python_state.tp_clear(own_top_frame);
return 0;
}
bool Greenlet::is_currently_running_in_some_thread() const
{
return this->stack_state.active() && !this->python_state.top_frame();
}
#if GREENLET_PY312
void GREENLET_NOINLINE(Greenlet::expose_frames)()
{
if (!this->python_state.top_frame()) {
return;
}
_PyInterpreterFrame* last_complete_iframe = nullptr;
_PyInterpreterFrame* iframe = this->python_state.top_frame()->f_frame;
while (iframe) {
// We must make a copy before looking at the iframe contents,
// since iframe might point to a portion of the greenlet's C stack
// that was spilled when switching greenlets.
_PyInterpreterFrame iframe_copy;
this->stack_state.copy_from_stack(&iframe_copy, iframe, sizeof(*iframe));
if (!_PyFrame_IsIncomplete(&iframe_copy)) {
// If the iframe were OWNED_BY_CSTACK then it would always be
// incomplete. Since it's not incomplete, it's not on the C stack
// and we can access it through the original `iframe` pointer
// directly. This is important since GetFrameObject might
// lazily _create_ the frame object and we don't want the
// interpreter to lose track of it.
assert(iframe_copy.owner != FRAME_OWNED_BY_CSTACK);
// We really want to just write:
// PyFrameObject* frame = _PyFrame_GetFrameObject(iframe);
// but _PyFrame_GetFrameObject calls _PyFrame_MakeAndSetFrameObject
// which is not a visible symbol in libpython. The easiest
// way to get a public function to call it is using
// PyFrame_GetBack, which is defined as follows:
// assert(frame != NULL);
// assert(!_PyFrame_IsIncomplete(frame->f_frame));
// PyFrameObject *back = frame->f_back;
// if (back == NULL) {
// _PyInterpreterFrame *prev = frame->f_frame->previous;
// prev = _PyFrame_GetFirstComplete(prev);
// if (prev) {
// back = _PyFrame_GetFrameObject(prev);
// }
// }
// return (PyFrameObject*)Py_XNewRef(back);
if (!iframe->frame_obj) {
PyFrameObject dummy_frame;
_PyInterpreterFrame dummy_iframe;
dummy_frame.f_back = nullptr;
dummy_frame.f_frame = &dummy_iframe;
// force the iframe to be considered complete without
// needing to check its code object:
dummy_iframe.owner = FRAME_OWNED_BY_GENERATOR;
dummy_iframe.previous = iframe;
assert(!_PyFrame_IsIncomplete(&dummy_iframe));
// Drop the returned reference immediately; the iframe
// continues to hold a strong reference
Py_XDECREF(PyFrame_GetBack(&dummy_frame));
assert(iframe->frame_obj);
}
// This is a complete frame, so make the last one of those we saw
// point at it, bypassing any incomplete frames (which may have
// been on the C stack) in between the two. We're overwriting
// last_complete_iframe->previous and need that to be reversible,
// so we store the original previous ptr in the frame object
// (which we must have created on a previous iteration through
// this loop). The frame object has a bunch of storage that is
// only used when its iframe is OWNED_BY_FRAME_OBJECT, which only
// occurs when the frame object outlives the frame's execution,
// which can't have happened yet because the frame is currently
// executing as far as the interpreter is concerned. So, we can
// reuse it for our own purposes.
assert(iframe->owner == FRAME_OWNED_BY_THREAD
|| iframe->owner == FRAME_OWNED_BY_GENERATOR);
if (last_complete_iframe) {
assert(last_complete_iframe->frame_obj);
memcpy(&last_complete_iframe->frame_obj->_f_frame_data[0],
&last_complete_iframe->previous, sizeof(void *));
last_complete_iframe->previous = iframe;
}
last_complete_iframe = iframe;
}
// Frames that are OWNED_BY_FRAME_OBJECT are linked via the
// frame's f_back while all others are linked via the iframe's
// previous ptr. Since all the frames we traverse are running
// as far as the interpreter is concerned, we don't have to
// worry about the OWNED_BY_FRAME_OBJECT case.
iframe = iframe_copy.previous;
}
// Give the outermost complete iframe a null previous pointer to
// account for any potential incomplete/C-stack iframes between it
// and the actual top-of-stack
if (last_complete_iframe) {
assert(last_complete_iframe->frame_obj);
memcpy(&last_complete_iframe->frame_obj->_f_frame_data[0],
&last_complete_iframe->previous, sizeof(void *));
last_complete_iframe->previous = nullptr;
}
}
#else
void Greenlet::expose_frames()
{
}
#endif
}; // namespace greenlet
#endif
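/*
 * Illustrative sketch only -- not part of greenlet. The `Frame` type and the
 * expose()/unexpose() helpers below are invented stand-ins that show the
 * pointer-rewriting idea used by Greenlet::expose_frames() and
 * PythonState::unexpose_frames(): walk a singly linked list, splice out nodes
 * that must be skipped, and stash each overwritten `previous` pointer in
 * per-node scratch space so the rewrite can be undone later.
 */
#include <cassert>
#include <cstring>

struct Frame {
    Frame* previous = nullptr;      // stands in for _PyInterpreterFrame::previous
    bool   complete = true;         // stands in for !_PyFrame_IsIncomplete(...)
    char   scratch[sizeof(void*)];  // stands in for the _f_frame_data storage
};

// Rewrite the chain starting at 'top' so complete frames link directly to one
// another; return the first complete frame (head of the exposed chain).
inline Frame* expose(Frame* top) {
    Frame* first_complete = nullptr;
    Frame* last_complete = nullptr;
    for (Frame* f = top; f; f = f->previous) {
        if (!f->complete) {
            continue;
        }
        if (!first_complete) {
            first_complete = f;
        }
        if (last_complete) {
            // Remember the original link, then bypass the skipped frames.
            std::memcpy(last_complete->scratch, &last_complete->previous, sizeof(void*));
            last_complete->previous = f;
        }
        last_complete = f;
    }
    if (last_complete) {
        std::memcpy(last_complete->scratch, &last_complete->previous, sizeof(void*));
        last_complete->previous = nullptr;
    }
    return first_complete;
}

// Undo the rewrite by restoring every stashed pointer, mirroring
// PythonState::unexpose_frames().
inline void unexpose(Frame* exposed_head) {
    for (Frame* f = exposed_head; f; ) {
        Frame* next_exposed = f->previous;
        std::memcpy(&f->previous, f->scratch, sizeof(void*));
        f = next_exposed;
    }
}

int main() {
    Frame outer;                 // complete, bottom of the chain
    Frame entry;                 // "incomplete" frame that must be skipped
    Frame top;                   // complete, top of the chain
    entry.complete = false;
    top.previous   = &entry;
    entry.previous = &outer;

    Frame* exposed = expose(&top);
    assert(exposed == &top);
    assert(top.previous == &outer);   // the incomplete frame is bypassed
    unexpose(exposed);
    assert(top.previous == &entry);   // original links restored
    assert(entry.previous == &outer);
    return 0;
}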

View File

@ -0,0 +1,818 @@
#ifndef GREENLET_GREENLET_HPP
#define GREENLET_GREENLET_HPP
/*
* Declarations of the core data structures.
*/
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "greenlet_compiler_compat.hpp"
#include "greenlet_refs.hpp"
#include "greenlet_cpython_compat.hpp"
#include "greenlet_allocator.hpp"
using greenlet::refs::OwnedObject;
using greenlet::refs::OwnedGreenlet;
using greenlet::refs::OwnedMainGreenlet;
using greenlet::refs::BorrowedGreenlet;
#if PY_VERSION_HEX < 0x30B00A6
# define _PyCFrame CFrame
# define _PyInterpreterFrame _interpreter_frame
#endif
#if GREENLET_PY312
# define Py_BUILD_CORE
# include "internal/pycore_frame.h"
#endif
#if GREENLET_PY314
# include "internal/pycore_interpframe_structs.h"
# include "internal/pycore_interpframe.h"
#endif
// XXX: TODO: Work to remove all virtual functions
// for speed of calling and size of objects (no vtable).
// One pattern is the Curiously Recurring Template
namespace greenlet
{
class ExceptionState
{
private:
G_NO_COPIES_OF_CLS(ExceptionState);
// Even though these are borrowed objects, we actually own
// them, when they're not null.
// XXX: Express that in the API.
private:
_PyErr_StackItem* exc_info;
_PyErr_StackItem exc_state;
public:
ExceptionState();
void operator<<(const PyThreadState *const tstate) noexcept;
void operator>>(PyThreadState* tstate) noexcept;
void clear() noexcept;
int tp_traverse(visitproc visit, void* arg) noexcept;
void tp_clear() noexcept;
};
template<typename T>
void operator<<(const PyThreadState *const tstate, T& exc);
class PythonStateContext
{
protected:
greenlet::refs::OwnedContext _context;
public:
inline const greenlet::refs::OwnedContext& context() const
{
return this->_context;
}
inline greenlet::refs::OwnedContext& context()
{
return this->_context;
}
inline void tp_clear()
{
this->_context.CLEAR();
}
template<typename T>
inline static PyObject* context(T* tstate)
{
return tstate->context;
}
template<typename T>
inline static void context(T* tstate, PyObject* new_context)
{
tstate->context = new_context;
tstate->context_ver++;
}
};
class SwitchingArgs;
class PythonState : public PythonStateContext
{
public:
typedef greenlet::refs::OwnedReference<struct _frame> OwnedFrame;
private:
G_NO_COPIES_OF_CLS(PythonState);
// We own this if we're suspended (although currently we don't
// tp_traverse into it; that's a TODO). If we're running, it's
// empty. If we get deallocated and *still* have a frame, it
// won't be reachable from the place that normally decref's
// it, so we need to do it (hence owning it).
OwnedFrame _top_frame;
#if GREENLET_USE_CFRAME
_PyCFrame* cframe;
int use_tracing;
#endif
#if GREENLET_PY312
int py_recursion_depth;
int c_recursion_depth;
#else
int recursion_depth;
#endif
#if GREENLET_PY313
PyObject *delete_later;
#else
int trash_delete_nesting;
#endif
#if GREENLET_PY311
_PyInterpreterFrame* current_frame;
_PyStackChunk* datastack_chunk;
PyObject** datastack_top;
PyObject** datastack_limit;
#endif
// The PyInterpreterFrame list on 3.12+ contains some entries that are
// on the C stack, which can't be directly accessed while a greenlet is
// suspended. In order to keep greenlet gr_frame introspection working,
// we adjust stack switching to rewrite the interpreter frame list
// to skip these C-stack frames; we call this "exposing" the greenlet's
// frames because it makes them valid to work with in Python. Then when
// the greenlet is resumed we need to remember to reverse the operation
// we did. The C-stack frames are "entry frames" which are a low-level
// interpreter detail; they're not needed for introspection, but do
// need to be present for the eval loop to work.
void unexpose_frames();
public:
PythonState();
// You can use this for testing whether we have a frame
// or not. It returns const so they can't modify it.
const OwnedFrame& top_frame() const noexcept;
inline void operator<<(const PyThreadState *const tstate) noexcept;
inline void operator>>(PyThreadState* tstate) noexcept;
void clear() noexcept;
int tp_traverse(visitproc visit, void* arg, bool visit_top_frame) noexcept;
void tp_clear(bool own_top_frame) noexcept;
void set_initial_state(const PyThreadState* const tstate) noexcept;
#if GREENLET_USE_CFRAME
void set_new_cframe(_PyCFrame& frame) noexcept;
#endif
void may_switch_away() noexcept;
inline void will_switch_from(PyThreadState *const origin_tstate) noexcept;
void did_finish(PyThreadState* tstate) noexcept;
};
class StackState
{
// By having only plain C (POD) members, no virtual functions
// or bases, we get a trivial assignment operator generated
// for us. However, that's not safe since we do manage memory.
// So we declare an assignment operator that only works if we
// don't have any memory allocated. (We don't use
// std::shared_ptr for reference counting just to keep this
// object small)
private:
char* _stack_start;
char* stack_stop;
char* stack_copy;
intptr_t _stack_saved;
StackState* stack_prev;
inline int copy_stack_to_heap_up_to(const char* const stop) noexcept;
inline void free_stack_copy() noexcept;
public:
/**
* Creates a started, but inactive, state, using *current*
* as the previous.
*/
StackState(void* mark, StackState& current);
/**
* Creates an inactive, unstarted, state.
*/
StackState();
~StackState();
StackState(const StackState& other);
StackState& operator=(const StackState& other);
inline void copy_heap_to_stack(const StackState& current) noexcept;
inline int copy_stack_to_heap(char* const stackref, const StackState& current) noexcept;
inline bool started() const noexcept;
inline bool main() const noexcept;
inline bool active() const noexcept;
inline void set_active() noexcept;
inline void set_inactive() noexcept;
inline intptr_t stack_saved() const noexcept;
inline char* stack_start() const noexcept;
static inline StackState make_main() noexcept;
#ifdef GREENLET_USE_STDIO
friend std::ostream& operator<<(std::ostream& os, const StackState& s);
#endif
// Fill in [dest, dest + n) with the values that would be at
// [src, src + n) while this greenlet is running. This is like memcpy
// except that if the greenlet is suspended it accounts for the portion
// of the greenlet's stack that was spilled to the heap. `src` may
// be on this greenlet's stack, or on the heap, but not on a different
// greenlet's stack.
void copy_from_stack(void* dest, const void* src, size_t n) const;
};
#ifdef GREENLET_USE_STDIO
std::ostream& operator<<(std::ostream& os, const StackState& s);
#endif
class SwitchingArgs
{
private:
G_NO_ASSIGNMENT_OF_CLS(SwitchingArgs);
// If args and kwargs are both false (NULL), this is a *throw*, not a
// switch. PyErr_... must have been called already.
OwnedObject _args;
OwnedObject _kwargs;
public:
SwitchingArgs()
{}
SwitchingArgs(const OwnedObject& args, const OwnedObject& kwargs)
: _args(args),
_kwargs(kwargs)
{}
SwitchingArgs(const SwitchingArgs& other)
: _args(other._args),
_kwargs(other._kwargs)
{}
const OwnedObject& args()
{
return this->_args;
}
const OwnedObject& kwargs()
{
return this->_kwargs;
}
/**
* Moves ownership from the argument to this object.
*/
SwitchingArgs& operator<<=(SwitchingArgs& other)
{
if (this != &other) {
this->_args = other._args;
this->_kwargs = other._kwargs;
other.CLEAR();
}
return *this;
}
/**
* Acquires ownership of the argument (consumes the reference).
*/
SwitchingArgs& operator<<=(PyObject* args)
{
this->_args = OwnedObject::consuming(args);
this->_kwargs.CLEAR();
return *this;
}
/**
* Acquires ownership of the argument.
*
* Sets the args to be the given value; clears the kwargs.
*/
SwitchingArgs& operator<<=(OwnedObject& args)
{
assert(&args != &this->_args);
this->_args = args;
this->_kwargs.CLEAR();
args.CLEAR();
return *this;
}
explicit operator bool() const noexcept
{
return this->_args || this->_kwargs;
}
inline void CLEAR()
{
this->_args.CLEAR();
this->_kwargs.CLEAR();
}
const std::string as_str() const noexcept
{
return PyUnicode_AsUTF8(
OwnedObject::consuming(
PyUnicode_FromFormat(
"SwitchingArgs(args=%R, kwargs=%R)",
this->_args.borrow(),
this->_kwargs.borrow()
)
).borrow()
);
}
};
class ThreadState;
class UserGreenlet;
class MainGreenlet;
class Greenlet
{
private:
G_NO_COPIES_OF_CLS(Greenlet);
PyGreenlet* const _self;
private:
// XXX: Work to remove these.
friend class ThreadState;
friend class UserGreenlet;
friend class MainGreenlet;
protected:
ExceptionState exception_state;
SwitchingArgs switch_args;
StackState stack_state;
PythonState python_state;
Greenlet(PyGreenlet* p, const StackState& initial_state);
public:
// This constructor takes ownership of the PyGreenlet, by
// setting ``p->pimpl = this;``.
Greenlet(PyGreenlet* p);
virtual ~Greenlet();
const OwnedObject context() const;
// You MUST call this _very_ early in the switching process to
// prepare anything that may need prepared. This might perform
// garbage collections or otherwise run arbitrary Python code.
//
// One specific use of it is for Python 3.11+, preventing
// running arbitrary code at unsafe times. See
// PythonState::may_switch_away().
inline void may_switch_away()
{
this->python_state.may_switch_away();
}
inline void context(refs::BorrowedObject new_context);
inline SwitchingArgs& args()
{
return this->switch_args;
}
virtual const refs::BorrowedMainGreenlet main_greenlet() const = 0;
inline intptr_t stack_saved() const noexcept
{
return this->stack_state.stack_saved();
}
// This is used by the macro SLP_SAVE_STATE to compute the
// difference in stack sizes. It might be nice to handle the
// computation ourselves, but the type of the result
// varies by platform, so doing it in the macro is the
// simplest way.
inline const char* stack_start() const noexcept
{
return this->stack_state.stack_start();
}
virtual OwnedObject throw_GreenletExit_during_dealloc(const ThreadState& current_thread_state);
virtual OwnedObject g_switch() = 0;
/**
* Force the greenlet to appear dead. Used when it's not
* possible to throw an exception into a greenlet anymore.
*
* This loses access to the thread state and the main greenlet.
*/
virtual void murder_in_place();
/**
* Called when somebody notices we were running in a dead
* thread to allow cleaning up resources (because we can't
* raise GreenletExit into it anymore).
* This is very similar to ``murder_in_place()``, except that
* it DOES NOT lose the main greenlet or thread state.
*/
inline void deactivate_and_free();
// Called when some thread wants to deallocate a greenlet
// object.
// The thread may or may not be the same thread the greenlet
// was running in.
// The thread state will be null if the thread the greenlet
// was running in was known to have exited.
void deallocing_greenlet_in_thread(const ThreadState* current_state);
// Must be called on 3.12+ before exposing a suspended greenlet's
// frames to user code. This rewrites the linked list of interpreter
// frames to skip the ones that are being stored on the C stack (which
// can't be safely accessed while the greenlet is suspended because
// that stack space might be hosting a different greenlet), and
// sets PythonState::frames_were_exposed so we remember to restore
// the original list before resuming the greenlet. The C-stack frames
// are a low-level interpreter implementation detail; while they're
// important to the bytecode eval loop, they're superfluous for
// introspection purposes.
void expose_frames();
// TODO: Figure out how to make these non-public.
inline void slp_restore_state() noexcept;
inline int slp_save_state(char *const stackref) noexcept;
inline bool is_currently_running_in_some_thread() const;
virtual bool belongs_to_thread(const ThreadState* state) const;
inline bool started() const
{
return this->stack_state.started();
}
inline bool active() const
{
return this->stack_state.active();
}
inline bool main() const
{
return this->stack_state.main();
}
virtual refs::BorrowedMainGreenlet find_main_greenlet_in_lineage() const = 0;
virtual const OwnedGreenlet parent() const = 0;
virtual void parent(const refs::BorrowedObject new_parent) = 0;
inline const PythonState::OwnedFrame& top_frame()
{
return this->python_state.top_frame();
}
virtual const OwnedObject& run() const = 0;
virtual void run(const refs::BorrowedObject nrun) = 0;
virtual int tp_traverse(visitproc visit, void* arg);
virtual int tp_clear();
// Return the thread state that the greenlet is running in, or
// null if the greenlet is not running or the thread is known
// to have exited.
virtual ThreadState* thread_state() const noexcept = 0;
// Return true if the greenlet is known to have been running
// (active) in a thread that has now exited.
virtual bool was_running_in_dead_thread() const noexcept = 0;
// Return a borrowed greenlet that is the Python object
// this object represents.
inline BorrowedGreenlet self() const noexcept
{
return BorrowedGreenlet(this->_self);
}
// For testing. If this returns true, we should pretend that
// slp_switch() failed.
virtual bool force_slp_switch_error() const noexcept;
protected:
inline void release_args();
// The functions that must not be inlined are declared virtual.
// We also mark them as protected, not private, so that the
// compiler is forced to call them through a function pointer.
// (A sufficiently smart compiler could directly call a private
// virtual function since it can never be overridden in a
// subclass).
// Also TODO: Switch away from integer error codes and to enums,
// or throw exceptions when possible.
struct switchstack_result_t
{
int status;
Greenlet* the_new_current_greenlet;
OwnedGreenlet origin_greenlet;
switchstack_result_t()
: status(0),
the_new_current_greenlet(nullptr)
{}
switchstack_result_t(int err)
: status(err),
the_new_current_greenlet(nullptr)
{}
switchstack_result_t(int err, Greenlet* state, OwnedGreenlet& origin)
: status(err),
the_new_current_greenlet(state),
origin_greenlet(origin)
{
}
switchstack_result_t(int err, Greenlet* state, const BorrowedGreenlet& origin)
: status(err),
the_new_current_greenlet(state),
origin_greenlet(origin)
{
}
switchstack_result_t(const switchstack_result_t& other)
: status(other.status),
the_new_current_greenlet(other.the_new_current_greenlet),
origin_greenlet(other.origin_greenlet)
{}
switchstack_result_t& operator=(const switchstack_result_t& other)
{
this->status = other.status;
this->the_new_current_greenlet = other.the_new_current_greenlet;
this->origin_greenlet = other.origin_greenlet;
return *this;
}
};
OwnedObject on_switchstack_or_initialstub_failure(
Greenlet* target,
const switchstack_result_t& err,
const bool target_was_me=false,
const bool was_initial_stub=false);
// Returns the previous greenlet we just switched away from.
virtual OwnedGreenlet g_switchstack_success() noexcept;
// Check the preconditions for switching to this greenlet; if they
// aren't met, throws PyErrOccurred. Most callers will want to
// catch this and clear the arguments
inline void check_switch_allowed() const;
class GreenletStartedWhileInPython : public std::runtime_error
{
public:
GreenletStartedWhileInPython() : std::runtime_error("")
{}
};
protected:
/**
Perform a stack switch into this greenlet.
This temporarily sets the global variable
``switching_thread_state`` to this greenlet; as soon as the
call to ``slp_switch`` completes, this is reset to NULL.
Consequently, this depends on the GIL.
TODO: Adopt the stackman model and pass ``slp_switch`` a
callback function and context pointer; this eliminates the
need for global variables altogether.
Because the stack switch happens in this function, this
function can't use its own stack (local) variables, set
before the switch, and then accessed after the switch.
Further, you can't even access ``g_thread_state_global``
before and after the switch from the global variable.
Because it is thread local some compilers cache it in a
register/on the stack, notably new versions of MSVC; this
breaks with strange crashes sometime later, because writing
to anything in ``g_thread_state_global`` after the switch
is actually writing to random memory. For this reason, we
call a non-inlined function to finish the operation. (XXX:
The ``/GT`` MSVC compiler argument probably fixes that.)
It is very important that stack switch is 'atomic', i.e. no
calls into other Python code allowed (except very few that
are safe), because global variables are very fragile. (This
should no longer be the case with thread-local variables.)
*/
// Made virtual to facilitate subclassing UserGreenlet for testing.
virtual switchstack_result_t g_switchstack(void);
class TracingGuard
{
private:
PyThreadState* tstate;
public:
TracingGuard()
: tstate(PyThreadState_GET())
{
PyThreadState_EnterTracing(this->tstate);
}
~TracingGuard()
{
PyThreadState_LeaveTracing(this->tstate);
this->tstate = nullptr;
}
inline void CallTraceFunction(const OwnedObject& tracefunc,
const greenlet::refs::ImmortalEventName& event,
const BorrowedGreenlet& origin,
const BorrowedGreenlet& target)
{
// TODO: This calls tracefunc(event, (origin, target)). Add a shortcut
// function for that that's specialized to avoid the Py_BuildValue
// string parsing, or start with just using "ON" format with PyTuple_Pack(2,
// origin, target). That seems like what the N format is meant
// for.
// XXX: Why does event not automatically cast back to a PyObject?
// It tries to call the "deleted constructor ImmortalEventName
// const" instead.
assert(tracefunc);
assert(event);
assert(origin);
assert(target);
greenlet::refs::NewReference retval(
PyObject_CallFunction(
tracefunc.borrow(),
"O(OO)",
event.borrow(),
origin.borrow(),
target.borrow()
));
if (!retval) {
throw PyErrOccurred::from_current();
}
}
};
static void
g_calltrace(const OwnedObject& tracefunc,
const greenlet::refs::ImmortalEventName& event,
const greenlet::refs::BorrowedGreenlet& origin,
const BorrowedGreenlet& target);
private:
OwnedObject g_switch_finish(const switchstack_result_t& err);
};
class UserGreenlet : public Greenlet
{
private:
static greenlet::PythonAllocator<UserGreenlet> allocator;
OwnedMainGreenlet _main_greenlet;
OwnedObject _run_callable;
OwnedGreenlet _parent;
public:
static void* operator new(size_t UNUSED(count));
static void operator delete(void* ptr);
UserGreenlet(PyGreenlet* p, BorrowedGreenlet the_parent);
virtual ~UserGreenlet();
virtual refs::BorrowedMainGreenlet find_main_greenlet_in_lineage() const;
virtual bool was_running_in_dead_thread() const noexcept;
virtual ThreadState* thread_state() const noexcept;
virtual OwnedObject g_switch();
virtual const OwnedObject& run() const
{
if (this->started() || !this->_run_callable) {
throw AttributeError("run");
}
return this->_run_callable;
}
virtual void run(const refs::BorrowedObject nrun);
virtual const OwnedGreenlet parent() const;
virtual void parent(const refs::BorrowedObject new_parent);
virtual const refs::BorrowedMainGreenlet main_greenlet() const;
virtual void murder_in_place();
virtual bool belongs_to_thread(const ThreadState* state) const;
virtual int tp_traverse(visitproc visit, void* arg);
virtual int tp_clear();
class ParentIsCurrentGuard
{
private:
OwnedGreenlet oldparent;
UserGreenlet* greenlet;
G_NO_COPIES_OF_CLS(ParentIsCurrentGuard);
public:
ParentIsCurrentGuard(UserGreenlet* p, const ThreadState& thread_state);
~ParentIsCurrentGuard();
};
virtual OwnedObject throw_GreenletExit_during_dealloc(const ThreadState& current_thread_state);
protected:
virtual switchstack_result_t g_initialstub(void* mark);
private:
// This function isn't meant to return.
// This accepts raw pointers and the ownership of them at the
// same time. The caller should use ``inner_bootstrap(origin.relinquish_ownership())``.
void inner_bootstrap(PyGreenlet* origin_greenlet, PyObject* run);
};
class BrokenGreenlet : public UserGreenlet
{
private:
static greenlet::PythonAllocator<BrokenGreenlet> allocator;
public:
bool _force_switch_error = false;
bool _force_slp_switch_error = false;
static void* operator new(size_t UNUSED(count));
static void operator delete(void* ptr);
BrokenGreenlet(PyGreenlet* p, BorrowedGreenlet the_parent)
: UserGreenlet(p, the_parent)
{}
virtual ~BrokenGreenlet()
{}
virtual switchstack_result_t g_switchstack(void);
virtual bool force_slp_switch_error() const noexcept;
};
class MainGreenlet : public Greenlet
{
private:
static greenlet::PythonAllocator<MainGreenlet> allocator;
refs::BorrowedMainGreenlet _self;
ThreadState* _thread_state;
G_NO_COPIES_OF_CLS(MainGreenlet);
public:
static void* operator new(size_t UNUSED(count));
static void operator delete(void* ptr);
MainGreenlet(refs::BorrowedMainGreenlet::PyType*, ThreadState*);
virtual ~MainGreenlet();
virtual const OwnedObject& run() const;
virtual void run(const refs::BorrowedObject nrun);
virtual const OwnedGreenlet parent() const;
virtual void parent(const refs::BorrowedObject new_parent);
virtual const refs::BorrowedMainGreenlet main_greenlet() const;
virtual refs::BorrowedMainGreenlet find_main_greenlet_in_lineage() const;
virtual bool was_running_in_dead_thread() const noexcept;
virtual ThreadState* thread_state() const noexcept;
void thread_state(ThreadState*) noexcept;
virtual OwnedObject g_switch();
virtual int tp_traverse(visitproc visit, void* arg);
};
// Instantiate one on the stack to save the GC state,
// and then disable GC. When it goes out of scope, GC will be
// restored to its original state. Sadly, these APIs are only
// available on 3.10+; luckily, we only need them on 3.11+.
#if GREENLET_PY310
class GCDisabledGuard
{
private:
int was_enabled = 0;
public:
GCDisabledGuard()
: was_enabled(PyGC_IsEnabled())
{
PyGC_Disable();
}
~GCDisabledGuard()
{
if (this->was_enabled) {
PyGC_Enable();
}
}
};
#endif
OwnedObject& operator<<=(OwnedObject& lhs, greenlet::SwitchingArgs& rhs) noexcept;
//TODO: Greenlet::g_switch() should call this automatically on its
//return value. As it is, the module code is calling it.
static inline OwnedObject
single_result(const OwnedObject& results)
{
if (results
&& PyTuple_Check(results.borrow())
&& PyTuple_GET_SIZE(results.borrow()) == 1) {
PyObject* result = PyTuple_GET_ITEM(results.borrow(), 0);
assert(result);
return OwnedObject::owning(result);
}
return results;
}
static OwnedObject
g_handle_exit(const OwnedObject& greenlet_result);
template<typename T>
void operator<<(const PyThreadState *const lhs, T& rhs)
{
rhs.operator<<(lhs);
}
} // namespace greenlet ;
#endif
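/*
 * Illustrative sketch only -- not greenlet code. ScopedGCDisable is an
 * invented name; it shows the scope-guard idea behind GCDisabledGuard above,
 * using the public CPython 3.10+ GC controls. Compile and link against
 * Python >= 3.10 to try it.
 */
#include <Python.h>

struct ScopedGCDisable {
    int was_enabled;
    ScopedGCDisable() : was_enabled(PyGC_IsEnabled()) { PyGC_Disable(); }
    ~ScopedGCDisable() { if (was_enabled) { PyGC_Enable(); } }
};

static void gc_sensitive_section()
{
    ScopedGCDisable no_gc; // collection is off for the rest of this scope
    // ... work that must not trigger a garbage collection (and thus
    // arbitrary Python code) would go here ...
}   // ~ScopedGCDisable re-enables GC only if it was enabled on entry

int main()
{
    Py_Initialize();
    gc_sensitive_section();
    return Py_FinalizeEx();
}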

View File

@ -0,0 +1,94 @@
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
/**
* Implementation of GreenletGlobals.
*
* Format with:
* clang-format -i --style=file src/greenlet/greenlet.c
*
*
* Fix missing braces with:
* clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
*/
#ifndef T_GREENLET_GLOBALS
#define T_GREENLET_GLOBALS
#include "greenlet_refs.hpp"
#include "greenlet_exceptions.hpp"
#include "greenlet_thread_support.hpp"
#include "greenlet_internal.hpp"
namespace greenlet {
// This encapsulates what were previously module global "constants"
// established at init time.
// This is a step towards Python3 style module state that allows
// reloading.
//
// In an earlier iteration of this code, we used placement new to be
// able to allocate this object statically still, so that references
// to its members don't incur an extra pointer indirection.
// But under some scenarios, that could result in crashes at
// shutdown because apparently the destructor was getting run twice?
class GreenletGlobals
{
public:
const greenlet::refs::ImmortalEventName event_switch;
const greenlet::refs::ImmortalEventName event_throw;
const greenlet::refs::ImmortalException PyExc_GreenletError;
const greenlet::refs::ImmortalException PyExc_GreenletExit;
const greenlet::refs::ImmortalObject empty_tuple;
const greenlet::refs::ImmortalObject empty_dict;
const greenlet::refs::ImmortalString str_run;
Mutex* const thread_states_to_destroy_lock;
greenlet::cleanup_queue_t thread_states_to_destroy;
GreenletGlobals() :
event_switch("switch"),
event_throw("throw"),
PyExc_GreenletError("greenlet.error"),
PyExc_GreenletExit("greenlet.GreenletExit", PyExc_BaseException),
empty_tuple(Require(PyTuple_New(0))),
empty_dict(Require(PyDict_New())),
str_run("run"),
thread_states_to_destroy_lock(new Mutex())
{}
~GreenletGlobals()
{
// This object is (currently) effectively immortal, and not
// just because of those placement new tricks; if we try to
// deallocate the static object we allocated, and overwrote,
// we would be doing so at C++ teardown time, which is after
// the final Python GIL is released, and we can't use the API
// then.
// (The members will still be destructed, but they also don't
// do any deallocation.)
}
void queue_to_destroy(ThreadState* ts) const
{
// we're currently accessed through a static const object,
// implicitly marking our members as const, so code can't just
// call push_back (or pop_back) without casting away the
// const.
//
// Do that for callers.
greenlet::cleanup_queue_t& q = const_cast<greenlet::cleanup_queue_t&>(this->thread_states_to_destroy);
q.push_back(ts);
}
ThreadState* take_next_to_destroy() const
{
greenlet::cleanup_queue_t& q = const_cast<greenlet::cleanup_queue_t&>(this->thread_states_to_destroy);
ThreadState* result = q.back();
q.pop_back();
return result;
}
};
}; // namespace greenlet
static const greenlet::GreenletGlobals* mod_globs;
#endif // T_GREENLET_GLOBALS
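/*
 * Illustrative sketch only -- ConstQueue is an invented type. It shows the
 * const_cast idiom used by queue_to_destroy()/take_next_to_destroy() above:
 * the object is only reachable through a pointer-to-const, so const member
 * functions cast that away before mutating the container. (Declaring the
 * member `mutable` would be the more conventional spelling of the same idea.)
 */
#include <cassert>
#include <vector>

class ConstQueue {
    std::vector<int> items; // stands in for cleanup_queue_t
public:
    void push(int v) const {
        const_cast<std::vector<int>&>(this->items).push_back(v);
    }
    int take() const {
        std::vector<int>& q = const_cast<std::vector<int>&>(this->items);
        int v = q.back();
        q.pop_back();
        return v;
    }
};

int main() {
    ConstQueue storage;                 // the underlying object is not const
    const ConstQueue* queue = &storage; // but, like mod_globs, callers only hold a pointer-to-const
    queue->push(42);
    assert(queue->take() == 42);
    return 0;
}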

View File

@ -0,0 +1,153 @@
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
/**
* Implementation of greenlet::MainGreenlet.
*
* Format with:
* clang-format -i --style=file src/greenlet/greenlet.c
*
*
* Fix missing braces with:
* clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
*/
#ifndef T_MAIN_GREENLET_CPP
#define T_MAIN_GREENLET_CPP
#include "TGreenlet.hpp"
// Protected by the GIL. Incremented when we create a main greenlet,
// in a new thread, decremented when it is destroyed.
static Py_ssize_t G_TOTAL_MAIN_GREENLETS;
namespace greenlet {
greenlet::PythonAllocator<MainGreenlet> MainGreenlet::allocator;
void* MainGreenlet::operator new(size_t UNUSED(count))
{
return allocator.allocate(1);
}
void MainGreenlet::operator delete(void* ptr)
{
return allocator.deallocate(static_cast<MainGreenlet*>(ptr),
1);
}
MainGreenlet::MainGreenlet(PyGreenlet* p, ThreadState* state)
: Greenlet(p, StackState::make_main()),
_self(p),
_thread_state(state)
{
G_TOTAL_MAIN_GREENLETS++;
}
MainGreenlet::~MainGreenlet()
{
G_TOTAL_MAIN_GREENLETS--;
this->tp_clear();
}
ThreadState*
MainGreenlet::thread_state() const noexcept
{
return this->_thread_state;
}
void
MainGreenlet::thread_state(ThreadState* t) noexcept
{
assert(!t);
this->_thread_state = t;
}
const BorrowedMainGreenlet
MainGreenlet::main_greenlet() const
{
return this->_self;
}
BorrowedMainGreenlet
MainGreenlet::find_main_greenlet_in_lineage() const
{
return BorrowedMainGreenlet(this->_self);
}
bool
MainGreenlet::was_running_in_dead_thread() const noexcept
{
return !this->_thread_state;
}
OwnedObject
MainGreenlet::g_switch()
{
try {
this->check_switch_allowed();
}
catch (const PyErrOccurred&) {
this->release_args();
throw;
}
switchstack_result_t err = this->g_switchstack();
if (err.status < 0) {
// XXX: This code path is untested, but it is shared
// with the UserGreenlet path that is tested.
return this->on_switchstack_or_initialstub_failure(
this,
err,
true, // target was me
false // was initial stub
);
}
return err.the_new_current_greenlet->g_switch_finish(err);
}
int
MainGreenlet::tp_traverse(visitproc visit, void* arg)
{
if (this->_thread_state) {
// we've already traversed main, (self), don't do it again.
int result = this->_thread_state->tp_traverse(visit, arg, false);
if (result) {
return result;
}
}
return Greenlet::tp_traverse(visit, arg);
}
const OwnedObject&
MainGreenlet::run() const
{
throw AttributeError("Main greenlets do not have a run attribute.");
}
void
MainGreenlet::run(const BorrowedObject UNUSED(nrun))
{
throw AttributeError("Main greenlets do not have a run attribute.");
}
void
MainGreenlet::parent(const BorrowedObject raw_new_parent)
{
if (!raw_new_parent) {
throw AttributeError("can't delete attribute");
}
throw AttributeError("cannot set the parent of a main greenlet");
}
const OwnedGreenlet
MainGreenlet::parent() const
{
return OwnedGreenlet(); // null becomes None
}
}; // namespace greenlet
#endif
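/*
 * Illustrative sketch only -- PoolBacked is an invented class. It shows the
 * class-specific operator new/delete pattern used by MainGreenlet (and
 * UserGreenlet): allocation for one class is routed through a dedicated
 * allocator rather than the global heap. std::allocator stands in for
 * greenlet::PythonAllocator here.
 */
#include <cstddef>
#include <memory>

class PoolBacked {
    static std::allocator<PoolBacked> allocator;
public:
    int value = 0;
    static void* operator new(std::size_t) {
        return allocator.allocate(1);   // raw storage for one object
    }
    static void operator delete(void* ptr) {
        allocator.deallocate(static_cast<PoolBacked*>(ptr), 1);
    }
};
std::allocator<PoolBacked> PoolBacked::allocator;

int main() {
    PoolBacked* p = new PoolBacked; // routed through PoolBacked::operator new
    p->value = 1;
    delete p;                       // and back through operator delete
    return 0;
}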

View File

@ -0,0 +1,393 @@
#ifndef GREENLET_PYTHON_STATE_CPP
#define GREENLET_PYTHON_STATE_CPP
#include <Python.h>
#include "TGreenlet.hpp"
namespace greenlet {
PythonState::PythonState()
: _top_frame()
#if GREENLET_USE_CFRAME
,cframe(nullptr)
,use_tracing(0)
#endif
#if GREENLET_PY312
,py_recursion_depth(0)
,c_recursion_depth(0)
#else
,recursion_depth(0)
#endif
#if GREENLET_PY313
,delete_later(nullptr)
#else
,trash_delete_nesting(0)
#endif
#if GREENLET_PY311
,current_frame(nullptr)
,datastack_chunk(nullptr)
,datastack_top(nullptr)
,datastack_limit(nullptr)
#endif
{
#if GREENLET_USE_CFRAME
/*
The PyThreadState->cframe pointer usually points to memory on
the stack, allocated in a call into PyEval_EvalFrameDefault.
Initially, before any evaluation begins, it points to the
initial PyThreadState object's ``root_cframe`` object, which is
statically allocated for the lifetime of the thread.
A greenlet can last for longer than a call to
PyEval_EvalFrameDefault, so we can't set its ``cframe`` pointer
to be the current ``PyThreadState->cframe``; nor could we use
one from the greenlet parent for the same reason. Yet a further
no: we can't allocate one scoped to the greenlet and then
destroy it when the greenlet is deallocated, because inside the
interpreter the _PyCFrame objects form a linked list, and that too
can result in accessing memory beyond its dynamic lifetime (if
the greenlet doesn't actually finish before it dies, its entry
could still be in the list).
Using the ``root_cframe`` is problematic, though, because its
members are never modified by the interpreter and are set to 0,
meaning that its ``use_tracing`` flag is never updated. We don't
want to modify that value in the ``root_cframe`` ourselves: it
*shouldn't* matter much because we should probably never get
back to the point where that's the only cframe on the stack;
even if it did matter, the major consequence of an incorrect
value for ``use_tracing`` is that if it's true the interpreter
does some extra work --- however, it's just good code hygiene.
Our solution: before a greenlet runs, after its initial
creation, it uses the ``root_cframe`` just to have something to
put there. However, once the greenlet is actually switched to
for the first time, ``g_initialstub`` (which doesn't actually
"return" while the greenlet is running) stores a new _PyCFrame on
its local stack, and copies the appropriate values from the
currently running _PyCFrame; this is then made the _PyCFrame for the
newly-minted greenlet. ``g_initialstub`` then proceeds to call
``glet.run()``, which results in ``PyEval_...`` adding the
_PyCFrame to the list. Switches continue as normal. Finally, when
the greenlet finishes, the call to ``glet.run()`` returns and
the _PyCFrame is taken out of the linked list and the stack value
is now unused and free to expire.
XXX: I think we can do better. If we're deallocing in the same
thread, can't we traverse the list and unlink our frame?
Can we just keep a reference to the thread state in case we
dealloc in another thread? (Is that even possible if we're still
running and haven't returned from g_initialstub?)
*/
this->cframe = &PyThreadState_GET()->root_cframe;
#endif
}
inline void PythonState::may_switch_away() noexcept
{
#if GREENLET_PY311
// PyThreadState_GetFrame is probably going to have to allocate a
// new frame object. That may trigger garbage collection. Because
// we call this during the early phases of a switch (it doesn't
// matter to which greenlet, as this has a global effect), if a GC
// triggers a switch away, two things can happen, both bad:
// - We might not get switched back to, halting forward progress.
// this is pathological, but possible.
// - We might get switched back to with a different set of
// arguments or a throw instead of a switch. That would corrupt
// our state (specifically, PyErr_Occurred() and this->args()
// would no longer agree).
//
// Thus, when we call this API, we need to have GC disabled.
// This method serves as a bottleneck we call when maybe beginning
// a switch. In this way, it is always safe -- no risk of GC -- to
// use ``_GetFrame()`` whenever we need to, just as it was in
// <=3.10 (because subsequent calls will be cached and not
// allocate memory).
GCDisabledGuard no_gc;
Py_XDECREF(PyThreadState_GetFrame(PyThreadState_GET()));
#endif
}
void PythonState::operator<<(const PyThreadState *const tstate) noexcept
{
this->_context.steal(tstate->context);
#if GREENLET_USE_CFRAME
/*
IMPORTANT: ``cframe`` is a pointer into the STACK. Thus, because
the call to ``slp_switch()`` changes the contents of the stack,
you cannot read from ``ts_current->cframe`` after that call and
necessarily get the same values you get from reading it here.
Anything you need to restore from now to then must be saved in a
global/threadlocal variable (because we can't use stack
variables here either). For things that need to persist across
the switch, use `will_switch_from`.
*/
this->cframe = tstate->cframe;
#if !GREENLET_PY312
this->use_tracing = tstate->cframe->use_tracing;
#endif
#endif // GREENLET_USE_CFRAME
#if GREENLET_PY311
#if GREENLET_PY312
this->py_recursion_depth = tstate->py_recursion_limit - tstate->py_recursion_remaining;
this->c_recursion_depth = Py_C_RECURSION_LIMIT - tstate->c_recursion_remaining;
#else // not 312
this->recursion_depth = tstate->recursion_limit - tstate->recursion_remaining;
#endif // GREENLET_PY312
#if GREENLET_PY313
this->current_frame = tstate->current_frame;
#elif GREENLET_USE_CFRAME
this->current_frame = tstate->cframe->current_frame;
#endif
this->datastack_chunk = tstate->datastack_chunk;
this->datastack_top = tstate->datastack_top;
this->datastack_limit = tstate->datastack_limit;
PyFrameObject *frame = PyThreadState_GetFrame((PyThreadState *)tstate);
Py_XDECREF(frame); // PyThreadState_GetFrame gives us a new
// reference.
this->_top_frame.steal(frame);
#if GREENLET_PY313
this->delete_later = Py_XNewRef(tstate->delete_later);
#elif GREENLET_PY312
this->trash_delete_nesting = tstate->trash.delete_nesting;
#else // not 312
this->trash_delete_nesting = tstate->trash_delete_nesting;
#endif // GREENLET_PY312
#else // Not 311
this->recursion_depth = tstate->recursion_depth;
this->_top_frame.steal(tstate->frame);
this->trash_delete_nesting = tstate->trash_delete_nesting;
#endif // GREENLET_PY311
}
#if GREENLET_PY312
void GREENLET_NOINLINE(PythonState::unexpose_frames)()
{
if (!this->top_frame()) {
return;
}
// See Greenlet::expose_frames() and the comment on frames_were_exposed
// for more information about this logic.
_PyInterpreterFrame *iframe = this->_top_frame->f_frame;
while (iframe != nullptr) {
_PyInterpreterFrame *prev_exposed = iframe->previous;
assert(iframe->frame_obj);
memcpy(&iframe->previous, &iframe->frame_obj->_f_frame_data[0],
sizeof(void *));
iframe = prev_exposed;
}
}
#else
void PythonState::unexpose_frames()
{}
#endif
void PythonState::operator>>(PyThreadState *const tstate) noexcept
{
tstate->context = this->_context.relinquish_ownership();
/* Incrementing this value invalidates the contextvars cache,
which would otherwise remain valid across switches */
tstate->context_ver++;
#if GREENLET_USE_CFRAME
tstate->cframe = this->cframe;
/*
If we were tracing, we need to keep tracing.
There should never be the possibility of hitting the
root_cframe here. See note above about why we can't
just copy this from ``origin->cframe->use_tracing``.
*/
#if !GREENLET_PY312
tstate->cframe->use_tracing = this->use_tracing;
#endif
#endif // GREENLET_USE_CFRAME
#if GREENLET_PY311
#if GREENLET_PY312
tstate->py_recursion_remaining = tstate->py_recursion_limit - this->py_recursion_depth;
tstate->c_recursion_remaining = Py_C_RECURSION_LIMIT - this->c_recursion_depth;
this->unexpose_frames();
#else // \/ 3.11
tstate->recursion_remaining = tstate->recursion_limit - this->recursion_depth;
#endif // GREENLET_PY312
#if GREENLET_PY313
tstate->current_frame = this->current_frame;
#elif GREENLET_USE_CFRAME
tstate->cframe->current_frame = this->current_frame;
#endif
tstate->datastack_chunk = this->datastack_chunk;
tstate->datastack_top = this->datastack_top;
tstate->datastack_limit = this->datastack_limit;
this->_top_frame.relinquish_ownership();
#if GREENLET_PY313
Py_XDECREF(tstate->delete_later);
tstate->delete_later = this->delete_later;
Py_CLEAR(this->delete_later);
#elif GREENLET_PY312
tstate->trash.delete_nesting = this->trash_delete_nesting;
#else // not 3.12
tstate->trash_delete_nesting = this->trash_delete_nesting;
#endif // GREENLET_PY312
#else // not 3.11
tstate->frame = this->_top_frame.relinquish_ownership();
tstate->recursion_depth = this->recursion_depth;
tstate->trash_delete_nesting = this->trash_delete_nesting;
#endif // GREENLET_PY311
}
inline void PythonState::will_switch_from(PyThreadState *const origin_tstate) noexcept
{
#if GREENLET_USE_CFRAME && !GREENLET_PY312
// The weird thing is, we don't actually save this for an
// effect on the current greenlet, it's saved for an
// effect on the target greenlet. That is, we want
// continuity of this setting across the greenlet switch.
this->use_tracing = origin_tstate->cframe->use_tracing;
#endif
}
void PythonState::set_initial_state(const PyThreadState* const tstate) noexcept
{
this->_top_frame = nullptr;
#if GREENLET_PY312
this->py_recursion_depth = tstate->py_recursion_limit - tstate->py_recursion_remaining;
// XXX: TODO: Comment from a reviewer:
// Should this be ``Py_C_RECURSION_LIMIT - tstate->c_recursion_remaining``?
// But to me it looks more like that might not be the right
// initialization either?
this->c_recursion_depth = tstate->py_recursion_limit - tstate->py_recursion_remaining;
#elif GREENLET_PY311
this->recursion_depth = tstate->recursion_limit - tstate->recursion_remaining;
#else
this->recursion_depth = tstate->recursion_depth;
#endif
}
// TODO: Better state management about when we own the top frame.
int PythonState::tp_traverse(visitproc visit, void* arg, bool own_top_frame) noexcept
{
Py_VISIT(this->_context.borrow());
if (own_top_frame) {
Py_VISIT(this->_top_frame.borrow());
}
return 0;
}
void PythonState::tp_clear(bool own_top_frame) noexcept
{
PythonStateContext::tp_clear();
// If we get here owning a frame,
// we got dealloc'd without being finished. We may or may not be
// in the same thread.
if (own_top_frame) {
this->_top_frame.CLEAR();
}
}
#if GREENLET_USE_CFRAME
void PythonState::set_new_cframe(_PyCFrame& frame) noexcept
{
frame = *PyThreadState_GET()->cframe;
/* Make the target greenlet refer to the stack value. */
this->cframe = &frame;
/*
And restore the link to the previous frame so this one gets
unlinked appropriately.
*/
this->cframe->previous = &PyThreadState_GET()->root_cframe;
}
#endif
const PythonState::OwnedFrame& PythonState::top_frame() const noexcept
{
return this->_top_frame;
}
void PythonState::did_finish(PyThreadState* tstate) noexcept
{
#if GREENLET_PY311
// See https://github.com/gevent/gevent/issues/1924 and
// https://github.com/python-greenlet/greenlet/issues/328. In
// short, Python 3.11 allocates memory for frames as a sort of
// linked list that's kept as part of PyThreadState in the
// ``datastack_chunk`` member and friends. These are saved and
// restored as part of switching greenlets.
//
// When we initially switch to a greenlet, we set those to NULL.
// That causes the frame management code to treat this like a
// brand new thread and start a fresh list of chunks, beginning
// with a new "root" chunk. As we make calls in this greenlet,
// those chunks get added, and as calls return, they get popped.
// But the frame code (pystate.c) is careful to make sure that the
// root chunk never gets popped.
//
// Thus, when a greenlet exits for the last time, there will be at
// least a single root chunk that we must be responsible for
// deallocating.
//
// The complex part is that these chunks are allocated and freed
// using ``_PyObject_VirtualAlloc``/``Free``. Those aren't public
// functions, and they aren't exported for linking. It so happens
// that we know they are just thin wrappers around the Arena
// allocator, so we can use that directly to deallocate in a
// compatible way.
//
// CAUTION: Check this implementation detail on every major version.
//
// It might be nice to be able to do this in our destructor, but
// can we be sure that no one else is using that memory? Plus, as
// described below, our pointers may not even be valid anymore. As
// a special case, there is one time that we know we can do this,
// and that's from the destructor of the associated UserGreenlet
// (NOT main greenlet)
PyObjectArenaAllocator alloc;
_PyStackChunk* chunk = nullptr;
if (tstate) {
// We really did finish, we can never be switched to again.
chunk = tstate->datastack_chunk;
// Unfortunately, we can't do much sanity checking. Our
// this->datastack_chunk pointer is out of date (evaluation may
// have popped down through it already) so we can't verify that
// we deallocate it. I don't think we can even check datastack_top
// for the same reason.
PyObject_GetArenaAllocator(&alloc);
tstate->datastack_chunk = nullptr;
tstate->datastack_limit = nullptr;
tstate->datastack_top = nullptr;
}
else if (this->datastack_chunk) {
// The UserGreenlet (NOT the main greenlet!) is being deallocated. If we're
// still holding a stack chunk, it's garbage because we know
// we can never switch back to let cPython clean it up.
// Because the last time we got switched away from, and we
// haven't run since then, we know our chain is valid and can
// be dealloced.
chunk = this->datastack_chunk;
PyObject_GetArenaAllocator(&alloc);
}
if (alloc.free && chunk) {
// In case the arena mechanism has been torn down already.
while (chunk) {
_PyStackChunk *prev = chunk->previous;
chunk->previous = nullptr;
alloc.free(alloc.ctx, chunk, chunk->size);
chunk = prev;
}
}
this->datastack_chunk = nullptr;
this->datastack_limit = nullptr;
this->datastack_top = nullptr;
#endif
}
}; // namespace greenlet
#endif // GREENLET_PYTHON_STATE_CPP
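/*
 * Illustrative sketch only -- the Toy* types are invented. They show the
 * operator<< / operator>> save/restore convention implemented above:
 * `state << tstate` captures the interesting fields of the thread state when
 * a greenlet is switched away from, and `state >> tstate` writes them back
 * when it is resumed.
 */
#include <cassert>

struct ToyThreadState {            // stands in for PyThreadState
    int recursion_remaining = 50;
};

class ToyPythonState {             // stands in for greenlet::PythonState
    int saved_recursion_remaining = 0;
public:
    void operator<<(const ToyThreadState* tstate) noexcept {
        this->saved_recursion_remaining = tstate->recursion_remaining;
    }
    void operator>>(ToyThreadState* tstate) noexcept {
        tstate->recursion_remaining = this->saved_recursion_remaining;
    }
};

int main() {
    ToyThreadState ts;
    ToyPythonState suspended;
    suspended << &ts;              // switching away: save
    ts.recursion_remaining = 7;    // some other greenlet runs and changes it
    suspended >> &ts;              // switching back: restore
    assert(ts.recursion_remaining == 50);
    return 0;
}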

View File

@ -0,0 +1,265 @@
#ifndef GREENLET_STACK_STATE_CPP
#define GREENLET_STACK_STATE_CPP
#include "TGreenlet.hpp"
namespace greenlet {
#ifdef GREENLET_USE_STDIO
#include <iostream>
using std::cerr;
using std::endl;
std::ostream& operator<<(std::ostream& os, const StackState& s)
{
os << "StackState(stack_start=" << (void*)s._stack_start
<< ", stack_stop=" << (void*)s.stack_stop
<< ", stack_copy=" << (void*)s.stack_copy
<< ", stack_saved=" << s._stack_saved
<< ", stack_prev=" << s.stack_prev
<< ", addr=" << &s
<< ")";
return os;
}
#endif
StackState::StackState(void* mark, StackState& current)
: _stack_start(nullptr),
stack_stop((char*)mark),
stack_copy(nullptr),
_stack_saved(0),
/* Skip a dying greenlet */
stack_prev(current._stack_start
? &current
: current.stack_prev)
{
}
StackState::StackState()
: _stack_start(nullptr),
stack_stop(nullptr),
stack_copy(nullptr),
_stack_saved(0),
stack_prev(nullptr)
{
}
StackState::StackState(const StackState& other)
// can't use a delegating constructor because of
// MSVC for Python 2.7
: _stack_start(nullptr),
stack_stop(nullptr),
stack_copy(nullptr),
_stack_saved(0),
stack_prev(nullptr)
{
this->operator=(other);
}
StackState& StackState::operator=(const StackState& other)
{
if (&other == this) {
return *this;
}
if (other._stack_saved) {
throw std::runtime_error("Refusing to steal memory.");
}
//If we have memory allocated, dispose of it
this->free_stack_copy();
this->_stack_start = other._stack_start;
this->stack_stop = other.stack_stop;
this->stack_copy = other.stack_copy;
this->_stack_saved = other._stack_saved;
this->stack_prev = other.stack_prev;
return *this;
}
inline void StackState::free_stack_copy() noexcept
{
PyMem_Free(this->stack_copy);
this->stack_copy = nullptr;
this->_stack_saved = 0;
}
inline void StackState::copy_heap_to_stack(const StackState& current) noexcept
{
/* Restore the heap copy back into the C stack */
if (this->_stack_saved != 0) {
memcpy(this->_stack_start, this->stack_copy, this->_stack_saved);
this->free_stack_copy();
}
StackState* owner = const_cast<StackState*>(&current);
if (!owner->_stack_start) {
owner = owner->stack_prev; /* greenlet is dying, skip it */
}
while (owner && owner->stack_stop <= this->stack_stop) {
// cerr << "\tOwner: " << owner << endl;
owner = owner->stack_prev; /* find greenlet with more stack */
}
this->stack_prev = owner;
// cerr << "\tFinished with: " << *this << endl;
}
inline int StackState::copy_stack_to_heap_up_to(const char* const stop) noexcept
{
/* Save more of g's stack into the heap -- at least up to 'stop'
g->stack_stop |________|
| |
| __ stop . . . . .
| | ==> . .
|________| _______
| | | |
| | | |
g->stack_start | | |_______| g->stack_copy
*/
intptr_t sz1 = this->_stack_saved;
intptr_t sz2 = stop - this->_stack_start;
assert(this->_stack_start);
if (sz2 > sz1) {
char* c = (char*)PyMem_Realloc(this->stack_copy, sz2);
if (!c) {
PyErr_NoMemory();
return -1;
}
memcpy(c + sz1, this->_stack_start + sz1, sz2 - sz1);
this->stack_copy = c;
this->_stack_saved = sz2;
}
return 0;
}
inline int StackState::copy_stack_to_heap(char* const stackref,
const StackState& current) noexcept
{
/* must free all the C stack up to target_stop */
const char* const target_stop = this->stack_stop;
StackState* owner = const_cast<StackState*>(&current);
assert(owner->_stack_saved == 0); // everything is present on the stack
if (!owner->_stack_start) {
owner = owner->stack_prev; /* not saved if dying */
}
else {
owner->_stack_start = stackref;
}
while (owner->stack_stop < target_stop) {
/* ts_current is entirely within the area to free */
if (owner->copy_stack_to_heap_up_to(owner->stack_stop)) {
return -1; /* XXX */
}
owner = owner->stack_prev;
}
if (owner != this) {
if (owner->copy_stack_to_heap_up_to(target_stop)) {
return -1; /* XXX */
}
}
return 0;
}
inline bool StackState::started() const noexcept
{
return this->stack_stop != nullptr;
}
inline bool StackState::main() const noexcept
{
return this->stack_stop == (char*)-1;
}
inline bool StackState::active() const noexcept
{
return this->_stack_start != nullptr;
}
inline void StackState::set_active() noexcept
{
assert(this->_stack_start == nullptr);
this->_stack_start = (char*)1;
}
inline void StackState::set_inactive() noexcept
{
this->_stack_start = nullptr;
// XXX: What if we still have memory out there?
// That case is actually triggered by
// test_issue251_issue252_explicit_reference_not_collectable (greenlet.tests.test_leaks.TestLeaks)
// and
// test_issue251_issue252_need_to_collect_in_background
// (greenlet.tests.test_leaks.TestLeaks)
//
// Those objects never get deallocated, so the destructor never
// runs.
// It *seems* safe to clean up the memory here?
if (this->_stack_saved) {
this->free_stack_copy();
}
}
inline intptr_t StackState::stack_saved() const noexcept
{
return this->_stack_saved;
}
inline char* StackState::stack_start() const noexcept
{
return this->_stack_start;
}
inline StackState StackState::make_main() noexcept
{
StackState s;
s._stack_start = (char*)1;
s.stack_stop = (char*)-1;
return s;
}
StackState::~StackState()
{
if (this->_stack_saved != 0) {
this->free_stack_copy();
}
}
void StackState::copy_from_stack(void* vdest, const void* vsrc, size_t n) const
{
char* dest = static_cast<char*>(vdest);
const char* src = static_cast<const char*>(vsrc);
if (src + n <= this->_stack_start
|| src >= this->_stack_start + this->_stack_saved
|| this->_stack_saved == 0) {
// Nothing we're copying was spilled from the stack
memcpy(dest, src, n);
return;
}
if (src < this->_stack_start) {
// Copy the part before the saved stack.
// We know src + n > _stack_start due to the test above.
const size_t nbefore = this->_stack_start - src;
memcpy(dest, src, nbefore);
dest += nbefore;
src += nbefore;
n -= nbefore;
}
// We know src >= _stack_start after the before-copy, and
// src < _stack_start + _stack_saved due to the first if condition
size_t nspilled = std::min<size_t>(n, this->_stack_start + this->_stack_saved - src);
memcpy(dest, this->stack_copy + (src - this->_stack_start), nspilled);
dest += nspilled;
src += nspilled;
n -= nspilled;
if (n > 0) {
// Copy the part after the saved stack
memcpy(dest, src, n);
}
}
}; // namespace greenlet
#endif // GREENLET_STACK_STATE_CPP
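/*
 * Illustrative sketch only -- SpillView is an invented type. It shows the
 * idea behind StackState::copy_from_stack(): part of the stack has been
 * spilled to a heap copy, and reads that fall inside the spilled window must
 * come from that copy so callers see the values the greenlet would see if it
 * were running. (The real code uses a few bulk memcpy calls; this sketch
 * copies byte by byte for clarity.)
 */
#include <cassert>
#include <cstddef>
#include <cstring>

struct SpillView {
    const char* live;    // stands in for _stack_start
    const char* spilled; // stands in for stack_copy
    std::size_t saved;   // stands in for _stack_saved
    void read(char* dest, const char* src, std::size_t n) const {
        for (std::size_t i = 0; i < n; ++i, ++src) {
            if (src >= live && src < live + saved) {
                dest[i] = spilled[src - live]; // redirected into the heap copy
            } else {
                dest[i] = *src;                // ordinary read
            }
        }
    }
};

int main() {
    char live[8]     = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'};
    char heapcopy[4] = {'w', 'x', 'y', 'z'}; // spilled copy of live[0..3]
    SpillView view{live, heapcopy, 4};
    char out[8];
    view.read(out, live, sizeof(out));
    assert(std::memcmp(out, "wxyzEFGH", 8) == 0);
    return 0;
}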

View File

@ -0,0 +1,497 @@
#ifndef GREENLET_THREAD_STATE_HPP
#define GREENLET_THREAD_STATE_HPP
#include <ctime>
#include <stdexcept>
#include "greenlet_internal.hpp"
#include "greenlet_refs.hpp"
#include "greenlet_thread_support.hpp"
using greenlet::refs::BorrowedObject;
using greenlet::refs::BorrowedGreenlet;
using greenlet::refs::BorrowedMainGreenlet;
using greenlet::refs::OwnedMainGreenlet;
using greenlet::refs::OwnedObject;
using greenlet::refs::OwnedGreenlet;
using greenlet::refs::OwnedList;
using greenlet::refs::PyErrFetchParam;
using greenlet::refs::PyArgParseParam;
using greenlet::refs::ImmortalString;
using greenlet::refs::CreatedModule;
using greenlet::refs::PyErrPieces;
using greenlet::refs::NewReference;
namespace greenlet {
/**
* Thread-local state of greenlets.
*
* Each native thread will get exactly one of these objects,
* automatically accessed through the best available thread-local
* mechanism the compiler supports (``thread_local`` for C++11
* compilers or ``__thread``/``declspec(thread)`` for older GCC/clang
* or MSVC, respectively.)
*
* Previously, we kept thread-local state mostly in a bunch of
* ``static volatile`` variables in the main greenlet file.. This had
* the problem of requiring extra checks, loops, and great care
* accessing these variables if we potentially invoked any Python code
* that could release the GIL, because the state could change out from
* under us. Making the variables thread-local solves this problem.
*
* When we detected that a greenlet API accessing the current greenlet
* was invoked from a different thread than the greenlet belonged to,
* we stored a reference to the greenlet in the Python thread
* dictionary for the thread the greenlet belonged to. This could lead
* to memory leaks if the thread then exited (because of a reference
* cycle, as greenlets referred to the thread dictionary, and deleting
* non-current greenlets leaked their frame plus perhaps arguments on
* the C stack). If a thread exited while still having running
* greenlet objects (perhaps that had just switched back to the main
* greenlet), and did not invoke one of the greenlet APIs *in that
* thread, immediately before it exited, without some other thread
* then being invoked*, such a leak was guaranteed.
*
* This can be partly solved by using compiler thread-local variables
* instead of the Python thread dictionary, thus avoiding a cycle.
*
* To fully solve this problem, we need a reliable way to know that a
* thread is done and we should clean up the main greenlet. On POSIX,
* we can use the destructor function of ``pthread_key_create``, but
* there's nothing similar on Windows; a C++11 thread local object
* reliably invokes its destructor when the thread it belongs to exits
* (non-C++11 compilers offer ``__thread`` or ``declspec(thread)`` to
* create thread-local variables, but they can't hold C++ objects that
* invoke destructors; the C++11 version is the most portable solution
* I found). When the thread exits, we can drop references and
* otherwise manipulate greenlets and frames that we know can no
* longer be switched to. For compilers that don't support C++11
* thread locals, we have a solution that uses the python thread
* dictionary, though it may not collect everything as promptly as
* other compilers do, if some other library is using the thread
* dictionary and has a cycle or extra reference.
*
* There are two small wrinkles. The first is that when the thread
* exits, it is too late to actually invoke Python APIs: the Python
* thread state is gone, and the GIL is released. To solve *this*
* problem, our destructor uses ``Py_AddPendingCall`` to transfer the
* destruction work to the main thread. (This is not an issue for the
* dictionary solution.)
*
* The second is that once the thread exits, the thread local object
* is invalid and we can't even access a pointer to it, so we can't
* pass it to ``Py_AddPendingCall``. This is handled by actually using
* a second object that's thread local (ThreadStateCreator) and having
* it dynamically allocate this object so it can live until the
* pending call runs.
*/
class ThreadState {
private:
// As of commit 08ad1dd7012b101db953f492e0021fb08634afad
// this class needed 56 bytes in a Py_DEBUG build
// on 64-bit macOS 11.
// Adding the vector takes us up to 80 bytes.
/* Strong reference to the main greenlet */
OwnedMainGreenlet main_greenlet;
/* Strong reference to the current greenlet. */
OwnedGreenlet current_greenlet;
/* Strong reference to the trace function, if any. */
OwnedObject tracefunc;
typedef std::vector<PyGreenlet*, PythonAllocator<PyGreenlet*> > deleteme_t;
/* A vector of raw PyGreenlet pointers representing things that need
to be deleted when this thread is running. The vector owns the
references, but you need to manually INCREF/DECREF as you use
them. We don't use a vector<refs::OwnedGreenlet> because we
make a copy of this vector, and that would become O(n) as all the
refcounts are incremented in the copy.
*/
deleteme_t deleteme;
#ifdef GREENLET_NEEDS_EXCEPTION_STATE_SAVED
void* exception_state;
#endif
static std::clock_t _clocks_used_doing_gc;
static ImmortalString get_referrers_name;
static PythonAllocator<ThreadState> allocator;
G_NO_COPIES_OF_CLS(ThreadState);
// Allocates a main greenlet for the thread state. If this fails,
// exits the process. Called only during constructing a ThreadState.
MainGreenlet* alloc_main()
{
PyGreenlet* gmain;
/* create the main greenlet for this thread */
gmain = reinterpret_cast<PyGreenlet*>(PyType_GenericAlloc(&PyGreenlet_Type, 0));
if (gmain == NULL) {
throw PyFatalError("alloc_main failed to alloc"); //exits the process
}
MainGreenlet* const main = new MainGreenlet(gmain, this);
assert(Py_REFCNT(gmain) == 1);
assert(gmain->pimpl == main);
return main;
}
public:
static void* operator new(size_t UNUSED(count))
{
return ThreadState::allocator.allocate(1);
}
static void operator delete(void* ptr)
{
return ThreadState::allocator.deallocate(static_cast<ThreadState*>(ptr),
1);
}
static void init()
{
ThreadState::get_referrers_name = "get_referrers";
ThreadState::_clocks_used_doing_gc = 0;
}
ThreadState()
{
#ifdef GREENLET_NEEDS_EXCEPTION_STATE_SAVED
this->exception_state = slp_get_exception_state();
#endif
// XXX: Potentially dangerous, exposing a not fully
// constructed object.
MainGreenlet* const main = this->alloc_main();
this->main_greenlet = OwnedMainGreenlet::consuming(
main->self()
);
assert(this->main_greenlet);
this->current_greenlet = main->self();
// The main greenlet starts with one reference: the returned one. We
// then copied it to the current greenlet.
assert(this->main_greenlet.REFCNT() == 2);
}
inline void restore_exception_state()
{
#ifdef GREENLET_NEEDS_EXCEPTION_STATE_SAVED
// It's probably important this be inlined and only call C
// functions to avoid adding an SEH frame.
slp_set_exception_state(this->exception_state);
#endif
}
inline bool has_main_greenlet() const noexcept
{
return bool(this->main_greenlet);
}
// Called from the ThreadStateCreator when we're in non-standard
// threading mode. In that case, there is an object in the Python
// thread state dictionary that points to us. The main greenlet
// also traverses into us, in which case it's crucial not to
// traverse back into the main greenlet.
int tp_traverse(visitproc visit, void* arg, bool traverse_main=true)
{
if (traverse_main) {
Py_VISIT(main_greenlet.borrow_o());
}
if (traverse_main || current_greenlet != main_greenlet) {
Py_VISIT(current_greenlet.borrow_o());
}
Py_VISIT(tracefunc.borrow());
return 0;
}
inline BorrowedMainGreenlet borrow_main_greenlet() const noexcept
{
assert(this->main_greenlet);
assert(this->main_greenlet.REFCNT() >= 2);
return this->main_greenlet;
};
inline OwnedMainGreenlet get_main_greenlet() const noexcept
{
return this->main_greenlet;
}
/**
* In addition to returning a new reference to the current
* greenlet, this performs any maintenance needed.
*/
inline OwnedGreenlet get_current()
{
/* green_dealloc() cannot delete greenlets from other threads, so
it stores them in the thread dict; delete them now. */
this->clear_deleteme_list();
//assert(this->current_greenlet->main_greenlet == this->main_greenlet);
//assert(this->main_greenlet->main_greenlet == this->main_greenlet);
return this->current_greenlet;
}
/**
* As for the non-const get_current(): performs maintenance, but returns a borrowed reference.
*/
inline BorrowedGreenlet borrow_current()
{
this->clear_deleteme_list();
return this->current_greenlet;
}
/**
* Does no maintenance.
*/
inline OwnedGreenlet get_current() const
{
return this->current_greenlet;
}
template<typename T, refs::TypeChecker TC>
inline bool is_current(const refs::PyObjectPointer<T, TC>& obj) const
{
return this->current_greenlet.borrow_o() == obj.borrow_o();
}
inline void set_current(const OwnedGreenlet& target)
{
this->current_greenlet = target;
}
private:
/**
* Deref and remove the greenlets from the deleteme list. Must be
* holding the GIL.
*
* If *murder* is true, then we must be called from a different
* thread than the one that these greenlets were running in.
* In that case, if the greenlet was actually running, we destroy
* the frame reference and otherwise make it appear dead before
* proceeding; otherwise, we would try (and fail) to raise an
* exception in it and wind up right back in this list.
*/
inline void clear_deleteme_list(const bool murder=false)
{
if (!this->deleteme.empty()) {
// It's possible we could add items to this list while
// running Python code if there's a thread switch, so we
// need to defensively copy it before that can happen.
deleteme_t copy = this->deleteme;
this->deleteme.clear(); // in case things come back on the list
for(deleteme_t::iterator it = copy.begin(), end = copy.end();
it != end;
++it ) {
PyGreenlet* to_del = *it;
if (murder) {
// Force each greenlet to appear dead; we can't raise an
// exception into it anymore anyway.
to_del->pimpl->murder_in_place();
}
// The only reference to these greenlets should be in
// this list, decreffing them should let them be
// deleted again, triggering calls to green_dealloc()
// in the correct thread (if we're not murdering).
// This may run arbitrary Python code and switch
// threads or greenlets!
Py_DECREF(to_del);
if (PyErr_Occurred()) {
PyErr_WriteUnraisable(nullptr);
PyErr_Clear();
}
}
}
}
public:
/**
* Returns a new reference, or a false object.
*/
inline OwnedObject get_tracefunc() const
{
return tracefunc;
};
inline void set_tracefunc(BorrowedObject tracefunc)
{
assert(tracefunc);
if (tracefunc == BorrowedObject(Py_None)) {
this->tracefunc.CLEAR();
}
else {
this->tracefunc = tracefunc;
}
}
/**
* Given a reference to a greenlet that some other thread
* attempted to delete (has a refcount of 0) store it for later
* deletion when the thread this state belongs to is current.
*/
inline void delete_when_thread_running(PyGreenlet* to_del)
{
Py_INCREF(to_del);
this->deleteme.push_back(to_del);
}
/**
* Set to std::clock_t(-1) to disable.
*/
inline static std::clock_t& clocks_used_doing_gc()
{
return ThreadState::_clocks_used_doing_gc;
}
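// Note: this counter backs the module-level helpers re-exported from
// greenlet/__init__.py in this commit: get_clocks_used_doing_optional_cleanup()
// reads it, and enable_optional_cleanup(False) is expected to set it to
// std::clock_t(-1), which makes ~ThreadState skip the optional
// gc.get_referrers scan below.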
~ThreadState()
{
if (!PyInterpreterState_Head()) {
// We shouldn't get here (our callers protect us)
// but if we do, all we can do is bail early.
return;
}
// We should not have an "origin" greenlet; that only exists
// for the temporary time during a switch, which should not
// be in progress as the thread dies.
//assert(!this->switching_state.origin);
this->tracefunc.CLEAR();
// Forcibly GC as much as we can.
this->clear_deleteme_list(true);
// The pending call did this.
assert(this->main_greenlet->thread_state() == nullptr);
// If the main greenlet is the current greenlet,
// then we "fell off the end" and the thread died.
// It's possible that there is some other greenlet that
// switched to us, leaving a reference to the main greenlet
// on the stack, somewhere uncollectible. Try to detect that.
if (this->current_greenlet == this->main_greenlet && this->current_greenlet) {
assert(this->current_greenlet->is_currently_running_in_some_thread());
// Drop one reference we hold.
this->current_greenlet.CLEAR();
assert(!this->current_greenlet);
// Only our reference to the main greenlet should be left,
// But hold onto the pointer in case we need to do extra cleanup.
PyGreenlet* old_main_greenlet = this->main_greenlet.borrow();
Py_ssize_t cnt = this->main_greenlet.REFCNT();
this->main_greenlet.CLEAR();
if (ThreadState::_clocks_used_doing_gc != std::clock_t(-1)
&& cnt == 2 && Py_REFCNT(old_main_greenlet) == 1) {
// Highly likely that the reference is somewhere on
// the stack, not reachable by GC. Verify.
// XXX: This is O(n) in the total number of objects.
// TODO: Add a way to disable this at runtime, and
// another way to report on it.
std::clock_t begin = std::clock();
NewReference gc(PyImport_ImportModule("gc"));
if (gc) {
OwnedObject get_referrers = gc.PyRequireAttr(ThreadState::get_referrers_name);
OwnedList refs(get_referrers.PyCall(old_main_greenlet));
if (refs && refs.empty()) {
assert(refs.REFCNT() == 1);
// We found nothing! So we left a dangling
// reference: Probably the last thing some
// other greenlet did was call
// 'getcurrent().parent.switch()' to switch
// back to us. Clean it up. This will be the
// case on CPython 3.7 and newer, as they use
// an internal calling convention that avoids
// creating method objects and storing them on
// the stack.
Py_DECREF(old_main_greenlet);
}
else if (refs
&& refs.size() == 1
&& PyCFunction_Check(refs.at(0))
&& Py_REFCNT(refs.at(0)) == 2) {
assert(refs.REFCNT() == 1);
// Ok, we found a C method that refers to the
// main greenlet, and it's only referenced
// twice, once in the list we just created,
// once from...somewhere else. If we can't
// find where else, then this is a leak.
// This happens in older versions of CPython
// that create a bound method object somewhere
// on the stack that we'll never get back to.
if (PyCFunction_GetFunction(refs.at(0).borrow()) == (PyCFunction)green_switch) {
BorrowedObject function_w = refs.at(0);
refs.clear(); // destroy the reference
// from the list.
// back to one reference. Can *it* be
// found?
assert(function_w.REFCNT() == 1);
refs = get_referrers.PyCall(function_w);
if (refs && refs.empty()) {
// Nope, it can't be found so it won't
// ever be GC'd. Drop it.
Py_CLEAR(function_w);
}
}
}
std::clock_t end = std::clock();
ThreadState::_clocks_used_doing_gc += (end - begin);
}
}
}
// We need to make sure this greenlet appears to be dead,
// because otherwise deallocing it would fail to raise an
// exception in it (the thread is dead) and put it back in our
// deleteme list.
if (this->current_greenlet) {
this->current_greenlet->murder_in_place();
this->current_greenlet.CLEAR();
}
if (this->main_greenlet) {
// Couldn't have been the main greenlet that was running
// when the thread exited (because we already cleared this
// pointer if it was). This shouldn't be possible?
// If the main greenlet was current when the thread died (it
// should be, right?) then we cleared its self pointer above
// when we cleared the current greenlet's main greenlet pointer.
// assert(this->main_greenlet->main_greenlet == this->main_greenlet
// || !this->main_greenlet->main_greenlet);
// // self reference, probably gone
// this->main_greenlet->main_greenlet.CLEAR();
// This will actually go away when the ivar is destructed.
this->main_greenlet.CLEAR();
}
if (PyErr_Occurred()) {
PyErr_WriteUnraisable(NULL);
PyErr_Clear();
}
}
};
ImmortalString ThreadState::get_referrers_name(nullptr);
PythonAllocator<ThreadState> ThreadState::allocator;
std::clock_t ThreadState::_clocks_used_doing_gc(0);
}; // namespace greenlet
#endif

View File

@ -0,0 +1,102 @@
#ifndef GREENLET_THREAD_STATE_CREATOR_HPP
#define GREENLET_THREAD_STATE_CREATOR_HPP
#include <ctime>
#include <stdexcept>
#include "greenlet_internal.hpp"
#include "greenlet_refs.hpp"
#include "greenlet_thread_support.hpp"
#include "TThreadState.hpp"
namespace greenlet {
typedef void (*ThreadStateDestructor)(ThreadState* const);
template<ThreadStateDestructor Destructor>
class ThreadStateCreator
{
private:
// Initialized to 1, and, if still 1, created on access.
// Set to 0 on destruction.
ThreadState* _state;
G_NO_COPIES_OF_CLS(ThreadStateCreator);
inline bool has_initialized_state() const noexcept
{
return this->_state != (ThreadState*)1;
}
inline bool has_state() const noexcept
{
return this->has_initialized_state() && this->_state != nullptr;
}
public:
// Only one of these, auto created per thread.
// Constructing the state constructs the MainGreenlet.
ThreadStateCreator() :
_state((ThreadState*)1)
{
}
~ThreadStateCreator()
{
if (this->has_state()) {
Destructor(this->_state);
}
this->_state = nullptr;
}
inline ThreadState& state()
{
// The main greenlet will own this pointer when it is created,
// which will be right after this. The plan is to give every
// greenlet a pointer to the main greenlet for the thread it
// runs in; if we are doing something cross-thread, we need to
// access the pointer from the main greenlet. Deleting the
// thread, and hence the thread-local storage, will delete the
// state pointer in the main greenlet.
if (!this->has_initialized_state()) {
// XXX: Assuming allocation never fails
this->_state = new ThreadState;
// For non-standard threading, we need to store an object
// in the Python thread state dictionary so that it can be
// DECREF'd when the thread ends (ideally; the dict could
// last longer) and clean this object up.
}
if (!this->_state) {
throw std::runtime_error("Accessing state after destruction.");
}
return *this->_state;
}
operator ThreadState&()
{
return this->state();
}
operator ThreadState*()
{
return &this->state();
}
inline int tp_traverse(visitproc visit, void* arg)
{
if (this->has_state()) {
return this->_state->tp_traverse(visit, arg);
}
return 0;
}
};
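// A concrete instantiation of this template appears in TThreadStateDestroy.cpp
// (reproduced here for orientation):
//
//     typedef greenlet::ThreadStateCreator<
//         greenlet::ThreadState_DestroyNoGIL::MarkGreenletDeadAndQueueCleanup>
//         ThreadStateCreator;
//     static thread_local ThreadStateCreator g_thread_state_global;
//     #define GET_THREAD_STATE() g_thread_state_global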
}; // namespace greenlet
#endif

View File

@ -0,0 +1,217 @@
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
/**
* Implementation of the ThreadState destructors.
*
* Format with:
* clang-format -i --style=file src/greenlet/greenlet.c
*
*
* Fix missing braces with:
* clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
*/
#ifndef T_THREADSTATE_DESTROY
#define T_THREADSTATE_DESTROY
#include "TGreenlet.hpp"
#include "greenlet_thread_support.hpp"
#include "greenlet_compiler_compat.hpp"
#include "TGreenletGlobals.cpp"
#include "TThreadState.hpp"
#include "TThreadStateCreator.hpp"
namespace greenlet {
extern "C" {
struct ThreadState_DestroyNoGIL
{
/**
This function uses the same lock that the PendingCallback does
*/
static void
MarkGreenletDeadAndQueueCleanup(ThreadState* const state)
{
#if GREENLET_BROKEN_THREAD_LOCAL_CLEANUP_JUST_LEAK
return;
#endif
// We are *NOT* holding the GIL. Our thread is in the middle
// of its death throes and the Python thread state is already
// gone so we can't use most Python APIs. One that is safe is
// ``Py_AddPendingCall``, unless the interpreter itself has
// been torn down. There is a limited number of calls that can
// be queued: 32 (NPENDINGCALLS) in CPython 3.10, so we
// coalesce these calls using our own queue.
if (!MarkGreenletDeadIfNeeded(state)) {
// No state, or no greenlet
return;
}
// XXX: Because we don't have the GIL, this is a race condition.
if (!PyInterpreterState_Head()) {
// We have to leak the thread state, if the
// interpreter has shut down when we're getting
// deallocated, we can't run the cleanup code that
// deleting it would imply.
return;
}
AddToCleanupQueue(state);
}
private:
// If the state has an allocated main greenlet:
// - mark the greenlet as dead by disassociating it from the state;
// - return 1
// Otherwise, return 0.
static bool
MarkGreenletDeadIfNeeded(ThreadState* const state)
{
if (state && state->has_main_greenlet()) {
// mark the thread as dead ASAP.
// this is racy! If we try to throw or switch to a
// greenlet from this thread from some other thread before
// we clear the state pointer, it won't realize the state
// is dead which can crash the process.
PyGreenlet* p(state->borrow_main_greenlet().borrow());
assert(p->pimpl->thread_state() == state || p->pimpl->thread_state() == nullptr);
dynamic_cast<MainGreenlet*>(p->pimpl)->thread_state(nullptr);
return true;
}
return false;
}
static void
AddToCleanupQueue(ThreadState* const state)
{
assert(state && state->has_main_greenlet());
// NOTE: Because we're not holding the GIL here, some other
// Python thread could run and call ``os.fork()``, which would
// be bad if that happened while we are holding the cleanup
// lock (it wouldn't function in the child process).
// Make a best effort to try to keep the duration we hold the
// lock short.
// TODO: On platforms that support it, use ``pthread_atfork`` to
// drop this lock.
LockGuard cleanup_lock(*mod_globs->thread_states_to_destroy_lock);
mod_globs->queue_to_destroy(state);
if (mod_globs->thread_states_to_destroy.size() == 1) {
// We added the first item to the queue. We need to schedule
// the cleanup.
// A size greater than 1 means that we have already added the pending call,
// and in fact, it may be executing now.
// If it is executing, our lock makes sure that it will see the item we just added
// to the queue on its next iteration (after we release the lock)
//
// A size of 1 means there is no pending call, OR the pending call is
// currently executing, has dropped the lock, and is deleting the last item
// from the queue; its next iteration will go ahead and delete the item we just added.
// And the pending call we schedule here will have no work to do.
int result = AddPendingCall(
PendingCallback_DestroyQueueWithGIL,
nullptr);
if (result < 0) {
// Hmm, what can we do here?
fprintf(stderr,
"greenlet: WARNING: failed in call to Py_AddPendingCall; "
"expect a memory leak.\n");
}
}
}
static int
PendingCallback_DestroyQueueWithGIL(void* UNUSED(arg))
{
// We're holding the GIL here, so no Python code should be able to
// run to call ``os.fork()``.
while (1) {
ThreadState* to_destroy;
{
LockGuard cleanup_lock(*mod_globs->thread_states_to_destroy_lock);
if (mod_globs->thread_states_to_destroy.empty()) {
break;
}
to_destroy = mod_globs->take_next_to_destroy();
}
assert(to_destroy);
assert(to_destroy->has_main_greenlet());
// Drop the lock while we do the actual deletion.
// This allows other calls to MarkGreenletDeadAndQueueCleanup
// to enter and add to our queue.
DestroyOneWithGIL(to_destroy);
}
return 0;
}
static void
DestroyOneWithGIL(const ThreadState* const state)
{
// Holding the GIL.
// Passed a non-shared pointer to the actual thread state.
// state -> main greenlet
assert(state->has_main_greenlet());
PyGreenlet* main(state->borrow_main_greenlet());
// When we need to do cross-thread operations, we check this.
// A NULL value means the thread died some time ago.
// We do this here, rather than in a Python dealloc function
// for the greenlet, in case there's still a reference out
// there.
dynamic_cast<MainGreenlet*>(main->pimpl)->thread_state(nullptr);
delete state; // Deleting this runs the destructor, DECREFs the main greenlet.
}
static int AddPendingCall(int (*func)(void*), void* arg)
{
// If the interpreter is in the middle of finalizing, we can't add a
// pending call. Trying to do so will end up in a SIGSEGV, as
// Py_AddPendingCall will not be able to get the interpreter and will
// try to dereference a NULL pointer. It's possible this can still
// segfault if we happen to get context switched, and maybe we should
// just always implement our own AddPendingCall, but I'd like to see if
// this works first
#if GREENLET_PY313
if (Py_IsFinalizing()) {
#else
if (_Py_IsFinalizing()) {
#endif
#ifdef GREENLET_DEBUG
// No need to log in the general case. Yes, we'll leak,
// but we're shutting down so it should be ok.
fprintf(stderr,
"greenlet: WARNING: Interpreter is finalizing. Ignoring "
"call to Py_AddPendingCall; \n");
#endif
return 0;
}
return Py_AddPendingCall(func, arg);
}
};
};
}; // namespace greenlet
// The intent when GET_THREAD_STATE() is needed multiple times in a
// function is to take a reference to its return value in a local
// variable, to avoid the thread-local indirection. On some platforms
// (macOS), accessing a thread-local involves a function call (plus an
// initial function call in each function that uses a thread local);
// in contrast, static volatile variables are at some pre-computed
// offset.
typedef greenlet::ThreadStateCreator<greenlet::ThreadState_DestroyNoGIL::MarkGreenletDeadAndQueueCleanup> ThreadStateCreator;
static thread_local ThreadStateCreator g_thread_state_global;
#define GET_THREAD_STATE() g_thread_state_global
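// Illustrative call site following the advice above (the function name is
// hypothetical; real examples look like UserGreenlet::g_initialstub):
//
//     static void example_call_site()
//     {
//         ThreadState& state = GET_THREAD_STATE().state(); // one thread-local lookup
//         state.borrow_current();                          // later uses avoid TLS entirely
//         state.get_main_greenlet();
//     }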
#endif //T_THREADSTATE_DESTROY

View File

@ -0,0 +1,662 @@
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
/**
* Implementation of greenlet::UserGreenlet.
*
* Format with:
* clang-format -i --style=file src/greenlet/greenlet.c
*
*
* Fix missing braces with:
* clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
*/
#ifndef T_USER_GREENLET_CPP
#define T_USER_GREENLET_CPP
#include "greenlet_internal.hpp"
#include "TGreenlet.hpp"
#include "TThreadStateDestroy.cpp"
namespace greenlet {
using greenlet::refs::BorrowedMainGreenlet;
greenlet::PythonAllocator<UserGreenlet> UserGreenlet::allocator;
void* UserGreenlet::operator new(size_t UNUSED(count))
{
return allocator.allocate(1);
}
void UserGreenlet::operator delete(void* ptr)
{
return allocator.deallocate(static_cast<UserGreenlet*>(ptr),
1);
}
UserGreenlet::UserGreenlet(PyGreenlet* p, BorrowedGreenlet the_parent)
: Greenlet(p), _parent(the_parent)
{
}
UserGreenlet::~UserGreenlet()
{
// Python 3.11: If we don't clear out the raw frame datastack
// when deleting an unfinished greenlet,
// TestLeaks.test_untracked_memory_doesnt_increase_unfinished_thread_dealloc_in_main fails.
this->python_state.did_finish(nullptr);
this->tp_clear();
}
const BorrowedMainGreenlet
UserGreenlet::main_greenlet() const
{
return this->_main_greenlet;
}
BorrowedMainGreenlet
UserGreenlet::find_main_greenlet_in_lineage() const
{
if (this->started()) {
assert(this->_main_greenlet);
return BorrowedMainGreenlet(this->_main_greenlet);
}
if (!this->_parent) {
/* garbage collected greenlet in chain */
// XXX: WHAT?
return BorrowedMainGreenlet(nullptr);
}
return this->_parent->find_main_greenlet_in_lineage();
}
/**
* CAUTION: This will allocate memory and may trigger garbage
* collection and arbitrary Python code.
*/
OwnedObject
UserGreenlet::throw_GreenletExit_during_dealloc(const ThreadState& current_thread_state)
{
/* The dying greenlet cannot be a parent of ts_current
because the 'parent' field chain would hold a
reference */
UserGreenlet::ParentIsCurrentGuard with_current_parent(this, current_thread_state);
// We don't care about the return value, only whether an
// exception happened. Whether or not an exception happens,
// we need to restore the parent in case the greenlet gets
// resurrected.
return Greenlet::throw_GreenletExit_during_dealloc(current_thread_state);
}
ThreadState*
UserGreenlet::thread_state() const noexcept
{
// TODO: maybe make this throw, if the thread state isn't there?
// if (!this->main_greenlet) {
// throw std::runtime_error("No thread state"); // TODO: Better exception
// }
if (!this->_main_greenlet) {
return nullptr;
}
return this->_main_greenlet->thread_state();
}
bool
UserGreenlet::was_running_in_dead_thread() const noexcept
{
return this->_main_greenlet && !this->thread_state();
}
OwnedObject
UserGreenlet::g_switch()
{
assert(this->args() || PyErr_Occurred());
try {
this->check_switch_allowed();
}
catch (const PyErrOccurred&) {
this->release_args();
throw;
}
// Switching greenlets used to attempt to clean out ones that need
// to be deleted *if* we detected a thread switch. Should it still do
// that?
// An issue is that if we delete a greenlet from another thread,
// it gets queued to this thread, and ``kill_greenlet()`` switches
// back into the greenlet
/* find the real target by ignoring dead greenlets,
and if necessary starting a greenlet. */
switchstack_result_t err;
Greenlet* target = this;
// TODO: probably cleaner to handle the case where we do
// switch to ourself separately from the other cases.
// This can probably even further be simplified if we keep
// track of the switching_state we're going for and just call
// into g_switch() if it's not ourself. The main problem with that
// is that we would be using more stack space.
bool target_was_me = true;
bool was_initial_stub = false;
while (target) {
if (target->active()) {
if (!target_was_me) {
target->args() <<= this->args();
assert(!this->args());
}
err = target->g_switchstack();
break;
}
if (!target->started()) {
// We never encounter a main greenlet that's not started.
assert(!target->main());
UserGreenlet* real_target = static_cast<UserGreenlet*>(target);
assert(real_target);
void* dummymarker;
was_initial_stub = true;
if (!target_was_me) {
target->args() <<= this->args();
assert(!this->args());
}
try {
// This can only throw back to us while we're
// still in this greenlet. Once the new greenlet
// is bootstrapped, it has its own exception state.
err = real_target->g_initialstub(&dummymarker);
}
catch (const PyErrOccurred&) {
this->release_args();
throw;
}
catch (const GreenletStartedWhileInPython&) {
// The greenlet was started sometime before this
// greenlet actually switched to it, i.e.,
// "concurrent" calls to switch() or throw().
// We need to retry the switch.
// Note that the current greenlet has been reset
// to this one (or we wouldn't be running!)
continue;
}
break;
}
target = target->parent();
target_was_me = false;
}
// The ``this`` pointer and all other stack or register based
// variables are invalid now, at least where things succeed
// above.
// But this one, probably not so much? It's not clear if it's
// safe to throw an exception at this point.
if (err.status < 0) {
// If we get here, either g_initialstub()
// failed, or g_switchstack() failed. Either one of those
// cases SHOULD leave us in the original greenlet with a valid
// stack.
return this->on_switchstack_or_initialstub_failure(target, err, target_was_me, was_initial_stub);
}
// err.the_new_current_greenlet would be the same as ``target``,
// if target wasn't probably corrupt.
return err.the_new_current_greenlet->g_switch_finish(err);
}
Greenlet::switchstack_result_t
UserGreenlet::g_initialstub(void* mark)
{
OwnedObject run;
// We need to grab a reference to the current switch arguments
// in case we're entered concurrently during the call to
// GetAttr() and have to try again.
// We'll restore them when we return in that case.
// Scope them tightly to avoid ref leaks.
{
SwitchingArgs args(this->args());
/* save exception in case getattr clears it */
PyErrPieces saved;
/*
self.run is the object to call in the new greenlet.
This could run arbitrary python code and switch greenlets!
*/
run = this->self().PyRequireAttr(mod_globs->str_run);
/* restore saved exception */
saved.PyErrRestore();
/* recheck that it's safe to switch in case greenlet reparented anywhere above */
this->check_switch_allowed();
/* by the time we got here another start could happen elsewhere,
* that means it should now be a regular switch.
* This can happen if the Python code is a subclass that implements
* __getattribute__ or __getattr__, or makes ``run`` a descriptor;
* all of those can run arbitrary code that switches back into
* this greenlet.
*/
if (this->stack_state.started()) {
// the successful switch cleared these out, we need to
// restore our version. They will be copied on up to the
// next target.
assert(!this->args());
this->args() <<= args;
throw GreenletStartedWhileInPython();
}
}
// Sweet, if we got here, we have the go-ahead and will switch
// greenlets.
// Nothing we do from here on out should allow for a thread or
// greenlet switch: No arbitrary calls to Python, including
// decref'ing
#if GREENLET_USE_CFRAME
/* OK, we need it, we're about to switch greenlets, save the state. */
/*
See green_new(). This is a stack-allocated variable used
while *self* is in PyObject_Call().
We want to defer copying the state info until we're sure
we need it and are in a stable place to do so.
*/
_PyCFrame trace_info;
this->python_state.set_new_cframe(trace_info);
#endif
/* start the greenlet */
ThreadState& thread_state = GET_THREAD_STATE().state();
this->stack_state = StackState(mark,
thread_state.borrow_current()->stack_state);
this->python_state.set_initial_state(PyThreadState_GET());
this->exception_state.clear();
this->_main_greenlet = thread_state.get_main_greenlet();
/* perform the initial switch */
switchstack_result_t err = this->g_switchstack();
/* returns twice!
The 1st time with ``err == 1``: we are in the new greenlet.
This one owns a greenlet that used to be current.
The 2nd time with ``err <= 0``: back in the caller's
greenlet; this happens if the child finishes or switches
explicitly to us. Either way, the ``err`` variable is
created twice at the same memory location, but possibly
having different ``origin`` values. Note that it's not
constructed for the second time until the switch actually happens.
*/
if (err.status == 1) {
// In the new greenlet.
// This never returns! Calling inner_bootstrap steals
// the contents of our run object within this stack frame, so
// it is not valid to do anything with it.
try {
this->inner_bootstrap(err.origin_greenlet.relinquish_ownership(),
run.relinquish_ownership());
}
// Getting a C++ exception here isn't good. It's probably a
// bug in the underlying greenlet, meaning it's probably a
// C++ extension. We're going to abort anyway, but try to
// display some nice information *if* possible. Some obscure
// platforms don't properly support this (old 32-bit Arm, see
// https://github.com/python-greenlet/greenlet/issues/385); that's not
// great, but should usually be OK because, as mentioned above, we're
// terminating anyway.
//
// The catching is tested by
// ``test_cpp.CPPTests.test_unhandled_exception_in_greenlet_aborts``.
//
// PyErrOccurred can theoretically be thrown by
// inner_bootstrap() -> g_switch_finish(), but that should
// never make it back to here. It is a std::exception and
// would be caught if it is.
catch (const std::exception& e) {
std::string base = "greenlet: Unhandled C++ exception: ";
base += e.what();
Py_FatalError(base.c_str());
}
catch (...) {
// Some compilers/runtimes use exceptions internally.
// It appears that GCC on Linux with libstdc++ throws an
// exception internally at process shutdown time to unwind
// stacks and clean up resources. Depending on exactly
// where we are when the process exits, that could result
// in an unknown exception getting here. If we
// Py_FatalError() or abort() here, we interfere with
// orderly process shutdown. Throwing the exception on up
// is the right thing to do.
//
// gevent's ``examples/dns_mass_resolve.py`` demonstrates this.
#ifndef NDEBUG
fprintf(stderr,
"greenlet: inner_bootstrap threw unknown exception; "
"is the process terminating?\n");
#endif
throw;
}
Py_FatalError("greenlet: inner_bootstrap returned with no exception.\n");
}
// In contrast, notice that we're keeping the origin greenlet
// around as an owned reference; we need it to call the trace
// function for the switch back into the parent. It was only
// captured at the time the switch actually happened, though,
// so we haven't been keeping an extra reference around this
// whole time.
/* back in the parent */
if (err.status < 0) {
/* start failed badly, restore greenlet state */
this->stack_state = StackState();
this->_main_greenlet.CLEAR();
// CAUTION: This may run arbitrary Python code.
run.CLEAR(); // inner_bootstrap didn't run, we own the reference.
}
// In the success case, the spawned code (inner_bootstrap) will
// take care of decrefing this, so we relinquish ownership so as
// to not double-decref.
run.relinquish_ownership();
return err;
}
void
UserGreenlet::inner_bootstrap(PyGreenlet* origin_greenlet, PyObject* run)
{
// The arguments here would be another great place for move.
// As it is, we take them as a reference so that when we clear
// them we clear what's on the stack above us. Do that NOW, and
// without using a C++ RAII object,
// so there's no way that exiting the parent frame can clear it,
// or we clear it unexpectedly. This arises in the context of the
// interpreter shutting down. See https://github.com/python-greenlet/greenlet/issues/325
//PyObject* run = _run.relinquish_ownership();
/* in the new greenlet */
assert(this->thread_state()->borrow_current() == BorrowedGreenlet(this->_self));
// C++ exceptions cannot propagate to the parent greenlet from
// here. (TODO: Do we need a catch(...) clause, perhaps on the
// function itself? All we could do is terminate the program.)
// NOTE: On 32-bit Windows, the call chain is extremely
// important here in ways that are subtle, having to do with
// the depth of the SEH list. The call to restore it MUST NOT
// add a new SEH handler to the list, or we'll restore it to
// the wrong thing.
this->thread_state()->restore_exception_state();
/* stack variables from above are no good and also will not unwind! */
// EXCEPT: That can't be true, we access run, among others, here.
this->stack_state.set_active(); /* running */
// We're about to possibly run Python code again, which
// could switch back/away to/from us, so we need to grab the
// arguments locally.
SwitchingArgs args;
args <<= this->args();
assert(!this->args());
// XXX: We could clear this much earlier, right?
// Or would that introduce the possibility of running Python
// code when we don't want to?
// CAUTION: This may run arbitrary Python code.
this->_run_callable.CLEAR();
// For the first switch we need to manually call the trace
// function here instead of in g_switch_finish, because we
// never return there.
if (OwnedObject tracefunc = this->thread_state()->get_tracefunc()) {
OwnedGreenlet trace_origin;
trace_origin = origin_greenlet;
try {
g_calltrace(tracefunc,
args ? mod_globs->event_switch : mod_globs->event_throw,
trace_origin,
this->_self);
}
catch (const PyErrOccurred&) {
/* Turn trace errors into switch throws */
args.CLEAR();
}
}
// We no longer need the origin, it was only here for
// tracing.
// We may never actually exit this stack frame so we need
// to explicitly clear it.
// This could run Python code and switch.
Py_CLEAR(origin_greenlet);
OwnedObject result;
if (!args) {
/* pending exception */
result = NULL;
}
else {
/* call g.run(*args, **kwargs) */
// This could result in further switches
try {
//result = run.PyCall(args.args(), args.kwargs());
// CAUTION: Just invoking this, before the function even
// runs, may cause memory allocations, which may trigger
// GC, which may run arbitrary Python code.
result = OwnedObject::consuming(PyObject_Call(run, args.args().borrow(), args.kwargs().borrow()));
}
catch (...) {
// Unhandled C++ exception!
// If we declare ourselves as noexcept, if we don't catch
// this here, most platforms will just abort() the
// process. But on 64-bit Windows with older versions of
// the C runtime, this can actually corrupt memory and
// just return. We see this when compiling with the
// Windows 7.0 SDK targeting Windows Server 2008, but not
// when using the Appveyor Visual Studio 2019 image. So
// this currently only affects Python 2.7 on Windows 64.
// That is, the tests pass and the runtime aborts
// everywhere else.
//
// However, if we catch it and try to continue with a
// Python error, then all Windows 64 bit platforms corrupt
// memory. So all we can do is manually abort, hopefully
// with a good error message. (Note that the above was
// tested WITHOUT the `/EHr` switch being used at compile
// time, so MSVC may have "optimized" out important
// checking. Using that switch, we may be in a better
// place in terms of memory corruption.) But sometimes it
// can't be caught here at all, which is confusing but not
// terribly surprising; so again, the G_NOEXCEPT_WIN32
// plus "/EHr".
//
// Hopefully the basic C stdlib is still functional enough
// for us to at least print an error.
//
// It gets more complicated than that, though, on some
// platforms, specifically at least Linux/gcc/libstdc++. They use
// an exception to unwind the stack when a background
// thread exits. (See comments about noexcept.) So this
// may not actually represent anything untoward. On those
// platforms we allow throws of this to propagate, or
// attempt to anyway.
# if defined(WIN32) || defined(_WIN32)
Py_FatalError(
"greenlet: Unhandled C++ exception from a greenlet run function. "
"Because memory is likely corrupted, terminating process.");
std::abort();
#else
throw;
#endif
}
}
// These lines may run arbitrary code
args.CLEAR();
Py_CLEAR(run);
if (!result
&& mod_globs->PyExc_GreenletExit.PyExceptionMatches()
&& (this->args())) {
// This can happen, for example, if our only reference
// goes away after we switch back to the parent.
// See test_dealloc_switch_args_not_lost
PyErrPieces clear_error;
result <<= this->args();
result = single_result(result);
}
this->release_args();
this->python_state.did_finish(PyThreadState_GET());
result = g_handle_exit(result);
assert(this->thread_state()->borrow_current() == this->_self);
/* jump back to parent */
this->stack_state.set_inactive(); /* dead */
// TODO: Can we decref some things here? Release our main greenlet
// and maybe parent?
for (Greenlet* parent = this->_parent;
parent;
parent = parent->parent()) {
// We need to somewhere consume a reference to
// the result; in most cases we'll never have control
// back in this stack frame again. Calling
// green_switch actually adds another reference!
// This would probably be clearer with a specific API
// to hand results to the parent.
parent->args() <<= result;
assert(!result);
// The parent greenlet now owns the result; in the
// typical case we'll never get back here to assign to
// result and thus release the reference.
try {
result = parent->g_switch();
}
catch (const PyErrOccurred&) {
// Ignore, keep passing the error on up.
}
/* Return here means switch to parent failed,
* in which case we throw *current* exception
* to the next parent in chain.
*/
assert(!result);
}
/* We ran out of parents, cannot continue */
PyErr_WriteUnraisable(this->self().borrow_o());
Py_FatalError("greenlet: ran out of parent greenlets while propagating exception; "
"cannot continue");
std::abort();
}
void
UserGreenlet::run(const BorrowedObject nrun)
{
if (this->started()) {
throw AttributeError(
"run cannot be set "
"after the start of the greenlet");
}
this->_run_callable = nrun;
}
const OwnedGreenlet
UserGreenlet::parent() const
{
return this->_parent;
}
void
UserGreenlet::parent(const BorrowedObject raw_new_parent)
{
if (!raw_new_parent) {
throw AttributeError("can't delete attribute");
}
BorrowedMainGreenlet main_greenlet_of_new_parent;
BorrowedGreenlet new_parent(raw_new_parent.borrow()); // could
// throw
// TypeError!
for (BorrowedGreenlet p = new_parent; p; p = p->parent()) {
if (p == this->self()) {
throw ValueError("cyclic parent chain");
}
main_greenlet_of_new_parent = p->main_greenlet();
}
if (!main_greenlet_of_new_parent) {
throw ValueError("parent must not be garbage collected");
}
if (this->started()
&& this->_main_greenlet != main_greenlet_of_new_parent) {
throw ValueError("parent cannot be on a different thread");
}
this->_parent = new_parent;
}
void
UserGreenlet::murder_in_place()
{
this->_main_greenlet.CLEAR();
Greenlet::murder_in_place();
}
bool
UserGreenlet::belongs_to_thread(const ThreadState* thread_state) const
{
return Greenlet::belongs_to_thread(thread_state) && this->_main_greenlet == thread_state->borrow_main_greenlet();
}
int
UserGreenlet::tp_traverse(visitproc visit, void* arg)
{
Py_VISIT(this->_parent.borrow_o());
Py_VISIT(this->_main_greenlet.borrow_o());
Py_VISIT(this->_run_callable.borrow_o());
return Greenlet::tp_traverse(visit, arg);
}
int
UserGreenlet::tp_clear()
{
Greenlet::tp_clear();
this->_parent.CLEAR();
this->_main_greenlet.CLEAR();
this->_run_callable.CLEAR();
return 0;
}
UserGreenlet::ParentIsCurrentGuard::ParentIsCurrentGuard(UserGreenlet* p,
const ThreadState& thread_state)
: oldparent(p->_parent),
greenlet(p)
{
p->_parent = thread_state.get_current();
}
UserGreenlet::ParentIsCurrentGuard::~ParentIsCurrentGuard()
{
this->greenlet->_parent = oldparent;
oldparent.CLEAR();
}
}; //namespace greenlet
#endif

View File

@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-
"""
The root of the greenlet package.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__all__ = [
'__version__',
'_C_API',
'GreenletExit',
'error',
'getcurrent',
'greenlet',
'gettrace',
'settrace',
]
# pylint:disable=no-name-in-module
###
# Metadata
###
__version__ = '3.2.0'
from ._greenlet import _C_API # pylint:disable=no-name-in-module
###
# Exceptions
###
from ._greenlet import GreenletExit
from ._greenlet import error
###
# greenlets
###
from ._greenlet import getcurrent
from ._greenlet import greenlet
###
# tracing
###
try:
from ._greenlet import gettrace
from ._greenlet import settrace
except ImportError:
# Tracing wasn't supported.
# XXX: The option to disable it was removed in 1.0,
# so this branch should be dead code.
pass
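# A sketch of a trace hook using the re-exported ``settrace`` (the callback
# signature follows greenlet's documented tracing API; shown only as an
# illustration, not installed by default):
#
#     def _trace(event, args):        # event is 'switch' or 'throw'
#         origin, target = args
#         print(event, origin, target)
#
#     settrace(_trace)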
###
# Constants
# These constants aren't documented and aren't recommended.
# In 1.0, USE_GC and USE_TRACING are always true, and USE_CONTEXT_VARS
# is the same as ``sys.version_info[:2] >= (3, 7)``
###
from ._greenlet import GREENLET_USE_CONTEXT_VARS # pylint:disable=unused-import
from ._greenlet import GREENLET_USE_GC # pylint:disable=unused-import
from ._greenlet import GREENLET_USE_TRACING # pylint:disable=unused-import
# Controlling the use of the gc module. Provisional API for this greenlet
# implementation in 2.0.
from ._greenlet import CLOCKS_PER_SEC # pylint:disable=unused-import
from ._greenlet import enable_optional_cleanup # pylint:disable=unused-import
from ._greenlet import get_clocks_used_doing_optional_cleanup # pylint:disable=unused-import
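# Illustrative use of the provisional cleanup-accounting API imported above
# (assuming the C extension from this tree, which exports all three names):
#
#     import greenlet
#     seconds = greenlet.get_clocks_used_doing_optional_cleanup() / greenlet.CLOCKS_PER_SEC
#     greenlet.enable_optional_cleanup(False)  # skip the O(n) gc.get_referrers scan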
# Other APIs in the _greenlet module are for test support.

View File

@ -0,0 +1,320 @@
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
/* Format with:
* clang-format -i --style=file src/greenlet/greenlet.c
*
*
* Fix missing braces with:
* clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
*/
#include <cstdlib>
#include <string>
#include <algorithm>
#include <exception>
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "structmember.h" // PyMemberDef
#include "greenlet_internal.hpp"
// Code after this point can assume access to things declared in stdint.h,
// including the fixed-width types. This goes for the platform-specific switch functions
// as well.
#include "greenlet_refs.hpp"
#include "greenlet_slp_switch.hpp"
#include "greenlet_thread_support.hpp"
#include "TGreenlet.hpp"
#include "TGreenletGlobals.cpp"
#include "TGreenlet.cpp"
#include "TMainGreenlet.cpp"
#include "TUserGreenlet.cpp"
#include "TBrokenGreenlet.cpp"
#include "TExceptionState.cpp"
#include "TPythonState.cpp"
#include "TStackState.cpp"
#include "TThreadState.hpp"
#include "TThreadStateCreator.hpp"
#include "TThreadStateDestroy.cpp"
#include "PyGreenlet.cpp"
#include "PyGreenletUnswitchable.cpp"
#include "CObjects.cpp"
using greenlet::LockGuard;
using greenlet::LockInitError;
using greenlet::PyErrOccurred;
using greenlet::Require;
using greenlet::g_handle_exit;
using greenlet::single_result;
using greenlet::Greenlet;
using greenlet::UserGreenlet;
using greenlet::MainGreenlet;
using greenlet::BrokenGreenlet;
using greenlet::ThreadState;
using greenlet::PythonState;
// ******* Implementation of things from included files
template<typename T, greenlet::refs::TypeChecker TC>
greenlet::refs::_BorrowedGreenlet<T, TC>& greenlet::refs::_BorrowedGreenlet<T, TC>::operator=(const greenlet::refs::BorrowedObject& other)
{
this->_set_raw_pointer(static_cast<PyObject*>(other));
return *this;
}
template <typename T, greenlet::refs::TypeChecker TC>
inline greenlet::refs::_BorrowedGreenlet<T, TC>::operator Greenlet*() const noexcept
{
if (!this->p) {
return nullptr;
}
return reinterpret_cast<PyGreenlet*>(this->p)->pimpl;
}
template<typename T, greenlet::refs::TypeChecker TC>
greenlet::refs::_BorrowedGreenlet<T, TC>::_BorrowedGreenlet(const BorrowedObject& p)
: BorrowedReference<T, TC>(nullptr)
{
this->_set_raw_pointer(p.borrow());
}
template <typename T, greenlet::refs::TypeChecker TC>
inline greenlet::refs::_OwnedGreenlet<T, TC>::operator Greenlet*() const noexcept
{
if (!this->p) {
return nullptr;
}
return reinterpret_cast<PyGreenlet*>(this->p)->pimpl;
}
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wmissing-field-initializers"
# pragma clang diagnostic ignored "-Wwritable-strings"
#elif defined(__GNUC__)
# pragma GCC diagnostic push
// warning: ISO C++ forbids converting a string constant to char*
// (The python APIs aren't const correct and accept writable char*)
# pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
/***********************************************************
A PyGreenlet is a range of C stack addresses that must be
saved and restored in such a way that the full range of the
stack contains valid data when we switch to it.
Stack layout for a greenlet:
               |     ^^^       |
               |  older data   |
               |               |
  stack_stop . |_______________|
        .      |               |
        .      | greenlet data |
        .      |   in stack    |
        .    * |_______________| . .  _____________  stack_copy + stack_saved
        .      |               |     |             |
        .      |     data      |     |greenlet data|
        .      |   unrelated   |     |    saved    |
        .      |      to       |     |   in heap   |
 stack_start . |     this      | . . |_____________| stack_copy
               |   greenlet    |
               |               |
               |  newer data   |
               |     vvv       |
Note that a greenlet's stack data is typically partly at its correct
place in the stack, and partly saved away in the heap, but always in
the above configuration: two blocks, the more recent one in the heap
and the older one still in the stack (either block may be empty).
Greenlets are chained: each points to the previous greenlet, which is
the one that owns the data currently in the C stack above my
stack_stop. The currently running greenlet is the first element of
this chain. The main (initial) greenlet is the last one. Greenlets
whose stack is entirely in the heap can be skipped from the chain.
The chain is not related to execution order, but only to the order
in which bits of C stack happen to belong to greenlets at a particular
point in time.
The main greenlet doesn't have a stack_stop: it is responsible for the
complete rest of the C stack, and we don't know where it begins. We
use (char*) -1, the largest possible address.
States:
stack_stop == NULL && stack_start == NULL: did not start yet
stack_stop != NULL && stack_start == NULL: already finished
stack_stop != NULL && stack_start != NULL: active
The running greenlet's stack_start is undefined but not NULL.
***********************************************************/
/***********************************************************/
/* Some functions must not be inlined:
* slp_restore_state, when inlined into slp_switch might cause
it to restore stack over its own local variables
* slp_save_state, when inlined would add its own local
variables to the saved stack, wasting space
* slp_switch, cannot be inlined for obvious reasons
* g_initialstub, when inlined would receive a pointer into its
own stack frame, leading to incomplete stack save/restore
g_initialstub is a member function and declared virtual so that the
compiler always calls it through a vtable.
slp_save_state and slp_restore_state are also member functions. They
are called from trampoline functions that themselves are declared as
not eligible for inlining.
*/
extern "C" {
static int GREENLET_NOINLINE(slp_save_state_trampoline)(char* stackref)
{
return switching_thread_state->slp_save_state(stackref);
}
static void GREENLET_NOINLINE(slp_restore_state_trampoline)()
{
switching_thread_state->slp_restore_state();
}
}
/***********************************************************/
#include "PyModule.cpp"
static PyObject*
greenlet_internal_mod_init() noexcept
{
static void* _PyGreenlet_API[PyGreenlet_API_pointers];
try {
CreatedModule m(greenlet_module_def);
Require(PyType_Ready(&PyGreenlet_Type));
Require(PyType_Ready(&PyGreenletUnswitchable_Type));
mod_globs = new greenlet::GreenletGlobals;
ThreadState::init();
m.PyAddObject("greenlet", PyGreenlet_Type);
m.PyAddObject("UnswitchableGreenlet", PyGreenletUnswitchable_Type);
m.PyAddObject("error", mod_globs->PyExc_GreenletError);
m.PyAddObject("GreenletExit", mod_globs->PyExc_GreenletExit);
m.PyAddObject("GREENLET_USE_GC", 1);
m.PyAddObject("GREENLET_USE_TRACING", 1);
m.PyAddObject("GREENLET_USE_CONTEXT_VARS", 1L);
m.PyAddObject("GREENLET_USE_STANDARD_THREADING", 1L);
OwnedObject clocks_per_sec = OwnedObject::consuming(PyLong_FromSsize_t(CLOCKS_PER_SEC));
m.PyAddObject("CLOCKS_PER_SEC", clocks_per_sec);
/* also publish module-level data as attributes of the greentype. */
// XXX: This is weird, and enables a strange pattern of
// confusing the class greenlet with the module greenlet; with
// the exception of (possibly) ``getcurrent()``, this
// shouldn't be encouraged so don't add new items here.
for (const char* const* p = copy_on_greentype; *p; p++) {
OwnedObject o = m.PyRequireAttr(*p);
PyDict_SetItemString(PyGreenlet_Type.tp_dict, *p, o.borrow());
}
/*
* Expose C API
*/
/* types */
_PyGreenlet_API[PyGreenlet_Type_NUM] = (void*)&PyGreenlet_Type;
/* exceptions */
_PyGreenlet_API[PyExc_GreenletError_NUM] = (void*)mod_globs->PyExc_GreenletError;
_PyGreenlet_API[PyExc_GreenletExit_NUM] = (void*)mod_globs->PyExc_GreenletExit;
/* methods */
_PyGreenlet_API[PyGreenlet_New_NUM] = (void*)PyGreenlet_New;
_PyGreenlet_API[PyGreenlet_GetCurrent_NUM] = (void*)PyGreenlet_GetCurrent;
_PyGreenlet_API[PyGreenlet_Throw_NUM] = (void*)PyGreenlet_Throw;
_PyGreenlet_API[PyGreenlet_Switch_NUM] = (void*)PyGreenlet_Switch;
_PyGreenlet_API[PyGreenlet_SetParent_NUM] = (void*)PyGreenlet_SetParent;
/* Previously macros, but now need to be functions externally. */
_PyGreenlet_API[PyGreenlet_MAIN_NUM] = (void*)Extern_PyGreenlet_MAIN;
_PyGreenlet_API[PyGreenlet_STARTED_NUM] = (void*)Extern_PyGreenlet_STARTED;
_PyGreenlet_API[PyGreenlet_ACTIVE_NUM] = (void*)Extern_PyGreenlet_ACTIVE;
_PyGreenlet_API[PyGreenlet_GET_PARENT_NUM] = (void*)Extern_PyGreenlet_GET_PARENT;
/* XXX: Note that our module name is ``greenlet._greenlet``, but for
backwards compatibility with existing C code, we need the _C_API to
be directly in greenlet.
*/
const NewReference c_api_object(Require(
PyCapsule_New(
(void*)_PyGreenlet_API,
"greenlet._C_API",
NULL)));
m.PyAddObject("_C_API", c_api_object);
assert(c_api_object.REFCNT() == 2);
// cerr << "Sizes:"
// << "\n\tGreenlet : " << sizeof(Greenlet)
// << "\n\tUserGreenlet : " << sizeof(UserGreenlet)
// << "\n\tMainGreenlet : " << sizeof(MainGreenlet)
// << "\n\tExceptionState : " << sizeof(greenlet::ExceptionState)
// << "\n\tPythonState : " << sizeof(greenlet::PythonState)
// << "\n\tStackState : " << sizeof(greenlet::StackState)
// << "\n\tSwitchingArgs : " << sizeof(greenlet::SwitchingArgs)
// << "\n\tOwnedObject : " << sizeof(greenlet::refs::OwnedObject)
// << "\n\tBorrowedObject : " << sizeof(greenlet::refs::BorrowedObject)
// << "\n\tPyGreenlet : " << sizeof(PyGreenlet)
// << endl;
return m.borrow(); // But really it's the main reference.
}
catch (const LockInitError& e) {
PyErr_SetString(PyExc_MemoryError, e.what());
return NULL;
}
catch (const PyErrOccurred&) {
return NULL;
}
}
extern "C" {
PyMODINIT_FUNC
PyInit__greenlet(void)
{
return greenlet_internal_mod_init();
}
}; // extern C
#ifdef __clang__
# pragma clang diagnostic pop
#elif defined(__GNUC__)
# pragma GCC diagnostic pop
#endif

View File

@ -0,0 +1,164 @@
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
/* Greenlet object interface */
#ifndef Py_GREENLETOBJECT_H
#define Py_GREENLETOBJECT_H
#include <Python.h>
#ifdef __cplusplus
extern "C" {
#endif
/* This is deprecated and undocumented. It does not change. */
#define GREENLET_VERSION "1.0.0"
#ifndef GREENLET_MODULE
#define implementation_ptr_t void*
#endif
typedef struct _greenlet {
PyObject_HEAD
PyObject* weakreflist;
PyObject* dict;
implementation_ptr_t pimpl;
} PyGreenlet;
#define PyGreenlet_Check(op) (op && PyObject_TypeCheck(op, &PyGreenlet_Type))
/* C API functions */
/* Total number of symbols that are exported */
#define PyGreenlet_API_pointers 12
#define PyGreenlet_Type_NUM 0
#define PyExc_GreenletError_NUM 1
#define PyExc_GreenletExit_NUM 2
#define PyGreenlet_New_NUM 3
#define PyGreenlet_GetCurrent_NUM 4
#define PyGreenlet_Throw_NUM 5
#define PyGreenlet_Switch_NUM 6
#define PyGreenlet_SetParent_NUM 7
#define PyGreenlet_MAIN_NUM 8
#define PyGreenlet_STARTED_NUM 9
#define PyGreenlet_ACTIVE_NUM 10
#define PyGreenlet_GET_PARENT_NUM 11
#ifndef GREENLET_MODULE
/* This section is used by modules that use the greenlet C API */
static void** _PyGreenlet_API = NULL;
# define PyGreenlet_Type \
(*(PyTypeObject*)_PyGreenlet_API[PyGreenlet_Type_NUM])
# define PyExc_GreenletError \
((PyObject*)_PyGreenlet_API[PyExc_GreenletError_NUM])
# define PyExc_GreenletExit \
((PyObject*)_PyGreenlet_API[PyExc_GreenletExit_NUM])
/*
* PyGreenlet_New(PyObject *args)
*
* greenlet.greenlet(run, parent=None)
*/
# define PyGreenlet_New \
(*(PyGreenlet * (*)(PyObject * run, PyGreenlet * parent)) \
_PyGreenlet_API[PyGreenlet_New_NUM])
/*
* PyGreenlet_GetCurrent(void)
*
* greenlet.getcurrent()
*/
# define PyGreenlet_GetCurrent \
(*(PyGreenlet * (*)(void)) _PyGreenlet_API[PyGreenlet_GetCurrent_NUM])
/*
* PyGreenlet_Throw(
* PyGreenlet *greenlet,
* PyObject *typ,
* PyObject *val,
* PyObject *tb)
*
* g.throw(...)
*/
# define PyGreenlet_Throw \
(*(PyObject * (*)(PyGreenlet * self, \
PyObject * typ, \
PyObject * val, \
PyObject * tb)) \
_PyGreenlet_API[PyGreenlet_Throw_NUM])
/*
* PyGreenlet_Switch(PyGreenlet *greenlet, PyObject *args)
*
* g.switch(*args, **kwargs)
*/
# define PyGreenlet_Switch \
(*(PyObject * \
(*)(PyGreenlet * greenlet, PyObject * args, PyObject * kwargs)) \
_PyGreenlet_API[PyGreenlet_Switch_NUM])
/*
* PyGreenlet_SetParent(PyObject *greenlet, PyObject *new_parent)
*
* g.parent = new_parent
*/
# define PyGreenlet_SetParent \
(*(int (*)(PyGreenlet * greenlet, PyGreenlet * nparent)) \
_PyGreenlet_API[PyGreenlet_SetParent_NUM])
/*
* PyGreenlet_GetParent(PyObject* greenlet)
*
* return greenlet.parent;
*
* This could return NULL even if there is no exception active.
* If it does not return NULL, you are responsible for decrementing the
* reference count.
*/
# define PyGreenlet_GetParent \
(*(PyGreenlet* (*)(PyGreenlet*)) \
_PyGreenlet_API[PyGreenlet_GET_PARENT_NUM])
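/*
 * Illustrative caller (``do_something`` is a placeholder, not part of the API):
 *
 *     PyGreenlet* parent = PyGreenlet_GetParent(g);
 *     if (!parent && PyErr_Occurred()) {
 *         return NULL;           // a real error occurred
 *     }
 *     if (parent) {
 *         do_something(parent);
 *         Py_DECREF(parent);     // we own this reference
 *     }
 */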
/*
* deprecated, undocumented alias.
*/
# define PyGreenlet_GET_PARENT PyGreenlet_GetParent
# define PyGreenlet_MAIN \
(*(int (*)(PyGreenlet*)) \
_PyGreenlet_API[PyGreenlet_MAIN_NUM])
# define PyGreenlet_STARTED \
(*(int (*)(PyGreenlet*)) \
_PyGreenlet_API[PyGreenlet_STARTED_NUM])
# define PyGreenlet_ACTIVE \
(*(int (*)(PyGreenlet*)) \
_PyGreenlet_API[PyGreenlet_ACTIVE_NUM])
/* Macro that imports greenlet and initializes C API */
/* NOTE: This has actually moved to ``greenlet._greenlet._C_API``, but we
keep the older definition to be sure older code that might have a copy of
the header still works. */
# define PyGreenlet_Import() \
{ \
_PyGreenlet_API = (void**)PyCapsule_Import("greenlet._C_API", 0); \
}
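/*
 * A hedged sketch of typical use from another extension (names are
 * illustrative; error handling trimmed to the essentials):
 *
 *     // once, in the module init function:
 *     PyGreenlet_Import();
 *     if (_PyGreenlet_API == NULL) {
 *         return NULL;  // import failed, exception already set
 *     }
 *
 *     // later, run a callable in its own greenlet:
 *     static PyObject*
 *     run_in_greenlet(PyObject* run)
 *     {
 *         PyObject* result;
 *         PyGreenlet* g = PyGreenlet_New(run, NULL); // parent defaults to the current greenlet
 *         if (!g) {
 *             return NULL;
 *         }
 *         result = PyGreenlet_Switch(g, NULL, NULL); // like ``g.switch()``
 *         Py_DECREF(g);
 *         return result;
 *     }
 */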
#endif /* GREENLET_MODULE */
#ifdef __cplusplus
}
#endif
#endif /* !Py_GREENLETOBJECT_H */

View File

@ -0,0 +1,63 @@
#ifndef GREENLET_ALLOCATOR_HPP
#define GREENLET_ALLOCATOR_HPP
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <memory>
#include "greenlet_compiler_compat.hpp"
namespace greenlet
{
// This allocator is stateless; all instances are identical.
// It can *ONLY* be used when we're sure we're holding the GIL
// (Python's allocators require the GIL).
template <class T>
struct PythonAllocator : public std::allocator<T> {
PythonAllocator(const PythonAllocator& UNUSED(other))
: std::allocator<T>()
{
}
PythonAllocator(const std::allocator<T> other)
: std::allocator<T>(other)
{}
template <class U>
PythonAllocator(const std::allocator<U>& other)
: std::allocator<T>(other)
{
}
PythonAllocator() : std::allocator<T>() {}
T* allocate(size_t number_objects, const void* UNUSED(hint)=0)
{
void* p;
if (number_objects == 1)
p = PyObject_Malloc(sizeof(T));
else
p = PyMem_Malloc(sizeof(T) * number_objects);
return static_cast<T*>(p);
}
void deallocate(T* t, size_t n)
{
void* p = t;
if (n == 1) {
PyObject_Free(p);
}
else
PyMem_Free(p);
}
// This member is deprecated in C++17 and removed in C++20
template< class U >
struct rebind {
typedef PythonAllocator<U> other;
};
};
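// Two usages from elsewhere in this commit, shown here for illustration:
// as a container allocator and behind a class-specific operator new.
//
//     typedef std::vector<PyGreenlet*, PythonAllocator<PyGreenlet*> > deleteme_t;
//
//     static PythonAllocator<ThreadState> allocator;
//     static void* operator new(size_t UNUSED(count))
//     {
//         return ThreadState::allocator.allocate(1);
//     }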
}
#endif

View File

@ -0,0 +1,98 @@
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
#ifndef GREENLET_COMPILER_COMPAT_HPP
#define GREENLET_COMPILER_COMPAT_HPP
/**
* Definitions to aid with compatibility with different compilers.
*
* .. caution:: Use extreme care with noexcept.
* Some compilers and runtimes, specifically gcc/libgcc/libstdc++ on
* Linux, implement stack unwinding by throwing an uncatchable
* exception, one that specifically does not appear to be an active
* exception to the rest of the runtime. If this happens while we're in a noexcept function,
* we have violated our dynamic exception contract, and so the runtime
* will call std::terminate(), which kills the process with the
* unhelpful message "terminate called without an active exception".
*
* This has happened in this scenario: A background thread is running
* a greenlet that has made a native call and released the GIL.
* Meanwhile, the main thread finishes and starts shutting down the
* interpreter. When the background thread is scheduled again and
* attempts to obtain the GIL, it notices that the interpreter is
* exiting and calls ``pthread_exit()``. This in turn starts to unwind
* the stack by throwing that exception. But we had the ``PyCall``
* functions annotated as noexcept, so the runtime terminated us.
*
* #2 0x00007fab26fec2b7 in std::terminate() () from /lib/x86_64-linux-gnu/libstdc++.so.6
* #3 0x00007fab26febb3c in __gxx_personality_v0 () from /lib/x86_64-linux-gnu/libstdc++.so.6
* #4 0x00007fab26f34de6 in ?? () from /lib/x86_64-linux-gnu/libgcc_s.so.1
* #6 0x00007fab276a34c6 in __GI___pthread_unwind at ./nptl/unwind.c:130
* #7 0x00007fab2769bd3a in __do_cancel () at ../sysdeps/nptl/pthreadP.h:280
* #8 __GI___pthread_exit (value=value@entry=0x0) at ./nptl/pthread_exit.c:36
* #9 0x000000000052e567 in PyThread_exit_thread () at ../Python/thread_pthread.h:370
* #10 0x00000000004d60b5 in take_gil at ../Python/ceval_gil.h:224
* #11 0x00000000004d65f9 in PyEval_RestoreThread at ../Python/ceval.c:467
* #12 0x000000000060cce3 in setipaddr at ../Modules/socketmodule.c:1203
* #13 0x00000000006101cd in socket_gethostbyname
*/
#include <cstdint>
# define G_NO_COPIES_OF_CLS(Cls) private: \
Cls(const Cls& other) = delete; \
Cls& operator=(const Cls& other) = delete
# define G_NO_ASSIGNMENT_OF_CLS(Cls) private: \
Cls& operator=(const Cls& other) = delete
# define G_NO_COPY_CONSTRUCTOR_OF_CLS(Cls) private: \
Cls(const Cls& other) = delete;
// CAUTION: MSVC is stupidly picky:
//
// "The compiler ignores, without warning, any __declspec keywords
// placed after * or & and in front of the variable identifier in a
// declaration."
// (https://docs.microsoft.com/en-us/cpp/cpp/declspec?view=msvc-160)
//
// So pointer return types must be handled differently (because of the
// trailing *), or you get inscrutable compiler warnings like "error
// C2059: syntax error: ''"
//
// In C++ 11, there is a standard syntax for attributes, and
// GCC defines an attribute to use with this: [[gnu::noinline]].
// In the future, this is expected to become standard.
#if defined(__GNUC__) || defined(__clang__)
/* We used to check for GCC 4+ or 3.4+, but those compilers are
laughably out of date. Just assume they support it. */
# define GREENLET_NOINLINE(name) __attribute__((noinline)) name
# define GREENLET_NOINLINE_P(rtype, name) rtype __attribute__((noinline)) name
# define UNUSED(x) UNUSED_ ## x __attribute__((__unused__))
#elif defined(_MSC_VER)
/* We used to check for && (_MSC_VER >= 1300) but that's also out of date. */
# define GREENLET_NOINLINE(name) __declspec(noinline) name
# define GREENLET_NOINLINE_P(rtype, name) __declspec(noinline) rtype name
# define UNUSED(x) UNUSED_ ## x
#endif
#if defined(_MSC_VER)
# define G_NOEXCEPT_WIN32 noexcept
#else
# define G_NOEXCEPT_WIN32
#endif
#if defined(__GNUC__) && defined(__POWERPC__) && defined(__APPLE__)
// 32-bit PPC/MacOSX. Only known to be tested on unreleased versions
// of macOS 10.6 using a MacPorts build of GCC 14. It appears that
// running C++ destructors of thread-local variables is broken.
// See https://github.com/python-greenlet/greenlet/pull/419
# define GREENLET_BROKEN_THREAD_LOCAL_CLEANUP_JUST_LEAK 1
#else
# define GREENLET_BROKEN_THREAD_LOCAL_CLEANUP_JUST_LEAK 0
#endif
#endif
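A small usage sketch, not from the sources, of the two macro families defined above; ``debug_hook`` and its parameter are hypothetical names. ``GREENLET_NOINLINE`` splices the attribute into the position each compiler accepts, and ``UNUSED`` renames the parameter while marking it unused for GCC/clang.
static void GREENLET_NOINLINE(debug_hook)(int UNUSED(depth))
{
    // Deliberately kept out of line so it stays visible in native stack traces.
}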

View File

@ -0,0 +1,148 @@
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
#ifndef GREENLET_CPYTHON_COMPAT_H
#define GREENLET_CPYTHON_COMPAT_H
/**
* Helpers for compatibility with multiple versions of CPython.
*/
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#if PY_VERSION_HEX >= 0x30A00B1
# define GREENLET_PY310 1
#else
# define GREENLET_PY310 0
#endif
/*
Python 3.10 beta 1 changed tstate->use_tracing to a nested cframe member.
See https://github.com/python/cpython/pull/25276
We have to save and restore this as well.
Python 3.13 removed PyThreadState.cframe (GH-108035).
*/
#if GREENLET_PY310 && PY_VERSION_HEX < 0x30D0000
# define GREENLET_USE_CFRAME 1
#else
# define GREENLET_USE_CFRAME 0
#endif
#if PY_VERSION_HEX >= 0x30B00A4
/*
Greenlet won't compile on anything older than Python 3.11 alpha 4 (see
https://bugs.python.org/issue46090). Summary of breaking internal changes:
- Python 3.11 alpha 1 changed how frame objects are represented internally.
- https://github.com/python/cpython/pull/30122
- Python 3.11 alpha 3 changed how recursion limits are stored.
- https://github.com/python/cpython/pull/29524
- Python 3.11 alpha 4 changed how exception state is stored. It also includes a
change to help greenlet save and restore the interpreter frame "data stack".
- https://github.com/python/cpython/pull/30122
- https://github.com/python/cpython/pull/30234
*/
# define GREENLET_PY311 1
#else
# define GREENLET_PY311 0
#endif
#if PY_VERSION_HEX >= 0x30C0000
# define GREENLET_PY312 1
#else
# define GREENLET_PY312 0
#endif
#if PY_VERSION_HEX >= 0x30D0000
# define GREENLET_PY313 1
#else
# define GREENLET_PY313 0
#endif
#if PY_VERSION_HEX >= 0x30E0000
# define GREENLET_PY314 1
#else
# define GREENLET_PY314 0
#endif
#ifndef Py_SET_REFCNT
/* Py_REFCNT and Py_SIZE macros are converted to functions
https://bugs.python.org/issue39573 */
# define Py_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#endif
#ifndef _Py_DEC_REFTOTAL
/* _Py_DEC_REFTOTAL macro has been removed from Python 3.9 by:
https://github.com/python/cpython/commit/49932fec62c616ec88da52642339d83ae719e924
The symbol we use to replace it was removed by at least 3.12.
*/
# ifdef Py_REF_DEBUG
# if GREENLET_PY312
# define _Py_DEC_REFTOTAL
# else
# define _Py_DEC_REFTOTAL _Py_RefTotal--
# endif
# else
# define _Py_DEC_REFTOTAL
# endif
#endif
// Define these flags like Cython does if we're on an old version.
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_VERSION_TAG
#define Py_TPFLAGS_HAVE_VERSION_TAG 0
#endif
#define G_TPFLAGS_DEFAULT Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_VERSION_TAG | Py_TPFLAGS_CHECKTYPES | Py_TPFLAGS_HAVE_NEWBUFFER | Py_TPFLAGS_HAVE_GC
#if PY_VERSION_HEX < 0x03090000
// The official version only became available in 3.9
# define PyObject_GC_IsTracked(o) _PyObject_GC_IS_TRACKED(o)
#endif
// bpo-43760 added PyThreadState_EnterTracing() to Python 3.11.0a2
#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION)
static inline void PyThreadState_EnterTracing(PyThreadState *tstate)
{
tstate->tracing++;
#if PY_VERSION_HEX >= 0x030A00A1
tstate->cframe->use_tracing = 0;
#else
tstate->use_tracing = 0;
#endif
}
#endif
// bpo-43760 added PyThreadState_LeaveTracing() to Python 3.11.0a2
#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION)
static inline void PyThreadState_LeaveTracing(PyThreadState *tstate)
{
tstate->tracing--;
int use_tracing = (tstate->c_tracefunc != NULL
|| tstate->c_profilefunc != NULL);
#if PY_VERSION_HEX >= 0x030A00A1
tstate->cframe->use_tracing = use_tracing;
#else
tstate->use_tracing = use_tracing;
#endif
}
#endif
#if !defined(Py_C_RECURSION_LIMIT) && defined(C_RECURSION_LIMIT)
# define Py_C_RECURSION_LIMIT C_RECURSION_LIMIT
#endif
#endif /* GREENLET_CPYTHON_COMPAT_H */
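A sketch of why the tracing shims above exist (the helper name is hypothetical): callers can use the CPython 3.11+ spelling unconditionally, and on older interpreters the static inline definitions above supply it.
static void
pause_tracing_sketch(PyThreadState* tstate)
{
    PyThreadState_EnterTracing(tstate);  /* real API on 3.11+, shimmed above otherwise */
    /* ... run code that must not invoke trace or profile hooks ... */
    PyThreadState_LeaveTracing(tstate);
}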

View File

@ -0,0 +1,171 @@
#ifndef GREENLET_EXCEPTIONS_HPP
#define GREENLET_EXCEPTIONS_HPP
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <stdexcept>
#include <string>
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wunused-function"
#endif
namespace greenlet {
class PyErrOccurred : public std::runtime_error
{
public:
// CAUTION: In debug builds, may run arbitrary Python code.
static const PyErrOccurred
from_current()
{
assert(PyErr_Occurred());
#ifndef NDEBUG
// This is not exception safe, and
// not necessarily safe in general (what if it switches?)
// But we only do this in debug mode, where we are in
// tight control of what exceptions are getting raised and
// can prevent those issues.
// You can't call PyObject_Str with a pending exception.
PyObject* typ;
PyObject* val;
PyObject* tb;
PyErr_Fetch(&typ, &val, &tb);
PyObject* typs = PyObject_Str(typ);
PyObject* vals = PyObject_Str(val ? val : typ);
const char* typ_msg = PyUnicode_AsUTF8(typs);
const char* val_msg = PyUnicode_AsUTF8(vals);
PyErr_Restore(typ, val, tb);
std::string msg(typ_msg);
msg += ": ";
msg += val_msg;
PyErrOccurred ex(msg);
Py_XDECREF(typs);
Py_XDECREF(vals);
return ex;
#else
return PyErrOccurred();
#endif
}
PyErrOccurred() : std::runtime_error("")
{
assert(PyErr_Occurred());
}
PyErrOccurred(const std::string& msg) : std::runtime_error(msg)
{
assert(PyErr_Occurred());
}
PyErrOccurred(PyObject* exc_kind, const char* const msg)
: std::runtime_error(msg)
{
PyErr_SetString(exc_kind, msg);
}
PyErrOccurred(PyObject* exc_kind, const std::string msg)
: std::runtime_error(msg)
{
// This copies the c_str, so we don't have any lifetime
// issues to worry about.
PyErr_SetString(exc_kind, msg.c_str());
}
PyErrOccurred(PyObject* exc_kind,
const std::string msg, //This is the format
//string; that's not
//usually safe!
PyObject* borrowed_obj_one, PyObject* borrowed_obj_two)
: std::runtime_error(msg)
{
//This is designed specifically for the
//``check_switch_allowed`` function.
// PyObject_Str and PyObject_Repr are safe to call with
// NULL pointers; they return the string "<NULL>" in that
// case.
// This function always returns null.
PyErr_Format(exc_kind,
msg.c_str(),
borrowed_obj_one, borrowed_obj_two);
}
};
class TypeError : public PyErrOccurred
{
public:
TypeError(const char* const what)
: PyErrOccurred(PyExc_TypeError, what)
{
}
TypeError(const std::string what)
: PyErrOccurred(PyExc_TypeError, what)
{
}
};
class ValueError : public PyErrOccurred
{
public:
ValueError(const char* const what)
: PyErrOccurred(PyExc_ValueError, what)
{
}
};
class AttributeError : public PyErrOccurred
{
public:
AttributeError(const char* const what)
: PyErrOccurred(PyExc_AttributeError, what)
{
}
};
/**
* Calls `Py_FatalError` when constructed, so you can't actually
* throw this. It just makes static analysis easier.
*/
class PyFatalError : public std::runtime_error
{
public:
PyFatalError(const char* const msg)
: std::runtime_error(msg)
{
Py_FatalError(msg);
}
};
static inline PyObject*
Require(PyObject* p, const std::string& msg="")
{
if (!p) {
throw PyErrOccurred(msg);
}
return p;
};
static inline void
Require(const int retval)
{
if (retval < 0) {
throw PyErrOccurred();
}
};
};
#ifdef __clang__
# pragma clang diagnostic pop
#endif
#endif
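A minimal sketch, not taken from the sources, of the intended calling pattern: ``Require`` turns a NULL or negative CPython result into a thrown ``PyErrOccurred``, and the catch clause converts it back into the NULL return the C API expects. The function and its argument are hypothetical.
static PyObject*
sequence_length_sketch(PyObject* seq)
{
    using greenlet::PyErrOccurred;
    using greenlet::Require;
    try {
        PyObject* fast = Require(PySequence_Fast(seq, "expected a sequence"));
        Py_ssize_t n = PySequence_Fast_GET_SIZE(fast);
        Py_DECREF(fast);
        return PyLong_FromSsize_t(n);
    }
    catch (const PyErrOccurred&) {
        return nullptr;  // the Python error is already set
    }
}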

View File

@ -0,0 +1,107 @@
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
#ifndef GREENLET_INTERNAL_H
#define GREENLET_INTERNAL_H
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wunused-function"
#endif
/**
* Implementation helpers.
*
* C++ templates and inline functions should go here.
*/
#define PY_SSIZE_T_CLEAN
#include "greenlet_compiler_compat.hpp"
#include "greenlet_cpython_compat.hpp"
#include "greenlet_exceptions.hpp"
#include "TGreenlet.hpp"
#include "greenlet_allocator.hpp"
#include <vector>
#include <string>
#define GREENLET_MODULE
struct _greenlet;
typedef struct _greenlet PyGreenlet;
namespace greenlet {
class ThreadState;
// We can't use the PythonAllocator for this, because we push to it
// from the thread state destructor, which doesn't have the GIL,
// and Python's allocators can only be called with the GIL.
typedef std::vector<ThreadState*> cleanup_queue_t;
};
#define implementation_ptr_t greenlet::Greenlet*
#include "greenlet.h"
void
greenlet::refs::MainGreenletExactChecker(void *p)
{
if (!p) {
return;
}
// We control the class of the main greenlet exactly.
if (Py_TYPE(p) != &PyGreenlet_Type) {
std::string err("MainGreenlet: Expected exactly a greenlet, not a ");
err += Py_TYPE(p)->tp_name;
throw greenlet::TypeError(err);
}
// Greenlets from dead threads no longer respond to main() with a
// true value; so in that case we need to perform an additional
// check.
Greenlet* g = static_cast<PyGreenlet*>(p)->pimpl;
if (g->main()) {
return;
}
if (!dynamic_cast<MainGreenlet*>(g)) {
std::string err("MainGreenlet: Expected exactly a main greenlet, not a ");
err += Py_TYPE(p)->tp_name;
throw greenlet::TypeError(err);
}
}
template <typename T, greenlet::refs::TypeChecker TC>
inline greenlet::Greenlet* greenlet::refs::_OwnedGreenlet<T, TC>::operator->() const noexcept
{
return reinterpret_cast<PyGreenlet*>(this->p)->pimpl;
}
template <typename T, greenlet::refs::TypeChecker TC>
inline greenlet::Greenlet* greenlet::refs::_BorrowedGreenlet<T, TC>::operator->() const noexcept
{
return reinterpret_cast<PyGreenlet*>(this->p)->pimpl;
}
#include <memory>
#include <stdexcept>
extern PyTypeObject PyGreenlet_Type;
/**
* Forward declarations needed in multiple files.
*/
static PyObject* green_switch(PyGreenlet* self, PyObject* args, PyObject* kwargs);
#ifdef __clang__
# pragma clang diagnostic pop
#endif
#endif
// Local Variables:
// flycheck-clang-include-path: ("../../include" "/opt/local/Library/Frameworks/Python.framework/Versions/3.10/include/python3.10")
// End:

File diff suppressed because it is too large

View File

@ -0,0 +1,99 @@
#ifndef GREENLET_SLP_SWITCH_HPP
#define GREENLET_SLP_SWITCH_HPP
#include "greenlet_compiler_compat.hpp"
#include "greenlet_refs.hpp"
/*
* the following macros are spliced into the OS/compiler
* specific code, in order to simplify maintenance.
*/
// We can save about 10% of the time it takes to switch greenlets if
// we thread the thread state through the slp_save_state() and the
// following slp_restore_state() calls from
// slp_switch()->g_switchstack() (which already needs to access it).
//
// However:
//
// that requires changing the prototypes and implementations of the
// switching functions. If we just change the prototype of
// slp_switch() to accept the argument and update the macros, without
// changing the implementation of slp_switch(), we get crashes on
// 64-bit Linux and 32-bit x86 (for reasons that aren't 100% clear);
// on the other hand, 64-bit macOS seems to be fine. Also, 64-bit
// windows is an issue because slp_switch is written fully in assembly
// and currently ignores its argument so some code would have to be
// adjusted there to pass the argument on to the
// ``slp_save_state_asm()`` function (but interestingly, because of
// the calling convention, the extra argument is just ignored and
// things function fine, albeit slower, if we just modify
// ``slp_save_state_asm()`` to fetch the pointer to pass to the
// macro.)
//
// Our compromise is to use a *global*, untracked, weak pointer
// to the necessary thread state during the process of switching only.
// This is safe because we're protected by the GIL, and if we're
// running this code, the thread isn't exiting. This also nets us a
// 10-12% speed improvement.
static greenlet::Greenlet* volatile switching_thread_state = nullptr;
extern "C" {
static int GREENLET_NOINLINE(slp_save_state_trampoline)(char* stackref);
static void GREENLET_NOINLINE(slp_restore_state_trampoline)();
}
#define SLP_SAVE_STATE(stackref, stsizediff) \
do { \
assert(switching_thread_state); \
stackref += STACK_MAGIC; \
if (slp_save_state_trampoline((char*)stackref)) \
return -1; \
if (!switching_thread_state->active()) \
return 1; \
stsizediff = switching_thread_state->stack_start() - (char*)stackref; \
} while (0)
#define SLP_RESTORE_STATE() slp_restore_state_trampoline()
#define SLP_EVAL
extern "C" {
#define slp_switch GREENLET_NOINLINE(slp_switch)
#include "slp_platformselect.h"
}
#undef slp_switch
#ifndef STACK_MAGIC
# error \
"greenlet needs to be ported to this platform, or taught how to detect your compiler properly."
#endif /* !STACK_MAGIC */
#ifdef EXTERNAL_ASM
/* CCP addition: Make these functions, to be called from assembler.
* The token include file for the given platform should enable the
* EXTERNAL_ASM define so that this is included.
*/
extern "C" {
intptr_t
slp_save_state_asm(intptr_t* ref)
{
intptr_t diff;
SLP_SAVE_STATE(ref, diff);
return diff;
}
void
slp_restore_state_asm(void)
{
SLP_RESTORE_STATE();
}
extern int slp_switch(void);
};
#endif
#endif

View File

@ -0,0 +1,31 @@
#ifndef GREENLET_THREAD_SUPPORT_HPP
#define GREENLET_THREAD_SUPPORT_HPP
/**
* Defines various utility functions to help greenlet integrate well
* with threads. This used to be needed when we supported Python
* 2.7 on Windows, which used a very old compiler. We wrote an
* alternative implementation using Python APIs and POSIX or Windows
* APIs, but that's no longer needed. So this file is a shadow of its
* former self --- but may be needed in the future.
*/
#include <stdexcept>
#include <thread>
#include <mutex>
#include "greenlet_compiler_compat.hpp"
namespace greenlet {
typedef std::mutex Mutex;
typedef std::lock_guard<Mutex> LockGuard;
class LockInitError : public std::runtime_error
{
public:
LockInitError(const char* what) : std::runtime_error(what)
{};
};
};
#endif /* GREENLET_THREAD_SUPPORT_HPP */
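A usage sketch with hypothetical names showing the intent of the typedefs: call sites depend on ``greenlet::Mutex`` and ``greenlet::LockGuard`` rather than on ``<mutex>`` spellings, which keeps the door open for swapping the implementation again later.
static greenlet::Mutex cleanup_queue_mutex;

static void
drain_queue_sketch()
{
    greenlet::LockGuard guard(cleanup_queue_mutex);
    // ... operate on state shared with other native threads ...
}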

View File

@ -0,0 +1,2 @@
call "C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\vcvarsall.bat" amd64
ml64 /nologo /c /Fo switch_x64_masm.obj switch_x64_masm.asm

View File

@ -0,0 +1,124 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 07-Sep-16 Add clang support using x register naming. Fredrik Fornwall
* 13-Apr-13 Add support for strange GCC caller-save decisions
* 08-Apr-13 File creation. Michael Matz
*
* NOTES
*
* Simply save all callee saved registers
*
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 0
#define REGS_TO_SAVE "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", \
"x27", "x28", "x30" /* aka lr */, \
"v8", "v9", "v10", "v11", \
"v12", "v13", "v14", "v15"
/*
* Recall:
asm asm-qualifiers ( AssemblerTemplate
: OutputOperands
[ : InputOperands
[ : Clobbers ] ])
or (if asm-qualifiers contains 'goto')
asm asm-qualifiers ( AssemblerTemplate
: OutputOperands
: InputOperands
: Clobbers
: GotoLabels)
and OutputOperands are
[ [asmSymbolicName] ] constraint (cvariablename)
When a name is given, refer to it as ``%[the name]``.
When not given, ``%i`` where ``i`` is the zero-based index.
constraints starting with ``=`` means only writing; ``+`` means
reading and writing.
This is followed by ``r`` (must be register) or ``m`` (must be memory)
and these can be combined.
The ``cvariablename`` is actually an lvalue expression.
In AArch64, there are 31 general purpose registers. If named X0... they are
64-bit. If named W0... they are the bottom 32 bits of the
corresponding 64 bit register.
XZR and WZR are hardcoded to 0, and ignore writes.
Arguments are in X0..X7. C++ uses X0 for ``this``. X0 holds simple return
values (?)
Whenever a W register is written, the top half of the X register is zeroed.
*/
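/* Illustrative only (not used by the switch below): a tiny named-operand
   example of the extended-asm syntax recalled above, assuming GCC/clang
   targeting AArch64. The function and operand names are hypothetical. */
static inline int
extended_asm_syntax_example(int lhs, int rhs)
{
    int sum;
    __asm__ ("add %w[out], %w[a], %w[b]"   /* %w selects the 32-bit W form */
             : [out] "=r" (sum)
             : [a] "r" (lhs), [b] "r" (rhs));
    return sum;
}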
static int
slp_switch(void)
{
int err;
void *fp;
/* Windows uses a 32-bit long on a 64-bit platform, unlike the rest of
the world, and in theory we can be compiled with GCC/llvm on 64-bit
windows. So we need a fixed-width type.
*/
int64_t *stackref, stsizediff;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("str x29, %0" : "=m"(fp) : : );
__asm__ ("mov %0, sp" : "=r" (stackref));
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"add sp,sp,%0\n"
"add x29,x29,%0\n"
:
: "r" (stsizediff)
);
SLP_RESTORE_STATE();
/* SLP_SAVE_STATE macro contains some return statements
(of -1 and 1). It falls through only when
the return value of slp_save_state() is zero, which
is placed in x0.
In that case we (slp_switch) also want to return zero
(also in x0 of course).
Now, some GCC versions (seen with 4.8) think it's a
good idea to save/restore x0 around the call to
slp_restore_state(), instead of simply zeroing it
at the return below. But slp_restore_state
writes random values to the stack slot used for this
save/restore (from when it once was saved above in
SLP_SAVE_STATE, when it was still uninitialized), so
"restoring" that precious zero actually makes us
return random values. There are some ways to make
GCC not use that zero value in the normal return path
(e.g. making err volatile, but that costs a little
stack space), and the simplest is to call a function
that returns an unknown value (which happens to be zero),
so the saved/restored value is unused.
Thus, this line stores a 0 into the ``err`` variable
(which must be held in a register for this instruction,
of course). The ``w`` qualifier causes the instruction
to use W0 instead of X0, otherwise we get a warning
about a value size mismatch (because err is an int,
and aarch64 platforms are LP64: 32-bit int, 64 bit long
and pointer).
*/
__asm__ volatile ("mov %w0, #0" : "=r" (err));
}
__asm__ volatile ("ldr x29, %0" : : "m" (fp) :);
__asm__ volatile ("" : : : REGS_TO_SAVE);
return err;
}
#endif

View File

@ -0,0 +1,30 @@
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 0
#define REGS_TO_SAVE "$9", "$10", "$11", "$12", "$13", "$14", "$15", \
"$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9"
static int
slp_switch(void)
{
int ret;
long *stackref, stsizediff;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("mov $30, %0" : "=r" (stackref) : );
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"addq $30, %0, $30\n\t"
: /* no outputs */
: "r" (stsizediff)
);
SLP_RESTORE_STATE();
}
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("mov $31, %0" : "=r" (ret) : );
return ret;
}
#endif

View File

@ -0,0 +1,87 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 3-May-13 Ralf Schmitt <ralf@systemexit.de>
* Add support for strange GCC caller-save decisions
* (ported from switch_aarch64_gcc.h)
* 18-Aug-11 Alexey Borzenkov <snaury@gmail.com>
* Correctly save rbp, csr and cw
* 01-Apr-04 Hye-Shik Chang <perky@FreeBSD.org>
* Ported from i386 to amd64.
* 24-Nov-02 Christian Tismer <tismer@tismer.com>
* needed to add another magic constant to ensure
* that f in slp_eval_frame(PyFrameObject *f)
* gets included into the saved stack area.
* STACK_REFPLUS will probably be 1 in most cases.
* 17-Sep-02 Christian Tismer <tismer@tismer.com>
* after virtualizing stack save/restore, the
* stack size shrunk a bit. Needed to introduce
* an adjustment STACK_MAGIC per platform.
* 15-Sep-02 Gerd Woetzel <gerd.woetzel@GMD.DE>
* slightly changed framework for sparc
* 31-Avr-02 Armin Rigo <arigo@ulb.ac.be>
* Added ebx, esi and edi register-saves.
* 01-Mar-02 Samual M. Rushing <rushing@ironport.com>
* Ported from i386.
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
/* #define STACK_MAGIC 3 */
/* the above works fine with gcc 2.96, but 2.95.3 wants this */
#define STACK_MAGIC 0
#define REGS_TO_SAVE "r12", "r13", "r14", "r15"
static int
slp_switch(void)
{
int err;
void* rbp;
void* rbx;
unsigned int csr;
unsigned short cw;
/* This used to be declared 'register', but that does nothing in
modern compilers and is explicitly forbidden in some new
standards. */
long *stackref, stsizediff;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("fstcw %0" : "=m" (cw));
__asm__ volatile ("stmxcsr %0" : "=m" (csr));
__asm__ volatile ("movq %%rbp, %0" : "=m" (rbp));
__asm__ volatile ("movq %%rbx, %0" : "=m" (rbx));
__asm__ ("movq %%rsp, %0" : "=g" (stackref));
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"addq %0, %%rsp\n"
"addq %0, %%rbp\n"
:
: "r" (stsizediff)
);
SLP_RESTORE_STATE();
__asm__ volatile ("xorq %%rax, %%rax" : "=a" (err));
}
__asm__ volatile ("movq %0, %%rbx" : : "m" (rbx));
__asm__ volatile ("movq %0, %%rbp" : : "m" (rbp));
__asm__ volatile ("ldmxcsr %0" : : "m" (csr));
__asm__ volatile ("fldcw %0" : : "m" (cw));
__asm__ volatile ("" : : : REGS_TO_SAVE);
return err;
}
#endif
/*
* further self-processing support
*/
/*
* if you want to add self-inspection tools, place them
* here. See the x86_msvc for the necessary defines.
* These features are highly experimental and not
* essential yet.
*/

View File

@ -0,0 +1,79 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 14-Aug-06 File creation. Ported from Arm Thumb. Sylvain Baro
* 3-Sep-06 Commented out saving of r1-r3 (r4 already commented out) as I
* read that these do not need to be saved. Also added notes and
* errors related to the frame pointer. Richard Tew.
*
* NOTES
*
* It is not possible to detect if fp is used or not, so the supplied
* switch function needs to support it, so that you can remove it if
* it does not apply to you.
*
* POSSIBLE ERRORS
*
* "fp cannot be used in asm here"
*
* - Try commenting out "fp" in REGS_TO_SAVE.
*
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 0
#define REG_SP "sp"
#define REG_SPSP "sp,sp"
#ifdef __thumb__
#define REG_FP "r7"
#define REG_FPFP "r7,r7"
#define REGS_TO_SAVE_GENERAL "r4", "r5", "r6", "r8", "r9", "r10", "r11", "lr"
#else
#define REG_FP "fp"
#define REG_FPFP "fp,fp"
#define REGS_TO_SAVE_GENERAL "r4", "r5", "r6", "r7", "r8", "r9", "r10", "lr"
#endif
#if defined(__SOFTFP__)
#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL
#elif defined(__VFP_FP__)
#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "d8", "d9", "d10", "d11", \
"d12", "d13", "d14", "d15"
#elif defined(__MAVERICK__)
#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "mvf4", "mvf5", "mvf6", "mvf7", \
"mvf8", "mvf9", "mvf10", "mvf11", \
"mvf12", "mvf13", "mvf14", "mvf15"
#else
#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "f4", "f5", "f6", "f7"
#endif
static int
#ifdef __GNUC__
__attribute__((optimize("no-omit-frame-pointer")))
#endif
slp_switch(void)
{
void *fp;
int *stackref, stsizediff;
int result;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("mov r0," REG_FP "\n\tstr r0,%0" : "=m" (fp) : : "r0");
__asm__ ("mov %0," REG_SP : "=r" (stackref));
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"add " REG_SPSP ",%0\n"
"add " REG_FPFP ",%0\n"
:
: "r" (stsizediff)
);
SLP_RESTORE_STATE();
}
__asm__ volatile ("ldr r0,%1\n\tmov " REG_FP ",r0\n\tmov %0, #0" : "=r" (result) : "m" (fp) : "r0");
__asm__ volatile ("" : : : REGS_TO_SAVE);
return result;
}
#endif

View File

@ -0,0 +1,67 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 31-May-15 iOS support. Ported from arm32. Proton <feisuzhu@163.com>
*
* NOTES
*
* It is not possible to detect if fp is used or not, so the supplied
* switch function needs to support it, so that you can remove it if
* it does not apply to you.
*
* POSSIBLE ERRORS
*
* "fp cannot be used in asm here"
*
* - Try commenting out "fp" in REGS_TO_SAVE.
*
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 0
#define REG_SP "sp"
#define REG_SPSP "sp,sp"
#define REG_FP "r7"
#define REG_FPFP "r7,r7"
#define REGS_TO_SAVE_GENERAL "r4", "r5", "r6", "r8", "r10", "r11", "lr"
#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "d8", "d9", "d10", "d11", \
"d12", "d13", "d14", "d15"
static int
#ifdef __GNUC__
__attribute__((optimize("no-omit-frame-pointer")))
#endif
slp_switch(void)
{
void *fp;
int *stackref, stsizediff, result;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("str " REG_FP ",%0" : "=m" (fp));
__asm__ ("mov %0," REG_SP : "=r" (stackref));
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"add " REG_SPSP ",%0\n"
"add " REG_FPFP ",%0\n"
:
: "r" (stsizediff)
: REGS_TO_SAVE /* Clobber registers, force compiler to
* recalculate address of void *fp from REG_SP or REG_FP */
);
SLP_RESTORE_STATE();
}
__asm__ volatile (
"ldr " REG_FP ", %1\n\t"
"mov %0, #0"
: "=r" (result)
: "m" (fp)
: REGS_TO_SAVE /* Force compiler to restore saved registers after this */
);
return result;
}
#endif

View File

@ -0,0 +1,53 @@
AREA switch_arm64_masm, CODE, READONLY;
GLOBAL slp_switch [FUNC]
EXTERN slp_save_state_asm
EXTERN slp_restore_state_asm
slp_switch
; push callee saved registers to stack
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x29, x30, [sp, #-16]!
stp d8, d9, [sp, #-16]!
stp d10, d11, [sp, #-16]!
stp d12, d13, [sp, #-16]!
stp d14, d15, [sp, #-16]!
; call slp_save_state_asm with stack pointer
mov x0, sp
bl slp_save_state_asm
; early return for return value of 1 and -1
cmp x0, #-1
b.eq RETURN
cmp x0, #1
b.eq RETURN
; increment stack and frame pointer
add sp, sp, x0
add x29, x29, x0
bl slp_restore_state_asm
; store return value for successful completion of routine
mov x0, #0
RETURN
; pop registers from stack
ldp d14, d15, [sp], #16
ldp d12, d13, [sp], #16
ldp d10, d11, [sp], #16
ldp d8, d9, [sp], #16
ldp x29, x30, [sp], #16
ldp x27, x28, [sp], #16
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
END

View File

@ -0,0 +1,17 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 21-Oct-21 Niyas Sait <niyas.sait@linaro.org>
* First version to enable win/arm64 support.
*/
#define STACK_REFPLUS 1
#define STACK_MAGIC 0
/* Use the generic support for an external assembly language slp_switch function. */
#define EXTERNAL_ASM
#ifdef SLP_EVAL
/* This always uses the external masm assembly file. */
#endif

View File

@ -0,0 +1,48 @@
#ifdef SLP_EVAL
#define STACK_MAGIC 0
#define REG_FP "r8"
#ifdef __CSKYABIV2__
#define REGS_TO_SAVE_GENERAL "r4", "r5", "r6", "r7", "r9", "r10", "r11", "r15",\
"r16", "r17", "r18", "r19", "r20", "r21", "r22",\
"r23", "r24", "r25"
#if defined (__CSKY_HARD_FLOAT__) || (__CSKY_VDSP__)
#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "vr8", "vr9", "vr10", "vr11", "vr12",\
"vr13", "vr14", "vr15"
#else
#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL
#endif
#else
#define REGS_TO_SAVE "r9", "r10", "r11", "r12", "r13", "r15"
#endif
static int
#ifdef __GNUC__
__attribute__((optimize("no-omit-frame-pointer")))
#endif
slp_switch(void)
{
int *stackref, stsizediff;
int result;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ ("mov %0, sp" : "=r" (stackref));
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"addu sp,%0\n"
"addu "REG_FP",%0\n"
:
: "r" (stsizediff)
);
SLP_RESTORE_STATE();
}
__asm__ volatile ("movi %0, 0" : "=r" (result));
__asm__ volatile ("" : : : REGS_TO_SAVE);
return result;
}
#endif

View File

@ -0,0 +1,31 @@
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 0
#define REGS_TO_SAVE "s0", "s1", "s2", "s3", "s4", "s5", \
"s6", "s7", "s8", "fp", \
"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
static int
slp_switch(void)
{
int ret;
long *stackref, stsizediff;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("move %0, $sp" : "=r" (stackref) : );
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"add.d $sp, $sp, %0\n\t"
: /* no outputs */
: "r" (stsizediff)
);
SLP_RESTORE_STATE();
}
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("move %0, $zero" : "=r" (ret) : );
return ret;
}
#endif

View File

@ -0,0 +1,38 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 2014-01-06 Andreas Schwab <schwab@linux-m68k.org>
* File created.
*/
#ifdef SLP_EVAL
#define STACK_MAGIC 0
#define REGS_TO_SAVE "%d2", "%d3", "%d4", "%d5", "%d6", "%d7", \
"%a2", "%a3", "%a4"
static int
slp_switch(void)
{
int err;
int *stackref, stsizediff;
void *fp, *a5;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("move.l %%fp, %0" : "=m"(fp));
__asm__ volatile ("move.l %%a5, %0" : "=m"(a5));
__asm__ ("move.l %%sp, %0" : "=r"(stackref));
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile ("add.l %0, %%sp; add.l %0, %%fp" : : "r"(stsizediff));
SLP_RESTORE_STATE();
__asm__ volatile ("clr.l %0" : "=g" (err));
}
__asm__ volatile ("move.l %0, %%a5" : : "m"(a5));
__asm__ volatile ("move.l %0, %%fp" : : "m"(fp));
__asm__ volatile ("" : : : REGS_TO_SAVE);
return err;
}
#endif

View File

@ -0,0 +1,64 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 20-Sep-14 Matt Madison <madison@bliss-m.org>
* Re-code the saving of the gp register for MIPS64.
* 05-Jan-08 Thiemo Seufer <ths@debian.org>
* Ported from ppc.
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 0
#define REGS_TO_SAVE "$16", "$17", "$18", "$19", "$20", "$21", "$22", \
"$23", "$30"
static int
slp_switch(void)
{
int err;
int *stackref, stsizediff;
#ifdef __mips64
uint64_t gpsave;
#endif
__asm__ __volatile__ ("" : : : REGS_TO_SAVE);
#ifdef __mips64
__asm__ __volatile__ ("sd $28,%0" : "=m" (gpsave) : : );
#endif
__asm__ ("move %0, $29" : "=r" (stackref) : );
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ __volatile__ (
#ifdef __mips64
"daddu $29, %0\n"
#else
"addu $29, %0\n"
#endif
: /* no outputs */
: "r" (stsizediff)
);
SLP_RESTORE_STATE();
}
#ifdef __mips64
__asm__ __volatile__ ("ld $28,%0" : : "m" (gpsave) : );
#endif
__asm__ __volatile__ ("" : : : REGS_TO_SAVE);
__asm__ __volatile__ ("move %0, $0" : "=r" (err));
return err;
}
#endif
/*
* further self-processing support
*/
/*
* if you want to add self-inspection tools, place them
* here. See the x86_msvc for the necessary defines.
* These features are highly experimental and not
* essential yet.
*/

View File

@ -0,0 +1,103 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 16-Oct-20 Jesse Gorzinski <jgorzins@us.ibm.com>
* Copied from Linux PPC64 implementation
* 04-Sep-18 Alexey Borzenkov <snaury@gmail.com>
* Workaround a gcc bug using manual save/restore of r30
* 21-Mar-18 Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
* Added r30 to the list of saved registers in order to fully comply with
* both ppc64 ELFv1 ABI and the ppc64le ELFv2 ABI, that classify this
* register as a nonvolatile register used for local variables.
* 21-Mar-18 Laszlo Boszormenyi <gcs@debian.org>
* Save r2 (TOC pointer) manually.
* 10-Dec-13 Ulrich Weigand <uweigand@de.ibm.com>
* Support ELFv2 ABI. Save float/vector registers.
* 09-Mar-12 Michael Ellerman <michael@ellerman.id.au>
* 64-bit implementation, copied from 32-bit.
* 07-Sep-05 (py-dev mailing list discussion)
* removed 'r31' from the register-saved. !!!! WARNING !!!!
* It means that this file can no longer be compiled statically!
* It is now only suitable as part of a dynamic library!
* 14-Jan-04 Bob Ippolito <bob@redivi.com>
* added cr2-cr4 to the registers to be saved.
* Open questions: Should we save FP registers?
* What about vector registers?
* Differences between darwin and unix?
* 24-Nov-02 Christian Tismer <tismer@tismer.com>
* needed to add another magic constant to ensure
* that f in slp_eval_frame(PyFrameObject *f)
* gets included into the saved stack area.
* STACK_REFPLUS will probably be 1 in most cases.
* 04-Oct-02 Gustavo Niemeyer <niemeyer@conectiva.com>
* Ported from MacOS version.
* 17-Sep-02 Christian Tismer <tismer@tismer.com>
* after virtualizing stack save/restore, the
* stack size shrunk a bit. Needed to introduce
* an adjustment STACK_MAGIC per platform.
* 15-Sep-02 Gerd Woetzel <gerd.woetzel@GMD.DE>
* slightly changed framework for sparc
* 29-Jun-02 Christian Tismer <tismer@tismer.com>
* Added register 13-29, 31 saves. The same way as
* Armin Rigo did for the x86_unix version.
* This seems to be now fully functional!
* 04-Mar-02 Hye-Shik Chang <perky@fallin.lv>
* Ported from i386.
* 31-Jul-12 Trevor Bowen <trevorbowen@gmail.com>
* Changed memory constraints to register only.
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 6
#if defined(__ALTIVEC__)
#define ALTIVEC_REGS \
"v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", \
"v28", "v29", "v30", "v31",
#else
#define ALTIVEC_REGS
#endif
#define REGS_TO_SAVE "r14", "r15", "r16", "r17", "r18", "r19", "r20", \
"r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \
"r31", \
"fr14", "fr15", "fr16", "fr17", "fr18", "fr19", "fr20", "fr21", \
"fr22", "fr23", "fr24", "fr25", "fr26", "fr27", "fr28", "fr29", \
"fr30", "fr31", \
ALTIVEC_REGS \
"cr2", "cr3", "cr4"
static int
slp_switch(void)
{
int err;
long *stackref, stsizediff;
void * toc;
void * r30;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("std 2, %0" : "=m" (toc));
__asm__ volatile ("std 30, %0" : "=m" (r30));
__asm__ ("mr %0, 1" : "=r" (stackref) : );
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"mr 11, %0\n"
"add 1, 1, 11\n"
: /* no outputs */
: "r" (stsizediff)
: "11"
);
SLP_RESTORE_STATE();
}
__asm__ volatile ("ld 30, %0" : : "m" (r30));
__asm__ volatile ("ld 2, %0" : : "m" (toc));
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("li %0, 0" : "=r" (err));
return err;
}
#endif

View File

@ -0,0 +1,105 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 04-Sep-18 Alexey Borzenkov <snaury@gmail.com>
* Workaround a gcc bug using manual save/restore of r30
* 21-Mar-18 Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
* Added r30 to the list of saved registers in order to fully comply with
* both ppc64 ELFv1 ABI and the ppc64le ELFv2 ABI, that classify this
* register as a nonvolatile register used for local variables.
* 21-Mar-18 Laszlo Boszormenyi <gcs@debian.org>
* Save r2 (TOC pointer) manually.
* 10-Dec-13 Ulrich Weigand <uweigand@de.ibm.com>
* Support ELFv2 ABI. Save float/vector registers.
* 09-Mar-12 Michael Ellerman <michael@ellerman.id.au>
* 64-bit implementation, copied from 32-bit.
* 07-Sep-05 (py-dev mailing list discussion)
* removed 'r31' from the register-saved. !!!! WARNING !!!!
* It means that this file can no longer be compiled statically!
* It is now only suitable as part of a dynamic library!
* 14-Jan-04 Bob Ippolito <bob@redivi.com>
* added cr2-cr4 to the registers to be saved.
* Open questions: Should we save FP registers?
* What about vector registers?
* Differences between darwin and unix?
* 24-Nov-02 Christian Tismer <tismer@tismer.com>
* needed to add another magic constant to ensure
* that f in slp_eval_frame(PyFrameObject *f)
* gets included into the saved stack area.
* STACK_REFPLUS will probably be 1 in most cases.
* 04-Oct-02 Gustavo Niemeyer <niemeyer@conectiva.com>
* Ported from MacOS version.
* 17-Sep-02 Christian Tismer <tismer@tismer.com>
* after virtualizing stack save/restore, the
* stack size shrunk a bit. Needed to introduce
* an adjustment STACK_MAGIC per platform.
* 15-Sep-02 Gerd Woetzel <gerd.woetzel@GMD.DE>
* slightly changed framework for sparc
* 29-Jun-02 Christian Tismer <tismer@tismer.com>
* Added register 13-29, 31 saves. The same way as
* Armin Rigo did for the x86_unix version.
* This seems to be now fully functional!
* 04-Mar-02 Hye-Shik Chang <perky@fallin.lv>
* Ported from i386.
* 31-Jul-12 Trevor Bowen <trevorbowen@gmail.com>
* Changed memory constraints to register only.
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#if _CALL_ELF == 2
#define STACK_MAGIC 4
#else
#define STACK_MAGIC 6
#endif
#if defined(__ALTIVEC__)
#define ALTIVEC_REGS \
"v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", \
"v28", "v29", "v30", "v31",
#else
#define ALTIVEC_REGS
#endif
#define REGS_TO_SAVE "r14", "r15", "r16", "r17", "r18", "r19", "r20", \
"r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \
"r31", \
"fr14", "fr15", "fr16", "fr17", "fr18", "fr19", "fr20", "fr21", \
"fr22", "fr23", "fr24", "fr25", "fr26", "fr27", "fr28", "fr29", \
"fr30", "fr31", \
ALTIVEC_REGS \
"cr2", "cr3", "cr4"
static int
slp_switch(void)
{
int err;
long *stackref, stsizediff;
void * toc;
void * r30;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("std 2, %0" : "=m" (toc));
__asm__ volatile ("std 30, %0" : "=m" (r30));
__asm__ ("mr %0, 1" : "=r" (stackref) : );
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"mr 11, %0\n"
"add 1, 1, 11\n"
: /* no outputs */
: "r" (stsizediff)
: "11"
);
SLP_RESTORE_STATE();
}
__asm__ volatile ("ld 30, %0" : : "m" (r30));
__asm__ volatile ("ld 2, %0" : : "m" (toc));
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("li %0, 0" : "=r" (err));
return err;
}
#endif

View File

@ -0,0 +1,87 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 07-Mar-11 Floris Bruynooghe <flub@devork.be>
* Do not add stsizediff to general purpose
* register (GPR) 30 as this is a non-volatile and
* unused by the PowerOpen Environment, therefore
* this was modifying a user register instead of the
* frame pointer (which does not seem to exist).
* 07-Sep-05 (py-dev mailing list discussion)
* removed 'r31' from the register-saved. !!!! WARNING !!!!
* It means that this file can no longer be compiled statically!
* It is now only suitable as part of a dynamic library!
* 14-Jan-04 Bob Ippolito <bob@redivi.com>
* added cr2-cr4 to the registers to be saved.
* Open questions: Should we save FP registers?
* What about vector registers?
* Differences between darwin and unix?
* 24-Nov-02 Christian Tismer <tismer@tismer.com>
* needed to add another magic constant to ensure
* that f in slp_eval_frame(PyFrameObject *f)
* gets included into the saved stack area.
* STACK_REFPLUS will probably be 1 in most cases.
* 04-Oct-02 Gustavo Niemeyer <niemeyer@conectiva.com>
* Ported from MacOS version.
* 17-Sep-02 Christian Tismer <tismer@tismer.com>
* after virtualizing stack save/restore, the
* stack size shrunk a bit. Needed to introduce
* an adjustment STACK_MAGIC per platform.
* 15-Sep-02 Gerd Woetzel <gerd.woetzel@GMD.DE>
* slightly changed framework for sparc
* 29-Jun-02 Christian Tismer <tismer@tismer.com>
* Added register 13-29, 31 saves. The same way as
* Armin Rigo did for the x86_unix version.
* This seems to be now fully functional!
* 04-Mar-02 Hye-Shik Chang <perky@fallin.lv>
* Ported from i386.
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 3
/* !!!!WARNING!!!! need to add "r31" in the next line if this header file
* is meant to be compiled non-dynamically!
*/
#define REGS_TO_SAVE "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", \
"r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \
"cr2", "cr3", "cr4"
static int
slp_switch(void)
{
int err;
int *stackref, stsizediff;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ ("mr %0, 1" : "=r" (stackref) : );
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"mr 11, %0\n"
"add 1, 1, 11\n"
: /* no outputs */
: "r" (stsizediff)
: "11"
);
SLP_RESTORE_STATE();
}
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("li %0, 0" : "=r" (err));
return err;
}
#endif
/*
* further self-processing support
*/
/*
* if you want to add self-inspection tools, place them
* here. See the x86_msvc for the necessary defines.
* These features are highly experimental and not
* essential yet.
*/

View File

@ -0,0 +1,84 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 07-Sep-05 (py-dev mailing list discussion)
* removed 'r31' from the register-saved. !!!! WARNING !!!!
* It means that this file can no longer be compiled statically!
* It is now only suitable as part of a dynamic library!
* 14-Jan-04 Bob Ippolito <bob@redivi.com>
* added cr2-cr4 to the registers to be saved.
* Open questions: Should we save FP registers?
* What about vector registers?
* Differences between darwin and unix?
* 24-Nov-02 Christian Tismer <tismer@tismer.com>
* needed to add another magic constant to ensure
* that f in slp_eval_frame(PyFrameObject *f)
* gets included into the saved stack area.
* STACK_REFPLUS will probably be 1 in most cases.
* 04-Oct-02 Gustavo Niemeyer <niemeyer@conectiva.com>
* Ported from MacOS version.
* 17-Sep-02 Christian Tismer <tismer@tismer.com>
* after virtualizing stack save/restore, the
* stack size shrunk a bit. Needed to introduce
* an adjustment STACK_MAGIC per platform.
* 15-Sep-02 Gerd Woetzel <gerd.woetzel@GMD.DE>
* slightly changed framework for sparc
* 29-Jun-02 Christian Tismer <tismer@tismer.com>
* Added register 13-29, 31 saves. The same way as
* Armin Rigo did for the x86_unix version.
* This seems to be now fully functional!
* 04-Mar-02 Hye-Shik Chang <perky@fallin.lv>
* Ported from i386.
* 31-Jul-12 Trevor Bowen <trevorbowen@gmail.com>
* Changed memory constraints to register only.
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 3
/* !!!!WARNING!!!! need to add "r31" in the next line if this header file
* is meant to be compiled non-dynamically!
*/
#define REGS_TO_SAVE "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", \
"r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \
"cr2", "cr3", "cr4"
static int
slp_switch(void)
{
int err;
int *stackref, stsizediff;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ ("mr %0, 1" : "=r" (stackref) : );
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"mr 11, %0\n"
"add 1, 1, 11\n"
"add 30, 30, 11\n"
: /* no outputs */
: "r" (stsizediff)
: "11"
);
SLP_RESTORE_STATE();
}
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("li %0, 0" : "=r" (err));
return err;
}
#endif
/*
* further self-processing support
*/
/*
* if you want to add self-inspection tools, place them
* here. See the x86_msvc for the necessary defines.
* These features are highly experimental and not
* essential yet.
*/

View File

@ -0,0 +1,82 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 07-Sep-05 (py-dev mailing list discussion)
* removed 'r31' from the register-saved. !!!! WARNING !!!!
* It means that this file can no longer be compiled statically!
* It is now only suitable as part of a dynamic library!
* 14-Jan-04 Bob Ippolito <bob@redivi.com>
* added cr2-cr4 to the registers to be saved.
* Open questions: Should we save FP registers?
* What about vector registers?
* Differences between darwin and unix?
* 24-Nov-02 Christian Tismer <tismer@tismer.com>
* needed to add another magic constant to ensure
* that f in slp_eval_frame(PyFrameObject *f)
* gets included into the saved stack area.
* STACK_REFPLUS will probably be 1 in most cases.
* 17-Sep-02 Christian Tismer <tismer@tismer.com>
* after virtualizing stack save/restore, the
* stack size shrunk a bit. Needed to introduce
* an adjustment STACK_MAGIC per platform.
* 15-Sep-02 Gerd Woetzel <gerd.woetzel@GMD.DE>
* slightly changed framework for sparc
* 29-Jun-02 Christian Tismer <tismer@tismer.com>
* Added register 13-29, 31 saves. The same way as
* Armin Rigo did for the x86_unix version.
* This seems to be now fully functional!
* 04-Mar-02 Hye-Shik Chang <perky@fallin.lv>
* Ported from i386.
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 3
/* !!!!WARNING!!!! need to add "r31" in the next line if this header file
* is meant to be compiled non-dynamically!
*/
#define REGS_TO_SAVE "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", \
"r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \
"cr2", "cr3", "cr4"
static int
slp_switch(void)
{
int err;
int *stackref, stsizediff;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ ("; asm block 2\n\tmr %0, r1" : "=r" (stackref) : );
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"; asm block 3\n"
"\tmr r11, %0\n"
"\tadd r1, r1, r11\n"
"\tadd r30, r30, r11\n"
: /* no outputs */
: "r" (stsizediff)
: "r11"
);
SLP_RESTORE_STATE();
}
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("li %0, 0" : "=r" (err));
return err;
}
#endif
/*
* further self-processing support
*/
/*
* if you want to add self-inspection tools, place them
* here. See the x86_msvc for the necessary defines.
* These features are highly experimental and not
* essential yet.
*/

View File

@ -0,0 +1,82 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 07-Sep-05 (py-dev mailing list discussion)
* removed 'r31' from the register-saved. !!!! WARNING !!!!
* It means that this file can no longer be compiled statically!
* It is now only suitable as part of a dynamic library!
* 14-Jan-04 Bob Ippolito <bob@redivi.com>
* added cr2-cr4 to the registers to be saved.
* Open questions: Should we save FP registers?
* What about vector registers?
* Differences between darwin and unix?
* 24-Nov-02 Christian Tismer <tismer@tismer.com>
* needed to add another magic constant to ensure
* that f in slp_eval_frame(PyFrameObject *f)
* gets included into the saved stack area.
* STACK_REFPLUS will probably be 1 in most cases.
* 04-Oct-02 Gustavo Niemeyer <niemeyer@conectiva.com>
* Ported from MacOS version.
* 17-Sep-02 Christian Tismer <tismer@tismer.com>
* after virtualizing stack save/restore, the
* stack size shrunk a bit. Needed to introduce
* an adjustment STACK_MAGIC per platform.
* 15-Sep-02 Gerd Woetzel <gerd.woetzel@GMD.DE>
* slightly changed framework for sparc
* 29-Jun-02 Christian Tismer <tismer@tismer.com>
* Added register 13-29, 31 saves. The same way as
* Armin Rigo did for the x86_unix version.
* This seems to be now fully functional!
* 04-Mar-02 Hye-Shik Chang <perky@fallin.lv>
* Ported from i386.
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 3
/* !!!!WARNING!!!! need to add "r31" in the next line if this header file
* is meant to be compiled non-dynamically!
*/
#define REGS_TO_SAVE "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", \
"r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \
"cr2", "cr3", "cr4"
static int
slp_switch(void)
{
int err;
int *stackref, stsizediff;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ ("mr %0, 1" : "=r" (stackref) : );
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"mr 11, %0\n"
"add 1, 1, 11\n"
"add 30, 30, 11\n"
: /* no outputs */
: "r" (stsizediff)
: "11"
);
SLP_RESTORE_STATE();
}
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("li %0, 0" : "=r" (err));
return err;
}
#endif
/*
* further self-processing support
*/
/*
* if you want to add self-inspection tools, place them
* here. See the x86_msvc for the necessary defines.
* These features are highly experimental and not
* essential yet.
*/

View File

@ -0,0 +1,45 @@
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 0
#define REGS_TO_SAVE "s1", "s2", "s3", "s4", "s5", \
"s6", "s7", "s8", "s9", "s10", "s11", "fs0", "fs1", \
"fs2", "fs3", "fs4", "fs5", "fs6", "fs7", "fs8", "fs9", \
"fs10", "fs11"
static int
slp_switch(void)
{
int ret;
#if __riscv_xlen == 32
int fp;
int *stackref, stsizediff;
#else
/* On RV64 (LP64) these values are register width (64 bits), matching the
   ``ld`` reload of fp below, so they need a 64-bit type. */
long fp;
long *stackref, stsizediff;
#endif
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("mv %0, fp" : "=r" (fp) : );
__asm__ volatile ("mv %0, sp" : "=r" (stackref) : );
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"add sp, sp, %0\n\t"
"add fp, fp, %0\n\t"
: /* no outputs */
: "r" (stsizediff)
);
SLP_RESTORE_STATE();
}
__asm__ volatile ("" : : : REGS_TO_SAVE);
#if __riscv_xlen == 32
__asm__ volatile ("lw fp, %0" : : "m" (fp));
#else
__asm__ volatile ("ld fp, %0" : : "m" (fp));
#endif
__asm__ volatile ("mv %0, zero" : "=r" (ret) : );
return ret;
}
#endif

View File

@ -0,0 +1,87 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 25-Jan-12 Alexey Borzenkov <snaury@gmail.com>
* Fixed Linux/S390 port to work correctly with
* different optimization options both on 31-bit
* and 64-bit. Thanks to Stefan Raabe for lots
* of testing.
* 24-Nov-02 Christian Tismer <tismer@tismer.com>
* needed to add another magic constant to ensure
* that f in slp_eval_frame(PyFrameObject *f)
* gets included into the saved stack area.
* STACK_REFPLUS will probably be 1 in most cases.
* 06-Oct-02 Gustavo Niemeyer <niemeyer@conectiva.com>
* Ported to Linux/S390.
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#ifdef __s390x__
#define STACK_MAGIC 20 /* 20 * 8 = 160 bytes of function call area */
#else
#define STACK_MAGIC 24 /* 24 * 4 = 96 bytes of function call area */
#endif
/* Technically, r11-r13 also need saving, but function prolog starts
with stm(g) and since there are so many saved registers already
it won't be optimized, resulting in all r6-r15 being saved */
#define REGS_TO_SAVE "r6", "r7", "r8", "r9", "r10", "r14", \
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
"f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15"
static int
slp_switch(void)
{
int ret;
long *stackref, stsizediff;
__asm__ volatile ("" : : : REGS_TO_SAVE);
#ifdef __s390x__
__asm__ volatile ("lgr %0, 15" : "=r" (stackref) : );
#else
__asm__ volatile ("lr %0, 15" : "=r" (stackref) : );
#endif
{
SLP_SAVE_STATE(stackref, stsizediff);
/* N.B.
r11 may be used as the frame pointer, and in that case it cannot be
clobbered and needs offsetting just like the stack pointer (but in cases
where frame pointer isn't used we might clobber it accidentally). What's
scary is that r11 is 2nd (and even 1st when GOT is used) callee saved
register that gcc would chose for surviving function calls. However,
since r6-r10 are clobbered above, their cost for reuse is reduced, so
gcc IRA will chose them over r11 (not seeing r11 is implicitly saved),
making it relatively safe to offset in all cases. :) */
__asm__ volatile (
#ifdef __s390x__
"agr 15, %0\n\t"
"agr 11, %0"
#else
"ar 15, %0\n\t"
"ar 11, %0"
#endif
: /* no outputs */
: "r" (stsizediff)
);
SLP_RESTORE_STATE();
}
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("lhi %0, 0" : "=r" (ret) : );
return ret;
}
#endif
/*
* further self-processing support
*/
/*
* if you want to add self-inspection tools, place them
* here. See the x86_msvc for the necessary defines.
* These features are highly experimental and not
* essential yet.
*/

View File

@ -0,0 +1,36 @@
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 0
#define REGS_TO_SAVE "r8", "r9", "r10", "r11", "r13", \
"fr12", "fr13", "fr14", "fr15"
// r12 Global context pointer, GP
// r14 Frame pointer, FP
// r15 Stack pointer, SP
static int
slp_switch(void)
{
int err;
void* fp;
int *stackref, stsizediff;
__asm__ volatile("" : : : REGS_TO_SAVE);
__asm__ volatile("mov.l r14, %0" : "=m"(fp) : :);
__asm__("mov r15, %0" : "=r"(stackref));
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile(
"add %0, r15\n"
"add %0, r14\n"
: /* no outputs */
: "r"(stsizediff));
SLP_RESTORE_STATE();
__asm__ volatile("mov r0, %0" : "=r"(err) : :);
}
__asm__ volatile("mov.l %0, r14" : : "m"(fp) :);
__asm__ volatile("" : : : REGS_TO_SAVE);
return err;
}
#endif

View File

@ -0,0 +1,92 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 16-May-15 Alexey Borzenkov <snaury@gmail.com>
* Move stack spilling code inside save/restore functions
* 30-Aug-13 Floris Bruynooghe <flub@devork.be>
Clean the register windows again before returning.
This does not clobber the PIC register as it leaves
the current window intact and is required for multi-
threaded code to work correctly.
* 08-Mar-11 Floris Bruynooghe <flub@devork.be>
* No need to set return value register explicitly
* before the stack and framepointer are adjusted
* as none of the other registers are influenced by
* this. Also don't needlessly clean the windows
* ('ta %0" :: "i" (ST_CLEAN_WINDOWS)') as that
* clobbers the gcc PIC register (%l7).
* 24-Nov-02 Christian Tismer <tismer@tismer.com>
* needed to add another magic constant to ensure
* that f in slp_eval_frame(PyFrameObject *f)
* gets included into the saved stack area.
* STACK_REFPLUS will probably be 1 in most cases.
* 17-Sep-02 Christian Tismer <tismer@tismer.com>
* after virtualizing stack save/restore, the
* stack size shrunk a bit. Needed to introduce
* an adjustment STACK_MAGIC per platform.
* 15-Sep-02 Gerd Woetzel <gerd.woetzel@GMD.DE>
* added support for SunOS sparc with gcc
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 0
#if defined(__sparcv9)
#define SLP_FLUSHW __asm__ volatile ("flushw")
#else
#define SLP_FLUSHW __asm__ volatile ("ta 3") /* ST_FLUSH_WINDOWS */
#endif
/* On sparc we need to spill register windows inside save/restore functions */
#define SLP_BEFORE_SAVE_STATE() SLP_FLUSHW
#define SLP_BEFORE_RESTORE_STATE() SLP_FLUSHW
static int
slp_switch(void)
{
int err;
int *stackref, stsizediff;
/* Put current stack pointer into stackref.
* Register spilling is done in save/restore.
*/
__asm__ volatile ("mov %%sp, %0" : "=r" (stackref));
{
/* Thou shalt put SLP_SAVE_STATE into a local block */
/* Copy the current stack onto the heap */
SLP_SAVE_STATE(stackref, stsizediff);
/* Increment stack and frame pointer by stsizediff */
__asm__ volatile (
"add %0, %%sp, %%sp\n\t"
"add %0, %%fp, %%fp"
: : "r" (stsizediff));
/* Copy new stack from it's save store on the heap */
SLP_RESTORE_STATE();
__asm__ volatile ("mov %1, %0" : "=r" (err) : "i" (0));
return err;
}
}
#endif
/*
* further self-processing support
*/
/*
* if you want to add self-inspection tools, place them
* here. See the x86_msvc for the necessary defines.
* These features are highly experimental and not
* essential yet.
*/

View File

@ -0,0 +1,63 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 17-Aug-12 Fantix King <fantix.king@gmail.com>
* Ported from amd64.
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 0
#define REGS_TO_SAVE "r12", "r13", "r14", "r15"
static int
slp_switch(void)
{
void* ebp;
void* ebx;
unsigned int csr;
unsigned short cw;
int err;
int *stackref, stsizediff;
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("fstcw %0" : "=m" (cw));
__asm__ volatile ("stmxcsr %0" : "=m" (csr));
__asm__ volatile ("movl %%ebp, %0" : "=m" (ebp));
__asm__ volatile ("movl %%ebx, %0" : "=m" (ebx));
__asm__ ("movl %%esp, %0" : "=g" (stackref));
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"addl %0, %%esp\n"
"addl %0, %%ebp\n"
:
: "r" (stsizediff)
);
SLP_RESTORE_STATE();
}
__asm__ volatile ("movl %0, %%ebx" : : "m" (ebx));
__asm__ volatile ("movl %0, %%ebp" : : "m" (ebp));
__asm__ volatile ("ldmxcsr %0" : : "m" (csr));
__asm__ volatile ("fldcw %0" : : "m" (cw));
__asm__ volatile ("" : : : REGS_TO_SAVE);
__asm__ volatile ("xorl %%eax, %%eax" : "=a" (err));
return err;
}
#endif
/*
* further self-processing support
*/
/*
* if you want to add self-inspection tools, place them
* here. See the x86_msvc for the necessary defines.
* These features are highly experimental and not
* essential yet.
*/

View File

@ -0,0 +1,111 @@
;
; stack switching code for MASM on x64
; Kristjan Valur Jonsson, sept 2005
;
;prototypes for our calls
slp_save_state_asm PROTO
slp_restore_state_asm PROTO
pushxmm MACRO reg
sub rsp, 16
.allocstack 16
movaps [rsp], reg ; faster than movups, but we must be aligned
; .savexmm128 reg, offset (don't know what offset is, no documentation)
ENDM
popxmm MACRO reg
movaps reg, [rsp] ; faster than movups, but we must be aligned
add rsp, 16
ENDM
pushreg MACRO reg
push reg
.pushreg reg
ENDM
popreg MACRO reg
pop reg
ENDM
.code
slp_switch PROC FRAME
;realign stack to 16 bytes after return address push, makes the following faster
sub rsp,8
.allocstack 8
pushxmm xmm15
pushxmm xmm14
pushxmm xmm13
pushxmm xmm12
pushxmm xmm11
pushxmm xmm10
pushxmm xmm9
pushxmm xmm8
pushxmm xmm7
pushxmm xmm6
pushreg r15
pushreg r14
pushreg r13
pushreg r12
pushreg rbp
pushreg rbx
pushreg rdi
pushreg rsi
sub rsp, 10h ;allocate the single function argument (must be multiple of 16)
.allocstack 10h
.endprolog
lea rcx, [rsp+10h] ;load stack base that we are saving
call slp_save_state_asm ;pass stackpointer, return offset in eax
cmp rax, 1
je EXIT1
cmp rax, -1
je EXIT2
;actual stack switch:
add rsp, rax
call slp_restore_state_asm
xor rax, rax ;return 0
EXIT:
add rsp, 10h
popreg rsi
popreg rdi
popreg rbx
popreg rbp
popreg r12
popreg r13
popreg r14
popreg r15
popxmm xmm6
popxmm xmm7
popxmm xmm8
popxmm xmm9
popxmm xmm10
popxmm xmm11
popxmm xmm12
popxmm xmm13
popxmm xmm14
popxmm xmm15
add rsp, 8
ret
EXIT1:
mov rax, 1
jmp EXIT
EXIT2:
sar rax, 1
jmp EXIT
slp_switch ENDP
END

View File

@ -0,0 +1,60 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 24-Nov-02 Christian Tismer <tismer@tismer.com>
* needed to add another magic constant to ensure
* that f in slp_eval_frame(PyFrameObject *f)
* gets included into the saved stack area.
* STACK_REFPLUS will probably be 1 in most cases.
* 26-Sep-02 Christian Tismer <tismer@tismer.com>
* again as a result of virtualized stack access,
* the compiler used less registers. Needed to
* explicit mention registers in order to get them saved.
* Thanks to Jeff Senn for pointing this out and help.
* 17-Sep-02 Christian Tismer <tismer@tismer.com>
* after virtualizing stack save/restore, the
* stack size shrunk a bit. Needed to introduce
* an adjustment STACK_MAGIC per platform.
* 15-Sep-02 Gerd Woetzel <gerd.woetzel@GMD.DE>
* slightly changed framework for sparc
* 01-Mar-02 Christian Tismer <tismer@tismer.com>
* Initial final version after lots of iterations for i386.
*/
/* Avoid alloca redefined warning on mingw64 */
#ifndef alloca
#define alloca _alloca
#endif
#define STACK_REFPLUS 1
#define STACK_MAGIC 0
/* Use the generic support for an external assembly language slp_switch function. */
#define EXTERNAL_ASM
#ifdef SLP_EVAL
/* This always uses the external masm assembly file. */
#endif
/*
* further self-processing support
*/
/* we have IsBadReadPtr available, so we can peek at objects */
/*
#define STACKLESS_SPY
#ifdef IMPLEMENT_STACKLESSMODULE
#include "Windows.h"
#define CANNOT_READ_MEM(p, bytes) IsBadReadPtr(p, bytes)
static int IS_ON_STACK(void*p)
{
int stackref;
intptr_t stackbase = ((intptr_t)&stackref) & 0xfffff000;
return (intptr_t)p >= stackbase && (intptr_t)p < stackbase + 0x00100000;
}
#endif
*/

View File

@ -0,0 +1,326 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 24-Nov-02 Christian Tismer <tismer@tismer.com>
* needed to add another magic constant to ensure
* that f in slp_eval_frame(PyFrameObject *f)
* gets included into the saved stack area.
* STACK_REFPLUS will probably be 1 in most cases.
* 26-Sep-02 Christian Tismer <tismer@tismer.com>
* again as a result of virtualized stack access,
* the compiler used less registers. Needed to
* explicit mention registers in order to get them saved.
* Thanks to Jeff Senn for pointing this out and help.
* 17-Sep-02 Christian Tismer <tismer@tismer.com>
* after virtualizing stack save/restore, the
* stack size shrunk a bit. Needed to introduce
* an adjustment STACK_MAGIC per platform.
* 15-Sep-02 Gerd Woetzel <gerd.woetzel@GMD.DE>
* slightly changed framework for sparc
* 01-Mar-02 Christian Tismer <tismer@tismer.com>
* Initial final version after lots of iterations for i386.
*/
#define alloca _alloca
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
#define STACK_MAGIC 0
/* Some magic to quell warnings and keep slp_switch() from crashing when built
with VC90. Disable global optimizations, and the warning: frame pointer
register 'ebp' modified by inline assembly code.
We used to just disable global optimizations ("g") but upstream stackless
Python, as well as stackman, turn off all optimizations.
References:
https://github.com/stackless-dev/stackman/blob/dbc72fe5207a2055e658c819fdeab9731dee78b9/stackman/platforms/switch_x86_msvc.h
https://github.com/stackless-dev/stackless/blob/main-slp/Stackless/platf/switch_x86_msvc.h
*/
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#pragma optimize("", off) /* so that autos are stored on the stack */
#pragma warning(disable:4731)
#pragma warning(disable:4733) /* disable warning about modifying FS[0] */
/**
* Most modern compilers and environments handle C++ exceptions without any
* special help from us. MSVC on 32-bit windows is an exception. There, C++
* exceptions are dealt with using Windows' Structured Exception Handling
* (SEH).
*
* SEH is implemented as a singly linked list of <function*, prev*> nodes. The
* head of this list is stored in the Thread Information Block, which itself
* is pointed to from the FS register. It's the first field in the structure,
* or offset 0, so we can access it using assembly FS:[0], or the compiler
* intrinsics and field offset information from the headers (as we do below).
* Somewhat unusually, the tail of the list doesn't have prev == NULL, it has
* prev == 0xFFFFFFFF.
*
* SEH was designed for C, and traditionally uses the MSVC compiler
* intrinsics __try{}/__except{}. It is also utilized for C++ exceptions by
* MSVC; there, every throw of a C++ exception raises a SEH error with the
* ExceptionCode 0xE06D7363; the SEH handler list is then traversed to
* deal with the exception.
*
* If the SEH list is corrupt, then when a C++ exception is thrown the program
* will abruptly exit with exit code 1. This does not use std::terminate(), so
* std::set_terminate() is useless to debug this.
*
* The SEH list is closely tied to the call stack; entering a function that
* uses __try{} or most C++ functions will push a new handler onto the front
* of the list. Returning from the function will remove the handler. Saving
* and restoring the head node of the SEH list (FS:[0]) per-greenlet is NOT
* ENOUGH to make SEH or exceptions work.
*
* Stack switching breaks SEH because the call stack no longer necessarily
* matches the SEH list. For example, given greenlet A that switches to
* greenlet B, at the moment of entering greenlet B, we will have any SEH
* handlers from greenlet A on the SEH list; greenlet B can then add its own
* handlers to the SEH list. When greenlet B switches back to greenlet A,
* greenlet B's handlers would still be on the SEH stack, but when switch()
* returns control to greenlet A, we have replaced the contents of the stack
* in memory, so all the addresses that greenlet B added to the SEH list are now
* invalid: part of the call stack has been unwound, but the SEH list was out
* of sync with the call stack. The net effect is that exception handling
* stops working.
*
* Thus, when switching greenlets, we need to be sure that the SEH list
* matches the effective call stack, "cutting out" any handlers that were
* pushed by the greenlet that switched out and which are no longer valid.
*
* The easiest way to do this is to capture the SEH list at the time the main
* greenlet for a thread is created, and, when initially starting a greenlet,
* start a new SEH list for it, which contains nothing but the handler
* established for the new greenlet itself, with the tail being the handlers
* for the main greenlet. If we then save and restore the SEH per-greenlet,
* they won't interfere with each other's SEH lists. (No greenlet can unwind
* the call stack past the handlers established by the main greenlet).
*
* By observation, a new thread starts with three SEH handlers on the list. By
* the time we get around to creating the main greenlet, though, there can be
* many more, established by transient calls that lead to the creation of the
* main greenlet. Therefore, 3 is a magic constant telling us when to perform
* the initial slice.
*
* All of this can be debugged using a vectored exception handler, which
* operates independently of the SEH handler list, and is called first.
* Walking the SEH list at key points can also be helpful.
*
* References:
* https://en.wikipedia.org/wiki/Win32_Thread_Information_Block
* https://devblogs.microsoft.com/oldnewthing/20100730-00/?p=13273
* https://docs.microsoft.com/en-us/cpp/cpp/try-except-statement?view=msvc-160
* https://docs.microsoft.com/en-us/cpp/cpp/structured-exception-handling-c-cpp?view=msvc-160
* https://docs.microsoft.com/en-us/windows/win32/debug/structured-exception-handling
* https://docs.microsoft.com/en-us/windows/win32/debug/using-a-vectored-exception-handler
* https://bytepointer.com/resources/pietrek_crash_course_depths_of_win32_seh.htm
*/
#define GREENLET_NEEDS_EXCEPTION_STATE_SAVED
typedef struct _GExceptionRegistration {
struct _GExceptionRegistration* prev;
void* handler_f;
} GExceptionRegistration;
static void
slp_set_exception_state(const void *const seh_state)
{
// Because the stack frame from which we do this is ALSO a handler, and
// that one we want to keep, we need to relink the current SEH handler
// frame to point to this one, cutting out the middle men, as it were.
//
// Entering a try block doesn't change the SEH frame, but entering a
// function containing a try block does.
GExceptionRegistration* current_seh_state = (GExceptionRegistration*)__readfsdword(FIELD_OFFSET(NT_TIB, ExceptionList));
current_seh_state->prev = (GExceptionRegistration*)seh_state;
}
static GExceptionRegistration*
x86_slp_get_third_oldest_handler()
{
GExceptionRegistration* a = NULL; /* Closest to the top */
GExceptionRegistration* b = NULL; /* second */
GExceptionRegistration* c = NULL;
GExceptionRegistration* seh_state = (GExceptionRegistration*)__readfsdword(FIELD_OFFSET(NT_TIB, ExceptionList));
a = b = c = seh_state;
while (seh_state && seh_state != (GExceptionRegistration*)0xFFFFFFFF) {
if ((void*)seh_state->prev < (void*)100) {
fprintf(stderr, "\tERROR: Broken SEH chain.\n");
return NULL;
}
a = b;
b = c;
c = seh_state;
seh_state = seh_state->prev;
}
return a ? a : (b ? b : c);
}
static void*
slp_get_exception_state()
{
// XXX: There appear to be three SEH handlers on the stack already at the
// start of the thread. Is that a guarantee? Almost certainly not. Yet in
// all observed cases it has been three. This is consistent with
// faulthandler off or on, and optimizations off or on. It may not be
// consistent with other operating system versions, though: we only have
// CI on one or two versions (don't ask what they are).
// In theory we could capture the number of handlers on the chain when
// PyInit__greenlet is called: there are probably only the default
// handlers at that point (unless we're embedded and people have used
// __try/__except or a C++ handler)?
return x86_slp_get_third_oldest_handler();
}
static int
slp_switch(void)
{
/* MASM syntax is typically reversed from other assemblers.
It is usually <instruction> <destination> <source>
*/
int *stackref, stsizediff;
/* store the structured exception state for this stack */
DWORD seh_state = __readfsdword(FIELD_OFFSET(NT_TIB, ExceptionList));
__asm mov stackref, esp;
/* modify EBX, ESI and EDI in order to get them preserved */
__asm mov ebx, ebx;
__asm xchg esi, edi;
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm {
mov eax, stsizediff
add esp, eax
add ebp, eax
}
SLP_RESTORE_STATE();
}
__writefsdword(FIELD_OFFSET(NT_TIB, ExceptionList), seh_state);
return 0;
}
/* re-enable ebp warning and global optimizations. */
#pragma optimize("", on)
#pragma warning(default:4731)
#pragma warning(default:4733) /* re-enable warning about modifying FS[0] */
#endif
/*
* further self-processing support
*/
/* we have IsBadReadPtr available, so we can peek at objects */
#define STACKLESS_SPY
#ifdef GREENLET_DEBUG
#define CANNOT_READ_MEM(p, bytes) IsBadReadPtr(p, bytes)
static int IS_ON_STACK(void*p)
{
int stackref;
int stackbase = ((int)&stackref) & 0xfffff000;
return (int)p >= stackbase && (int)p < stackbase + 0x00100000;
}
static void
x86_slp_show_seh_chain()
{
GExceptionRegistration* seh_state = (GExceptionRegistration*)__readfsdword(FIELD_OFFSET(NT_TIB, ExceptionList));
fprintf(stderr, "====== SEH Chain ======\n");
while (seh_state && seh_state != (GExceptionRegistration*)0xFFFFFFFF) {
fprintf(stderr, "\tSEH_chain addr: %p handler: %p prev: %p\n",
seh_state,
seh_state->handler_f, seh_state->prev);
if ((void*)seh_state->prev < (void*)100) {
fprintf(stderr, "\tERROR: Broken chain.\n");
break;
}
seh_state = seh_state->prev;
}
fprintf(stderr, "====== End SEH Chain ======\n");
fflush(NULL);
return;
}
//addVectoredExceptionHandler constants:
//CALL_FIRST means call this exception handler first;
//CALL_LAST means call this exception handler last
#define CALL_FIRST 1
#define CALL_LAST 0
LONG WINAPI
GreenletVectorHandler(PEXCEPTION_POINTERS ExceptionInfo)
{
// We get one of these for every C++ exception, with code
// E06D7363
// This is a special value that means "C++ exception from MSVC"
// https://devblogs.microsoft.com/oldnewthing/20100730-00/?p=13273
//
// Install in the module init function with:
// AddVectoredExceptionHandler(CALL_FIRST, GreenletVectorHandler);
PEXCEPTION_RECORD ExceptionRecord = ExceptionInfo->ExceptionRecord;
fprintf(stderr,
"GOT VECTORED EXCEPTION:\n"
"\tExceptionCode : %p\n"
"\tExceptionFlags : %p\n"
"\tExceptionAddr : %p\n"
"\tNumberparams : %ld\n",
ExceptionRecord->ExceptionCode,
ExceptionRecord->ExceptionFlags,
ExceptionRecord->ExceptionAddress,
ExceptionRecord->NumberParameters
);
if (ExceptionRecord->ExceptionFlags & 1) {
fprintf(stderr, "\t\tEH_NONCONTINUABLE\n" );
}
if (ExceptionRecord->ExceptionFlags & 2) {
fprintf(stderr, "\t\tEH_UNWINDING\n" );
}
if (ExceptionRecord->ExceptionFlags & 4) {
fprintf(stderr, "\t\tEH_EXIT_UNWIND\n" );
}
if (ExceptionRecord->ExceptionFlags & 8) {
fprintf(stderr, "\t\tEH_STACK_INVALID\n" );
}
if (ExceptionRecord->ExceptionFlags & 0x10) {
fprintf(stderr, "\t\tEH_NESTED_CALL\n" );
}
if (ExceptionRecord->ExceptionFlags & 0x20) {
fprintf(stderr, "\t\tEH_TARGET_UNWIND\n" );
}
if (ExceptionRecord->ExceptionFlags & 0x40) {
fprintf(stderr, "\t\tEH_COLLIDED_UNWIND\n" );
}
fprintf(stderr, "\n");
fflush(NULL);
for(DWORD i = 0; i < ExceptionRecord->NumberParameters; i++) {
fprintf(stderr, "\t\t\tParam %ld: %lX\n", i, ExceptionRecord->ExceptionInformation[i]);
}
if (ExceptionRecord->NumberParameters == 3) {
fprintf(stderr, "\tAbout to traverse SEH chain\n");
// C++ Exception records have 3 params.
x86_slp_show_seh_chain();
}
return EXCEPTION_CONTINUE_SEARCH;
}
#endif

View File

@ -0,0 +1,105 @@
/*
* this is the internal transfer function.
*
* HISTORY
* 3-May-13 Ralf Schmitt <ralf@systemexit.de>
* Add support for strange GCC caller-save decisions
* (ported from switch_aarch64_gcc.h)
* 19-Aug-11 Alexey Borzenkov <snaury@gmail.com>
* Correctly save ebp, ebx and cw
* 07-Sep-05 (py-dev mailing list discussion)
* removed 'ebx' from the register-saved. !!!! WARNING !!!!
* It means that this file can no longer be compiled statically!
* It is now only suitable as part of a dynamic library!
* 24-Nov-02 Christian Tismer <tismer@tismer.com>
* needed to add another magic constant to ensure
* that f in slp_eval_frame(PyFrameObject *f)
* gets included into the saved stack area.
* STACK_REFPLUS will probably be 1 in most cases.
* 17-Sep-02 Christian Tismer <tismer@tismer.com>
* after virtualizing stack save/restore, the
* stack size shrunk a bit. Needed to introduce
* an adjustment STACK_MAGIC per platform.
* 15-Sep-02 Gerd Woetzel <gerd.woetzel@GMD.DE>
* slightly changed framework for sparc
* 31-Apr-02 Armin Rigo <arigo@ulb.ac.be>
* Added ebx, esi and edi register-saves.
* 01-Mar-02 Samual M. Rushing <rushing@ironport.com>
* Ported from i386.
*/
#define STACK_REFPLUS 1
#ifdef SLP_EVAL
/* #define STACK_MAGIC 3 */
/* the above works fine with gcc 2.96, but 2.95.3 wants this */
#define STACK_MAGIC 0
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
# define ATTR_NOCLONE __attribute__((noclone))
#else
# define ATTR_NOCLONE
#endif
static int
slp_switch(void)
{
int err;
#ifdef _WIN32
void *seh;
#endif
void *ebp, *ebx;
unsigned short cw;
int *stackref, stsizediff;
__asm__ volatile ("" : : : "esi", "edi");
__asm__ volatile ("fstcw %0" : "=m" (cw));
__asm__ volatile ("movl %%ebp, %0" : "=m" (ebp));
__asm__ volatile ("movl %%ebx, %0" : "=m" (ebx));
#ifdef _WIN32
__asm__ volatile (
"movl %%fs:0x0, %%eax\n"
"movl %%eax, %0\n"
: "=m" (seh)
:
: "eax");
#endif
__asm__ ("movl %%esp, %0" : "=g" (stackref));
{
SLP_SAVE_STATE(stackref, stsizediff);
__asm__ volatile (
"addl %0, %%esp\n"
"addl %0, %%ebp\n"
:
: "r" (stsizediff)
);
SLP_RESTORE_STATE();
__asm__ volatile ("xorl %%eax, %%eax" : "=a" (err));
}
#ifdef _WIN32
__asm__ volatile (
"movl %0, %%eax\n"
"movl %%eax, %%fs:0x0\n"
:
: "m" (seh)
: "eax");
#endif
__asm__ volatile ("movl %0, %%ebx" : : "m" (ebx));
__asm__ volatile ("movl %0, %%ebp" : : "m" (ebp));
__asm__ volatile ("fldcw %0" : : "m" (cw));
__asm__ volatile ("" : : : "esi", "edi");
return err;
}
#endif
/*
* further self-processing support
*/
/*
* if you want to add self-inspection tools, place them
* here. See the x86_msvc for the necessary defines.
* These features are highly experimental and not
* essential yet.
*/

View File

@ -0,0 +1,75 @@
/*
* Platform Selection for Stackless Python
*/
#ifdef __cplusplus
extern "C" {
#endif
#if defined(MS_WIN32) && !defined(MS_WIN64) && defined(_M_IX86) && defined(_MSC_VER)
# include "platform/switch_x86_msvc.h" /* MS Visual Studio on X86 */
#elif defined(MS_WIN64) && defined(_M_X64) && defined(_MSC_VER) || defined(__MINGW64__)
# include "platform/switch_x64_msvc.h" /* MS Visual Studio on X64 */
#elif defined(MS_WIN64) && defined(_M_ARM64)
# include "platform/switch_arm64_msvc.h" /* MS Visual Studio on ARM64 */
#elif defined(__GNUC__) && defined(__amd64__) && defined(__ILP32__)
# include "platform/switch_x32_unix.h" /* gcc on amd64 with x32 ABI */
#elif defined(__GNUC__) && defined(__amd64__)
# include "platform/switch_amd64_unix.h" /* gcc on amd64 */
#elif defined(__GNUC__) && defined(__i386__)
# include "platform/switch_x86_unix.h" /* gcc on X86 */
#elif defined(__GNUC__) && defined(__powerpc64__) && (defined(__linux__) || defined(__FreeBSD__))
# include "platform/switch_ppc64_linux.h" /* gcc on PowerPC 64-bit */
#elif defined(__GNUC__) && defined(__PPC__) && (defined(__linux__) || defined(__FreeBSD__))
# include "platform/switch_ppc_linux.h" /* gcc on PowerPC */
#elif defined(__GNUC__) && defined(__POWERPC__) && defined(__APPLE__)
# include "platform/switch_ppc_macosx.h" /* Apple MacOS X on 32-bit PowerPC */
#elif defined(__GNUC__) && defined(__powerpc64__) && defined(_AIX)
# include "platform/switch_ppc64_aix.h" /* gcc on AIX/PowerPC 64-bit */
#elif defined(__GNUC__) && defined(_ARCH_PPC) && defined(_AIX)
# include "platform/switch_ppc_aix.h" /* gcc on AIX/PowerPC */
#elif defined(__GNUC__) && defined(__powerpc__) && defined(__NetBSD__)
#include "platform/switch_ppc_unix.h" /* gcc on NetBSD/powerpc */
#elif defined(__GNUC__) && defined(sparc)
# include "platform/switch_sparc_sun_gcc.h" /* SunOS sparc with gcc */
#elif defined(__SUNPRO_C) && defined(sparc) && defined(sun)
# include "platform/switch_sparc_sun_gcc.h" /* SunStudio on amd64 */
#elif defined(__SUNPRO_C) && defined(__amd64__) && defined(sun)
# include "platform/switch_amd64_unix.h" /* SunStudio on amd64 */
#elif defined(__SUNPRO_C) && defined(__i386__) && defined(sun)
# include "platform/switch_x86_unix.h" /* SunStudio on x86 */
#elif defined(__GNUC__) && defined(__s390__) && defined(__linux__)
# include "platform/switch_s390_unix.h" /* Linux/S390 */
#elif defined(__GNUC__) && defined(__s390x__) && defined(__linux__)
# include "platform/switch_s390_unix.h" /* Linux/S390 zSeries (64-bit) */
#elif defined(__GNUC__) && defined(__arm__)
# ifdef __APPLE__
# include <TargetConditionals.h>
# endif
# if TARGET_OS_IPHONE
# include "platform/switch_arm32_ios.h" /* iPhone OS on arm32 */
# else
# include "platform/switch_arm32_gcc.h" /* gcc using arm32 */
# endif
#elif defined(__GNUC__) && defined(__mips__) && defined(__linux__)
# include "platform/switch_mips_unix.h" /* Linux/MIPS */
#elif defined(__GNUC__) && defined(__aarch64__)
# include "platform/switch_aarch64_gcc.h" /* Aarch64 ABI */
#elif defined(__GNUC__) && defined(__mc68000__)
# include "platform/switch_m68k_gcc.h" /* gcc on m68k */
#elif defined(__GNUC__) && defined(__csky__)
#include "platform/switch_csky_gcc.h" /* gcc on csky */
# elif defined(__GNUC__) && defined(__riscv)
# include "platform/switch_riscv_unix.h" /* gcc on RISC-V */
#elif defined(__GNUC__) && defined(__alpha__)
# include "platform/switch_alpha_unix.h" /* gcc on DEC Alpha */
#elif defined(MS_WIN32) && defined(__llvm__) && defined(__aarch64__)
# include "platform/switch_aarch64_gcc.h" /* LLVM Aarch64 ABI for Windows */
#elif defined(__GNUC__) && defined(__loongarch64) && defined(__linux__)
# include "platform/switch_loongarch64_linux.h" /* LoongArch64 */
#elif defined(__GNUC__) && defined(__sh__)
# include "platform/switch_sh_gcc.h" /* SuperH */
#endif
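/*
 * Illustrative only (not part of the build): support for a new platform is
 * added by extending the #if chain above with another compiler/architecture
 * test, for example (hypothetical macro and header name):
 *
 *     #elif defined(__GNUC__) && defined(__newarch__)
 *     #  include "platform/switch_newarch_gcc.h"
 */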
#ifdef __cplusplus
};
#endif

View File

@ -0,0 +1,240 @@
# -*- coding: utf-8 -*-
"""
Tests for greenlet.
"""
import os
import sys
import unittest
from gc import collect
from gc import get_objects
from threading import active_count as active_thread_count
from time import sleep
from time import time
import psutil
from greenlet import greenlet as RawGreenlet
from greenlet import getcurrent
from greenlet._greenlet import get_pending_cleanup_count
from greenlet._greenlet import get_total_main_greenlets
from . import leakcheck
PY312 = sys.version_info[:2] >= (3, 12)
PY313 = sys.version_info[:2] >= (3, 13)
# XXX: First tested on 3.14a7. Revisit all uses of this on later versions to ensure they
# are still valid.
PY314 = sys.version_info[:2] >= (3, 14)
WIN = sys.platform.startswith("win")
RUNNING_ON_GITHUB_ACTIONS = os.environ.get('GITHUB_ACTIONS')
RUNNING_ON_TRAVIS = os.environ.get('TRAVIS') or RUNNING_ON_GITHUB_ACTIONS
RUNNING_ON_APPVEYOR = os.environ.get('APPVEYOR')
RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR
RUNNING_ON_MANYLINUX = os.environ.get('GREENLET_MANYLINUX')
class TestCaseMetaClass(type):
# wrap each test method with
# a) leak checks
def __new__(cls, classname, bases, classDict):
# pylint and pep8 fight over what this should be called (mcs or cls).
# pylint gets it right, but we can't scope disable pep8, so we go with
# its convention.
# pylint: disable=bad-mcs-classmethod-argument
check_totalrefcount = True
# Python 3: must copy, we mutate the classDict. Interestingly enough,
# it doesn't actually error out, but under 3.6 we wind up wrapping
# and re-wrapping the same items over and over and over.
for key, value in list(classDict.items()):
if key.startswith('test') and callable(value):
classDict.pop(key)
if check_totalrefcount:
value = leakcheck.wrap_refcount(value)
classDict[key] = value
return type.__new__(cls, classname, bases, classDict)
class TestCase(unittest.TestCase, metaclass=TestCaseMetaClass):
cleanup_attempt_sleep_duration = 0.001
cleanup_max_sleep_seconds = 1
def wait_for_pending_cleanups(self,
initial_active_threads=None,
initial_main_greenlets=None):
initial_active_threads = initial_active_threads or self.threads_before_test
initial_main_greenlets = initial_main_greenlets or self.main_greenlets_before_test
sleep_time = self.cleanup_attempt_sleep_duration
# NOTE: This is racy! A Python-level thread object may be dead
# and gone, but the C thread may not yet have fired its
# destructors and added to the queue. There's no particular
# way to know that's about to happen. We try to watch the
# Python threads to make sure they, at least, have gone away.
# Counting the main greenlets, which we can easily do deterministically,
# also helps.
# Always sleep at least once to let other threads run
sleep(sleep_time)
quit_after = time() + self.cleanup_max_sleep_seconds
# TODO: We could add an API that calls us back when a particular main greenlet is deleted?
# It would have to drop the GIL
while (
get_pending_cleanup_count()
or active_thread_count() > initial_active_threads
or (not self.expect_greenlet_leak
and get_total_main_greenlets() > initial_main_greenlets)):
sleep(sleep_time)
if time() > quit_after:
print("Time limit exceeded.")
print("Threads: Waiting for only", initial_active_threads,
"-->", active_thread_count())
print("MGlets : Waiting for only", initial_main_greenlets,
"-->", get_total_main_greenlets())
break
collect()
def count_objects(self, kind=list, exact_kind=True):
# pylint:disable=unidiomatic-typecheck
# Collect the garbage.
for _ in range(3):
collect()
if exact_kind:
return sum(
1
for x in get_objects()
if type(x) is kind
)
# instances
return sum(
1
for x in get_objects()
if isinstance(x, kind)
)
greenlets_before_test = 0
threads_before_test = 0
main_greenlets_before_test = 0
expect_greenlet_leak = False
def count_greenlets(self):
"""
Find all the greenlets and subclasses tracked by the GC.
"""
return self.count_objects(RawGreenlet, False)
def setUp(self):
# Ensure the main greenlet exists, otherwise the first test
# gets a false positive leak
super().setUp()
getcurrent()
self.threads_before_test = active_thread_count()
self.main_greenlets_before_test = get_total_main_greenlets()
self.wait_for_pending_cleanups(self.threads_before_test, self.main_greenlets_before_test)
self.greenlets_before_test = self.count_greenlets()
def tearDown(self):
if getattr(self, 'skipTearDown', False):
return
self.wait_for_pending_cleanups(self.threads_before_test, self.main_greenlets_before_test)
super().tearDown()
def get_expected_returncodes_for_aborted_process(self):
import signal
# The child should be aborted in an unusual way. On POSIX
# platforms, this is done with abort() and signal.SIGABRT,
# which is reflected in a negative return value; however, on
# Windows, even though we observe the child print "Fatal
# Python error: Aborted" and in older versions of the C
# runtime "This application has requested the Runtime to
# terminate it in an unusual way," it always has an exit code
# of 3. This is interesting because 3 is the error code for
# ERROR_PATH_NOT_FOUND; BUT: the C runtime abort() function
# also uses this code.
#
# If we link to the static C library on Windows, the error
# code changes to '0xc0000409' (hex(3221226505)), which
# apparently is STATUS_STACK_BUFFER_OVERRUN; but "What this
# means is that nowadays when you get a
# STATUS_STACK_BUFFER_OVERRUN, it doesn't actually mean that
# there is a stack buffer overrun. It just means that the
# application decided to terminate itself with great haste."
#
#
# On windows, we've also seen '0xc0000005' (hex(3221225477)).
# That's "Access Violation"
#
# See
# https://devblogs.microsoft.com/oldnewthing/20110519-00/?p=10623
# and
# https://docs.microsoft.com/en-us/previous-versions/k089yyh0(v=vs.140)?redirectedfrom=MSDN
# and
# https://devblogs.microsoft.com/oldnewthing/20190108-00/?p=100655
expected_exit = (
-signal.SIGABRT,
# But beginning on Python 3.11, the faulthandler
# that prints the C backtraces sometimes segfaults after
# reporting the exception but before printing the stack.
# This has only been seen on linux/gcc.
-signal.SIGSEGV,
) if not WIN else (
3,
0xc0000409,
0xc0000005,
)
return expected_exit
def get_process_uss(self):
"""
Return the current process's USS in bytes.
uss is available on Linux, macOS, Windows. Also known as
"Unique Set Size", this is the memory which is unique to a
process and which would be freed if the process was terminated
right now.
If this is not supported by ``psutil``, this raises the
:exc:`unittest.SkipTest` exception.
"""
try:
return psutil.Process().memory_full_info().uss
except AttributeError as e:
raise unittest.SkipTest("uss not supported") from e
def run_script(self, script_name, show_output=True):
import subprocess
script = os.path.join(
os.path.dirname(__file__),
script_name,
)
try:
return subprocess.check_output([sys.executable, script],
encoding='utf-8',
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if show_output:
print('-----')
print('Failed to run script', script)
print('~~~~~')
print(ex.output)
print('------')
raise
def assertScriptRaises(self, script_name, exitcodes=None):
import subprocess
with self.assertRaises(subprocess.CalledProcessError) as exc:
output = self.run_script(script_name, show_output=False)
__traceback_info__ = output
# We're going to fail the assertion if we get here, at least
# preserve the output in the traceback.
if exitcodes is None:
exitcodes = self.get_expected_returncodes_for_aborted_process()
self.assertIn(exc.exception.returncode, exitcodes)
return exc.exception
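# Illustrative usage of assertScriptRaises from a test (a sketch; the script
# name is one of the fail_*.py helpers shipped alongside this package, and the
# test class/method names here are hypothetical):
#
#   class TestAborts(TestCase):
#       def test_slp_switch_failure_aborts(self):
#           self.assertScriptRaises('fail_slp_switch.py')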

View File

@ -0,0 +1,231 @@
/* This is a set of functions used by test_extension_interface.py to test the
* Greenlet C API.
*/
#include "../greenlet.h"
#ifndef Py_RETURN_NONE
# define Py_RETURN_NONE return Py_INCREF(Py_None), Py_None
#endif
#define TEST_MODULE_NAME "_test_extension"
static PyObject*
test_switch(PyObject* self, PyObject* greenlet)
{
PyObject* result = NULL;
if (greenlet == NULL || !PyGreenlet_Check(greenlet)) {
PyErr_BadArgument();
return NULL;
}
result = PyGreenlet_Switch((PyGreenlet*)greenlet, NULL, NULL);
if (result == NULL) {
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_AssertionError,
"greenlet.switch() failed for some reason.");
}
return NULL;
}
Py_INCREF(result);
return result;
}
static PyObject*
test_switch_kwargs(PyObject* self, PyObject* args, PyObject* kwargs)
{
PyGreenlet* g = NULL;
PyObject* result = NULL;
PyArg_ParseTuple(args, "O!", &PyGreenlet_Type, &g);
if (g == NULL || !PyGreenlet_Check(g)) {
PyErr_BadArgument();
return NULL;
}
result = PyGreenlet_Switch(g, NULL, kwargs);
if (result == NULL) {
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_AssertionError,
"greenlet.switch() failed for some reason.");
}
return NULL;
}
Py_XINCREF(result);
return result;
}
static PyObject*
test_getcurrent(PyObject* self)
{
PyGreenlet* g = PyGreenlet_GetCurrent();
if (g == NULL || !PyGreenlet_Check(g) || !PyGreenlet_ACTIVE(g)) {
PyErr_SetString(PyExc_AssertionError,
"getcurrent() returned an invalid greenlet");
Py_XDECREF(g);
return NULL;
}
Py_DECREF(g);
Py_RETURN_NONE;
}
static PyObject*
test_setparent(PyObject* self, PyObject* arg)
{
PyGreenlet* current;
PyGreenlet* greenlet = NULL;
if (arg == NULL || !PyGreenlet_Check(arg)) {
PyErr_BadArgument();
return NULL;
}
if ((current = PyGreenlet_GetCurrent()) == NULL) {
return NULL;
}
greenlet = (PyGreenlet*)arg;
if (PyGreenlet_SetParent(greenlet, current)) {
Py_DECREF(current);
return NULL;
}
Py_DECREF(current);
if (PyGreenlet_Switch(greenlet, NULL, NULL) == NULL) {
return NULL;
}
Py_RETURN_NONE;
}
static PyObject*
test_new_greenlet(PyObject* self, PyObject* callable)
{
PyObject* result = NULL;
PyGreenlet* greenlet = PyGreenlet_New(callable, NULL);
if (!greenlet) {
return NULL;
}
result = PyGreenlet_Switch(greenlet, NULL, NULL);
Py_CLEAR(greenlet);
if (result == NULL) {
return NULL;
}
Py_INCREF(result);
return result;
}
static PyObject*
test_raise_dead_greenlet(PyObject* self)
{
PyErr_SetString(PyExc_GreenletExit, "test GreenletExit exception.");
return NULL;
}
static PyObject*
test_raise_greenlet_error(PyObject* self)
{
PyErr_SetString(PyExc_GreenletError, "test greenlet.error exception");
return NULL;
}
static PyObject*
test_throw(PyObject* self, PyGreenlet* g)
{
const char msg[] = "take that sucka!";
PyObject* msg_obj = Py_BuildValue("s", msg);
PyGreenlet_Throw(g, PyExc_ValueError, msg_obj, NULL);
Py_DECREF(msg_obj);
if (PyErr_Occurred()) {
return NULL;
}
Py_RETURN_NONE;
}
static PyObject*
test_throw_exact(PyObject* self, PyObject* args)
{
PyGreenlet* g = NULL;
PyObject* typ = NULL;
PyObject* val = NULL;
PyObject* tb = NULL;
if (!PyArg_ParseTuple(args, "OOOO:throw", &g, &typ, &val, &tb)) {
return NULL;
}
PyGreenlet_Throw(g, typ, val, tb);
if (PyErr_Occurred()) {
return NULL;
}
Py_RETURN_NONE;
}
static PyMethodDef test_methods[] = {
{"test_switch",
(PyCFunction)test_switch,
METH_O,
"Switch to the provided greenlet sending provided arguments, and \n"
"return the results."},
{"test_switch_kwargs",
(PyCFunction)test_switch_kwargs,
METH_VARARGS | METH_KEYWORDS,
"Switch to the provided greenlet sending the provided keyword args."},
{"test_getcurrent",
(PyCFunction)test_getcurrent,
METH_NOARGS,
"Test PyGreenlet_GetCurrent()"},
{"test_setparent",
(PyCFunction)test_setparent,
METH_O,
"Se the parent of the provided greenlet and switch to it."},
{"test_new_greenlet",
(PyCFunction)test_new_greenlet,
METH_O,
"Test PyGreenlet_New()"},
{"test_raise_dead_greenlet",
(PyCFunction)test_raise_dead_greenlet,
METH_NOARGS,
"Just raise greenlet.GreenletExit"},
{"test_raise_greenlet_error",
(PyCFunction)test_raise_greenlet_error,
METH_NOARGS,
"Just raise greenlet.error"},
{"test_throw",
(PyCFunction)test_throw,
METH_O,
"Throw a ValueError at the provided greenlet"},
{"test_throw_exact",
(PyCFunction)test_throw_exact,
METH_VARARGS,
"Throw exactly the arguments given at the provided greenlet"},
{NULL, NULL, 0, NULL}
};
#define INITERROR return NULL
static struct PyModuleDef moduledef = {PyModuleDef_HEAD_INIT,
TEST_MODULE_NAME,
NULL,
0,
test_methods,
NULL,
NULL,
NULL,
NULL};
PyMODINIT_FUNC
PyInit__test_extension(void)
{
PyObject* module = NULL;
module = PyModule_Create(&moduledef);
if (module == NULL) {
return NULL;
}
PyGreenlet_Import();
return module;
}

View File

@ -0,0 +1,226 @@
/* This is a set of functions used to test C++ exceptions are not
* broken during greenlet switches
*/
#include "../greenlet.h"
#include "../greenlet_compiler_compat.hpp"
#include <exception>
#include <stdexcept>
struct exception_t {
int depth;
exception_t(int depth) : depth(depth) {}
};
/* Functions are called via pointers to prevent inlining */
static void (*p_test_exception_throw_nonstd)(int depth);
static void (*p_test_exception_throw_std)();
static PyObject* (*p_test_exception_switch_recurse)(int depth, int left);
static void
test_exception_throw_nonstd(int depth)
{
throw exception_t(depth);
}
static void
test_exception_throw_std()
{
throw std::runtime_error("Thrown from an extension.");
}
static PyObject*
test_exception_switch_recurse(int depth, int left)
{
if (left > 0) {
return p_test_exception_switch_recurse(depth, left - 1);
}
PyObject* result = NULL;
PyGreenlet* self = PyGreenlet_GetCurrent();
if (self == NULL)
return NULL;
try {
if (PyGreenlet_Switch(PyGreenlet_GET_PARENT(self), NULL, NULL) == NULL) {
Py_DECREF(self);
return NULL;
}
p_test_exception_throw_nonstd(depth);
PyErr_SetString(PyExc_RuntimeError,
"throwing C++ exception didn't work");
}
catch (const exception_t& e) {
if (e.depth != depth)
PyErr_SetString(PyExc_AssertionError, "depth mismatch");
else
result = PyLong_FromLong(depth);
}
catch (...) {
PyErr_SetString(PyExc_RuntimeError, "unexpected C++ exception");
}
Py_DECREF(self);
return result;
}
/* test_exception_switch(int depth)
* - recurses depth times
* - switches to parent inside try/catch block
* - throws an exception (expected to be caught in the same function)
* - verifies depth matches (exceptions shouldn't be caught in other greenlets)
*/
static PyObject*
test_exception_switch(PyObject* UNUSED(self), PyObject* args)
{
int depth;
if (!PyArg_ParseTuple(args, "i", &depth))
return NULL;
return p_test_exception_switch_recurse(depth, depth);
}
static PyObject*
py_test_exception_throw_nonstd(PyObject* self, PyObject* args)
{
if (!PyArg_ParseTuple(args, ""))
return NULL;
p_test_exception_throw_nonstd(0);
PyErr_SetString(PyExc_AssertionError, "unreachable code running after throw");
return NULL;
}
static PyObject*
py_test_exception_throw_std(PyObject* self, PyObject* args)
{
if (!PyArg_ParseTuple(args, ""))
return NULL;
p_test_exception_throw_std();
PyErr_SetString(PyExc_AssertionError, "unreachable code running after throw");
return NULL;
}
static PyObject*
py_test_call(PyObject* self, PyObject* arg)
{
PyObject* noargs = PyTuple_New(0);
PyObject* ret = PyObject_Call(arg, noargs, nullptr);
Py_DECREF(noargs);
return ret;
}
/* test_exception_switch_and_do_in_g2(g2func)
* - creates new greenlet g2 to run g2func
* - switches to g2 inside try/catch block
* - verifies that no exception has been caught
*
* it is used together with test_exception_throw to verify that unhandled
* exceptions thrown in one greenlet do not propagate to other greenlets nor
* segfault the process.
*/
static PyObject*
test_exception_switch_and_do_in_g2(PyObject* self, PyObject* args)
{
PyObject* g2func = NULL;
PyObject* result = NULL;
if (!PyArg_ParseTuple(args, "O", &g2func))
return NULL;
PyGreenlet* g2 = PyGreenlet_New(g2func, NULL);
if (!g2) {
return NULL;
}
try {
result = PyGreenlet_Switch(g2, NULL, NULL);
if (!result) {
return NULL;
}
}
catch (const exception_t& e) {
/* if we are here the memory may already be corrupted, and the program
* might crash before the Python-level exception below gets printed.
* -> print something to stderr to make it clear that we entered
* this catch block.
* See comments in inner_bootstrap()
*/
#if defined(WIN32) || defined(_WIN32)
fprintf(stderr, "C++ exception unexpectedly caught in g1\n");
PyErr_SetString(PyExc_AssertionError, "C++ exception unexpectedly caught in g1");
Py_XDECREF(result);
return NULL;
#else
throw;
#endif
}
Py_XDECREF(result);
Py_RETURN_NONE;
}
static PyMethodDef test_methods[] = {
{"test_exception_switch",
(PyCFunction)&test_exception_switch,
METH_VARARGS,
"Switches to parent twice, to test exception handling and greenlet "
"switching."},
{"test_exception_switch_and_do_in_g2",
(PyCFunction)&test_exception_switch_and_do_in_g2,
METH_VARARGS,
"Creates new greenlet g2 to run g2func and switches to it inside try/catch "
"block. Used together with test_exception_throw to verify that unhandled "
"C++ exceptions thrown in a greenlet doe not corrupt memory."},
{"test_exception_throw_nonstd",
(PyCFunction)&py_test_exception_throw_nonstd,
METH_VARARGS,
"Throws non-standard C++ exception. Calling this function directly should abort the process."
},
{"test_exception_throw_std",
(PyCFunction)&py_test_exception_throw_std,
METH_VARARGS,
"Throws standard C++ exception. Calling this function directly should abort the process."
},
{"test_call",
(PyCFunction)&py_test_call,
METH_O,
"Call the given callable. Unlike calling it directly, this creates a "
"new C-level stack frame, which may be helpful in testing."
},
{NULL, NULL, 0, NULL}
};
static struct PyModuleDef moduledef = {PyModuleDef_HEAD_INIT,
"greenlet.tests._test_extension_cpp",
NULL,
0,
test_methods,
NULL,
NULL,
NULL,
NULL};
PyMODINIT_FUNC
PyInit__test_extension_cpp(void)
{
PyObject* module = NULL;
module = PyModule_Create(&moduledef);
if (module == NULL) {
return NULL;
}
PyGreenlet_Import();
if (_PyGreenlet_API == NULL) {
return NULL;
}
p_test_exception_throw_nonstd = test_exception_throw_nonstd;
p_test_exception_throw_std = test_exception_throw_std;
p_test_exception_switch_recurse = test_exception_switch_recurse;
return module;
}

View File

@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
"""
If we have a run callable passed to the constructor or set as an
attribute, but we don't actually use that (because ``__getattribute__``
or the like interferes), then when we clear callable before beginning
to run, there's an opportunity for Python code to run.
"""
import greenlet
g = None
main = greenlet.getcurrent()
results = []
class RunCallable:
def __del__(self):
results.append(('RunCallable', '__del__'))
main.switch('from RunCallable')
class G(greenlet.greenlet):
def __getattribute__(self, name):
if name == 'run':
results.append(('G.__getattribute__', 'run'))
return run_func
return object.__getattribute__(self, name)
def run_func():
results.append(('run_func', 'enter'))
g = G(RunCallable())
# Try to start G. It will get to the point where it deletes
# its run callable C++ variable in inner_bootstrap. That triggers
# the __del__ method, which switches back to main before g
# actually even starts running.
x = g.switch()
results.append(('main: g.switch()', x))
# In the C++ code, this results in g->g_switch() appearing to return, even though
# it has yet to run.
print('In main with', x, flush=True)
g.switch()
print('RESULTS', results)

View File

@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
"""
Helper for testing a C++ exception throw aborts the process.
Takes one argument, the name of the function in :mod:`_test_extension_cpp` to call.
"""
import sys
import greenlet
from greenlet.tests import _test_extension_cpp
print('fail_cpp_exception is running')
def run_unhandled_exception_in_greenlet_aborts():
def _():
_test_extension_cpp.test_exception_switch_and_do_in_g2(
_test_extension_cpp.test_exception_throw_nonstd
)
g1 = greenlet.greenlet(_)
g1.switch()
func_name = sys.argv[1]
try:
func = getattr(_test_extension_cpp, func_name)
except AttributeError:
if func_name == run_unhandled_exception_in_greenlet_aborts.__name__:
func = run_unhandled_exception_in_greenlet_aborts
elif func_name == 'run_as_greenlet_target':
g = greenlet.greenlet(_test_extension_cpp.test_exception_throw_std)
func = g.switch
else:
raise
print('raising', func, flush=True)
func()

View File

@ -0,0 +1,78 @@
"""
Testing initialstub throwing the ``GreenletStartedWhileInPython`` (already started) exception.
"""
import greenlet
a = None
b = None
c = None
main = greenlet.getcurrent()
# If we switch into a dead greenlet,
# we go looking for its parents.
# if a parent is not yet started, we start it.
results = []
def a_run(*args):
#results.append('A')
results.append(('Begin A', args))
def c_run():
results.append('Begin C')
b.switch('From C')
results.append('C done')
class A(greenlet.greenlet): pass
class B(greenlet.greenlet):
doing_it = False
def __getattribute__(self, name):
if name == 'run' and not self.doing_it:
assert greenlet.getcurrent() is c
self.doing_it = True
results.append('Switch to b from B.__getattribute__ in '
+ type(greenlet.getcurrent()).__name__)
b.switch()
results.append('B.__getattribute__ back from main in '
+ type(greenlet.getcurrent()).__name__)
if name == 'run':
name = '_B_run'
return object.__getattribute__(self, name)
def _B_run(self, *arg):
results.append(('Begin B', arg))
results.append('_B_run switching to main')
main.switch('From B')
class C(greenlet.greenlet):
pass
a = A(a_run)
b = B(parent=a)
c = C(c_run, b)
# Start a child; while running, it will start B,
# but starting B will ALSO start B.
result = c.switch()
results.append(('main from c', result))
# Switch back to C, which was in the middle of switching
# already. This will throw the ``GreenletStartedWhileInPython``
# exception, which results in parent A getting started (B is finished)
c.switch()
results.append(('A dead?', a.dead, 'B dead?', b.dead, 'C dead?', c.dead))
# A and B should both be dead now.
assert a.dead
assert b.dead
assert not c.dead
result = c.switch()
results.append(('main from c.2', result))
# Now C is dead
assert c.dead
print("RESULTS:", results)

View File

@ -0,0 +1,29 @@
# -*- coding: utf-8 -*-
"""
A test helper for seeing what happens when slp_switch()
fails.
"""
# pragma: no cover
import greenlet
print('fail_slp_switch is running', flush=True)
runs = []
def func():
runs.append(1)
greenlet.getcurrent().parent.switch()
runs.append(2)
greenlet.getcurrent().parent.switch()
runs.append(3)
g = greenlet._greenlet.UnswitchableGreenlet(func)
g.switch()
assert runs == [1]
g.switch()
assert runs == [1, 2]
g.force_slp_switch_error = True
# This should crash.
g.switch()

View File

@ -0,0 +1,44 @@
"""
Uses a trace function to switch greenlets at unexpected times.
In the trace function, we switch from the current greenlet to another
greenlet, which switches
"""
import greenlet
g1 = None
g2 = None
switch_to_g2 = False
def tracefunc(*args):
print('TRACE', *args)
global switch_to_g2
if switch_to_g2:
switch_to_g2 = False
g2.switch()
print('\tLEAVE TRACE', *args)
def g1_run():
print('In g1_run')
global switch_to_g2
switch_to_g2 = True
from_parent = greenlet.getcurrent().parent.switch()
print('Return to g1_run')
print('From parent', from_parent)
def g2_run():
#g1.switch()
greenlet.getcurrent().parent.switch()
greenlet.settrace(tracefunc)
g1 = greenlet.greenlet(g1_run)
g2 = greenlet.greenlet(g2_run)
# This switch didn't actually finish!
# And if it did, it would raise TypeError
# because g1_run() doesn't take any arguments.
g1.switch(1)
print('Back in main')
g1.switch(2)

View File

@ -0,0 +1,55 @@
"""
Like fail_switch_three_greenlets, but the call into g1_run would actually be
valid.
"""
import greenlet
g1 = None
g2 = None
switch_to_g2 = True
results = []
def tracefunc(*args):
results.append(('trace', args[0]))
print('TRACE', *args)
global switch_to_g2
if switch_to_g2:
switch_to_g2 = False
g2.switch('g2 from tracefunc')
print('\tLEAVE TRACE', *args)
def g1_run(arg):
results.append(('g1 arg', arg))
print('In g1_run')
from_parent = greenlet.getcurrent().parent.switch('from g1_run')
results.append(('g1 from parent', from_parent))
return 'g1 done'
def g2_run(arg):
#g1.switch()
results.append(('g2 arg', arg))
parent = greenlet.getcurrent().parent.switch('from g2_run')
global switch_to_g2
switch_to_g2 = False
results.append(('g2 from parent', parent))
return 'g2 done'
greenlet.settrace(tracefunc)
g1 = greenlet.greenlet(g1_run)
g2 = greenlet.greenlet(g2_run)
x = g1.switch('g1 from main')
results.append(('main g1', x))
print('Back in main', x)
x = g1.switch('g2 from main')
results.append(('main g2', x))
print('back in main again', x)
x = g1.switch('g1 from main 2')
results.append(('main g1.2', x))
x = g2.switch()
results.append(('main g2.2', x))
print("RESULTS:", results)

View File

@ -0,0 +1,41 @@
"""
Uses a trace function to switch greenlets at unexpected times.
In the trace function, we switch from the current greenlet to another
greenlet, which switches
"""
import greenlet
g1 = None
g2 = None
switch_to_g2 = False
def tracefunc(*args):
print('TRACE', *args)
global switch_to_g2
if switch_to_g2:
switch_to_g2 = False
g2.switch()
print('\tLEAVE TRACE', *args)
def g1_run():
print('In g1_run')
global switch_to_g2
switch_to_g2 = True
greenlet.getcurrent().parent.switch()
print('Return to g1_run')
print('Falling off end of g1_run')
def g2_run():
g1.switch()
print('Falling off end of g2')
greenlet.settrace(tracefunc)
g1 = greenlet.greenlet(g1_run)
g2 = greenlet.greenlet(g2_run)
g1.switch()
print('Falling off end of main')
g2.switch()

View File

@ -0,0 +1,319 @@
# Copyright (c) 2018 gevent community
# Copyright (c) 2021 greenlet community
#
# This was originally part of gevent's test suite. The main author
# (Jason Madden) vendored a copy of it into greenlet.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import os
import sys
import gc
from functools import wraps
import unittest
import objgraph
# graphviz 0.18 (Nov 7 2021), available only on Python 3.6 and newer,
# has added type hints (sigh). It wants to use ``typing.Literal`` for
# some stuff, but that's only available on Python 3.9+. If that's not
# found, it creates a ``unittest.mock.MagicMock`` object and annotates
# with that. These are GC'able objects, and doing almost *anything*
# with them results in an explosion of objects. For example, trying to
# compare them for equality creates new objects. This causes our
# leakchecks to fail, with reports like:
#
# greenlet.tests.leakcheck.LeakCheckError: refcount increased by [337, 1333, 343, 430, 530, 643, 769]
# _Call 1820 +546
# dict 4094 +76
# MagicProxy 585 +73
# tuple 2693 +66
# _CallList 24 +3
# weakref 1441 +1
# function 5996 +1
# type 736 +1
# cell 592 +1
# MagicMock 8 +1
#
# To avoid this, we *could* filter this type of object out early. In
# principle it could leak, but we don't use mocks in greenlet, so it
# doesn't leak from us. However, a further issue is that ``MagicMock``
# objects have subobjects that are also GC'able, like ``_Call``, and
# those create new mocks of their own too. So we'd have to filter them
# as well, and they're not public. That's OK, we can workaround the
# problem by being very careful to never compare by equality or other
# user-defined operators, only using object identity or other builtin
# functions.
RUNNING_ON_GITHUB_ACTIONS = os.environ.get('GITHUB_ACTIONS')
RUNNING_ON_TRAVIS = os.environ.get('TRAVIS') or RUNNING_ON_GITHUB_ACTIONS
RUNNING_ON_APPVEYOR = os.environ.get('APPVEYOR')
RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR
RUNNING_ON_MANYLINUX = os.environ.get('GREENLET_MANYLINUX')
SKIP_LEAKCHECKS = RUNNING_ON_MANYLINUX or os.environ.get('GREENLET_SKIP_LEAKCHECKS')
SKIP_FAILING_LEAKCHECKS = os.environ.get('GREENLET_SKIP_FAILING_LEAKCHECKS')
ONLY_FAILING_LEAKCHECKS = os.environ.get('GREENLET_ONLY_FAILING_LEAKCHECKS')
def ignores_leakcheck(func):
"""
Ignore the given object during leakchecks.
Can be applied to a method, in which case the method will run, but
will not be subject to leak checks.
If applied to a class, the entire class will be skipped during leakchecks. This
is intended to be used for classes that are very slow and cause problems such as
test timeouts; typically it will be used for classes that are subclasses of a base
class and specify variants of behaviour (such as pool sizes).
"""
func.ignore_leakcheck = True
return func
def fails_leakcheck(func):
"""
Mark that the function is known to leak.
"""
func.fails_leakcheck = True
if SKIP_FAILING_LEAKCHECKS:
func = unittest.skip("Skipping known failures")(func)
return func
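# Illustrative usage of the two decorators above (a sketch; the test class and
# method names are hypothetical, not taken from this repository's tests):
#
#   class TestSomething(TestCase):
#       @ignores_leakcheck
#       def test_too_slow_to_leakcheck(self):
#           ...
#
#       @fails_leakcheck
#       def test_known_to_leak(self):
#           ...
#
# wrap_refcount() below consults these marker attributes when wrapping each
# test method.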
class LeakCheckError(AssertionError):
pass
if hasattr(sys, 'getobjects'):
# In a Python build with ``--with-trace-refs``, make objgraph
# trace *all* the objects, not just those that are tracked by the
# GC
class _MockGC(object):
def get_objects(self):
return sys.getobjects(0) # pylint:disable=no-member
def __getattr__(self, name):
return getattr(gc, name)
objgraph.gc = _MockGC()
fails_strict_leakcheck = fails_leakcheck
else:
def fails_strict_leakcheck(func):
"""
Decorator for a function that is known to fail when running
strict (``sys.getobjects()``) leakchecks.
This type of leakcheck finds all objects, even those, such as
strings, which are not tracked by the garbage collector.
"""
return func
class ignores_types_in_strict_leakcheck(object):
def __init__(self, types):
self.types = types
def __call__(self, func):
func.leakcheck_ignore_types = self.types
return func
class _RefCountChecker(object):
# Some builtin things that we ignore
# XXX: Those things were ignored by gevent, but they're important here,
# presumably.
IGNORED_TYPES = () #(tuple, dict, types.FrameType, types.TracebackType)
def __init__(self, testcase, function):
self.testcase = testcase
self.function = function
self.deltas = []
self.peak_stats = {}
self.ignored_types = ()
# The very first time we are called, we have already been
# self.setUp() by the test runner, so we don't need to do it again.
self.needs_setUp = False
def _include_object_p(self, obj):
# pylint:disable=too-many-return-statements
#
# See the comment block at the top. We must be careful to
# avoid invoking user-defined operations.
if obj is self:
return False
kind = type(obj)
# ``self._include_object_p == obj`` returns NotImplemented
# for non-function objects, which causes the interpreter
# to try to reverse the order of arguments...which leads
# to the explosion of mock objects. We don't want that, so we implement
# the check manually.
if kind == type(self._include_object_p):
try:
# pylint:disable=not-callable
exact_method_equals = self._include_object_p.__eq__(obj)
except AttributeError:
# Python 2.7 methods may only have __cmp__, and that raises a
# TypeError for non-method arguments
# pylint:disable=no-member
exact_method_equals = self._include_object_p.__cmp__(obj) == 0
if exact_method_equals is not NotImplemented and exact_method_equals:
return False
# Similarly, we need to check identity in our __dict__ to avoid mock explosions.
for x in self.__dict__.values():
if obj is x:
return False
if kind in self.ignored_types or kind in self.IGNORED_TYPES:
return False
return True
def _growth(self):
return objgraph.growth(limit=None, peak_stats=self.peak_stats,
filter=self._include_object_p)
def _report_diff(self, growth):
if not growth:
return "<Unable to calculate growth>"
lines = []
width = max(len(name) for name, _, _ in growth)
for name, count, delta in growth:
lines.append('%-*s%9d %+9d' % (width, name, count, delta))
diff = '\n'.join(lines)
return diff
def _run_test(self, args, kwargs):
gc_enabled = gc.isenabled()
gc.disable()
if self.needs_setUp:
self.testcase.setUp()
self.testcase.skipTearDown = False
try:
self.function(self.testcase, *args, **kwargs)
finally:
self.testcase.tearDown()
self.testcase.doCleanups()
self.testcase.skipTearDown = True
self.needs_setUp = True
if gc_enabled:
gc.enable()
def _growth_after(self):
# Grab post snapshot
# pylint:disable=no-member
if 'urlparse' in sys.modules:
sys.modules['urlparse'].clear_cache()
if 'urllib.parse' in sys.modules:
sys.modules['urllib.parse'].clear_cache()
return self._growth()
def _check_deltas(self, growth):
# Return false when we have decided there is no leak,
# true if we should keep looping, raises an assertion
# if we have decided there is a leak.
deltas = self.deltas
if not deltas:
# We haven't run yet, no data, keep looping
return True
if gc.garbage:
raise LeakCheckError("Generated uncollectable garbage %r" % (gc.garbage,))
# the following configurations are classified as "no leak"
# [0, 0]
# [x, 0, 0]
# [... a, b, c, d] where a+b+c+d = 0
#
# the following configurations are classified as "leak"
# [... z, z, z] where z > 0
if deltas[-2:] == [0, 0] and len(deltas) in (2, 3):
return False
if deltas[-3:] == [0, 0, 0]:
return False
if len(deltas) >= 4 and sum(deltas[-4:]) == 0:
return False
if len(deltas) >= 3 and deltas[-1] > 0 and deltas[-1] == deltas[-2] and deltas[-2] == deltas[-3]:
diff = self._report_diff(growth)
raise LeakCheckError('refcount increased by %r\n%s' % (deltas, diff))
# OK, we don't know for sure yet. Let's search for more
if sum(deltas[-3:]) <= 0 or sum(deltas[-4:]) <= 0 or deltas[-4:].count(0) >= 2:
# this is suspicious, so give a few more runs
limit = 11
else:
limit = 7
if len(deltas) >= limit:
raise LeakCheckError('refcount increased by %r\n%s'
% (deltas,
self._report_diff(growth)))
# We couldn't decide yet, keep going
return True
def __call__(self, args, kwargs):
for _ in range(3):
gc.collect()
expect_failure = getattr(self.function, 'fails_leakcheck', False)
if expect_failure:
self.testcase.expect_greenlet_leak = True
self.ignored_types = getattr(self.function, "leakcheck_ignore_types", ())
# Capture state before; the incremental will be
# updated by each call to _growth_after
growth = self._growth()
try:
while self._check_deltas(growth):
self._run_test(args, kwargs)
growth = self._growth_after()
self.deltas.append(sum((stat[2] for stat in growth)))
except LeakCheckError:
if not expect_failure:
raise
else:
if expect_failure:
raise LeakCheckError("Expected %s to leak but it did not." % (self.function,))
def wrap_refcount(method):
if getattr(method, 'ignore_leakcheck', False) or SKIP_LEAKCHECKS:
return method
@wraps(method)
def wrapper(self, *args, **kwargs): # pylint:disable=too-many-branches
if getattr(self, 'ignore_leakcheck', False):
raise unittest.SkipTest("This class ignored during leakchecks")
if ONLY_FAILING_LEAKCHECKS and not getattr(method, 'fails_leakcheck', False):
raise unittest.SkipTest("Only running tests that fail leakchecks.")
return _RefCountChecker(self, method)(args, kwargs)
return wrapper

View File

@ -0,0 +1,312 @@
from __future__ import print_function
import gc
import sys
import unittest
from functools import partial
from unittest import skipUnless
from unittest import skipIf
from greenlet import greenlet
from greenlet import getcurrent
from . import TestCase
from . import PY314
try:
from contextvars import Context
from contextvars import ContextVar
from contextvars import copy_context
# From the documentation:
#
# Important: Context Variables should be created at the top module
# level and never in closures. Context objects hold strong
# references to context variables which prevents context variables
# from being properly garbage collected.
ID_VAR = ContextVar("id", default=None)
VAR_VAR = ContextVar("var", default=None)
ContextVar = None
except ImportError:
Context = ContextVar = copy_context = None
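# A tiny sketch, not exercised by the tests, of the behaviour the tests below
# rely on: a value set inside Context.run() lands in that Context, not in the
# caller's. The helper name is hypothetical.
def _contextvars_sketch():
    ctx = copy_context()
    ctx.run(ID_VAR.set, "inside")    # mutates ID_VAR only within ctx
    assert ctx[ID_VAR] == "inside"   # visible through the context mapping
    assert ID_VAR.get() is None      # caller's context unchanged (assuming unset)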
# We don't support testing when greenlet's built-in context var support is disabled.
@skipUnless(Context is not None, "ContextVar not supported")
class ContextVarsTests(TestCase):
def _new_ctx_run(self, *args, **kwargs):
return copy_context().run(*args, **kwargs)
def _increment(self, greenlet_id, callback, counts, expect):
ctx_var = ID_VAR
if expect is None:
self.assertIsNone(ctx_var.get())
else:
self.assertEqual(ctx_var.get(), expect)
ctx_var.set(greenlet_id)
for _ in range(2):
counts[ctx_var.get()] += 1
callback()
def _test_context(self, propagate_by):
# pylint:disable=too-many-branches
ID_VAR.set(0)
callback = getcurrent().switch
counts = dict((i, 0) for i in range(5))
lets = [
greenlet(partial(
partial(
copy_context().run,
self._increment
) if propagate_by == "run" else self._increment,
greenlet_id=i,
callback=callback,
counts=counts,
expect=(
i - 1 if propagate_by == "share" else
0 if propagate_by in ("set", "run") else None
)
))
for i in range(1, 5)
]
for let in lets:
if propagate_by == "set":
let.gr_context = copy_context()
elif propagate_by == "share":
let.gr_context = getcurrent().gr_context
for i in range(2):
counts[ID_VAR.get()] += 1
for let in lets:
let.switch()
if propagate_by == "run":
# Must leave each context.run() in reverse order of entry
for let in reversed(lets):
let.switch()
else:
# No context.run(), so fine to exit in any order.
for let in lets:
let.switch()
for let in lets:
self.assertTrue(let.dead)
# When using run(), we leave the run() as the greenlet dies,
# and there's no context "underneath". When not using run(),
# gr_context still reflects the context the greenlet was
# running in.
if propagate_by == 'run':
self.assertIsNone(let.gr_context)
else:
self.assertIsNotNone(let.gr_context)
if propagate_by == "share":
self.assertEqual(counts, {0: 1, 1: 1, 2: 1, 3: 1, 4: 6})
else:
self.assertEqual(set(counts.values()), set([2]))
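# For orientation, a minimal sketch (hypothetical helper, not called by the
# tests) of the three propagation strategies exercised by _test_context:
# wrapping the target in Context.run(), assigning a fresh copy to gr_context
# before the first switch, and sharing the caller's Context object outright.
def _propagation_sketch():
    fresh = greenlet(copy_context().run)         # "run": target enters its own copy
    fresh.switch(ID_VAR.get)                     # runs ID_VAR.get inside that copy
    assigned = greenlet(ID_VAR.get)
    assigned.gr_context = copy_context()         # "set": starts in an assigned copy
    assigned.switch()
    shared = greenlet(ID_VAR.get)
    shared.gr_context = getcurrent().gr_context  # "share": same Context object
    shared.switch()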
def test_context_propagated_by_context_run(self):
self._new_ctx_run(self._test_context, "run")
def test_context_propagated_by_setting_attribute(self):
self._new_ctx_run(self._test_context, "set")
def test_context_not_propagated(self):
self._new_ctx_run(self._test_context, None)
def test_context_shared(self):
self._new_ctx_run(self._test_context, "share")
def test_break_ctxvars(self):
let1 = greenlet(copy_context().run)
let2 = greenlet(copy_context().run)
let1.switch(getcurrent().switch)
let2.switch(getcurrent().switch)
# Since let2 entered the current context and let1 exits its own, the
# interpreter emits:
# RuntimeError: cannot exit context: thread state references a different context object
let1.switch()
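# Related restriction in pure contextvars, with no greenlets involved (a
# sketch, not used by the tests): a Context may only be entered once at a
# time, so entries and exits must nest properly. The switching pattern above
# breaks that pairing, which is what the RuntimeError reports.
def _nested_run_sketch():
    ctx = copy_context()
    try:
        ctx.run(ctx.run, ID_VAR.get)   # re-entering the same Context
    except RuntimeError:
        return True                    # "cannot enter context: ... already entered"
    return False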
def test_not_broken_if_using_attribute_instead_of_context_run(self):
let1 = greenlet(getcurrent().switch)
let2 = greenlet(getcurrent().switch)
let1.gr_context = copy_context()
let2.gr_context = copy_context()
let1.switch()
let2.switch()
let1.switch()
let2.switch()
def test_context_assignment_while_running(self):
# pylint:disable=too-many-statements
ID_VAR.set(None)
def target():
self.assertIsNone(ID_VAR.get())
self.assertIsNone(gr.gr_context)
# Context is created on first use
ID_VAR.set(1)
self.assertIsInstance(gr.gr_context, Context)
self.assertEqual(ID_VAR.get(), 1)
self.assertEqual(gr.gr_context[ID_VAR], 1)
# Clearing the context makes it get re-created as another
# empty context when next used
old_context = gr.gr_context
gr.gr_context = None # assign None while running
self.assertIsNone(ID_VAR.get())
self.assertIsNone(gr.gr_context)
ID_VAR.set(2)
self.assertIsInstance(gr.gr_context, Context)
self.assertEqual(ID_VAR.get(), 2)
self.assertEqual(gr.gr_context[ID_VAR], 2)
new_context = gr.gr_context
getcurrent().parent.switch((old_context, new_context))
# parent switches us back to old_context
self.assertEqual(ID_VAR.get(), 1)
gr.gr_context = new_context # assign non-None while running
self.assertEqual(ID_VAR.get(), 2)
getcurrent().parent.switch()
# parent switches us back to no context
self.assertIsNone(ID_VAR.get())
self.assertIsNone(gr.gr_context)
gr.gr_context = old_context
self.assertEqual(ID_VAR.get(), 1)
getcurrent().parent.switch()
# parent switches us back to no context
self.assertIsNone(ID_VAR.get())
self.assertIsNone(gr.gr_context)
gr = greenlet(target)
with self.assertRaisesRegex(AttributeError, "can't delete context attribute"):
del gr.gr_context
self.assertIsNone(gr.gr_context)
old_context, new_context = gr.switch()
self.assertIs(new_context, gr.gr_context)
self.assertEqual(old_context[ID_VAR], 1)
self.assertEqual(new_context[ID_VAR], 2)
self.assertEqual(new_context.run(ID_VAR.get), 2)
gr.gr_context = old_context # assign non-None while suspended
gr.switch()
self.assertIs(gr.gr_context, new_context)
gr.gr_context = None # assign None while suspended
gr.switch()
self.assertIs(gr.gr_context, old_context)
gr.gr_context = None
gr.switch()
self.assertIsNone(gr.gr_context)
# Make sure there are no reference leaks
gr = None
gc.collect()
# Python 3.14 elides reference counting operations
# in some cases. See https://github.com/python/cpython/pull/130708
self.assertEqual(sys.getrefcount(old_context), 2 if not PY314 else 1)
self.assertEqual(sys.getrefcount(new_context), 2 if not PY314 else 1)
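# A compressed sketch of the lazy-creation behaviour the test above walks
# through (hypothetical helper, not called anywhere): gr_context is
# materialized the first time the greenlet touches a context variable, and
# assigning None simply discards it until the next use.
def _lazy_context_sketch():
    def child():
        ID_VAR.set(42)                # first contextvar use creates a context
        getcurrent().parent.switch()  # suspend so the parent can inspect it
    g = greenlet(child)
    g.switch()
    assert isinstance(g.gr_context, Context)  # created by the ID_VAR.set call
    g.gr_context = None                       # discarded while suspended
    g.switch()                                # resume; child finishes normally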
def test_context_assignment_different_thread(self):
import threading
VAR_VAR.set(None)
ctx = Context()
is_running = threading.Event()
should_suspend = threading.Event()
did_suspend = threading.Event()
should_exit = threading.Event()
holder = []
def greenlet_in_thread_fn():
VAR_VAR.set(1)
is_running.set()
should_suspend.wait(10)
VAR_VAR.set(2)
getcurrent().parent.switch()
holder.append(VAR_VAR.get())
def thread_fn():
gr = greenlet(greenlet_in_thread_fn)
gr.gr_context = ctx
holder.append(gr)
gr.switch()
did_suspend.set()
should_exit.wait(10)
gr.switch()
del gr
greenlet() # trigger cleanup
thread = threading.Thread(target=thread_fn, daemon=True)
thread.start()
is_running.wait(10)
gr = holder[0]
# Can't access or modify context if the greenlet is running
# in a different thread
with self.assertRaisesRegex(ValueError, "running in a different"):
getattr(gr, 'gr_context')
with self.assertRaisesRegex(ValueError, "running in a different"):
gr.gr_context = None
should_suspend.set()
did_suspend.wait(10)
# OK to access and modify context if greenlet is suspended
self.assertIs(gr.gr_context, ctx)
self.assertEqual(gr.gr_context[VAR_VAR], 2)
gr.gr_context = None
should_exit.set()
thread.join(10)
self.assertEqual(holder, [gr, None])
# Context can still be accessed/modified when greenlet is dead:
self.assertIsNone(gr.gr_context)
gr.gr_context = ctx
self.assertIs(gr.gr_context, ctx)
# Otherwise we leak greenlets on some platforms.
# XXX: Should be able to do this automatically
del holder[:]
gr = None
thread = None
def test_context_assignment_wrong_type(self):
g = greenlet()
with self.assertRaisesRegex(TypeError,
"greenlet context must be a contextvars.Context or None"):
g.gr_context = self
@skipIf(Context is not None, "ContextVar supported")
class NoContextVarsTests(TestCase):
def test_contextvars_errors(self):
let1 = greenlet(getcurrent().switch)
self.assertFalse(hasattr(let1, 'gr_context'))
with self.assertRaises(AttributeError):
getattr(let1, 'gr_context')
with self.assertRaises(AttributeError):
let1.gr_context = None
let1.switch()
with self.assertRaises(AttributeError):
getattr(let1, 'gr_context')
with self.assertRaises(AttributeError):
let1.gr_context = None
del let1
if __name__ == '__main__':
unittest.main()

View File

@ -0,0 +1,73 @@
from __future__ import print_function
from __future__ import absolute_import
import subprocess
import unittest
import greenlet
from . import _test_extension_cpp
from . import TestCase
from . import WIN
class CPPTests(TestCase):
def test_exception_switch(self):
greenlets = []
for i in range(4):
g = greenlet.greenlet(_test_extension_cpp.test_exception_switch)
g.switch(i)
greenlets.append(g)
for i, g in enumerate(greenlets):
self.assertEqual(g.switch(), i)
def _do_test_unhandled_exception(self, target):
import os
import sys
script = os.path.join(
os.path.dirname(__file__),
'fail_cpp_exception.py',
)
args = [sys.executable, script, target.__name__ if not isinstance(target, str) else target]
__traceback_info__ = args
with self.assertRaises(subprocess.CalledProcessError) as exc:
subprocess.check_output(
args,
encoding='utf-8',
stderr=subprocess.STDOUT
)
ex = exc.exception
expected_exit = self.get_expected_returncodes_for_aborted_process()
self.assertIn(ex.returncode, expected_exit)
self.assertIn('fail_cpp_exception is running', ex.output)
return ex.output
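# What "aborted" looks like from the parent process, as a standalone sketch
# (hypothetical helper, command chosen only for illustration): on POSIX an
# abort() surfaces as a negative returncode (-SIGABRT), while Windows reports
# a nonzero positive status, which is why the real test asks the TestCase for
# the platform-specific set of expected codes.
def _abort_returncode_sketch():
    import signal
    import sys
    try:
        subprocess.check_output(
            [sys.executable, '-c', 'import os; os.abort()'],
            stderr=subprocess.STDOUT,
        )
    except subprocess.CalledProcessError as ex:
        return ex.returncode == -signal.SIGABRT  # POSIX; Windows differs
    return False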
def test_unhandled_nonstd_exception_aborts(self):
# verify that plain unhandled throw aborts
self._do_test_unhandled_exception(_test_extension_cpp.test_exception_throw_nonstd)
def test_unhandled_std_exception_aborts(self):
# verify that plain unhandled throw aborts
self._do_test_unhandled_exception(_test_extension_cpp.test_exception_throw_std)
@unittest.skipIf(WIN, "XXX: This does not crash on Windows")
# Meaning the exception is getting lost somewhere...
def test_unhandled_std_exception_as_greenlet_function_aborts(self):
# verify that plain unhandled throw aborts
output = self._do_test_unhandled_exception('run_as_greenlet_target')
self.assertIn(
# We really expect this to be prefixed with "greenlet: Unhandled C++ exception:"
# as added by our handler for std::exception (see TUserGreenlet.cpp), but
# that's not correct everywhere --- our handler never runs before std::terminate
# gets called (for example, on arm32).
'Thrown from an extension.',
output
)
def test_unhandled_exception_in_greenlet_aborts(self):
# verify that unhandled throw called in greenlet aborts too
self._do_test_unhandled_exception('run_unhandled_exception_in_greenlet_aborts')
if __name__ == '__main__':
unittest.main()

Some files were not shown because too many files have changed in this diff.