Important changes to repositories hosted on mbed.com
Mbed hosted mercurial repositories are deprecated and are due to be permanently deleted in July 2026.
To keep a copy of this software download the repository Zip archive or clone locally using Mercurial.
It is also possible to export all your personal repositories from the account settings page.
Revision 0:5868e8752d44, committed 2016-04-16
- Committer:
- pythontech
- Date:
- Sat Apr 16 17:11:56 2016 +0000
- Child:
- 1:873275b0210d
- Commit message:
- Split off library from repl
Changed in this revision
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extmod/machine_mem.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,103 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "extmod/machine_mem.h"
+#include "py/nlr.h"
+
+#if MICROPY_PY_MACHINE
+
+// If you wish to override the functions for mapping the machine_mem read/write
+// address, then add a #define for MICROPY_MACHINE_MEM_GET_READ_ADDR and/or
+// MICROPY_MACHINE_MEM_GET_WRITE_ADDR in your mpconfigport.h. Since the
+// prototypes are identical, it is allowable for both of the macros to evaluate
+// to the same function.
+//
+// It is expected that the modmachine.c file for a given port will provide the
+// implementations, if the default implementation isn't used.
+
+#if !defined(MICROPY_MACHINE_MEM_GET_READ_ADDR) || !defined(MICROPY_MACHINE_MEM_GET_WRITE_ADDR)
+// Convert a Python integer object into a raw machine address, checking that
+// it is aligned to `align` bytes. `align` is the element size (1, 2 or 4 —
+// always a power of two), so (align - 1) is a mask of the low address bits.
+// Raises ValueError on misalignment.
+// NOTE(review): addr is uintptr_t but is formatted with "%08x" — this
+// assumes a 32-bit port; confirm before reusing on a 64-bit target.
+STATIC uintptr_t machine_mem_get_addr(mp_obj_t addr_o, uint align) {
+    uintptr_t addr = mp_obj_int_get_truncated(addr_o);
+    if ((addr & (align - 1)) != 0) {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError, "address %08x is not aligned to %d bytes", addr, align));
+    }
+    return addr;
+}
+#if !defined(MICROPY_MACHINE_MEM_GET_READ_ADDR)
+#define MICROPY_MACHINE_MEM_GET_READ_ADDR machine_mem_get_addr
+#endif
+#if !defined(MICROPY_MACHINE_MEM_GET_WRITE_ADDR)
+#define MICROPY_MACHINE_MEM_GET_WRITE_ADDR machine_mem_get_addr
+#endif
+#endif
+
+// print handler: renders the object as "<N-bit memory>" where N is the
+// element width in bits (8/16/32). str() and repr() produce the same text.
+STATIC void machine_mem_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    (void)kind; // no distinction between print kinds for this type
+    machine_mem_obj_t *mem = MP_OBJ_TO_PTR(self_in);
+    unsigned bit_width = 8 * mem->elem_size;
+    mp_printf(print, "<%u-bit memory>", bit_width);
+}
+
+// subscript handler implementing mem8[addr] / mem16[addr] / mem32[addr].
+// The `value` argument selects the operation:
+//   MP_OBJ_NULL     -> delete request (meaningless for raw memory; refused)
+//   MP_OBJ_SENTINEL -> load: read one element of self->elem_size bytes
+//   anything else   -> store: write `value` truncated to the element size
+STATIC mp_obj_t machine_mem_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+    // TODO support slice index to read/write multiple values at once
+    machine_mem_obj_t *mem = MP_OBJ_TO_PTR(self_in);
+
+    if (value == MP_OBJ_NULL) {
+        // delete
+        return MP_OBJ_NULL; // op not supported
+    }
+
+    if (value == MP_OBJ_SENTINEL) {
+        // load: the getter validates alignment against the element size
+        uintptr_t addr = MICROPY_MACHINE_MEM_GET_READ_ADDR(index, mem->elem_size);
+        uint32_t val;
+        if (mem->elem_size == 1) {
+            val = *(uint8_t*)addr;
+        } else if (mem->elem_size == 2) {
+            val = *(uint16_t*)addr;
+        } else {
+            val = *(uint32_t*)addr;
+        }
+        return mp_obj_new_int(val);
+    }
+
+    // store
+    uintptr_t addr = MICROPY_MACHINE_MEM_GET_WRITE_ADDR(index, mem->elem_size);
+    uint32_t val = mp_obj_get_int_truncated(value);
+    if (mem->elem_size == 1) {
+        *(uint8_t*)addr = val;
+    } else if (mem->elem_size == 2) {
+        *(uint16_t*)addr = val;
+    } else {
+        *(uint32_t*)addr = val;
+    }
+    return mp_const_none;
+}
+
+// Python type object shared by the three memory-access singletons below.
+const mp_obj_type_t machine_mem_type = {
+    { &mp_type_type },
+    .name = MP_QSTR_mem,
+    .print = machine_mem_print,
+    .subscr = machine_mem_subscr,
+};
+
+// Singletons exposed to Python as machine.mem8 / mem16 / mem32; the second
+// initializer is the element size in bytes used for every load/store.
+const machine_mem_obj_t machine_mem8_obj = {{&machine_mem_type}, 1};
+const machine_mem_obj_t machine_mem16_obj = {{&machine_mem_type}, 2};
+const machine_mem_obj_t machine_mem32_obj = {{&machine_mem_type}, 4};
+
+#endif // MICROPY_PY_MACHINE
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extmod/machine_mem.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,51 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+
+#ifndef __MICROPY_INCLUDED_EXTMOD_MACHINE_MEM_H__
+#define __MICROPY_INCLUDED_EXTMOD_MACHINE_MEM_H__
+
+#include "py/obj.h"
+
+// Instance struct backing the mem8/mem16/mem32 singleton objects.
+typedef struct _machine_mem_obj_t {
+    mp_obj_base_t base;
+    unsigned elem_size; // in bytes
+} machine_mem_obj_t;
+
+// The shared Python type of the three singletons below.
+extern const mp_obj_type_t machine_mem_type;
+
+// Singletons for 8-, 16- and 32-bit raw memory access.
+extern const machine_mem_obj_t machine_mem8_obj;
+extern const machine_mem_obj_t machine_mem16_obj;
+extern const machine_mem_obj_t machine_mem32_obj;
+
+#if defined(MICROPY_MACHINE_MEM_GET_READ_ADDR)
+uintptr_t MICROPY_MACHINE_MEM_GET_READ_ADDR(mp_obj_t addr_o, uint align);
+#endif
+#if defined(MICROPY_MACHINE_MEM_GET_WRITE_ADDR)
+uintptr_t MICROPY_MACHINE_MEM_GET_WRITE_ADDR(mp_obj_t addr_o, uint align);
+#endif
+
+#endif // __MICROPY_INCLUDED_EXTMOD_MACHINE_MEM_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/genhdr/mpversion.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,7 @@ +#define MICROPY_GIT_TAG "hand-crafted" +#define MICROPY_GIT_HASH "hand-crafted" +#define MICROPY_BUILD_DATE __DATE__ +#define MICROPY_VERSION_MAJOR (1) +#define MICROPY_VERSION_MINOR (7) +#define MICROPY_VERSION_MICRO (3) +#define MICROPY_VERSION_STRING "1.7.3"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/genhdr/qstrdefs.generated.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,242 @@
+// This file was automatically generated by makeqstrdata.py
+
+QDEF(MP_QSTR_NULL, (const byte*)"\x00\x00\x00" "")
+QDEF(MP_QSTR_, (const byte*)"\x05\x15\x00" "")
+QDEF(MP_QSTR__star_, (const byte*)"\x8f\xb5\x01" "*")
+QDEF(MP_QSTR__, (const byte*)"\xfa\xb5\x01" "_")
+QDEF(MP_QSTR___build_class__, (const byte*)"\x42\x88\x0f" "__build_class__")
+QDEF(MP_QSTR___class__, (const byte*)"\x2b\xc5\x09" "__class__")
+QDEF(MP_QSTR___doc__, (const byte*)"\x2d\xac\x07" "__doc__")
+QDEF(MP_QSTR___import__, (const byte*)"\x38\x3e\x0a" "__import__")
+QDEF(MP_QSTR___init__, (const byte*)"\x5f\xa5\x08" "__init__")
+QDEF(MP_QSTR___new__, (const byte*)"\x79\x15\x07" "__new__")
+QDEF(MP_QSTR___locals__, (const byte*)"\x7b\x6a\x0a" "__locals__")
+QDEF(MP_QSTR___main__, (const byte*)"\x8e\x13\x08" "__main__")
+QDEF(MP_QSTR___module__, (const byte*)"\xff\x30\x0a" "__module__")
+QDEF(MP_QSTR___name__, (const byte*)"\xe2\x38\x08" "__name__")
+QDEF(MP_QSTR___dict__, (const byte*)"\x7f\x54\x08" "__dict__")
+QDEF(MP_QSTR___hash__, (const byte*)"\xf7\xc8\x08" "__hash__")
+QDEF(MP_QSTR___next__, (const byte*)"\x02\x73\x08" "__next__")
+QDEF(MP_QSTR___qualname__, (const byte*)"\x6b\x00\x0c" "__qualname__")
+QDEF(MP_QSTR___path__, (const byte*)"\xc8\x23\x08" "__path__")
+QDEF(MP_QSTR___repl_print__, (const byte*)"\x00\xbb\x0e" "__repl_print__")
+QDEF(MP_QSTR___bool__, (const byte*)"\x2b\x65\x08" "__bool__")
+QDEF(MP_QSTR___contains__, (const byte*)"\xc6\x5f\x0c" "__contains__")
+QDEF(MP_QSTR___enter__, (const byte*)"\x6d\xba\x09" "__enter__")
+QDEF(MP_QSTR___exit__, (const byte*)"\x45\xf8\x08" "__exit__")
+QDEF(MP_QSTR___len__, (const byte*)"\xe2\xb0\x07" "__len__")
+QDEF(MP_QSTR___iter__, (const byte*)"\xcf\x32\x08" "__iter__")
+QDEF(MP_QSTR___getitem__, (const byte*)"\x26\x39\x0b" "__getitem__")
+QDEF(MP_QSTR___setitem__, (const byte*)"\x32\x3e\x0b" "__setitem__")
+QDEF(MP_QSTR___delitem__, (const byte*)"\xfd\x35\x0b" "__delitem__")
+QDEF(MP_QSTR___add__, (const byte*)"\xc4\x82\x07" "__add__")
+QDEF(MP_QSTR___sub__, (const byte*)"\x21\x09\x07" "__sub__")
+QDEF(MP_QSTR___repr__, (const byte*)"\x10\x0b\x08" "__repr__")
+QDEF(MP_QSTR___str__, (const byte*)"\xd0\xcd\x07" "__str__")
+QDEF(MP_QSTR___getattr__, (const byte*)"\x40\xf8\x0b" "__getattr__")
+QDEF(MP_QSTR___del__, (const byte*)"\x68\x37\x07" "__del__")
+QDEF(MP_QSTR___call__, (const byte*)"\xa7\xf9\x08" "__call__")
+QDEF(MP_QSTR___lt__, (const byte*)"\x5d\x68\x06" "__lt__")
+QDEF(MP_QSTR___gt__, (const byte*)"\xb6\x82\x06" "__gt__")
+QDEF(MP_QSTR___eq__, (const byte*)"\x71\x3e\x06" "__eq__")
+QDEF(MP_QSTR___le__, (const byte*)"\xcc\x13\x06" "__le__")
+QDEF(MP_QSTR___ge__, (const byte*)"\xa7\x46\x06" "__ge__")
+QDEF(MP_QSTR___reversed__, (const byte*)"\x61\xff\x0c" "__reversed__")
+QDEF(MP_QSTR_micropython, (const byte*)"\x0b\x7c\x0b" "micropython")
+QDEF(MP_QSTR_bytecode, (const byte*)"\x22\x7d\x08" "bytecode")
+QDEF(MP_QSTR_const, (const byte*)"\xc0\xff\x05" "const")
+QDEF(MP_QSTR_builtins, (const byte*)"\xf7\x31\x08" "builtins")
+QDEF(MP_QSTR_Ellipsis, (const byte*)"\xf0\xe0\x08" "Ellipsis")
+QDEF(MP_QSTR_StopIteration, (const byte*)"\xea\x1c\x0d" "StopIteration")
+QDEF(MP_QSTR_BaseException, (const byte*)"\x07\x92\x0d" "BaseException")
+QDEF(MP_QSTR_ArithmeticError, (const byte*)"\x2d\x8c\x0f" "ArithmeticError")
+QDEF(MP_QSTR_AssertionError, (const byte*)"\x97\x5a\x0e" "AssertionError")
+QDEF(MP_QSTR_AttributeError, (const byte*)"\x21\xde\x0e" "AttributeError")
+QDEF(MP_QSTR_BufferError, (const byte*)"\x1d\x59\x0b" "BufferError")
+QDEF(MP_QSTR_EOFError, (const byte*)"\x91\xbf\x08" "EOFError")
+QDEF(MP_QSTR_Exception, (const byte*)"\xf2\x29\x09" "Exception")
+QDEF(MP_QSTR_FileExistsError, (const byte*)"\x5b\x14\x0f" "FileExistsError")
+QDEF(MP_QSTR_FileNotFoundError, (const byte*)"\x78\x89\x11" "FileNotFoundError")
+QDEF(MP_QSTR_FloatingPointError, (const byte*)"\x01\x34\x12" "FloatingPointError")
+QDEF(MP_QSTR_GeneratorExit, (const byte*)"\x16\x62\x0d" "GeneratorExit")
+QDEF(MP_QSTR_ImportError, (const byte*)"\x20\x9c\x0b" "ImportError")
+QDEF(MP_QSTR_IndentationError, (const byte*)"\x5c\x20\x10" "IndentationError")
+QDEF(MP_QSTR_IndexError, (const byte*)"\x83\xad\x0a" "IndexError")
+QDEF(MP_QSTR_KeyboardInterrupt, (const byte*)"\xaf\xe2\x11" "KeyboardInterrupt")
+QDEF(MP_QSTR_KeyError, (const byte*)"\xea\x00\x08" "KeyError")
+QDEF(MP_QSTR_LookupError, (const byte*)"\xff\x69\x0b" "LookupError")
+QDEF(MP_QSTR_MemoryError, (const byte*)"\xdc\x83\x0b" "MemoryError")
+QDEF(MP_QSTR_NameError, (const byte*)"\xba\x2d\x09" "NameError")
+QDEF(MP_QSTR_NotImplementedError, (const byte*)"\xc6\x98\x13" "NotImplementedError")
+QDEF(MP_QSTR_OSError, (const byte*)"\xa1\x65\x07" "OSError")
+QDEF(MP_QSTR_OverflowError, (const byte*)"\x81\xe1\x0d" "OverflowError")
+QDEF(MP_QSTR_RuntimeError, (const byte*)"\x61\xf1\x0c" "RuntimeError")
+QDEF(MP_QSTR_SyntaxError, (const byte*)"\x94\x8f\x0b" "SyntaxError")
+QDEF(MP_QSTR_SystemExit, (const byte*)"\x20\xff\x0a" "SystemExit")
+QDEF(MP_QSTR_TypeError, (const byte*)"\x25\x96\x09" "TypeError")
+QDEF(MP_QSTR_UnboundLocalError, (const byte*)"\x99\x22\x11" "UnboundLocalError")
+QDEF(MP_QSTR_ValueError, (const byte*)"\x96\x87\x0a" "ValueError")
+QDEF(MP_QSTR_ZeroDivisionError, (const byte*)"\xb6\x27\x11" "ZeroDivisionError")
+QDEF(MP_QSTR_None, (const byte*)"\x6f\xd1\x04" "None")
+QDEF(MP_QSTR_False, (const byte*)"\x38\x6f\x05" "False")
+QDEF(MP_QSTR_True, (const byte*)"\x13\x17\x04" "True")
+QDEF(MP_QSTR_object, (const byte*)"\x90\x8d\x06" "object")
+QDEF(MP_QSTR_NoneType, (const byte*)"\x17\x68\x08" "NoneType")
+QDEF(MP_QSTR_abs, (const byte*)"\x95\x32\x03" "abs")
+QDEF(MP_QSTR_all, (const byte*)"\x44\x33\x03" "all")
+QDEF(MP_QSTR_any, (const byte*)"\x13\x33\x03" "any")
+QDEF(MP_QSTR_args, (const byte*)"\xc2\xc6\x04" "args")
+QDEF(MP_QSTR_bin, (const byte*)"\xe0\x48\x03" "bin")
+QDEF(MP_QSTR__brace_open__colon__hash_b_brace_close_, (const byte*)"\x58\x37\x05" "{:#b}")
+QDEF(MP_QSTR_bool, (const byte*)"\xeb\x3c\x04" "bool")
+QDEF(MP_QSTR_bytes, (const byte*)"\x5c\xb2\x05" "bytes")
+QDEF(MP_QSTR_callable, (const byte*)"\x0d\x70\x08" "callable")
+QDEF(MP_QSTR_chr, (const byte*)"\xdc\x4c\x03" "chr")
+QDEF(MP_QSTR_classmethod, (const byte*)"\xb4\x8c\x0b" "classmethod")
+QDEF(MP_QSTR__collections, (const byte*)"\x3f\x35\x0c" "_collections")
+QDEF(MP_QSTR_dict, (const byte*)"\x3f\xfc\x04" "dict")
+QDEF(MP_QSTR_dir, (const byte*)"\xfa\x1e\x03" "dir")
+QDEF(MP_QSTR_divmod, (const byte*)"\xb8\x04\x06" "divmod")
+QDEF(MP_QSTR_eval, (const byte*)"\x9b\xa6\x04" "eval")
+QDEF(MP_QSTR_exec, (const byte*)"\x1e\xc0\x04" "exec")
+QDEF(MP_QSTR_from_bytes, (const byte*)"\x35\x74\x0a" "from_bytes")
+QDEF(MP_QSTR_getattr, (const byte*)"\xc0\x17\x07" "getattr")
+QDEF(MP_QSTR_setattr, (const byte*)"\xd4\xa8\x07" "setattr")
+QDEF(MP_QSTR_globals, (const byte*)"\x9d\x49\x07" "globals")
+QDEF(MP_QSTR_hasattr, (const byte*)"\x8c\xb0\x07" "hasattr")
+QDEF(MP_QSTR_hash, (const byte*)"\xb7\x70\x04" "hash")
+QDEF(MP_QSTR_hex, (const byte*)"\x70\x50\x03" "hex")
+QDEF(MP_QSTR__percent__hash_x, (const byte*)"\x7b\x1a\x03" "%#x")
+QDEF(MP_QSTR_id, (const byte*)"\x28\x6f\x02" "id")
+QDEF(MP_QSTR_int, (const byte*)"\x16\x53\x03" "int")
+QDEF(MP_QSTR_isinstance, (const byte*)"\xb6\xbe\x0a" "isinstance")
+QDEF(MP_QSTR_issubclass, (const byte*)"\xb5\x7f\x0a" "issubclass")
+QDEF(MP_QSTR_iter, (const byte*)"\x8f\x21\x04" "iter")
+QDEF(MP_QSTR_len, (const byte*)"\x62\x40\x03" "len")
+QDEF(MP_QSTR_list, (const byte*)"\x27\x1d\x04" "list")
+QDEF(MP_QSTR_locals, (const byte*)"\x3b\xa1\x06" "locals")
+QDEF(MP_QSTR_map, (const byte*)"\xb9\x43\x03" "map")
+QDEF(MP_QSTR_namedtuple, (const byte*)"\x1e\x16\x0a" "namedtuple")
+QDEF(MP_QSTR_next, (const byte*)"\x42\x88\x04" "next")
+QDEF(MP_QSTR_oct, (const byte*)"\xfd\x5c\x03" "oct")
+QDEF(MP_QSTR__percent__hash_o, (const byte*)"\x6c\x1a\x03" "%#o")
+QDEF(MP_QSTR_open, (const byte*)"\xd1\x3a\x04" "open")
+QDEF(MP_QSTR_ord, (const byte*)"\x1c\x5e\x03" "ord")
+QDEF(MP_QSTR_path, (const byte*)"\x88\xce\x04" "path")
+QDEF(MP_QSTR_pow, (const byte*)"\x2d\x73\x03" "pow")
+QDEF(MP_QSTR_print, (const byte*)"\x54\xc6\x05" "print")
+QDEF(MP_QSTR_range, (const byte*)"\x1a\x5e\x05" "range")
+QDEF(MP_QSTR_read, (const byte*)"\xb7\xf9\x04" "read")
+QDEF(MP_QSTR_repr, (const byte*)"\xd0\xf7\x04" "repr")
+QDEF(MP_QSTR_reversed, (const byte*)"\xa1\x6e\x08" "reversed")
+QDEF(MP_QSTR_round, (const byte*)"\xe7\x25\x05" "round")
+QDEF(MP_QSTR_sorted, (const byte*)"\x5e\x15\x06" "sorted")
+QDEF(MP_QSTR_staticmethod, (const byte*)"\x62\xaf\x0c" "staticmethod")
+QDEF(MP_QSTR_sum, (const byte*)"\x2e\x8d\x03" "sum")
+QDEF(MP_QSTR_super, (const byte*)"\xc4\xb2\x05" "super")
+QDEF(MP_QSTR_str, (const byte*)"\x50\x8d\x03" "str")
+QDEF(MP_QSTR_sys, (const byte*)"\xbc\x8e\x03" "sys")
+QDEF(MP_QSTR_to_bytes, (const byte*)"\xd8\x3e\x08" "to_bytes")
+QDEF(MP_QSTR_tuple, (const byte*)"\xfd\x41\x05" "tuple")
+QDEF(MP_QSTR_type, (const byte*)"\x9d\x7f\x04" "type")
+QDEF(MP_QSTR_value, (const byte*)"\x4e\x34\x05" "value")
+QDEF(MP_QSTR_write, (const byte*)"\x98\xa8\x05" "write")
+QDEF(MP_QSTR_zip, (const byte*)"\xe6\xac\x03" "zip")
+QDEF(MP_QSTR_sep, (const byte*)"\x23\x8f\x03" "sep")
+QDEF(MP_QSTR_end, (const byte*)"\x0a\x23\x03" "end")
+QDEF(MP_QSTR_step, (const byte*)"\x57\x36\x04" "step")
+QDEF(MP_QSTR_stop, (const byte*)"\x9d\x36\x04" "stop")
+QDEF(MP_QSTR_clear, (const byte*)"\x7c\xa0\x05" "clear")
+QDEF(MP_QSTR_copy, (const byte*)"\xe0\xdb\x04" "copy")
+QDEF(MP_QSTR_fromkeys, (const byte*)"\x37\xbd\x08" "fromkeys")
+QDEF(MP_QSTR_get, (const byte*)"\x33\x3b\x03" "get")
+QDEF(MP_QSTR_items, (const byte*)"\xe3\x53\x05" "items")
+QDEF(MP_QSTR_keys, (const byte*)"\x01\x13\x04" "keys")
+QDEF(MP_QSTR_pop, (const byte*)"\x2a\x73\x03" "pop")
+QDEF(MP_QSTR_popitem, (const byte*)"\xbf\x2c\x07" "popitem")
+QDEF(MP_QSTR_setdefault, (const byte*)"\x6c\xa3\x0a" "setdefault")
+QDEF(MP_QSTR_update, (const byte*)"\xb4\x76\x06" "update")
+QDEF(MP_QSTR_values, (const byte*)"\x7d\xbe\x06" "values")
+QDEF(MP_QSTR_append, (const byte*)"\x6b\x97\x06" "append")
+QDEF(MP_QSTR_close, (const byte*)"\x33\x67\x05" "close")
+QDEF(MP_QSTR_send, (const byte*)"\xb9\x76\x04" "send")
+QDEF(MP_QSTR_throw, (const byte*)"\xb3\x44\x05" "throw")
+QDEF(MP_QSTR_count, (const byte*)"\xa6\x4d\x05" "count")
+QDEF(MP_QSTR_extend, (const byte*)"\x63\xe8\x06" "extend")
+QDEF(MP_QSTR_index, (const byte*)"\x7b\x28\x05" "index")
+QDEF(MP_QSTR_remove, (const byte*)"\x63\x8a\x06" "remove")
+QDEF(MP_QSTR_insert, (const byte*)"\x12\x54\x06" "insert")
+QDEF(MP_QSTR_sort, (const byte*)"\xbf\x9d\x04" "sort")
+QDEF(MP_QSTR_join, (const byte*)"\xa7\x5c\x04" "join")
+QDEF(MP_QSTR_strip, (const byte*)"\x29\x1e\x05" "strip")
+QDEF(MP_QSTR_lstrip, (const byte*)"\xe5\xb9\x06" "lstrip")
+QDEF(MP_QSTR_rstrip, (const byte*)"\x3b\x95\x06" "rstrip")
+QDEF(MP_QSTR_format, (const byte*)"\x26\x33\x06" "format")
+QDEF(MP_QSTR_key, (const byte*)"\x32\x6d\x03" "key")
+QDEF(MP_QSTR_reverse, (const byte*)"\x25\x2a\x07" "reverse")
+QDEF(MP_QSTR_add, (const byte*)"\x44\x32\x03" "add")
+QDEF(MP_QSTR_find, (const byte*)"\x00\x34\x04" "find")
+QDEF(MP_QSTR_rfind, (const byte*)"\xd2\x9c\x05" "rfind")
+QDEF(MP_QSTR_rindex, (const byte*)"\xe9\x2b\x06" "rindex")
+QDEF(MP_QSTR_split, (const byte*)"\xb7\x33\x05" "split")
+QDEF(MP_QSTR_rsplit, (const byte*)"\xa5\x00\x06" "rsplit")
+QDEF(MP_QSTR_startswith, (const byte*)"\x74\xe8\x0a" "startswith")
+QDEF(MP_QSTR_endswith, (const byte*)"\x1b\xa3\x08" "endswith")
+QDEF(MP_QSTR_replace, (const byte*)"\x49\x25\x07" "replace")
+QDEF(MP_QSTR_partition, (const byte*)"\x87\xe5\x09" "partition")
+QDEF(MP_QSTR_rpartition, (const byte*)"\x15\xd0\x0a" "rpartition")
+QDEF(MP_QSTR_lower, (const byte*)"\xc6\xcb\x05" "lower")
+QDEF(MP_QSTR_upper, (const byte*)"\x27\x94\x05" "upper")
+QDEF(MP_QSTR_isspace, (const byte*)"\x5b\xf8\x07" "isspace")
+QDEF(MP_QSTR_isalpha, (const byte*)"\xeb\x37\x07" "isalpha")
+QDEF(MP_QSTR_isdigit, (const byte*)"\xa8\x9a\x07" "isdigit")
+QDEF(MP_QSTR_isupper, (const byte*)"\xdd\xa7\x07" "isupper")
+QDEF(MP_QSTR_islower, (const byte*)"\xfc\x80\x07" "islower")
+QDEF(MP_QSTR_iterable, (const byte*)"\x25\x92\x08" "iterable")
+QDEF(MP_QSTR_start, (const byte*)"\x85\xef\x05" "start")
+QDEF(MP_QSTR_bound_method, (const byte*)"\x97\xa2\x0c" "bound_method")
+QDEF(MP_QSTR_closure, (const byte*)"\x74\xca\x07" "closure")
+QDEF(MP_QSTR_dict_view, (const byte*)"\x2d\xa9\x09" "dict_view")
+QDEF(MP_QSTR_function, (const byte*)"\x27\x02\x08" "function")
+QDEF(MP_QSTR_generator, (const byte*)"\x96\xc3\x09" "generator")
+QDEF(MP_QSTR_iterator, (const byte*)"\x47\xbe\x08" "iterator")
+QDEF(MP_QSTR_module, (const byte*)"\xbf\x99\x06" "module")
+QDEF(MP_QSTR_slice, (const byte*)"\xb5\xf4\x05" "slice")
+QDEF(MP_QSTR_maximum_space_recursion_space_depth_space_exceeded, (const byte*)"\x73\x1e\x20" "maximum recursion depth exceeded")
+QDEF(MP_QSTR__lt_module_gt_, (const byte*)"\xbd\x94\x08" "<module>")
+QDEF(MP_QSTR__lt_lambda_gt_, (const byte*)"\x80\x8c\x08" "<lambda>")
+QDEF(MP_QSTR__lt_listcomp_gt_, (const byte*)"\xd4\x15\x0a" "<listcomp>")
+QDEF(MP_QSTR__lt_dictcomp_gt_, (const byte*)"\xcc\x8d\x0a" "<dictcomp>")
+QDEF(MP_QSTR__lt_setcomp_gt_, (const byte*)"\x54\x51\x09" "<setcomp>")
+QDEF(MP_QSTR__lt_genexpr_gt_, (const byte*)"\x34\x6a\x09" "<genexpr>")
+QDEF(MP_QSTR__lt_string_gt_, (const byte*)"\x52\x53\x08" "<string>")
+QDEF(MP_QSTR__lt_stdin_gt_, (const byte*)"\xe3\x63\x07" "<stdin>")
+QDEF(MP_QSTR_argv, (const byte*)"\xc7\xc6\x04" "argv")
+QDEF(MP_QSTR_byteorder, (const byte*)"\x61\x99\x09" "byteorder")
+QDEF(MP_QSTR_big, (const byte*)"\xe9\x48\x03" "big")
+QDEF(MP_QSTR_exit, (const byte*)"\x85\xbe\x04" "exit")
+QDEF(MP_QSTR_little, (const byte*)"\x89\x6a\x06" "little")
+QDEF(MP_QSTR_stdin, (const byte*)"\x21\x04\x05" "stdin")
+QDEF(MP_QSTR_stdout, (const byte*)"\x08\x83\x06" "stdout")
+QDEF(MP_QSTR_stderr, (const byte*)"\xa3\x58\x06" "stderr")
+QDEF(MP_QSTR_version, (const byte*)"\xbf\xd3\x07" "version")
+QDEF(MP_QSTR_version_info, (const byte*)"\x6e\x0a\x0c" "version_info")
+QDEF(MP_QSTR_implementation, (const byte*)"\x17\x2d\x0e" "implementation")
+QDEF(MP_QSTR_modules, (const byte*)"\xec\xd1\x07" "modules")
+QDEF(MP_QSTR_print_exception, (const byte*)"\x1c\x22\x0f" "print_exception")
+QDEF(MP_QSTR_struct, (const byte*)"\x12\x90\x06" "struct")
+QDEF(MP_QSTR_ustruct, (const byte*)"\x47\x08\x07" "ustruct")
+QDEF(MP_QSTR_pack, (const byte*)"\xbc\xd1\x04" "pack")
+QDEF(MP_QSTR_pack_into, (const byte*)"\x1f\xa9\x09" "pack_into")
+QDEF(MP_QSTR_unpack, (const byte*)"\x07\x3c\x06" "unpack")
+QDEF(MP_QSTR_unpack_from, (const byte*)"\x0e\x6d\x0b" "unpack_from")
+QDEF(MP_QSTR_calcsize, (const byte*)"\x4d\x38\x08" "calcsize")
+QDEF(MP_QSTR_umachine, (const byte*)"\x95\x7f\x08" "umachine")
+QDEF(MP_QSTR_mem, (const byte*)"\x20\x44\x03" "mem")
+QDEF(MP_QSTR_mem8, (const byte*)"\x18\xc8\x04" "mem8")
+QDEF(MP_QSTR_mem16, (const byte*)"\x07\xca\x05" "mem16")
+QDEF(MP_QSTR_mem32, (const byte*)"\x41\xca\x05" "mem32")
+QDEF(MP_QSTR_machine, (const byte*)"\x60\xab\x07" "machine")
+QDEF(MP_QSTR_reset, (const byte*)"\x10\xf4\x05" "reset")
+QDEF(MP_QSTR_mbed, (const byte*)"\x0b\xb5\x04" "mbed")
+QDEF(MP_QSTR_DigitalOut, (const byte*)"\x11\xcb\x0a" "DigitalOut")
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/mp-readline/readline.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,447 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "py/mpstate.h"
+#include "py/repl.h"
+#include "py/mphal.h"
+#include "readline.h"
+
+#if 0 // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf printf
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#endif
+
+#define READLINE_HIST_SIZE (MP_ARRAY_SIZE(MP_STATE_PORT(readline_hist)))
+
+enum { ESEQ_NONE, ESEQ_ESC, ESEQ_ESC_BRACKET, ESEQ_ESC_BRACKET_DIGIT, ESEQ_ESC_O };
+
+// One-time init of the readline history: zero every history slot so all
+// entries read as NULL (empty). Must run before any readline activity.
+void readline_init0(void) {
+    memset(MP_STATE_PORT(readline_hist), 0, READLINE_HIST_SIZE * sizeof(const char*));
+}
+
+// Duplicate a NUL-terminated string into freshly allocated memory.
+// Returns NULL (instead of raising) when the allocation fails, hence
+// the "_maybe" suffix matching m_new_maybe's semantics.
+STATIC char *str_dup_maybe(const char *str) {
+    size_t n_bytes = strlen(str) + 1; // include the terminating NUL
+    char *copy = m_new_maybe(char, n_bytes);
+    if (copy != NULL) {
+        memcpy(copy, str, n_bytes);
+    }
+    return copy;
+}
+
+// By default assume terminal which implements VT100 commands...
+#ifndef MICROPY_HAL_HAS_VT100
+#define MICROPY_HAL_HAS_VT100 (1)
+#endif
+
+// ...and provide the implementation using them
+#if MICROPY_HAL_HAS_VT100
+// Move the terminal cursor left by `pos` characters.
+// Small moves use raw backspace bytes; larger moves use the VT100
+// cursor-backward command ESC [ <pos> D.
+STATIC void mp_hal_move_cursor_back(uint pos) {
+    if (pos <= 4) {
+        // fast path for the common case of moving back a few chars
+        // (sending 0 bytes when pos == 0 is a harmless no-op)
+        mp_hal_stdout_tx_strn("\b\b\b\b", pos);
+    } else {
+        // ESC [ <decimal pos> D -- buffer sized for a full 32-bit count
+        // plus the trailing 'D' and the NUL snprintf writes
+        char vt100_command[sizeof("\x1b[") + 10 + 1];
+        int n = snprintf(vt100_command, sizeof(vt100_command), "\x1b[%uD", pos);
+        // Bounds check guards against encoding errors and truncation.
+        // (The previous code patched 'D' over the NUL at vt100_command[n],
+        // which wrote one byte past a 6-byte buffer whenever pos >= 1000,
+        // because snprintf returns the untruncated length.)
+        if (n > 0 && (size_t)n < sizeof(vt100_command)) {
+            mp_hal_stdout_tx_strn(vt100_command, n);
+        }
+    }
+}
+
+// Erase from the cursor to the end of the line using the VT100 EL command
+// ("ESC [ K"), which clears unconditionally — so the character count is
+// unused in this implementation (other ports may need it).
+STATIC void mp_hal_erase_line_from_cursor(uint n_chars_to_erase) {
+    (void)n_chars_to_erase;
+    mp_hal_stdout_tx_strn("\x1b[K", 3);
+}
+#endif
+
+// All state for the in-progress readline edit session.
+typedef struct _readline_t {
+    vstr_t *line;           // buffer being edited (may contain a fixed prefix)
+    size_t orig_line_len;   // length of pre-existing text; positions below this are not editable
+    int escape_seq;         // ESEQ_* state of the escape-sequence parser
+    int hist_cur;           // history entry shown; presumably -1 when on the live line — confirm in readline init
+    size_t cursor_pos;      // absolute cursor position within `line`
+    char escape_seq_buf[1]; // saved digit of an "ESC [ <digit> ~" sequence
+    const char *prompt;     // prompt string, reprinted after a completion list
+} readline_t;
+
+// Single global edit state — readline is not reentrant.
+STATIC readline_t rl;
+
+// Feed one input byte to the readline state machine.
+// Returns:
+//   -1 : more input needed (the usual case)
+//    0 : a complete line was entered ('\r' seen); the line has been pushed
+//        to history and the caller can read it from rl.line
+//   >0 : the CHAR_CTRL_* code of a control char typed on an empty line
+//        (e.g. CTRL-C / CTRL-D), returned for the caller to act on
+// The function first mutates rl.line / parser state, then performs all
+// terminal redrawing at the bottom driven by the three redraw_* variables.
+int readline_process_char(int c) {
+    size_t last_line_len = rl.line->len;
+    int redraw_step_back = 0;
+    bool redraw_from_cursor = false;
+    int redraw_step_forward = 0;
+    if (rl.escape_seq == ESEQ_NONE) {
+        if (CHAR_CTRL_A <= c && c <= CHAR_CTRL_E && vstr_len(rl.line) == rl.orig_line_len) {
+            // control character with empty line
+            return c;
+        } else if (c == CHAR_CTRL_A) {
+            // CTRL-A with non-empty line is go-to-start-of-line
+            goto home_key;
+        #if MICROPY_REPL_EMACS_KEYS
+        } else if (c == CHAR_CTRL_B) {
+            // CTRL-B with non-empty line is go-back-one-char
+            goto left_arrow_key;
+        #endif
+        } else if (c == CHAR_CTRL_C) {
+            // CTRL-C with non-empty line is cancel
+            return c;
+        #if MICROPY_REPL_EMACS_KEYS
+        } else if (c == CHAR_CTRL_D) {
+            // CTRL-D with non-empty line is delete-at-cursor
+            goto delete_key;
+        #endif
+        } else if (c == CHAR_CTRL_E) {
+            // CTRL-E is go-to-end-of-line
+            goto end_key;
+        #if MICROPY_REPL_EMACS_KEYS
+        } else if (c == CHAR_CTRL_F) {
+            // CTRL-F with non-empty line is go-forward-one-char
+            goto right_arrow_key;
+        } else if (c == CHAR_CTRL_K) {
+            // CTRL-K is kill from cursor to end-of-line, inclusive
+            vstr_cut_tail_bytes(rl.line, last_line_len - rl.cursor_pos);
+            // set redraw parameters
+            redraw_from_cursor = true;
+        } else if (c == CHAR_CTRL_N) {
+            // CTRL-N is go to next line in history
+            goto down_arrow_key;
+        } else if (c == CHAR_CTRL_P) {
+            // CTRL-P is go to previous line in history
+            goto up_arrow_key;
+        } else if (c == CHAR_CTRL_U) {
+            // CTRL-U is kill from beginning-of-line up to cursor
+            vstr_cut_out_bytes(rl.line, rl.orig_line_len, rl.cursor_pos - rl.orig_line_len);
+            // set redraw parameters
+            redraw_step_back = rl.cursor_pos - rl.orig_line_len;
+            redraw_from_cursor = true;
+        #endif
+        } else if (c == '\r') {
+            // newline
+            mp_hal_stdout_tx_str("\r\n");
+            readline_push_history(vstr_null_terminated_str(rl.line) + rl.orig_line_len);
+            return 0;
+        } else if (c == 27) {
+            // escape sequence
+            rl.escape_seq = ESEQ_ESC;
+        } else if (c == 8 || c == 127) {
+            // backspace/delete
+            if (rl.cursor_pos > rl.orig_line_len) {
+                // work out how many chars to backspace
+                #if MICROPY_REPL_AUTO_INDENT
+                // if everything from the start of the line to the cursor is
+                // spaces, backspace a whole 4-space indent level at once
+                int nspace = 0;
+                for (size_t i = rl.orig_line_len; i < rl.cursor_pos; i++) {
+                    if (rl.line->buf[i] != ' ') {
+                        nspace = 0;
+                        break;
+                    }
+                    nspace += 1;
+                }
+                if (nspace < 4) {
+                    nspace = 1;
+                } else {
+                    nspace = 4;
+                }
+                #else
+                int nspace = 1;
+                #endif
+
+                // do the backspace
+                vstr_cut_out_bytes(rl.line, rl.cursor_pos - nspace, nspace);
+                // set redraw parameters
+                redraw_step_back = nspace;
+                redraw_from_cursor = true;
+            }
+        #if MICROPY_HELPER_REPL
+        } else if (c == 9) {
+            // tab magic
+            const char *compl_str;
+            mp_uint_t compl_len = mp_repl_autocomplete(rl.line->buf + rl.orig_line_len, rl.cursor_pos - rl.orig_line_len, &mp_plat_print, &compl_str);
+            if (compl_len == 0) {
+                // no match
+            } else if (compl_len == (mp_uint_t)(-1)) {
+                // many matches: mp_repl_autocomplete printed the list, so
+                // reprint the prompt and the text typed so far
+                mp_hal_stdout_tx_str(rl.prompt);
+                mp_hal_stdout_tx_strn(rl.line->buf + rl.orig_line_len, rl.cursor_pos - rl.orig_line_len);
+                redraw_from_cursor = true;
+            } else {
+                // one match
+                for (mp_uint_t i = 0; i < compl_len; ++i) {
+                    vstr_ins_byte(rl.line, rl.cursor_pos + i, *compl_str++);
+                }
+                // set redraw parameters
+                redraw_from_cursor = true;
+                redraw_step_forward = compl_len;
+            }
+        #endif
+        } else if (32 <= c && c <= 126) {
+            // printable character
+            vstr_ins_char(rl.line, rl.cursor_pos, c);
+            // set redraw parameters
+            redraw_from_cursor = true;
+            redraw_step_forward = 1;
+        }
+    } else if (rl.escape_seq == ESEQ_ESC) {
+        switch (c) {
+            case '[':
+                rl.escape_seq = ESEQ_ESC_BRACKET;
+                break;
+            case 'O':
+                rl.escape_seq = ESEQ_ESC_O;
+                break;
+            default:
+                DEBUG_printf("(ESC %d)", c);
+                rl.escape_seq = ESEQ_NONE;
+        }
+    } else if (rl.escape_seq == ESEQ_ESC_BRACKET) {
+        // the *_key labels below are also entered via goto from the
+        // CTRL-key handlers above, so the same code serves both bindings
+        if ('0' <= c && c <= '9') {
+            rl.escape_seq = ESEQ_ESC_BRACKET_DIGIT;
+            rl.escape_seq_buf[0] = c;
+        } else {
+            rl.escape_seq = ESEQ_NONE;
+            if (c == 'A') {
+#if MICROPY_REPL_EMACS_KEYS
+up_arrow_key:
+#endif
+                // up arrow
+                if (rl.hist_cur + 1 < (int)READLINE_HIST_SIZE && MP_STATE_PORT(readline_hist)[rl.hist_cur + 1] != NULL) {
+                    // increase hist num
+                    rl.hist_cur += 1;
+                    // set line to history
+                    rl.line->len = rl.orig_line_len;
+                    vstr_add_str(rl.line, MP_STATE_PORT(readline_hist)[rl.hist_cur]);
+                    // set redraw parameters
+                    redraw_step_back = rl.cursor_pos - rl.orig_line_len;
+                    redraw_from_cursor = true;
+                    redraw_step_forward = rl.line->len - rl.orig_line_len;
+                }
+            } else if (c == 'B') {
+#if MICROPY_REPL_EMACS_KEYS
+down_arrow_key:
+#endif
+                // down arrow
+                if (rl.hist_cur >= 0) {
+                    // decrease hist num
+                    rl.hist_cur -= 1;
+                    // set line to history
+                    vstr_cut_tail_bytes(rl.line, rl.line->len - rl.orig_line_len);
+                    if (rl.hist_cur >= 0) {
+                        vstr_add_str(rl.line, MP_STATE_PORT(readline_hist)[rl.hist_cur]);
+                    }
+                    // set redraw parameters
+                    redraw_step_back = rl.cursor_pos - rl.orig_line_len;
+                    redraw_from_cursor = true;
+                    redraw_step_forward = rl.line->len - rl.orig_line_len;
+                }
+            } else if (c == 'C') {
+#if MICROPY_REPL_EMACS_KEYS
+right_arrow_key:
+#endif
+                // right arrow
+                if (rl.cursor_pos < rl.line->len) {
+                    redraw_step_forward = 1;
+                }
+            } else if (c == 'D') {
+#if MICROPY_REPL_EMACS_KEYS
+left_arrow_key:
+#endif
+                // left arrow
+                if (rl.cursor_pos > rl.orig_line_len) {
+                    redraw_step_back = 1;
+                }
+            } else if (c == 'H') {
+                // home
+                goto home_key;
+            } else if (c == 'F') {
+                // end
+                goto end_key;
+            } else {
+                DEBUG_printf("(ESC [ %d)", c);
+            }
+        }
+    } else if (rl.escape_seq == ESEQ_ESC_BRACKET_DIGIT) {
+        if (c == '~') {
+            if (rl.escape_seq_buf[0] == '1' || rl.escape_seq_buf[0] == '7') {
+home_key:
+                redraw_step_back = rl.cursor_pos - rl.orig_line_len;
+            } else if (rl.escape_seq_buf[0] == '4' || rl.escape_seq_buf[0] == '8') {
+end_key:
+                redraw_step_forward = rl.line->len - rl.cursor_pos;
+            } else if (rl.escape_seq_buf[0] == '3') {
+                // delete
+#if MICROPY_REPL_EMACS_KEYS
+delete_key:
+#endif
+                if (rl.cursor_pos < rl.line->len) {
+                    vstr_cut_out_bytes(rl.line, rl.cursor_pos, 1);
+                    redraw_from_cursor = true;
+                }
+            } else {
+                DEBUG_printf("(ESC [ %c %d)", rl.escape_seq_buf[0], c);
+            }
+        } else {
+            DEBUG_printf("(ESC [ %c %d)", rl.escape_seq_buf[0], c);
+        }
+        rl.escape_seq = ESEQ_NONE;
+    } else if (rl.escape_seq == ESEQ_ESC_O) {
+        switch (c) {
+            case 'H':
+                goto home_key;
+            case 'F':
+                goto end_key;
+            default:
+                DEBUG_printf("(ESC O %d)", c);
+                rl.escape_seq = ESEQ_NONE;
+        }
+    } else {
+        rl.escape_seq = ESEQ_NONE;
+    }
+
+    // redraw command prompt, efficiently
+    if (redraw_step_back > 0) {
+        mp_hal_move_cursor_back(redraw_step_back);
+        rl.cursor_pos -= redraw_step_back;
+    }
+    if (redraw_from_cursor) {
+        if (rl.line->len < last_line_len) {
+            // erase old chars
+            mp_hal_erase_line_from_cursor(last_line_len - rl.cursor_pos);
+        }
+        // draw new chars
+        mp_hal_stdout_tx_strn(rl.line->buf + rl.cursor_pos, rl.line->len - rl.cursor_pos);
+        // move cursor forward if needed (already moved forward by length of line, so move it back)
+        mp_hal_move_cursor_back(rl.line->len - (rl.cursor_pos + redraw_step_forward));
+        rl.cursor_pos += redraw_step_forward;
+    } else if (redraw_step_forward > 0) {
+        // draw over old chars to move cursor forwards
+        mp_hal_stdout_tx_strn(rl.line->buf + rl.cursor_pos, redraw_step_forward);
+        rl.cursor_pos += redraw_step_forward;
+    }
+
+    return -1;
+}
+
+#if MICROPY_REPL_AUTO_INDENT
+// Auto-indent a freshly started input line: when the buffer ends in '\n',
+// replicate the previous line's leading 4-space indent units onto the new
+// line (one extra unit if the previous line ended with ':').  The spaces are
+// appended to rl.line AND echoed to the terminal, and rl.cursor_pos advances.
+STATIC void readline_auto_indent(void) {
+    vstr_t *line = rl.line;
+    if (line->len > 1 && line->buf[line->len - 1] == '\n') {
+        int i;
+        // scan back to the start of the previous line (char after prior '\n')
+        for (i = line->len - 1; i > 0; i--) {
+            if (line->buf[i - 1] == '\n') {
+                break;
+            }
+        }
+        size_t j;
+        // find the first non-space character of the previous line
+        for (j = i; j < line->len; j++) {
+            if (line->buf[j] != ' ') {
+                break;
+            }
+        }
+        // i=start of line; j=first non-space
+        if (i > 0 && j + 1 == line->len) {
+            // previous line is not first line and is all spaces
+            for (size_t k = i - 1; k > 0; --k) {
+                if (line->buf[k - 1] == '\n') {
+                    // don't auto-indent if last 2 lines are all spaces
+                    return;
+                } else if (line->buf[k - 1] != ' ') {
+                    // 2nd previous line is not all spaces
+                    break;
+                }
+            }
+        }
+        // number of complete 4-space indent units on the previous line
+        int n = (j - i) / 4;
+        if (line->buf[line->len - 2] == ':') {
+            n += 1; // previous line opens a block: indent one level deeper
+        }
+        while (n-- > 0) {
+            // NOTE(review): the string literal below is paired with length 4;
+            // it should be four spaces ("    ") -- looks whitespace-collapsed
+            // in this copy, confirm against upstream lib/mp-readline/readline.c.
+            vstr_add_strn(line, " ", 4);
+            mp_hal_stdout_tx_strn(" ", 4);
+            rl.cursor_pos += 4;
+        }
+    }
+}
+#endif
+
+// Called after the caller has emitted a newline and wants input to continue
+// on a fresh line: record where the new line begins, show the (continuation)
+// prompt and, when enabled, auto-indent.
+void readline_note_newline(const char *prompt) {
+    rl.prompt = prompt;
+    rl.orig_line_len = rl.line->len;
+    rl.cursor_pos = rl.line->len;
+    mp_hal_stdout_tx_str(prompt);
+    #if MICROPY_REPL_AUTO_INDENT
+    readline_auto_indent();
+    #endif
+}
+
+// Reset the readline state machine to edit into `line` (which may already
+// hold text that must be preserved) and emit the prompt.
+void readline_init(vstr_t *line, const char *prompt) {
+    rl.line = line;
+    rl.prompt = prompt;
+    rl.escape_seq = ESEQ_NONE;
+    rl.escape_seq_buf[0] = 0;
+    rl.hist_cur = -1;
+    rl.orig_line_len = line->len;
+    rl.cursor_pos = line->len;
+    mp_hal_stdout_tx_str(prompt);
+    #if MICROPY_REPL_AUTO_INDENT
+    readline_auto_indent();
+    #endif
+}
+
+// Blocking readline: pull characters from stdin and feed them through the
+// state machine until it reports a result (>= 0), which is returned.
+int readline(vstr_t *line, const char *prompt) {
+    readline_init(line, prompt);
+    int r;
+    do {
+        r = readline_process_char(mp_hal_stdin_rx_chr());
+    } while (r < 0);
+    return r;
+}
+
+// Push `line` onto the history stack unless it is empty or identical to the
+// most recent entry.  Existing entries shift down one slot; the oldest
+// reference is overwritten.  If duplicating the string fails, history is
+// left untouched.
+void readline_push_history(const char *line) {
+    if (line[0] == '\0') {
+        return; // never store empty lines
+    }
+    const char *most_recent = MP_STATE_PORT(readline_hist)[0];
+    if (most_recent != NULL && strcmp(most_recent, line) == 0) {
+        return; // same as the previous entry: avoid duplicates
+    }
+    char *copy = str_dup_maybe(line);
+    if (copy == NULL) {
+        return; // allocation failed: silently skip
+    }
+    for (int i = READLINE_HIST_SIZE - 1; i > 0; i--) {
+        MP_STATE_PORT(readline_hist)[i] = MP_STATE_PORT(readline_hist)[i - 1];
+    }
+    MP_STATE_PORT(readline_hist)[0] = copy;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/lib/mp-readline/readline.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,44 @@ +/* + * This file is part of the Micro Python project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013, 2014 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#define CHAR_CTRL_A (1) +#define CHAR_CTRL_B (2) +#define CHAR_CTRL_C (3) +#define CHAR_CTRL_D (4) +#define CHAR_CTRL_E (5) +#define CHAR_CTRL_F (6) +#define CHAR_CTRL_K (11) +#define CHAR_CTRL_N (14) +#define CHAR_CTRL_P (16) +#define CHAR_CTRL_U (21) + +void readline_init0(void); +int readline(vstr_t *line, const char *prompt); +void readline_push_history(const char *line); + +void readline_init(vstr_t *line, const char *prompt); +void readline_note_newline(const char *prompt); +int readline_process_char(int c);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/utils/pyexec.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,507 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "py/nlr.h"
+#include "py/compile.h"
+#include "py/runtime.h"
+#include "py/repl.h"
+#include "py/gc.h"
+#include "py/frozenmod.h"
+#include "py/mphal.h"
+#if defined(USE_DEVICE_MODE)
+#include "irq.h"
+#include "usb.h"
+#endif
+#include "readline.h"
+#include "lib/utils/pyexec.h"
+#include "genhdr/mpversion.h"
+
+pyexec_mode_kind_t pyexec_mode_kind = PYEXEC_MODE_FRIENDLY_REPL;
+
+// Whether to print timing/qstr/GC statistics after each REPL command;
+// toggled at runtime via pyb_set_repl_info().  Idiom fix: initialise a
+// bool with `false` rather than the int literal 0.
+STATIC bool repl_display_debugging_info = false;
+
+#define EXEC_FLAG_PRINT_EOF (1)       // frame output/exceptions with \x04 (ctrl-D) markers
+#define EXEC_FLAG_ALLOW_DEBUGGING (2) // honour repl_display_debugging_info
+#define EXEC_FLAG_IS_REPL (4)         // compile as REPL input (flag passed to mp_compile)
+
+// parses, compiles and executes the code in the lexer
+// frees the lexer before returning
+// EXEC_FLAG_PRINT_EOF prints 2 EOF chars: 1 after normal output, 1 after exception output
+// EXEC_FLAG_ALLOW_DEBUGGING allows debugging info to be printed after executing the code
+// EXEC_FLAG_IS_REPL is used for REPL inputs (flag passed on to mp_compile)
+// Returns 1 on success, 0 on an uncaught exception, or PYEXEC_FORCED_EXIT
+// when the script raised SystemExit.
+STATIC int parse_compile_execute(mp_lexer_t *lex, mp_parse_input_kind_t input_kind, int exec_flags) {
+    int ret = 0;
+    uint32_t start = 0;
+
+    nlr_buf_t nlr;
+    if (nlr_push(&nlr) == 0) {
+        // parse and compile the script
+        qstr source_name = lex->source_name;
+        mp_parse_tree_t parse_tree = mp_parse(lex, input_kind);
+        mp_obj_t module_fun = mp_compile(&parse_tree, source_name, MP_EMIT_OPT_NONE, exec_flags & EXEC_FLAG_IS_REPL);
+
+        // execute code
+        mp_hal_set_interrupt_char(CHAR_CTRL_C); // allow ctrl-C to interrupt us
+        start = mp_hal_ticks_ms();
+        mp_call_function_0(module_fun);
+        mp_hal_set_interrupt_char(-1); // disable interrupt
+        nlr_pop();
+        ret = 1;
+        if (exec_flags & EXEC_FLAG_PRINT_EOF) {
+            mp_hal_stdout_tx_strn("\x04", 1);
+        }
+    } else {
+        // uncaught exception: nlr_push returned non-zero and nlr.ret_val
+        // holds the exception object
+        // FIXME it could be that an interrupt happens just before we disable it here
+        mp_hal_set_interrupt_char(-1); // disable interrupt
+        // print EOF after normal output
+        if (exec_flags & EXEC_FLAG_PRINT_EOF) {
+            mp_hal_stdout_tx_strn("\x04", 1);
+        }
+        // check for SystemExit
+        if (mp_obj_is_subclass_fast(mp_obj_get_type((mp_obj_t)nlr.ret_val), &mp_type_SystemExit)) {
+            // at the moment, the value of SystemExit is unused
+            ret = PYEXEC_FORCED_EXIT;
+        } else {
+            mp_obj_print_exception(&mp_plat_print, (mp_obj_t)nlr.ret_val);
+            ret = 0;
+        }
+    }
+
+    // display debugging info if wanted
+    if ((exec_flags & EXEC_FLAG_ALLOW_DEBUGGING) && repl_display_debugging_info) {
+        mp_uint_t ticks = mp_hal_ticks_ms() - start; // TODO implement a function that does this properly
+        printf("took " UINT_FMT " ms\n", ticks);
+        gc_collect();
+        // qstr info
+        {
+            mp_uint_t n_pool, n_qstr, n_str_data_bytes, n_total_bytes;
+            qstr_pool_info(&n_pool, &n_qstr, &n_str_data_bytes, &n_total_bytes);
+            printf("qstr:\n n_pool=" UINT_FMT "\n n_qstr=" UINT_FMT "\n n_str_data_bytes=" UINT_FMT "\n n_total_bytes=" UINT_FMT "\n", n_pool, n_qstr, n_str_data_bytes, n_total_bytes);
+        }
+
+        // GC info
+        gc_dump_info();
+    }
+
+    // final EOF marker after any exception output
+    if (exec_flags & EXEC_FLAG_PRINT_EOF) {
+        mp_hal_stdout_tx_strn("\x04", 1);
+    }
+
+    return ret;
+}
+
+#if MICROPY_REPL_EVENT_DRIVEN
+
+typedef struct _repl_t {
+    // This structure originally also held current REPL line,
+    // but it was moved to MP_STATE_VM(repl_line) as containing
+    // root pointer. Still keep structure in case more state
+    // will be added later.
+    //vstr_t line;
+    // true while in the middle of entering a multi-line (compound) statement
+    bool cont_line;
+} repl_t;
+
+// Singleton state for the event-driven REPL.
+repl_t repl;
+
+STATIC int pyexec_raw_repl_process_char(int c);
+STATIC int pyexec_friendly_repl_process_char(int c);
+
+// Initialise the event-driven REPL: allocate the shared line buffer and
+// kick the active state machine so it prints its banner and prompt.
+void pyexec_event_repl_init(void) {
+    MP_STATE_VM(repl_line) = vstr_new_size(32);
+    repl.cont_line = false;
+    readline_init(MP_STATE_VM(repl_line), ">>> ");
+    if (pyexec_mode_kind != PYEXEC_MODE_RAW_REPL) {
+        pyexec_friendly_repl_process_char(CHAR_CTRL_B);
+    } else {
+        pyexec_raw_repl_process_char(CHAR_CTRL_A);
+    }
+}
+
+// Process one character of the raw (machine-oriented) REPL protocol.
+// Returns 0 to continue, or PYEXEC_FORCED_EXIT to request a soft reset.
+STATIC int pyexec_raw_repl_process_char(int c) {
+    if (c == CHAR_CTRL_A) {
+        // reset raw REPL
+        mp_hal_stdout_tx_str("raw REPL; CTRL-B to exit\r\n");
+        goto reset;
+    } else if (c == CHAR_CTRL_B) {
+        // change to friendly REPL
+        pyexec_mode_kind = PYEXEC_MODE_FRIENDLY_REPL;
+        repl.cont_line = false;
+        pyexec_friendly_repl_process_char(CHAR_CTRL_B);
+        return 0;
+    } else if (c == CHAR_CTRL_C) {
+        // clear line
+        vstr_reset(MP_STATE_VM(repl_line));
+        return 0;
+    } else if (c == CHAR_CTRL_D) {
+        // input finished; fall through to execute the buffered code
+    } else {
+        // let through any other raw 8-bit value
+        vstr_add_byte(MP_STATE_VM(repl_line), c);
+        return 0;
+    }
+
+    // only reached on ctrl-D
+    // indicate reception of command
+    mp_hal_stdout_tx_str("OK");
+
+    if (MP_STATE_VM(repl_line)->len == 0) {
+        // exit for a soft reset
+        mp_hal_stdout_tx_str("\r\n");
+        vstr_clear(MP_STATE_VM(repl_line));
+        return PYEXEC_FORCED_EXIT;
+    }
+
+    mp_lexer_t *lex = mp_lexer_new_from_str_len(MP_QSTR__lt_stdin_gt_, MP_STATE_VM(repl_line)->buf, MP_STATE_VM(repl_line)->len, 0);
+    if (lex == NULL) {
+        mp_hal_stdout_tx_str("\x04MemoryError\r\n\x04");
+    } else {
+        int ret = parse_compile_execute(lex, MP_PARSE_FILE_INPUT, EXEC_FLAG_PRINT_EOF);
+        if (ret & PYEXEC_FORCED_EXIT) {
+            return ret;
+        }
+    }
+
+reset:
+    // clear the buffer and show the raw-mode prompt
+    vstr_reset(MP_STATE_VM(repl_line));
+    mp_hal_stdout_tx_str(">");
+
+    return 0;
+}
+
+// Process one character of the friendly (interactive) REPL.  The char is fed
+// through readline; action is taken only when readline yields a control
+// character or a completed line (ret >= 0).  Returns 0 to continue, or
+// PYEXEC_FORCED_EXIT to request a soft reset.
+STATIC int pyexec_friendly_repl_process_char(int c) {
+    int ret = readline_process_char(c);
+
+    if (!repl.cont_line) {
+
+        if (ret == CHAR_CTRL_A) {
+            // change to raw REPL
+            pyexec_mode_kind = PYEXEC_MODE_RAW_REPL;
+            mp_hal_stdout_tx_str("\r\n");
+            pyexec_raw_repl_process_char(CHAR_CTRL_A);
+            return 0;
+        } else if (ret == CHAR_CTRL_B) {
+            // reset friendly REPL
+            mp_hal_stdout_tx_str("\r\n");
+            mp_hal_stdout_tx_str("MicroPython " MICROPY_GIT_TAG " on " MICROPY_BUILD_DATE "; " MICROPY_HW_BOARD_NAME " with " MICROPY_HW_MCU_NAME "\r\n");
+            mp_hal_stdout_tx_str("Type \"help()\" for more information.\r\n");
+            goto input_restart;
+        } else if (ret == CHAR_CTRL_C) {
+            // break
+            mp_hal_stdout_tx_str("\r\n");
+            goto input_restart;
+        } else if (ret == CHAR_CTRL_D) {
+            // exit for a soft reset
+            mp_hal_stdout_tx_str("\r\n");
+            vstr_clear(MP_STATE_VM(repl_line));
+            return PYEXEC_FORCED_EXIT;
+        }
+
+        // line not yet complete
+        if (ret < 0) {
+            return 0;
+        }
+
+        // NOTE: jumps into the else-branch below; valid C, shared exec path
+        if (!mp_repl_continue_with_input(vstr_null_terminated_str(MP_STATE_VM(repl_line)))) {
+            goto exec;
+        }
+
+        // input needs continuation: switch to multi-line mode
+        vstr_add_byte(MP_STATE_VM(repl_line), '\n');
+        repl.cont_line = true;
+        readline_note_newline("... ");
+        return 0;
+
+    } else {
+
+        if (ret == CHAR_CTRL_C) {
+            // cancel everything
+            mp_hal_stdout_tx_str("\r\n");
+            repl.cont_line = false;
+            goto input_restart;
+        } else if (ret == CHAR_CTRL_D) {
+            // stop entering compound statement
+            goto exec;
+        }
+
+        // line not yet complete
+        if (ret < 0) {
+            return 0;
+        }
+
+        if (mp_repl_continue_with_input(vstr_null_terminated_str(MP_STATE_VM(repl_line)))) {
+            vstr_add_byte(MP_STATE_VM(repl_line), '\n');
+            readline_note_newline("... ");
+            return 0;
+        }
+
+exec: ;
+        // compile and run the accumulated input
+        mp_lexer_t *lex = mp_lexer_new_from_str_len(MP_QSTR__lt_stdin_gt_, vstr_str(MP_STATE_VM(repl_line)), vstr_len(MP_STATE_VM(repl_line)), 0);
+        if (lex == NULL) {
+            printf("MemoryError\n");
+        } else {
+            int ret = parse_compile_execute(lex, MP_PARSE_SINGLE_INPUT, EXEC_FLAG_ALLOW_DEBUGGING | EXEC_FLAG_IS_REPL);
+            if (ret & PYEXEC_FORCED_EXIT) {
+                return ret;
+            }
+        }
+
+input_restart:
+        // start a fresh input line
+        vstr_reset(MP_STATE_VM(repl_line));
+        repl.cont_line = false;
+        readline_init(MP_STATE_VM(repl_line), ">>> ");
+        return 0;
+    }
+}
+
+// Non-zero while a REPL character is being processed.
+uint8_t pyexec_repl_active;
+
+// Dispatch one input character to whichever REPL state machine is active.
+int pyexec_event_repl_process_char(int c) {
+    pyexec_repl_active = 1;
+    int res = (pyexec_mode_kind == PYEXEC_MODE_RAW_REPL)
+        ? pyexec_raw_repl_process_char(c)
+        : pyexec_friendly_repl_process_char(c);
+    pyexec_repl_active = 0;
+    return res;
+}
+
+#else // MICROPY_REPL_EVENT_DRIVEN
+
+// Blocking raw (machine-oriented) REPL loop.
+// Returns 0 when switching to the friendly REPL, or PYEXEC_FORCED_EXIT
+// for a soft reset (ctrl-D on an empty line).
+int pyexec_raw_repl(void) {
+    vstr_t line;
+    vstr_init(&line, 32);
+
+raw_repl_reset:
+    mp_hal_stdout_tx_str("raw REPL; CTRL-B to exit\r\n");
+
+    for (;;) {
+        vstr_reset(&line);
+        mp_hal_stdout_tx_str(">");
+        // accumulate one command, terminated by ctrl-D
+        for (;;) {
+            int c = mp_hal_stdin_rx_chr();
+            if (c == CHAR_CTRL_A) {
+                // reset raw REPL
+                goto raw_repl_reset;
+            } else if (c == CHAR_CTRL_B) {
+                // change to friendly REPL
+                mp_hal_stdout_tx_str("\r\n");
+                vstr_clear(&line);
+                pyexec_mode_kind = PYEXEC_MODE_FRIENDLY_REPL;
+                return 0;
+            } else if (c == CHAR_CTRL_C) {
+                // clear line
+                vstr_reset(&line);
+            } else if (c == CHAR_CTRL_D) {
+                // input finished
+                break;
+            } else {
+                // let through any other raw 8-bit value
+                vstr_add_byte(&line, c);
+            }
+        }
+
+        // indicate reception of command
+        mp_hal_stdout_tx_str("OK");
+
+        if (line.len == 0) {
+            // exit for a soft reset
+            mp_hal_stdout_tx_str("\r\n");
+            vstr_clear(&line);
+            return PYEXEC_FORCED_EXIT;
+        }
+
+        mp_lexer_t *lex = mp_lexer_new_from_str_len(MP_QSTR__lt_stdin_gt_, line.buf, line.len, 0);
+        if (lex == NULL) {
+            printf("\x04MemoryError\n\x04");
+        } else {
+            int ret = parse_compile_execute(lex, MP_PARSE_FILE_INPUT, EXEC_FLAG_PRINT_EOF);
+            if (ret & PYEXEC_FORCED_EXIT) {
+                return ret;
+            }
+        }
+    }
+}
+
+// Blocking friendly (interactive) REPL loop, including paste mode (ctrl-E)
+// and multi-line continuation.  Returns 0 when switching to the raw REPL,
+// or PYEXEC_FORCED_EXIT for a soft reset (ctrl-D).
+int pyexec_friendly_repl(void) {
+    vstr_t line;
+    vstr_init(&line, 32);
+
+#if defined(USE_HOST_MODE) && MICROPY_HW_HAS_LCD
+    // in host mode, we enable the LCD for the repl
+    mp_obj_t lcd_o = mp_call_function_0(mp_load_name(qstr_from_str("LCD")));
+    mp_call_function_1(mp_load_attr(lcd_o, qstr_from_str("light")), mp_const_true);
+#endif
+
+friendly_repl_reset:
+    mp_hal_stdout_tx_str("MicroPython " MICROPY_GIT_TAG " on " MICROPY_BUILD_DATE "; " MICROPY_HW_BOARD_NAME " with " MICROPY_HW_MCU_NAME "\r\n");
+    mp_hal_stdout_tx_str("Type \"help()\" for more information.\r\n");
+
+    // to test ctrl-C
+    /*
+    {
+        uint32_t x[4] = {0x424242, 0xdeaddead, 0x242424, 0xdeadbeef};
+        for (;;) {
+            nlr_buf_t nlr;
+            printf("pyexec_repl: %p\n", x);
+            mp_hal_set_interrupt_char(CHAR_CTRL_C);
+            if (nlr_push(&nlr) == 0) {
+                for (;;) {
+                }
+            } else {
+                printf("break\n");
+            }
+        }
+    }
+    */
+
+    for (;;) {
+        input_restart:
+
+        #if defined(USE_DEVICE_MODE)
+        if (usb_vcp_is_enabled()) {
+            // If the user gets to here and interrupts are disabled then
+            // they'll never see the prompt, traceback etc. The USB REPL needs
+            // interrupts to be enabled or no transfers occur. So we try to
+            // do the user a favor and reenable interrupts.
+            if (query_irq() == IRQ_STATE_DISABLED) {
+                enable_irq(IRQ_STATE_ENABLED);
+                mp_hal_stdout_tx_str("PYB: enabling IRQs\r\n");
+            }
+        }
+        #endif
+
+        vstr_reset(&line);
+        int ret = readline(&line, ">>> ");
+        mp_parse_input_kind_t parse_input_kind = MP_PARSE_SINGLE_INPUT;
+
+        if (ret == CHAR_CTRL_A) {
+            // change to raw REPL
+            mp_hal_stdout_tx_str("\r\n");
+            vstr_clear(&line);
+            pyexec_mode_kind = PYEXEC_MODE_RAW_REPL;
+            return 0;
+        } else if (ret == CHAR_CTRL_B) {
+            // reset friendly REPL
+            mp_hal_stdout_tx_str("\r\n");
+            goto friendly_repl_reset;
+        } else if (ret == CHAR_CTRL_C) {
+            // break
+            mp_hal_stdout_tx_str("\r\n");
+            continue;
+        } else if (ret == CHAR_CTRL_D) {
+            // exit for a soft reset
+            mp_hal_stdout_tx_str("\r\n");
+            vstr_clear(&line);
+            return PYEXEC_FORCED_EXIT;
+        } else if (ret == CHAR_CTRL_E) {
+            // paste mode: accept raw text until ctrl-D, parse as whole file
+            mp_hal_stdout_tx_str("\r\npaste mode; Ctrl-C to cancel, Ctrl-D to finish\r\n=== ");
+            vstr_reset(&line);
+            for (;;) {
+                char c = mp_hal_stdin_rx_chr();
+                if (c == CHAR_CTRL_C) {
+                    // cancel everything
+                    mp_hal_stdout_tx_str("\r\n");
+                    goto input_restart;
+                } else if (c == CHAR_CTRL_D) {
+                    // end of input
+                    mp_hal_stdout_tx_str("\r\n");
+                    break;
+                } else {
+                    // add char to buffer and echo
+                    vstr_add_byte(&line, c);
+                    if (c == '\r') {
+                        mp_hal_stdout_tx_str("\r\n=== ");
+                    } else {
+                        mp_hal_stdout_tx_strn(&c, 1);
+                    }
+                }
+            }
+            parse_input_kind = MP_PARSE_FILE_INPUT;
+        } else if (vstr_len(&line) == 0) {
+            continue;
+        } else {
+            // got a line with non-zero length, see if it needs continuing
+            while (mp_repl_continue_with_input(vstr_null_terminated_str(&line))) {
+                vstr_add_byte(&line, '\n');
+                ret = readline(&line, "... ");
+                if (ret == CHAR_CTRL_C) {
+                    // cancel everything
+                    mp_hal_stdout_tx_str("\r\n");
+                    goto input_restart;
+                } else if (ret == CHAR_CTRL_D) {
+                    // stop entering compound statement
+                    break;
+                }
+            }
+        }
+
+        // compile and run the collected input
+        mp_lexer_t *lex = mp_lexer_new_from_str_len(MP_QSTR__lt_stdin_gt_, vstr_str(&line), vstr_len(&line), 0);
+        if (lex == NULL) {
+            printf("MemoryError\n");
+        } else {
+            ret = parse_compile_execute(lex, parse_input_kind, EXEC_FLAG_ALLOW_DEBUGGING | EXEC_FLAG_IS_REPL);
+            if (ret & PYEXEC_FORCED_EXIT) {
+                return ret;
+            }
+        }
+    }
+}
+
+#endif // MICROPY_REPL_EVENT_DRIVEN
+
+// Run the Python source file `filename` as a whole-module script.
+// Returns parse_compile_execute's result, or false (0) when the file
+// cannot be opened.
+int pyexec_file(const char *filename) {
+    mp_lexer_t *lex = mp_lexer_new_from_file(filename);
+    if (lex != NULL) {
+        return parse_compile_execute(lex, MP_PARSE_FILE_INPUT, 0);
+    }
+    printf("could not open file '%s' for reading\n", filename);
+    return false;
+}
+
+#if MICROPY_MODULE_FROZEN
+// Execute a frozen (compiled-in) module by name.
+// Returns parse_compile_execute's result, or false (0) when the module
+// is not found.
+int pyexec_frozen_module(const char *name) {
+    mp_lexer_t *lex = mp_find_frozen_module(name, strlen(name));
+    if (lex != NULL) {
+        return parse_compile_execute(lex, MP_PARSE_FILE_INPUT, 0);
+    }
+    printf("could not find module '%s'\n", name);
+    return false;
+}
+#endif
+
+// Python binding: enable/disable printing of post-command debugging info
+// (timing, qstr and GC stats) by setting repl_display_debugging_info.
+mp_obj_t pyb_set_repl_info(mp_obj_t o_value) {
+    repl_display_debugging_info = mp_obj_get_int(o_value);
+    return mp_const_none;
+}
+
+MP_DEFINE_CONST_FUN_OBJ_1(pyb_set_repl_info_obj, pyb_set_repl_info);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/utils/pyexec.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,49 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_LIB_UTILS_PYEXEC_H__
+#define __MICROPY_INCLUDED_LIB_UTILS_PYEXEC_H__
+
+// Which REPL protocol is active.
+typedef enum {
+    PYEXEC_MODE_RAW_REPL,      // machine-oriented protocol (entered via ctrl-A)
+    PYEXEC_MODE_FRIENDLY_REPL, // interactive prompt for humans
+} pyexec_mode_kind_t;
+
+extern pyexec_mode_kind_t pyexec_mode_kind;
+
+// Bits set in pyexec_* return values.
+#define PYEXEC_FORCED_EXIT (0x100)
+#define PYEXEC_SWITCH_MODE (0x200)
+
+int pyexec_raw_repl(void);
+int pyexec_friendly_repl(void);
+int pyexec_file(const char *filename);
+int pyexec_frozen_module(const char *name);
+void pyexec_event_repl_init(void);
+int pyexec_event_repl_process_char(int c);
+// non-zero while a REPL character is being processed (event-driven mode)
+extern uint8_t pyexec_repl_active;
+
+MP_DECLARE_CONST_FUN_OBJ(pyb_set_repl_info_obj);
+
+#endif // __MICROPY_INCLUDED_LIB_UTILS_PYEXEC_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/modmachine.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,65 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2015 Damien P. George
+ * Copyright (c) 2016 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+
+#include "py/obj.h"
+#include "py/runtime.h"
+#include "extmod/machine_mem.h"
+
+#if MICROPY_PY_MACHINE
+
+#include "cmsis.h"
+
+/** \brief Reset the board.
+ *
+ * Reset in a manner similar to pushing the external RESET button.
+ */
+STATIC mp_obj_t machine_reset(void) {
+    // Part of CMSIS API.
+    NVIC_SystemReset();
+    // the return satisfies the mp_obj_t signature; per the doc above the
+    // board resets here, so this line is not expected to execute
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_0(machine_reset_obj, machine_reset);
+
+// Module globals: module name, reset(), and the raw memory accessors
+// (mem8/mem16/mem32 objects declared in extmod/machine_mem.h).
+STATIC const mp_rom_map_elem_t machine_module_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_umachine) },
+    { MP_ROM_QSTR(MP_QSTR_reset), MP_ROM_PTR(&machine_reset_obj) },
+    { MP_ROM_QSTR(MP_QSTR_mem8), MP_ROM_PTR(&machine_mem8_obj) },
+    { MP_ROM_QSTR(MP_QSTR_mem16), MP_ROM_PTR(&machine_mem16_obj) },
+    { MP_ROM_QSTR(MP_QSTR_mem32), MP_ROM_PTR(&machine_mem32_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(machine_module_globals, machine_module_globals_table);
+
+// The umachine module object registered with the VM.
+const mp_obj_module_t mp_module_machine = {
+    .base = { &mp_type_module },
+    .name = MP_QSTR_umachine,
+    .globals = (mp_obj_dict_t*)&machine_module_globals,
+};
+
+#endif // MICROPY_PY_MACHINE
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/modmbed.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,106 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Colin Hogben
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+// High-level mbed objects
+
+#include "py/runtime.h"
+
+// Only if selected in mpconfigport.h
+#if MICROPY_PY_MBED
+
+#include "modmbed_i.h"
+
+#if MICROPY_MBED_DIGITALOUT
+//-----------------------------------------------------------------------
+// DigitalOut class
+//-----------------------------------------------------------------------
+// Forward declaration so make_new below can reference the type object.
+STATIC const mp_obj_type_t mbed_DigitalOut_type;
+
+// Python-level DigitalOut instance: wraps the opaque handle returned by
+// the C++ shim in modmbed_i.cpp.
+typedef struct _mbed_DigitalOut_obj_t {
+    mp_obj_base_t base;
+    void *dout; // opaque handle from mbed_DigitalOut__create()
+} mbed_DigitalOut_obj_t;
+
+// constructor DigitalOut(pin)
+// Takes exactly one positional argument, the integer pin name/number,
+// which is passed straight through to the C++ shim.
+STATIC mp_obj_t mbed_DigitalOut_make_new(const mp_obj_type_t *type,
+                                 mp_uint_t n_args, mp_uint_t n_kw,
+                                 const mp_obj_t *args)
+{
+    (void)type;
+    mp_arg_check_num(n_args, n_kw, 1, 1, false);
+    int pin = mp_obj_get_int(args[0]);
+
+    // NOTE(review): allocated with a finaliser slot, but no __del__ is
+    // registered in the locals dict and the underlying C++ DigitalOut is
+    // never deleted -- confirm whether the handle should be freed on GC.
+    mbed_DigitalOut_obj_t *o = m_new_obj_with_finaliser(mbed_DigitalOut_obj_t);
+    o->base.type = (mp_obj_t)&mbed_DigitalOut_type;
+    o->dout = mbed_DigitalOut__create(pin);
+
+    return o;
+}
+
+// DigitalOut.write(value): drive the pin to the given integer level via
+// the C++ shim.
+STATIC mp_obj_t mbed_DigitalOut_write(mp_obj_t self_in, mp_obj_t value_in)
+{
+    mbed_DigitalOut_obj_t *self = self_in;
+    mbed_DigitalOut__write(self->dout, mp_obj_get_int(value_in));
+    return mp_const_none;
+}
+
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(mbed_DigitalOut_write_obj,
+                                 mbed_DigitalOut_write);
+
+// Method table for DigitalOut instances (currently just write()).
+STATIC const mp_map_elem_t mbed_DigitalOut_locals_dict_table[] = {
+    { MP_OBJ_NEW_QSTR(MP_QSTR_write), (mp_obj_t)&mbed_DigitalOut_write_obj },
+};
+
+STATIC MP_DEFINE_CONST_DICT(mbed_DigitalOut_locals_dict,
+                            mbed_DigitalOut_locals_dict_table);
+
+// Type object for mbed.DigitalOut.
+STATIC const mp_obj_type_t mbed_DigitalOut_type = {
+    { &mp_type_type },
+    .name = MP_QSTR_DigitalOut,
+    // .print
+    .make_new = mbed_DigitalOut_make_new,
+    .locals_dict = (mp_obj_t)&mbed_DigitalOut_locals_dict,
+};
+
+#endif // MICROPY_MBED_DIGITALOUT
+
+// Module
+// Top-level `mbed` module globals: exposes DigitalOut when enabled.
+STATIC const mp_rom_map_elem_t mp_mbed_module_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_mbed) },
+#if MICROPY_MBED_DIGITALOUT
+    { MP_ROM_QSTR(MP_QSTR_DigitalOut), MP_ROM_PTR(&mbed_DigitalOut_type) },
+#endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_mbed_module_globals,
+                            mp_mbed_module_globals_table);
+
+// The mbed module object registered with the VM.
+const mp_obj_module_t mp_module_mbed = {
+    .base = { &mp_type_module },
+    .name = MP_QSTR_mbed,
+    .globals = (mp_obj_dict_t *)&mp_mbed_module_globals,
+};
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/modmbed_i.cpp Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,53 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Colin Hogben
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * Shims to isolate the pure C world of micropython from the C++ world
+ * of mbed high-level objects. Maybe there's a way to write the module
+ * directly in a compatible way but my C++-fu is not that strong.
+ */
+extern "C" {
+ #include "py/mpconfig.h"
+}
+
+#if MICROPY_PY_MBED
+
+#include "mbed.h"
+extern "C" {
+ #include "modmbed_i.h"
+}
+
+// Allocate a C++ mbed DigitalOut for the given pin and hand it back to the
+// C side as an opaque pointer.
+void *mbed_DigitalOut__create(int pin)
+{
+    return static_cast<void *>(new DigitalOut(static_cast<PinName>(pin)));
+}
+
+// Drive the pin wrapped by the opaque handle to `value` using DigitalOut's
+// assignment operator.
+void mbed_DigitalOut__write(void *self, int value)
+{
+    DigitalOut &out = *static_cast<DigitalOut *>(self);
+    out = value;
+}
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/modmbed_i.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,26 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2016 Colin Hogben + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +extern void *mbed_DigitalOut__create(int pin); +extern void mbed_DigitalOut__write(void *self, int value);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mpconfigport.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,130 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2016 Colin Hogben
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include <stdint.h>
+
+// options to control how Micro Python is built
+
+// The mbed online compiler uses different assembler syntax, so avoid
+// the hand-written assembler NLR implementations and use setjmp/longjmp.
+#define MICROPY_NLR_SETJMP 1
+
+// Most optional language features and builtin modules are disabled
+// below to keep the build small on the target.
+#define MICROPY_ALLOC_PATH_MAX (256)
+#define MICROPY_ALLOC_PARSE_CHUNK_INIT (16)
+#define MICROPY_EMIT_X64 (0)
+#define MICROPY_EMIT_THUMB (0)
+#define MICROPY_EMIT_INLINE_THUMB (0)
+#define MICROPY_COMP_MODULE_CONST (0)
+#define MICROPY_COMP_CONST (0)
+#define MICROPY_COMP_DOUBLE_TUPLE_ASSIGN (0)
+#define MICROPY_COMP_TRIPLE_TUPLE_ASSIGN (0)
+#define MICROPY_MEM_STATS (0)
+#define MICROPY_DEBUG_PRINTERS (0)
+#define MICROPY_ENABLE_GC (0)
+#define MICROPY_REPL_EVENT_DRIVEN (0)
+#define MICROPY_HELPER_REPL (1)
+#define MICROPY_HELPER_LEXER_UNIX (0)
+#define MICROPY_ENABLE_SOURCE_LINE (0)
+#define MICROPY_ENABLE_DOC_STRING (0)
+#define MICROPY_ERROR_REPORTING (MICROPY_ERROR_REPORTING_TERSE)
+#define MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG (0)
+#define MICROPY_PY_BUILTINS_BYTEARRAY (0)
+#define MICROPY_PY_BUILTINS_MEMORYVIEW (0)
+#define MICROPY_PY_BUILTINS_ENUMERATE (0)
+#define MICROPY_PY_BUILTINS_FILTER (0)
+#define MICROPY_PY_BUILTINS_FROZENSET (0)
+#define MICROPY_PY_BUILTINS_REVERSED (0)
+#define MICROPY_PY_BUILTINS_SET (0)
+#define MICROPY_PY_BUILTINS_SLICE (0)
+#define MICROPY_PY_BUILTINS_PROPERTY (0)
+#define MICROPY_PY_BUILTINS_MIN_MAX (0)
+#define MICROPY_PY___FILE__ (0)
+#define MICROPY_PY_GC (0)
+#define MICROPY_PY_ARRAY (0)
+#define MICROPY_PY_ATTRTUPLE (0)
+#define MICROPY_PY_COLLECTIONS (0)
+#define MICROPY_PY_MATH (0)
+#define MICROPY_PY_CMATH (0)
+#define MICROPY_PY_IO (0)
+#define MICROPY_PY_STRUCT (1)
+#define MICROPY_PY_SYS (1)
+// Build "machine" module (port-specific)
+#define MICROPY_PY_MACHINE 1
+// Build "mbed" module
+#define MICROPY_PY_MBED 1
+// Select which mbed features wanted
+#define MICROPY_MBED_DIGITALOUT 1
+
+#define MICROPY_MODULE_FROZEN (0)
+#define MICROPY_CPYTHON_COMPAT (0)
+// No arbitrary-precision integers and no floating point in this build.
+#define MICROPY_LONGINT_IMPL (MICROPY_LONGINT_IMPL_NONE)
+#define MICROPY_FLOAT_IMPL (MICROPY_FLOAT_IMPL_NONE)
+
+// type definitions for the specific machine
+
+#define BYTES_PER_WORD (4)
+#define MP_ENDIANNESS_LITTLE 1
+
+// NOTE(review): setting bit 0 of a function pointer presumably marks it
+// as Thumb code so calls stay in the correct execution state — confirm
+// against the target toolchain.
+#define MICROPY_MAKE_POINTER_CALLABLE(p) ((void*)((mp_uint_t)(p) | 1))
+
+// This port is intended to be 32-bit, but unfortunately, int32_t for
+// different targets may be defined in different ways - either as int
+// or as long. This requires different printf formatting specifiers
+// to print such value. So, we avoid int32_t and use int directly.
+#define UINT_FMT "%u"
+#define INT_FMT "%d"
+typedef int mp_int_t; // must be pointer size
+typedef unsigned mp_uint_t; // must be pointer size
+
+typedef void *machine_ptr_t; // must be of pointer size
+typedef const void *machine_const_ptr_t; // must be of pointer size
+typedef long mp_off_t;
+
+// Route all interpreter output through the cooked ("\n" -> "\r\n")
+// writer defined in mphalport.c.
+#define MP_PLAT_PRINT_STRN(str, len) mp_hal_stdout_tx_strn_cooked(str, len)
+
+#if 0
+// extra built in names to add to the global namespace
+extern const struct _mp_obj_fun_builtin_t mp_builtin_open_obj;
+#define MICROPY_PORT_BUILTINS \
+    { MP_OBJ_NEW_QSTR(MP_QSTR_open), (mp_obj_t)&mp_builtin_open_obj },
+#endif
+
+// We need to provide a declaration/definition of alloca()
+#include <alloca.h>
+
+#define MICROPY_HW_BOARD_NAME "mbed"
+#define MICROPY_HW_MCU_NAME "arm"
+
+#define MP_STATE_PORT MP_STATE_VM
+
+// Extra modules to build in
+extern const struct _mp_obj_module_t mp_module_mbed;
+#define MICROPY_PORT_BUILTIN_MODULES \
+    { MP_ROM_QSTR(MP_QSTR_machine), MP_ROM_PTR(&mp_module_machine) }, \
+    { MP_ROM_QSTR(MP_QSTR_mbed), MP_ROM_PTR(&mp_module_mbed) }, \
+
+// Used by readline.c
+#define MICROPY_PORT_ROOT_POINTERS \
+    const char *readline_hist[8];
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mphalport.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,92 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Colin Hogben
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * Hardware Abstraction Layer.
+ *
+ * Implementations or stubs for functions provided by the specific port.
+ * Note that two functions are delegated to the user of the library:
+ * - mp_hal_stdout_tx_chr
+ * - mp_hal_stdin_rx_chr
+ */
+#include <string.h>
+#include "py/mpstate.h"
+#include "py/lexer.h"
+
+// Since this port has no filesystem, every import lookup reports that
+// the path does not exist.
+mp_import_stat_t mp_import_stat(const char *path)
+{
+    (void)path; // nothing to stat
+    return MP_IMPORT_STAT_NO_EXIST;
+}
+
+// No filesystem, so a lexer can never be created from a file name;
+// returning NULL signals failure to the caller.
+mp_lexer_t *mp_lexer_new_from_file(const char *filename)
+{
+    (void)filename; // unused: there are no files to open
+    return NULL;
+}
+
+// Stub for the builtin open(): with no filesystem it always returns
+// None. NOTE(review): callers expecting a file object or an OSError
+// will silently get None instead — confirm this is intended.
+mp_obj_t mp_builtin_open(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_open_obj, 1, mp_builtin_open);
+
+// Single-character output is supplied by the user of the library (see
+// the header comment of this file).
+void mp_hal_stdout_tx_chr(char c);
+
+// Text output: send len bytes of str, one character at a time.
+void mp_hal_stdout_tx_strn(const char *str, size_t len) {
+    for (size_t i = 0; i < len; i++) {
+        mp_hal_stdout_tx_chr(str[i]);
+    }
+}
+
+// As mp_hal_stdout_tx_strn, but "cooked": each "\n" is expanded to
+// "\r\n" so output displays correctly on serial terminals.
+void mp_hal_stdout_tx_strn_cooked(const char *str, size_t len) {
+    for (size_t i = 0; i < len; i++) {
+        char ch = str[i];
+        if (ch == '\n') {
+            mp_hal_stdout_tx_chr('\r');
+        }
+        mp_hal_stdout_tx_chr(ch);
+    }
+}
+
+// Send a NUL-terminated string (raw: newlines are not translated here).
+void mp_hal_stdout_tx_str(const char *str) {
+    mp_hal_stdout_tx_strn(str, strlen(str));
+}
+
+// Stub: interrupt (KeyboardInterrupt) character handling not implemented.
+void mp_hal_set_interrupt_char(int c)
+{
+}
+
+// Stub millisecond tick counter. NOTE(review): always returning 0 means
+// any code measuring elapsed time sees no progress — wire this to an
+// mbed timer if time functions are ever needed.
+mp_uint_t mp_hal_ticks_ms(void) {
+    return 0;
+}
+
+// Stub delay: returns immediately without waiting.
+void mp_hal_delay_ms(mp_uint_t ms) {
+}
+
+// Garbage-collector hooks: GC is disabled in this build (see
+// MICROPY_ENABLE_GC in mpconfigport.h), so these are no-ops.
+void gc_collect(void) {}
+#if ! MICROPY_ENABLE_GC
+void gc_dump_info(void) {}
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mphalport.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,1 @@ +// Placeholder for the moment \ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/argcheck.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,151 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/runtime.h"
+
+// Validate the argument counts passed to a function.
+//   n_args     - number of positional arguments actually given
+//   n_kw       - number of keyword arguments actually given
+//   n_args_min - minimum accepted number of positional arguments
+//   n_args_max - maximum accepted number of positional arguments
+//   takes_kw   - whether keyword arguments are permitted at all
+// Raises TypeError on any mismatch; with MICROPY_ERROR_REPORTING_TERSE
+// (the setting in this port) a shared short message is used instead of
+// the detailed ones.
+// NOTE(review): the detailed messages pass size_t values to "%d"; fine
+// on this 32-bit port where int and size_t are the same width, but not
+// portable — confirm before reusing elsewhere.
+void mp_arg_check_num(size_t n_args, size_t n_kw, size_t n_args_min, size_t n_args_max, bool takes_kw) {
+    // TODO maybe take the function name as an argument so we can print nicer error messages
+
+    if (n_kw && !takes_kw) {
+        if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+            mp_arg_error_terse_mismatch();
+        } else {
+            nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+                "function does not take keyword arguments"));
+        }
+    }
+
+    if (n_args_min == n_args_max) {
+        // exact number of positional arguments required
+        if (n_args != n_args_min) {
+            if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+                mp_arg_error_terse_mismatch();
+            } else {
+                nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+                    "function takes %d positional arguments but %d were given",
+                    n_args_min, n_args));
+            }
+        }
+    } else {
+        // a range of positional argument counts is accepted
+        if (n_args < n_args_min) {
+            if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+                mp_arg_error_terse_mismatch();
+            } else {
+                nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+                    "function missing %d required positional arguments",
+                    n_args_min - n_args));
+            }
+        } else if (n_args > n_args_max) {
+            if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+                mp_arg_error_terse_mismatch();
+            } else {
+                nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+                    "function expected at most %d arguments, got %d",
+                    n_args_max, n_args));
+            }
+        }
+    }
+}
+
+// Parse positional and keyword arguments against a table of descriptors.
+//   n_pos/pos  - positional arguments actually given
+//   kws        - map of keyword arguments actually given
+//   n_allowed/allowed - descriptor per accepted argument: its qstr name,
+//                flags (MP_ARG_REQUIRED, MP_ARG_KW_ONLY, kind mask) and
+//                default value
+//   out_vals   - one parsed value written per descriptor, converted per
+//                the descriptor kind (BOOL/INT/OBJ)
+// Raises TypeError for a missing required argument, for a positional
+// argument supplied to a KW_ONLY slot (via the goto into the error
+// branch below), or for surplus positional/keyword arguments.
+void mp_arg_parse_all(size_t n_pos, const mp_obj_t *pos, mp_map_t *kws, size_t n_allowed, const mp_arg_t *allowed, mp_arg_val_t *out_vals) {
+    size_t pos_found = 0, kws_found = 0;
+    for (size_t i = 0; i < n_allowed; i++) {
+        mp_obj_t given_arg;
+        if (i < n_pos) {
+            if (allowed[i].flags & MP_ARG_KW_ONLY) {
+                // positional value supplied for a keyword-only slot
+                goto extra_positional;
+            }
+            pos_found++;
+            given_arg = pos[i];
+        } else {
+            mp_map_elem_t *kw = mp_map_lookup(kws, MP_OBJ_NEW_QSTR(allowed[i].qst), MP_MAP_LOOKUP);
+            if (kw == NULL) {
+                if (allowed[i].flags & MP_ARG_REQUIRED) {
+                    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+                        mp_arg_error_terse_mismatch();
+                    } else {
+                        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+                            "'%q' argument required", allowed[i].qst));
+                    }
+                }
+                // not given: fall back to the descriptor's default
+                out_vals[i] = allowed[i].defval;
+                continue;
+            } else {
+                kws_found++;
+                given_arg = kw->value;
+            }
+        }
+        // convert the given object according to the descriptor kind
+        if ((allowed[i].flags & MP_ARG_KIND_MASK) == MP_ARG_BOOL) {
+            out_vals[i].u_bool = mp_obj_is_true(given_arg);
+        } else if ((allowed[i].flags & MP_ARG_KIND_MASK) == MP_ARG_INT) {
+            out_vals[i].u_int = mp_obj_get_int(given_arg);
+        } else if ((allowed[i].flags & MP_ARG_KIND_MASK) == MP_ARG_OBJ) {
+            out_vals[i].u_obj = given_arg;
+        } else {
+            assert(0);
+        }
+    }
+    // any positional arguments not consumed above are surplus
+    if (pos_found < n_pos) {
+    extra_positional:
+        if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+            mp_arg_error_terse_mismatch();
+        } else {
+            // TODO better error message
+            nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+                "extra positional arguments given"));
+        }
+    }
+    // any keyword arguments not consumed above are surplus
+    if (kws_found < kws->used) {
+        if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+            mp_arg_error_terse_mismatch();
+        } else {
+            // TODO better error message
+            nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+                "extra keyword arguments given"));
+        }
+    }
+}
+
+// Convenience wrapper around mp_arg_parse_all for the calling convention
+// where keyword arguments are a flat (key, value) array that immediately
+// follows the positional arguments.
+void mp_arg_parse_all_kw_array(size_t n_pos, size_t n_kw, const mp_obj_t *args, size_t n_allowed, const mp_arg_t *allowed, mp_arg_val_t *out_vals) {
+    mp_map_t kw_map;
+    mp_map_init_fixed_table(&kw_map, n_kw, args + n_pos);
+    mp_arg_parse_all(n_pos, args, &kw_map, n_allowed, allowed, out_vals);
+}
+
+// Shared out-of-line TypeError raisers, kept small to save code space.
+// NOTE(review): a bare _MSC_VER in #if evaluates to 0 when undefined, so
+// this works, but 'defined(_MSC_VER)' would state the intent more clearly.
+#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE || _MSC_VER
+NORETURN void mp_arg_error_terse_mismatch(void) {
+    nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "argument num/types mismatch"));
+}
+#endif
+
+#if MICROPY_CPYTHON_COMPAT
+NORETURN void mp_arg_error_unimpl_kw(void) {
+    mp_not_implemented("keyword argument(s) not yet implemented - use normal args instead");
+}
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/asmarm.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,464 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Fabian Vogt
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_ARM
+
+#include "py/asmarm.h"
+
+// True when x fits in a signed 24-bit field (the ARM branch offset).
+// NOTE(review): the expansion is not wrapped in outer parentheses; safe
+// in the single bare-if use below, but fragile if reused in a larger
+// expression.
+#define SIGNED_FIT24(x) (((x) & 0xff800000) == 0) || (((x) & 0xff000000) == 0xff000000)
+
+// State for the two-pass ARM assembler: the compute pass measures code
+// size and records label offsets; the emit pass writes the machine code.
+struct _asm_arm_t {
+    uint pass;             // current pass: ASM_ARM_PASS_COMPUTE or _EMIT
+    mp_uint_t code_offset; // current write position in the code buffer
+    mp_uint_t code_size;   // total size, fixed by the compute pass
+    byte *code_base;       // executable buffer, allocated for the emit pass
+    byte dummy_data[4];    // scratch sink for writes during the compute pass
+
+    mp_uint_t max_num_labels;
+    mp_uint_t *label_offsets; // per-label code offset; -1 until assigned
+    uint push_reglist;        // registers saved by the prologue
+    uint stack_adjust;        // bytes reserved on the stack for locals
+};
+
+// Allocate a new assembler able to track up to max_num_labels labels.
+asm_arm_t *asm_arm_new(uint max_num_labels) {
+    asm_arm_t *as;
+
+    as = m_new0(asm_arm_t, 1);
+    as->max_num_labels = max_num_labels;
+    as->label_offsets = m_new(mp_uint_t, max_num_labels);
+
+    return as;
+}
+
+// Free the assembler state; if free_code is set, also release the
+// emitted executable buffer.
+void asm_arm_free(asm_arm_t *as, bool free_code) {
+    if (free_code) {
+        MP_PLAT_FREE_EXEC(as->code_base, as->code_size);
+    }
+    m_del(mp_uint_t, as->label_offsets, as->max_num_labels);
+    m_del_obj(asm_arm_t, as);
+}
+
+// Begin a pass and reset the write offset. The compute pass marks every
+// label offset as unassigned (-1, via byte-wise memset); the emit pass
+// allocates the executable buffer sized by the preceding compute pass.
+void asm_arm_start_pass(asm_arm_t *as, uint pass) {
+    if (pass == ASM_ARM_PASS_COMPUTE) {
+        memset(as->label_offsets, -1, as->max_num_labels * sizeof(mp_uint_t));
+    } else if (pass == ASM_ARM_PASS_EMIT) {
+        MP_PLAT_ALLOC_EXEC(as->code_offset, (void**)&as->code_base, &as->code_size);
+        if (as->code_base == NULL) {
+            assert(0);
+        }
+    }
+    as->pass = pass;
+    as->code_offset = 0;
+}
+
+// Finish a pass. After the emit pass, on an ARM host, flush the data
+// cache and invalidate the instruction cache so the freshly written
+// code can be executed.
+void asm_arm_end_pass(asm_arm_t *as) {
+    if (as->pass == ASM_ARM_PASS_EMIT) {
+#ifdef __arm__
+        // flush I- and D-cache
+        asm volatile(
+            "0:"
+            "mrc p15, 0, r15, c7, c10, 3\n"
+            "bne 0b\n"
+            "mov r0, #0\n"
+            "mcr p15, 0, r0, c7, c7, 0\n"
+            : : : "r0", "cc");
+#endif
+    }
+}
+
+// all functions must go through this one to emit bytes
+// if as->pass < ASM_ARM_PASS_EMIT, then this function only returns a buffer of 4 bytes length
+STATIC byte *asm_arm_get_cur_to_write_bytes(asm_arm_t *as, int num_bytes_to_write) {
+    if (as->pass < ASM_ARM_PASS_EMIT) {
+        // compute pass: only account for the size; callers may scribble
+        // on dummy_data (at most 4 bytes) with no effect
+        as->code_offset += num_bytes_to_write;
+        return as->dummy_data;
+    } else {
+        assert(as->code_offset + num_bytes_to_write <= as->code_size);
+        byte *c = as->code_base + as->code_offset;
+        as->code_offset += num_bytes_to_write;
+        return c;
+    }
+}
+
+// Current write offset within the code buffer.
+uint asm_arm_get_code_pos(asm_arm_t *as) {
+    return as->code_offset;
+}
+
+// Total code size as determined by the compute pass.
+uint asm_arm_get_code_size(asm_arm_t *as) {
+    return as->code_size;
+}
+
+// Base address of the emitted (executable) code buffer.
+void *asm_arm_get_code(asm_arm_t *as) {
+    return as->code_base;
+}
+
+// Insert word into instruction flow
+// NOTE(review): writes through a uint* cast of the byte buffer; relies
+// on 4-byte alignment of the current offset — confirm asm_arm_data is
+// never used with sizes that break that alignment.
+STATIC void emit(asm_arm_t *as, uint op) {
+    *(uint*)asm_arm_get_cur_to_write_bytes(as, 4) = op;
+}
+
+// Insert word into instruction flow, add "ALWAYS" condition code
+STATIC void emit_al(asm_arm_t *as, uint op) {
+    emit(as, op | ASM_ARM_CC_AL);
+}
+
+// Basic instructions without condition code
+// Each encoder returns the raw 32-bit opcode with the condition field
+// left clear; emit_al() ORs in the AL (always) condition.
+STATIC uint asm_arm_op_push(uint reglist) {
+    // stmfd sp!, {reglist}
+    return 0x92d0000 | (reglist & 0xFFFF);
+}
+
+STATIC uint asm_arm_op_pop(uint reglist) {
+    // ldmfd sp!, {reglist}
+    return 0x8bd0000 | (reglist & 0xFFFF);
+}
+
+STATIC uint asm_arm_op_mov_reg(uint rd, uint rn) {
+    // mov rd, rn
+    return 0x1a00000 | (rd << 12) | rn;
+}
+
+STATIC uint asm_arm_op_mov_imm(uint rd, uint imm) {
+    // mov rd, #imm
+    return 0x3a00000 | (rd << 12) | imm;
+}
+
+STATIC uint asm_arm_op_mvn_imm(uint rd, uint imm) {
+    // mvn rd, #imm
+    return 0x3e00000 | (rd << 12) | imm;
+}
+
+STATIC uint asm_arm_op_add_imm(uint rd, uint rn, uint imm) {
+    // add rd, rn, #imm  (immediate truncated to 8 bits)
+    return 0x2800000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
+}
+
+STATIC uint asm_arm_op_add_reg(uint rd, uint rn, uint rm) {
+    // add rd, rn, rm
+    return 0x0800000 | (rn << 16) | (rd << 12) | rm;
+}
+
+STATIC uint asm_arm_op_sub_imm(uint rd, uint rn, uint imm) {
+    // sub rd, rn, #imm  (immediate truncated to 8 bits)
+    return 0x2400000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
+}
+
+STATIC uint asm_arm_op_sub_reg(uint rd, uint rn, uint rm) {
+    // sub rd, rn, rm
+    return 0x0400000 | (rn << 16) | (rd << 12) | rm;
+}
+
+STATIC uint asm_arm_op_mul_reg(uint rd, uint rm, uint rs) {
+    // mul rd, rm, rs  (architectural restriction: rd must differ from rm)
+    assert(rd != rm);
+    return 0x0000090 | (rd << 16) | (rs << 8) | rm;
+}
+
+STATIC uint asm_arm_op_and_reg(uint rd, uint rn, uint rm) {
+    // and rd, rn, rm
+    return 0x0000000 | (rn << 16) | (rd << 12) | rm;
+}
+
+STATIC uint asm_arm_op_eor_reg(uint rd, uint rn, uint rm) {
+    // eor rd, rn, rm
+    return 0x0200000 | (rn << 16) | (rd << 12) | rm;
+}
+
+STATIC uint asm_arm_op_orr_reg(uint rd, uint rn, uint rm) {
+    // orr rd, rn, rm
+    return 0x1800000 | (rn << 16) | (rd << 12) | rm;
+}
+
+// Emit a breakpoint instruction (useful when debugging emitted code).
+void asm_arm_bkpt(asm_arm_t *as) {
+    // bkpt #0
+    emit_al(as, 0x1200070);
+}
+
+// locals:
+//  - stored on the stack in ascending order
+//  - numbered 0 through num_locals-1
+//  - SP points to first local
+//
+//  | SP
+//  v
+//  l0  l1  l2  ...  l(n-1)
+//  ^                ^
+//  | low address    | high address in RAM
+
+// Emit the function prologue: save the working registers plus LR, and
+// reserve 8-byte-aligned stack space for locals when needed.
+void asm_arm_entry(asm_arm_t *as, int num_locals) {
+
+    if (num_locals < 0) {
+        num_locals = 0;
+    }
+
+    as->stack_adjust = 0;
+    // Callee workspace saved across the function: R1-R8 (and LR below).
+    as->push_reglist = 1 << ASM_ARM_REG_R1
+        | 1 << ASM_ARM_REG_R2
+        | 1 << ASM_ARM_REG_R3
+        | 1 << ASM_ARM_REG_R4
+        | 1 << ASM_ARM_REG_R5
+        | 1 << ASM_ARM_REG_R6
+        | 1 << ASM_ARM_REG_R7
+        | 1 << ASM_ARM_REG_R8;
+
+    // Only adjust the stack if there are more locals than usable registers
+    if (num_locals > 3) {
+        as->stack_adjust = num_locals * 4;
+        // Align stack to 8 bytes
+        if (num_locals & 1) {
+            as->stack_adjust += 4;
+        }
+    }
+
+    emit_al(as, asm_arm_op_push(as->push_reglist | 1 << ASM_ARM_REG_LR));
+    if (as->stack_adjust > 0) {
+        emit_al(as, asm_arm_op_sub_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
+    }
+}
+
+// Emit the epilogue: undo the stack adjustment and pop the saved
+// registers, restoring LR directly into PC to return.
+void asm_arm_exit(asm_arm_t *as) {
+    if (as->stack_adjust > 0) {
+        emit_al(as, asm_arm_op_add_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
+    }
+
+    emit_al(as, asm_arm_op_pop(as->push_reglist | (1 << ASM_ARM_REG_PC)));
+}
+
+// Push an arbitrary register list onto the stack.
+void asm_arm_push(asm_arm_t *as, uint reglist) {
+    emit_al(as, asm_arm_op_push(reglist));
+}
+
+// Pop an arbitrary register list from the stack.
+void asm_arm_pop(asm_arm_t *as, uint reglist) {
+    emit_al(as, asm_arm_op_pop(reglist));
+}
+
+// Bind a label to the current code offset. The compute pass records the
+// offset; the emit pass asserts it has not moved between passes.
+void asm_arm_label_assign(asm_arm_t *as, uint label) {
+    assert(label < as->max_num_labels);
+    if (as->pass < ASM_ARM_PASS_EMIT) {
+        // assign label offset
+        assert(as->label_offsets[label] == -1);
+        as->label_offsets[label] = as->code_offset;
+    } else {
+        // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT
+        assert(as->label_offsets[label] == as->code_offset);
+    }
+}
+
+// Round the write offset up to the given power-of-two alignment.
+void asm_arm_align(asm_arm_t* as, uint align) {
+    // TODO fill unused data with NOPs?
+    as->code_offset = (as->code_offset + align - 1) & (~(align - 1));
+}
+
+// Append 'bytesize' bytes of raw (little-endian) data to the code.
+void asm_arm_data(asm_arm_t* as, uint bytesize, uint val) {
+    byte *c = asm_arm_get_cur_to_write_bytes(as, bytesize);
+    // only write to the buffer in the emit pass (otherwise we overflow dummy_data)
+    if (as->pass == ASM_ARM_PASS_EMIT) {
+        // little endian
+        for (uint i = 0; i < bytesize; i++) {
+            *c++ = val;
+            val >>= 8;
+        }
+    }
+}
+
+// Register-to-register move.
+void asm_arm_mov_reg_reg(asm_arm_t *as, uint reg_dest, uint reg_src) {
+    emit_al(as, asm_arm_op_mov_reg(reg_dest, reg_src));
+}
+
+// Load a 32-bit immediate: small positive values use mov, small negative
+// ones use mvn, anything else is embedded as a literal word after a
+// branch that skips over it.
+void asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm) {
+    // TODO: There are more variants of immediate values
+    if ((imm & 0xFF) == imm) {
+        emit_al(as, asm_arm_op_mov_imm(rd, imm));
+    } else if (imm < 0 && imm >= -256) {
+        // mvn is "move not", not "move negative"
+        emit_al(as, asm_arm_op_mvn_imm(rd, ~imm));
+    } else {
+        //Insert immediate into code and jump over it
+        emit_al(as, 0x59f0000 | (rd << 12)); // ldr rd, [pc]
+        emit_al(as, 0xa000000); // b pc
+        emit(as, imm);
+    }
+}
+
+// Store a register into stack local slot 'local_num'.
+void asm_arm_mov_local_reg(asm_arm_t *as, int local_num, uint rd) {
+    // str rd, [sp, #local_num*4]
+    emit_al(as, 0x58d0000 | (rd << 12) | (local_num << 2));
+}
+
+// Load a register from stack local slot 'local_num'.
+void asm_arm_mov_reg_local(asm_arm_t *as, uint rd, int local_num) {
+    // ldr rd, [sp, #local_num*4]
+    emit_al(as, 0x59d0000 | (rd << 12) | (local_num << 2));
+}
+
+// Compare a register against an 8-bit immediate.
+void asm_arm_cmp_reg_i8(asm_arm_t *as, uint rd, int imm) {
+    // cmp rd, #imm
+    emit_al(as, 0x3500000 | (rd << 16) | (imm & 0xFF));
+}
+
+// Compare two registers.
+void asm_arm_cmp_reg_reg(asm_arm_t *as, uint rd, uint rn) {
+    // cmp rd, rn
+    emit_al(as, 0x1500000 | (rd << 16) | rn);
+}
+
+// Materialise a condition as 0/1 in rd: a conditional mov of 1 followed
+// by the inverse-condition mov of 0 (flipping bit 28 inverts an ARM
+// condition code).
+void asm_arm_setcc_reg(asm_arm_t *as, uint rd, uint cond) {
+    emit(as, asm_arm_op_mov_imm(rd, 1) | cond); // movCOND rd, #1
+    emit(as, asm_arm_op_mov_imm(rd, 0) | (cond ^ (1 << 28))); // mov!COND rd, #0
+}
+
+// Three-register ALU operations; thin wrappers that add the AL
+// condition to the raw encoders above.
+void asm_arm_add_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
+    // add rd, rn, rm
+    emit_al(as, asm_arm_op_add_reg(rd, rn, rm));
+}
+
+void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
+    // sub rd, rn, rm
+    emit_al(as, asm_arm_op_sub_reg(rd, rn, rm));
+}
+
+void asm_arm_mul_reg_reg_reg(asm_arm_t *as, uint rd, uint rs, uint rm) {
+    // rs and rm are swapped because of restriction rd!=rm
+    // mul rd, rm, rs
+    emit_al(as, asm_arm_op_mul_reg(rd, rm, rs));
+}
+
+void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
+    // and rd, rn, rm
+    emit_al(as, asm_arm_op_and_reg(rd, rn, rm));
+}
+
+void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
+    // eor rd, rn, rm
+    emit_al(as, asm_arm_op_eor_reg(rd, rn, rm));
+}
+
+void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
+    // orr rd, rn, rm
+    emit_al(as, asm_arm_op_orr_reg(rd, rn, rm));
+}
+
+// Load the address of a stack local into rd.
+void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num) {
+    // add rd, sp, #local_num*4
+    emit_al(as, asm_arm_op_add_imm(rd, ASM_ARM_REG_SP, local_num << 2));
+}
+
+// In-place logical shift left of rd by the amount in rs.
+void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs) {
+    // mov rd, rd, lsl rs
+    emit_al(as, 0x1a00010 | (rd << 12) | (rs << 8) | rd);
+}
+
+// In-place arithmetic shift right of rd by the amount in rs.
+void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
+    // mov rd, rd, asr rs
+    emit_al(as, 0x1a00050 | (rd << 12) | (rs << 8) | rd);
+}
+
+// Loads and stores; the _reg_reg forms use an immediate byte offset,
+// the _reg_reg_reg forms index by a register (scaled for word stores).
+void asm_arm_ldr_reg_reg(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
+    // ldr rd, [rn, #off]
+    emit_al(as, 0x5900000 | (rn << 16) | (rd << 12) | byte_offset);
+}
+
+void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn) {
+    // ldrh rd, [rn]
+    emit_al(as, 0x1d000b0 | (rn << 16) | (rd << 12));
+}
+
+void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn) {
+    // ldrb rd, [rn]
+    emit_al(as, 0x5d00000 | (rn << 16) | (rd << 12));
+}
+
+void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset) {
+    // str rd, [rm, #off]
+    emit_al(as, 0x5800000 | (rm << 16) | (rd << 12) | byte_offset);
+}
+
+void asm_arm_strh_reg_reg(asm_arm_t *as, uint rd, uint rm) {
+    // strh rd, [rm]
+    emit_al(as, 0x1c000b0 | (rm << 16) | (rd << 12));
+}
+
+void asm_arm_strb_reg_reg(asm_arm_t *as, uint rd, uint rm) {
+    // strb rd, [rm]
+    emit_al(as, 0x5c00000 | (rm << 16) | (rd << 12));
+}
+
+void asm_arm_str_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
+    // str rd, [rm, rn, lsl #2]
+    emit_al(as, 0x7800100 | (rm << 16) | (rd << 12) | rn);
+}
+
+void asm_arm_strh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
+    // strh doesn't support scaled register index, so scale into the
+    // scratch register R8 first (R8 is clobbered here).
+    emit_al(as, 0x1a00080 | (ASM_ARM_REG_R8 << 12) | rn); // mov r8, rn, lsl #1
+    emit_al(as, 0x18000b0 | (rm << 16) | (rd << 12) | ASM_ARM_REG_R8); // strh rd, [rm, r8]
+}
+
+void asm_arm_strb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
+    // strb rd, [rm, rn]
+    emit_al(as, 0x7c00000 | (rm << 16) | (rd << 12) | rn);
+}
+
+// Emit a conditional branch to a label.
+// NOTE(review): during the compute pass a forward label is still -1, so
+// 'rel' is computed from garbage; harmless because nothing is written in
+// that pass, but confirm the offsets converge by the emit pass.
+void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label) {
+    assert(label < as->max_num_labels);
+    mp_uint_t dest = as->label_offsets[label];
+    mp_int_t rel = dest - as->code_offset;
+    rel -= 8; // account for instruction prefetch, PC is 8 bytes ahead of this instruction
+    rel >>= 2; // in ARM mode the branch target is 32-bit aligned, so the 2 LSB are omitted
+
+    if (SIGNED_FIT24(rel)) {
+        emit(as, cond | 0xa000000 | (rel & 0xffffff));
+    } else {
+        printf("asm_arm_bcc: branch does not fit in 24 bits\n");
+    }
+}
+
+// Unconditional branch to a label.
+void asm_arm_b_label(asm_arm_t *as, uint label) {
+    asm_arm_bcc_label(as, ASM_ARM_CC_AL, label);
+}
+
+// Emit an indirect call. Small fun_id values call through the function
+// table pointed to by R7; otherwise the raw pointer is embedded in the
+// instruction stream and jumped to via reg_temp.
+void asm_arm_bl_ind(asm_arm_t *as, void *fun_ptr, uint fun_id, uint reg_temp) {
+    // If the table offset fits into the ldr instruction
+    if (fun_id < (0x1000 / 4)) {
+        emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_LR, ASM_ARM_REG_PC)); // mov lr, pc
+        emit_al(as, 0x597f000 | (fun_id << 2)); // ldr pc, [r7, #fun_id*4]
+        return;
+    }
+
+    emit_al(as, 0x59f0004 | (reg_temp << 12)); // ldr rd, [pc, #4]
+    // Set lr after fun_ptr
+    emit_al(as, asm_arm_op_add_imm(ASM_ARM_REG_LR, ASM_ARM_REG_PC, 4)); // add lr, pc, #4
+    emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_PC, reg_temp)); // mov pc, reg_temp
+    emit(as, (uint) fun_ptr);
+}
+
+#endif // MICROPY_EMIT_ARM
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/asmarm.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,133 @@ +/* + * This file is part of the Micro Python project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2014 Fabian Vogt + * Copyright (c) 2013, 2014 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef __MICROPY_INCLUDED_PY_ASMARM_H__ +#define __MICROPY_INCLUDED_PY_ASMARM_H__ + +#include "py/misc.h" + +#define ASM_ARM_PASS_COMPUTE (1) +#define ASM_ARM_PASS_EMIT (2) + +#define ASM_ARM_REG_R0 (0) +#define ASM_ARM_REG_R1 (1) +#define ASM_ARM_REG_R2 (2) +#define ASM_ARM_REG_R3 (3) +#define ASM_ARM_REG_R4 (4) +#define ASM_ARM_REG_R5 (5) +#define ASM_ARM_REG_R6 (6) +#define ASM_ARM_REG_R7 (7) +#define ASM_ARM_REG_R8 (8) +#define ASM_ARM_REG_R9 (9) +#define ASM_ARM_REG_R10 (10) +#define ASM_ARM_REG_R11 (11) +#define ASM_ARM_REG_R12 (12) +#define ASM_ARM_REG_R13 (13) +#define ASM_ARM_REG_R14 (14) +#define ASM_ARM_REG_R15 (15) +#define ASM_ARM_REG_SP (ASM_ARM_REG_R13) +#define ASM_ARM_REG_LR (ASM_ARM_REG_R14) +#define ASM_ARM_REG_PC (ASM_ARM_REG_R15) + +#define ASM_ARM_CC_EQ (0x0 << 28) +#define ASM_ARM_CC_NE (0x1 << 28) +#define ASM_ARM_CC_CS (0x2 << 28) +#define ASM_ARM_CC_CC (0x3 << 28) +#define ASM_ARM_CC_MI (0x4 << 28) +#define ASM_ARM_CC_PL (0x5 << 28) +#define ASM_ARM_CC_VS (0x6 << 28) +#define ASM_ARM_CC_VC (0x7 << 28) +#define ASM_ARM_CC_HI (0x8 << 28) +#define ASM_ARM_CC_LS (0x9 << 28) +#define ASM_ARM_CC_GE (0xa << 28) +#define ASM_ARM_CC_LT (0xb << 28) +#define ASM_ARM_CC_GT (0xc << 28) +#define ASM_ARM_CC_LE (0xd << 28) +#define ASM_ARM_CC_AL (0xe << 28) + +typedef struct _asm_arm_t asm_arm_t; + +asm_arm_t *asm_arm_new(uint max_num_labels); +void asm_arm_free(asm_arm_t *as, bool free_code); +void asm_arm_start_pass(asm_arm_t *as, uint pass); +void asm_arm_end_pass(asm_arm_t *as); +uint asm_arm_get_code_pos(asm_arm_t *as); +uint asm_arm_get_code_size(asm_arm_t *as); +void *asm_arm_get_code(asm_arm_t *as); + +void asm_arm_entry(asm_arm_t *as, int num_locals); +void asm_arm_exit(asm_arm_t *as); +void asm_arm_label_assign(asm_arm_t *as, uint label); + +void asm_arm_align(asm_arm_t* as, uint align); +void asm_arm_data(asm_arm_t* as, uint bytesize, uint val); + +void asm_arm_bkpt(asm_arm_t *as); + +// mov +void asm_arm_mov_reg_reg(asm_arm_t *as, uint 
reg_dest, uint reg_src); +void asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm); +void asm_arm_mov_local_reg(asm_arm_t *as, int local_num, uint rd); +void asm_arm_mov_reg_local(asm_arm_t *as, uint rd, int local_num); +void asm_arm_setcc_reg(asm_arm_t *as, uint rd, uint cond); + +// compare +void asm_arm_cmp_reg_i8(asm_arm_t *as, uint rd, int imm); +void asm_arm_cmp_reg_reg(asm_arm_t *as, uint rd, uint rn); + +// arithmetic +void asm_arm_add_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm); +void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm); +void asm_arm_mul_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm); +void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm); +void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm); +void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm); +void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num); +void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs); +void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs); + +// memory +void asm_arm_ldr_reg_reg(asm_arm_t *as, uint rd, uint rn, uint byte_offset); +void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn); +void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn); +void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset); +void asm_arm_strh_reg_reg(asm_arm_t *as, uint rd, uint rm); +void asm_arm_strb_reg_reg(asm_arm_t *as, uint rd, uint rm); +// store to array +void asm_arm_str_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn); +void asm_arm_strh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn); +void asm_arm_strb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn); + +// stack +void asm_arm_push(asm_arm_t *as, uint reglist); +void asm_arm_pop(asm_arm_t *as, uint reglist); + +// control flow +void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label); +void asm_arm_b_label(asm_arm_t *as, uint label); +void asm_arm_bl_ind(asm_arm_t *as, void 
*fun_ptr, uint fun_id, uint reg_temp); + +#endif // __MICROPY_INCLUDED_PY_ASMARM_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/asmthumb.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,464 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB
+
+#include "py/asmthumb.h"
+
// Tests for whether a value fits in an unsigned/signed immediate field.
// Each expansion is fully parenthesized so the macros compose safely inside
// larger expressions (the previous SIGNED_FIT* forms ended in a bare `||`,
// so e.g. `SIGNED_FIT8(x) && y` would mis-associate).
#define UNSIGNED_FIT8(x) (((x) & 0xffffff00) == 0)
#define UNSIGNED_FIT16(x) (((x) & 0xffff0000) == 0)
#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80))
#define SIGNED_FIT9(x) ((((x) & 0xffffff00) == 0) || (((x) & 0xffffff00) == 0xffffff00))
#define SIGNED_FIT12(x) ((((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800))
#define SIGNED_FIT23(x) ((((x) & 0xffc00000) == 0) || (((x) & 0xffc00000) == 0xffc00000))
+
// Assembler state. Code is generated in two passes over the same input:
// a compute pass that only measures sizes and records label offsets, then
// an emit pass that writes the actual machine code into code_base.
struct _asm_thumb_t {
    mp_uint_t pass;        // ASM_THUMB_PASS_COMPUTE or ASM_THUMB_PASS_EMIT
    mp_uint_t code_offset; // current write position within the code buffer
    mp_uint_t code_size;   // total code size, as measured by the compute pass
    byte *code_base;       // executable buffer; allocated at start of emit pass
    byte dummy_data[4];    // scratch write target used during the compute pass

    mp_uint_t max_num_labels;  // capacity of label_offsets
    mp_uint_t *label_offsets;  // byte offset of each label; (mp_uint_t)-1 if unassigned
    mp_uint_t push_reglist;    // reglist pushed by asm_thumb_entry, popped by asm_thumb_exit
    mp_uint_t stack_adjust;    // extra stack words subtracted from SP in the prologue
};
+
+asm_thumb_t *asm_thumb_new(uint max_num_labels) {
+ asm_thumb_t *as;
+
+ as = m_new0(asm_thumb_t, 1);
+ as->max_num_labels = max_num_labels;
+ as->label_offsets = m_new(mp_uint_t, max_num_labels);
+
+ return as;
+}
+
// Free the assembler state; if free_code is true the generated executable
// buffer is released too (otherwise ownership stays with the caller).
void asm_thumb_free(asm_thumb_t *as, bool free_code) {
    if (free_code) {
        MP_PLAT_FREE_EXEC(as->code_base, as->code_size);
    }
    m_del(mp_uint_t, as->label_offsets, as->max_num_labels);
    m_del_obj(asm_thumb_t, as);
}
+
// Begin a pass. The compute pass resets all label offsets to "unassigned";
// the emit pass allocates the executable buffer sized by the compute pass.
void asm_thumb_start_pass(asm_thumb_t *as, uint pass) {
    if (pass == ASM_THUMB_PASS_COMPUTE) {
        // setting every byte to 0xff yields (mp_uint_t)-1 in each slot
        memset(as->label_offsets, -1, as->max_num_labels * sizeof(mp_uint_t));
    } else if (pass == ASM_THUMB_PASS_EMIT) {
        MP_PLAT_ALLOC_EXEC(as->code_offset, (void**)&as->code_base, &as->code_size);
        if (as->code_base == NULL) {
            // NOTE(review): under NDEBUG this assert compiles away and a later
            // emit would dereference NULL — consider a hard failure path
            assert(0);
        }
        //printf("code_size: %u\n", as->code_size);
    }
    as->pass = pass;
    as->code_offset = 0;
}
+
// Finish the current pass; currently nothing to do.
void asm_thumb_end_pass(asm_thumb_t *as) {
    (void)as;
    // could check labels are resolved...
}
+
// all functions must go through this one to emit bytes
// if as->pass < ASM_THUMB_PASS_EMIT, then this function only returns a buffer of 4 bytes length
// NOTE: in the compute pass dummy_data is only 4 bytes, so a caller asking
// for more must not write through the pointer (see asm_thumb_data's guard)
STATIC byte *asm_thumb_get_cur_to_write_bytes(asm_thumb_t *as, int num_bytes_to_write) {
    //printf("emit %d\n", num_bytes_to_write);
    if (as->pass < ASM_THUMB_PASS_EMIT) {
        // compute pass: just advance the offset to measure code size
        as->code_offset += num_bytes_to_write;
        return as->dummy_data;
    } else {
        assert(as->code_offset + num_bytes_to_write <= as->code_size);
        byte *c = as->code_base + as->code_offset;
        as->code_offset += num_bytes_to_write;
        return c;
    }
}
+
// Return the current write offset within the code buffer.
uint asm_thumb_get_code_pos(asm_thumb_t *as) {
    return as->code_offset;
}
+
// Return the total code size (valid after the compute pass has finished).
uint asm_thumb_get_code_size(asm_thumb_t *as) {
    return as->code_size;
}
+
// Return the base of the generated code (NULL until the emit pass starts).
void *asm_thumb_get_code(asm_thumb_t *as) {
    return as->code_base;
}
+
+/*
+STATIC void asm_thumb_write_byte_1(asm_thumb_t *as, byte b1) {
+ byte *c = asm_thumb_get_cur_to_write_bytes(as, 1);
+ c[0] = b1;
+}
+*/
+
+/*
+#define IMM32_L0(x) ((x) & 0xff)
+#define IMM32_L1(x) (((x) >> 8) & 0xff)
+#define IMM32_L2(x) (((x) >> 16) & 0xff)
+#define IMM32_L3(x) (((x) >> 24) & 0xff)
+
+STATIC void asm_thumb_write_word32(asm_thumb_t *as, int w32) {
+ byte *c = asm_thumb_get_cur_to_write_bytes(as, 4);
+ c[0] = IMM32_L0(w32);
+ c[1] = IMM32_L1(w32);
+ c[2] = IMM32_L2(w32);
+ c[3] = IMM32_L3(w32);
+}
+*/
+
+// rlolist is a bit map indicating desired lo-registers
+#define OP_PUSH_RLIST(rlolist) (0xb400 | (rlolist))
+#define OP_PUSH_RLIST_LR(rlolist) (0xb400 | 0x0100 | (rlolist))
+#define OP_POP_RLIST(rlolist) (0xbc00 | (rlolist))
+#define OP_POP_RLIST_PC(rlolist) (0xbc00 | 0x0100 | (rlolist))
+
+#define OP_ADD_SP(num_words) (0xb000 | (num_words))
+#define OP_SUB_SP(num_words) (0xb080 | (num_words))
+
+// locals:
+// - stored on the stack in ascending order
+// - numbered 0 through num_locals-1
+// - SP points to first local
+//
+// | SP
+// v
+// l0 l1 l2 ... l(n-1)
+// ^ ^
+// | low address | high address in RAM
+
+void asm_thumb_entry(asm_thumb_t *as, int num_locals) {
+ // work out what to push and how many extra spaces to reserve on stack
+ // so that we have enough for all locals and it's aligned an 8-byte boundary
+ // we push extra regs (r1, r2, r3) to help do the stack adjustment
+ // we probably should just always subtract from sp, since this would be more efficient
+ // for push rlist, lowest numbered register at the lowest address
+ uint reglist;
+ uint stack_adjust;
+ if (num_locals < 0) {
+ num_locals = 0;
+ }
+ // don't pop r0 because it's used for return value
+ switch (num_locals) {
+ case 0:
+ reglist = 0xf2;
+ stack_adjust = 0;
+ break;
+
+ case 1:
+ reglist = 0xf2;
+ stack_adjust = 0;
+ break;
+
+ case 2:
+ reglist = 0xfe;
+ stack_adjust = 0;
+ break;
+
+ case 3:
+ reglist = 0xfe;
+ stack_adjust = 0;
+ break;
+
+ default:
+ reglist = 0xfe;
+ stack_adjust = ((num_locals - 3) + 1) & (~1);
+ break;
+ }
+ asm_thumb_op16(as, OP_PUSH_RLIST_LR(reglist));
+ if (stack_adjust > 0) {
+ asm_thumb_op16(as, OP_SUB_SP(stack_adjust));
+ }
+ as->push_reglist = reglist;
+ as->stack_adjust = stack_adjust;
+}
+
// Emit the function epilogue: undo the prologue's SP adjustment and pop the
// saved registers, with PC taking LR's place so execution returns to caller.
void asm_thumb_exit(asm_thumb_t *as) {
    if (as->stack_adjust > 0) {
        asm_thumb_op16(as, OP_ADD_SP(as->stack_adjust));
    }
    asm_thumb_op16(as, OP_POP_RLIST_PC(as->push_reglist));
}
+
// Bind `label` to the current code offset. In the compute pass this records
// the offset; in the emit pass it verifies the offset did not move, which
// would indicate inconsistent code generation between the two passes.
void asm_thumb_label_assign(asm_thumb_t *as, uint label) {
    assert(label < as->max_num_labels);
    if (as->pass < ASM_THUMB_PASS_EMIT) {
        // assign label offset (must not already be assigned; -1 converts to
        // (mp_uint_t)-1 for the comparison)
        assert(as->label_offsets[label] == -1);
        as->label_offsets[label] = as->code_offset;
    } else {
        // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT
        //printf("l%d: (at %d=%ld)\n", label, as->label_offsets[label], as->code_offset);
        assert(as->label_offsets[label] == as->code_offset);
    }
}
+
// Round code_offset up to a multiple of `align`.
// Assumes align is a power of 2 — TODO confirm all callers guarantee this.
void asm_thumb_align(asm_thumb_t* as, uint align) {
    // TODO fill unused data with NOPs?
    as->code_offset = (as->code_offset + align - 1) & (~(align - 1));
}
+
+void asm_thumb_data(asm_thumb_t* as, uint bytesize, uint val) {
+ byte *c = asm_thumb_get_cur_to_write_bytes(as, bytesize);
+ // only write to the buffer in the emit pass (otherwise we overflow dummy_data)
+ if (as->pass == ASM_THUMB_PASS_EMIT) {
+ // little endian
+ for (uint i = 0; i < bytesize; i++) {
+ *c++ = val;
+ val >>= 8;
+ }
+ }
+}
+
// Return the byte offset bound to `label`, or (mp_uint_t)-1 if the label has
// not been assigned yet (possible for forward branches in the compute pass).
STATIC mp_uint_t get_label_dest(asm_thumb_t *as, uint label) {
    assert(label < as->max_num_labels);
    return as->label_offsets[label];
}
+
+void asm_thumb_op16(asm_thumb_t *as, uint op) {
+ byte *c = asm_thumb_get_cur_to_write_bytes(as, 2);
+ // little endian
+ c[0] = op;
+ c[1] = op >> 8;
+}
+
+void asm_thumb_op32(asm_thumb_t *as, uint op1, uint op2) {
+ byte *c = asm_thumb_get_cur_to_write_bytes(as, 4);
+ // little endian, op1 then op2
+ c[0] = op1;
+ c[1] = op1 >> 8;
+ c[2] = op2;
+ c[3] = op2 >> 8;
+}
+
#define OP_FORMAT_4(op, rlo_dest, rlo_src) ((op) | ((rlo_src) << 3) | (rlo_dest))

// Emit a Thumb format-4 ALU operation (AND/EOR/CMP/...) on two lo registers.
void asm_thumb_format_4(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src) {
    assert(rlo_dest < ASM_THUMB_REG_R8);
    assert(rlo_src < ASM_THUMB_REG_R8);
    asm_thumb_op16(as, OP_FORMAT_4(op, rlo_dest, rlo_src));
}
+
+void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src) {
+ uint op_lo;
+ if (reg_src < 8) {
+ op_lo = reg_src << 3;
+ } else {
+ op_lo = 0x40 | ((reg_src - 8) << 3);
+ }
+ if (reg_dest < 8) {
+ op_lo |= reg_dest;
+ } else {
+ op_lo |= 0x80 | (reg_dest - 8);
+ }
+ // mov reg_dest, reg_src
+ asm_thumb_op16(as, 0x4600 | op_lo);
+}
+
// if loading lo half with movw, the i16 value will be zero extended into the r32 register!
// Emit movw or movt (selected by mov_op) of a 16-bit immediate into reg_dest.
// The Thumb-2 encoding splits the immediate into imm4:i:imm3:imm8 fields
// spread across the two halfwords, hence the bit shuffling below.
void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) {
    assert(reg_dest < ASM_THUMB_REG_R15);
    // mov[wt] reg_dest, #i16_src
    asm_thumb_op32(as, mov_op | ((i16_src >> 1) & 0x0400) | ((i16_src >> 12) & 0xf), ((i16_src << 4) & 0x7000) | (reg_dest << 8) | (i16_src & 0xff));
}
+
#define OP_B_N(byte_offset) (0xe000 | (((byte_offset) >> 1) & 0x07ff))

// Emit a narrow (16-bit) unconditional branch to `label`.
// Returns false only when, in the emit pass, the offset does not fit the
// signed 12-bit (halfword-scaled 11-bit) range; the compute pass always
// returns true since final offsets are not yet known.
bool asm_thumb_b_n_label(asm_thumb_t *as, uint label) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    asm_thumb_op16(as, OP_B_N(rel));
    return as->pass != ASM_THUMB_PASS_EMIT || SIGNED_FIT12(rel);
}
+
#define OP_BCC_N(cond, byte_offset) (0xd000 | ((cond) << 8) | (((byte_offset) >> 1) & 0x00ff))

// all these bit arithmetics need coverage testing!
#define OP_BCC_W_HI(cond, byte_offset) (0xf000 | ((cond) << 6) | (((byte_offset) >> 10) & 0x0400) | (((byte_offset) >> 14) & 0x003f))
#define OP_BCC_W_LO(byte_offset) (0x8000 | ((byte_offset) & 0x2000) | (((byte_offset) >> 1) & 0x0fff))

// Emit a conditional branch to `label`, narrow (16-bit) or wide (32-bit)
// as selected by `wide`. For the narrow form, returns false in the emit
// pass when the offset exceeds the signed 9-bit range; the wide form is
// always considered in range here.
bool asm_thumb_bcc_nw_label(asm_thumb_t *as, int cond, uint label, bool wide) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    if (!wide) {
        asm_thumb_op16(as, OP_BCC_N(cond, rel));
        return as->pass != ASM_THUMB_PASS_EMIT || SIGNED_FIT9(rel);
    } else {
        asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
        return true;
    }
}
+
#define OP_BL_HI(byte_offset) (0xf000 | (((byte_offset) >> 12) & 0x07ff))
#define OP_BL_LO(byte_offset) (0xf800 | (((byte_offset) >> 1) & 0x07ff))

// Emit a `bl` (branch with link) to `label`.
// Returns false only when, in the emit pass, the offset exceeds the signed
// 23-bit range covered by this two-halfword encoding.
bool asm_thumb_bl_label(asm_thumb_t *as, uint label) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    asm_thumb_op32(as, OP_BL_HI(rel), OP_BL_LO(rel));
    return as->pass != ASM_THUMB_PASS_EMIT || SIGNED_FIT23(rel);
}
+
// Load an arbitrary 32-bit constant into reg_dest using movw (lo half)
// followed by movt (hi half); always 8 bytes of code.
void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) {
    // movw, movt does it in 8 bytes
    // ldr [pc, #], dw does it in 6 bytes, but we might not reach to end of code for dw
    asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
    asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVT, reg_dest, i32 >> 16);
}
+
// Load i32 into reg_dest using the shortest available encoding:
// 2 bytes (mov rlo, #i8), 4 bytes (movw), or 8 bytes (movw+movt).
// The FIT tests treat i32 as a bit pattern, so negative values always take
// the full movw+movt path.
void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) {
    if (reg_dest < 8 && UNSIGNED_FIT8(i32)) {
        asm_thumb_mov_rlo_i8(as, reg_dest, i32);
    } else if (UNSIGNED_FIT16(i32)) {
        asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
    } else {
        asm_thumb_mov_reg_i32(as, reg_dest, i32);
    }
}
+
// i32 is stored as a full word in the code, and aligned to machine-word boundary
// TODO this is very inefficient, improve it!
void asm_thumb_mov_reg_i32_aligned(asm_thumb_t *as, uint reg_dest, int i32) {
    // align on machine-word + 2
    if ((as->code_offset & 3) == 0) {
        asm_thumb_op16(as, ASM_THUMB_OP_NOP);
    }
    // jump over the i32 value (instruction prefetch adds 2 to PC)
    asm_thumb_op16(as, OP_B_N(2));
    // store i32 on machine-word aligned boundary
    asm_thumb_data(as, 4, i32);
    // do the actual load of the i32 value
    // NOTE(review): the register is loaded via immediates, not from the word
    // just stored — presumably the in-stream copy exists so the value (e.g. a
    // heap pointer) remains discoverable/patchable; confirm against callers
    asm_thumb_mov_reg_i32_optimised(as, reg_dest, i32);
}
+
#define OP_STR_TO_SP_OFFSET(rlo_dest, word_offset) (0x9000 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
#define OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset) (0x9800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))

// Store rlo_src into local variable local_num: str rlo_src, [sp, #offset].
// local_num is a word index relative to SP (see layout comment above
// asm_thumb_entry).
void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num, uint rlo_src) {
    assert(rlo_src < ASM_THUMB_REG_R8);
    int word_offset = local_num;
    assert(as->pass < ASM_THUMB_PASS_EMIT || word_offset >= 0);
    asm_thumb_op16(as, OP_STR_TO_SP_OFFSET(rlo_src, word_offset));
}
+
// Load local variable local_num into rlo_dest: ldr rlo_dest, [sp, #offset].
void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num) {
    assert(rlo_dest < ASM_THUMB_REG_R8);
    int word_offset = local_num;
    assert(as->pass < ASM_THUMB_PASS_EMIT || word_offset >= 0);
    asm_thumb_op16(as, OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset));
}
+
#define OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset) (0xa800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))

// Load the address of local variable local_num into rlo_dest:
// add rlo_dest, sp, #offset.
void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num) {
    assert(rlo_dest < ASM_THUMB_REG_R8);
    int word_offset = local_num;
    assert(as->pass < ASM_THUMB_PASS_EMIT || word_offset >= 0);
    asm_thumb_op16(as, OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset));
}
+
// this could be wrong, because it should have a range of +/- 16MiB...
#define OP_BW_HI(byte_offset) (0xf000 | (((byte_offset) >> 12) & 0x07ff))
#define OP_BW_LO(byte_offset) (0xb800 | (((byte_offset) >> 1) & 0x07ff))

// Unconditional branch to `label`, picking narrow or wide encoding.
// A backwards branch (label already assigned) can use the narrow form when
// it fits; a forward branch must assume the wide form so that both passes
// emit the same number of bytes (rel is garbage in the compute pass but the
// emitted bytes don't matter then).
void asm_thumb_b_label(asm_thumb_t *as, uint label) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    if (dest != (mp_uint_t)-1 && rel <= -4) {
        // is a backwards jump, so we know the size of the jump on the first pass
        // calculate rel assuming 12 bit relative jump
        if (SIGNED_FIT12(rel)) {
            asm_thumb_op16(as, OP_B_N(rel));
        } else {
            goto large_jump;
        }
    } else {
        // is a forwards jump, so need to assume it's large
        large_jump:
        asm_thumb_op32(as, OP_BW_HI(rel), OP_BW_LO(rel));
    }
}
+
// Conditional branch to `label`, picking narrow or wide encoding.
// Same strategy as asm_thumb_b_label: backwards branches may use the narrow
// form when the offset fits; forward branches must assume the wide form so
// both passes emit identical code sizes.
void asm_thumb_bcc_label(asm_thumb_t *as, int cond, uint label) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    if (dest != (mp_uint_t)-1 && rel <= -4) {
        // is a backwards jump, so we know the size of the jump on the first pass
        // calculate rel assuming 9 bit relative jump
        if (SIGNED_FIT9(rel)) {
            asm_thumb_op16(as, OP_BCC_N(cond, rel));
        } else {
            goto large_jump;
        }
    } else {
        // is a forwards jump, so need to assume it's large
        large_jump:
        asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
    }
}
+
#define OP_BLX(reg) (0x4780 | ((reg) << 3))
#define OP_SVC(arg) (0xdf00 | (arg))

// Emit an indirect call to fun_ptr via reg_temp (which is clobbered).
// For small fun_id the pointer is loaded from a function table — assumes the
// table base is held in r7; TODO confirm this convention with the caller.
void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp) {
    /* TODO make this use less bytes
    uint rlo_base = ASM_THUMB_REG_R3;
    uint rlo_dest = ASM_THUMB_REG_R7;
    uint word_offset = 4;
    asm_thumb_op16(as, 0x0000);
    asm_thumb_op16(as, 0x6800 | (word_offset << 6) | (rlo_base << 3) | rlo_dest); // ldr rlo_dest, [rlo_base, #offset]
    asm_thumb_op16(as, 0x4780 | (ASM_THUMB_REG_R9 << 3)); // blx reg
    */

    if (fun_id < 32) {
        // load ptr to function from table, indexed by fun_id (must be in range 0-31); 4 bytes
        asm_thumb_op16(as, ASM_THUMB_FORMAT_9_10_ENCODE(ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, reg_temp, ASM_THUMB_REG_R7, fun_id));
        asm_thumb_op16(as, OP_BLX(reg_temp));
    } else {
        // load ptr to function into register using immediate; 6 bytes
        asm_thumb_mov_reg_i32(as, reg_temp, (mp_uint_t)fun_ptr);
        asm_thumb_op16(as, OP_BLX(reg_temp));
    }
}
+
+#endif // MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/asmthumb.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,249 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_ASMTHUMB_H__
+#define __MICROPY_INCLUDED_PY_ASMTHUMB_H__
+
+#include "py/misc.h"
+
+#define ASM_THUMB_PASS_COMPUTE (1)
+#define ASM_THUMB_PASS_EMIT (2)
+
#define ASM_THUMB_REG_R0 (0)
#define ASM_THUMB_REG_R1 (1)
#define ASM_THUMB_REG_R2 (2)
#define ASM_THUMB_REG_R3 (3)
#define ASM_THUMB_REG_R4 (4)
#define ASM_THUMB_REG_R5 (5)
#define ASM_THUMB_REG_R6 (6)
#define ASM_THUMB_REG_R7 (7)
#define ASM_THUMB_REG_R8 (8)
#define ASM_THUMB_REG_R9 (9)
#define ASM_THUMB_REG_R10 (10)
#define ASM_THUMB_REG_R11 (11)
#define ASM_THUMB_REG_R12 (12)
#define ASM_THUMB_REG_R13 (13)
#define ASM_THUMB_REG_R14 (14)
#define ASM_THUMB_REG_R15 (15)
// Fix: the alias must reference the namespaced macro; the previous expansion
// (REG_R14) is undefined in this header and fails to compile at any use site.
#define ASM_THUMB_REG_LR (ASM_THUMB_REG_R14)
+
+#define ASM_THUMB_CC_EQ (0x0)
+#define ASM_THUMB_CC_NE (0x1)
+#define ASM_THUMB_CC_CS (0x2)
+#define ASM_THUMB_CC_CC (0x3)
+#define ASM_THUMB_CC_MI (0x4)
+#define ASM_THUMB_CC_PL (0x5)
+#define ASM_THUMB_CC_VS (0x6)
+#define ASM_THUMB_CC_VC (0x7)
+#define ASM_THUMB_CC_HI (0x8)
+#define ASM_THUMB_CC_LS (0x9)
+#define ASM_THUMB_CC_GE (0xa)
+#define ASM_THUMB_CC_LT (0xb)
+#define ASM_THUMB_CC_GT (0xc)
+#define ASM_THUMB_CC_LE (0xd)
+
+typedef struct _asm_thumb_t asm_thumb_t;
+
+asm_thumb_t *asm_thumb_new(uint max_num_labels);
+void asm_thumb_free(asm_thumb_t *as, bool free_code);
+void asm_thumb_start_pass(asm_thumb_t *as, uint pass);
+void asm_thumb_end_pass(asm_thumb_t *as);
+uint asm_thumb_get_code_pos(asm_thumb_t *as);
+uint asm_thumb_get_code_size(asm_thumb_t *as);
+void *asm_thumb_get_code(asm_thumb_t *as);
+
+void asm_thumb_entry(asm_thumb_t *as, int num_locals);
+void asm_thumb_exit(asm_thumb_t *as);
+
+void asm_thumb_label_assign(asm_thumb_t *as, uint label);
+
+void asm_thumb_align(asm_thumb_t* as, uint align);
+void asm_thumb_data(asm_thumb_t* as, uint bytesize, uint val);
+
+// argument order follows ARM, in general dest is first
+// note there is a difference between movw and mov.w, and many others!
+
+#define ASM_THUMB_OP_IT (0xbf00)
+#define ASM_THUMB_OP_ITE_EQ (0xbf0c)
+#define ASM_THUMB_OP_ITE_CS (0xbf2c)
+#define ASM_THUMB_OP_ITE_MI (0xbf4c)
+#define ASM_THUMB_OP_ITE_VS (0xbf6c)
+#define ASM_THUMB_OP_ITE_HI (0xbf8c)
+#define ASM_THUMB_OP_ITE_GE (0xbfac)
+#define ASM_THUMB_OP_ITE_GT (0xbfcc)
+
+#define ASM_THUMB_OP_NOP (0xbf00)
+#define ASM_THUMB_OP_WFI (0xbf30)
+#define ASM_THUMB_OP_CPSID_I (0xb672) // cpsid i, disable irq
+#define ASM_THUMB_OP_CPSIE_I (0xb662) // cpsie i, enable irq
+
+void asm_thumb_op16(asm_thumb_t *as, uint op);
+void asm_thumb_op32(asm_thumb_t *as, uint op1, uint op2);
+
// Emit an IT (if-then) instruction: `cc` is the first condition, `mask`
// encodes the count and polarity of the conditional instructions following.
static inline void asm_thumb_it_cc(asm_thumb_t *as, uint cc, uint mask)
    { asm_thumb_op16(as, ASM_THUMB_OP_IT | (cc << 4) | mask); }
+
+// FORMAT 1: move shifted register
+
+#define ASM_THUMB_FORMAT_1_LSL (0x0000)
+#define ASM_THUMB_FORMAT_1_LSR (0x0800)
+#define ASM_THUMB_FORMAT_1_ASR (0x1000)
+
+#define ASM_THUMB_FORMAT_1_ENCODE(op, rlo_dest, rlo_src, offset) \
+ ((op) | ((offset) << 6) | ((rlo_src) << 3) | (rlo_dest))
+
// Emit a format-1 move-shifted-register instruction (lsl/lsr/asr by an
// immediate shift amount); both registers must be lo registers (r0-r7).
static inline void asm_thumb_format_1(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src, uint offset) {
    assert(rlo_dest < ASM_THUMB_REG_R8);
    assert(rlo_src < ASM_THUMB_REG_R8);
    asm_thumb_op16(as, ASM_THUMB_FORMAT_1_ENCODE(op, rlo_dest, rlo_src, offset));
}
+
+// FORMAT 2: add/subtract
+
+#define ASM_THUMB_FORMAT_2_ADD (0x1800)
+#define ASM_THUMB_FORMAT_2_SUB (0x1a00)
+#define ASM_THUMB_FORMAT_2_REG_OPERAND (0x0000)
+#define ASM_THUMB_FORMAT_2_IMM_OPERAND (0x0400)
+
+#define ASM_THUMB_FORMAT_2_ENCODE(op, rlo_dest, rlo_src, src_b) \
+ ((op) | ((src_b) << 6) | ((rlo_src) << 3) | (rlo_dest))
+
// Emit a format-2 add/subtract; src_b is a third lo register or a 3-bit
// immediate depending on the REG/IMM_OPERAND flag folded into `op`.
static inline void asm_thumb_format_2(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src, int src_b) {
    assert(rlo_dest < ASM_THUMB_REG_R8);
    assert(rlo_src < ASM_THUMB_REG_R8);
    asm_thumb_op16(as, ASM_THUMB_FORMAT_2_ENCODE(op, rlo_dest, rlo_src, src_b));
}
+
+static inline void asm_thumb_add_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, uint rlo_src_b)
+ { asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_ADD | ASM_THUMB_FORMAT_2_REG_OPERAND, rlo_dest, rlo_src_a, rlo_src_b); }
+static inline void asm_thumb_add_rlo_rlo_i3(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, int i3_src)
+ { asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_ADD | ASM_THUMB_FORMAT_2_IMM_OPERAND, rlo_dest, rlo_src_a, i3_src); }
+static inline void asm_thumb_sub_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, uint rlo_src_b)
+ { asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_SUB | ASM_THUMB_FORMAT_2_REG_OPERAND, rlo_dest, rlo_src_a, rlo_src_b); }
+static inline void asm_thumb_sub_rlo_rlo_i3(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, int i3_src)
+ { asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_SUB | ASM_THUMB_FORMAT_2_IMM_OPERAND, rlo_dest, rlo_src_a, i3_src); }
+
+// FORMAT 3: move/compare/add/subtract immediate
+// These instructions all do zero extension of the i8 value
+
+#define ASM_THUMB_FORMAT_3_MOV (0x2000)
+#define ASM_THUMB_FORMAT_3_CMP (0x2800)
+#define ASM_THUMB_FORMAT_3_ADD (0x3000)
+#define ASM_THUMB_FORMAT_3_SUB (0x3800)
+
+#define ASM_THUMB_FORMAT_3_ENCODE(op, rlo, i8) ((op) | ((rlo) << 8) | (i8))
+
// Emit a format-3 move/compare/add/subtract of an 8-bit immediate
// (zero extended) against a lo register.
static inline void asm_thumb_format_3(asm_thumb_t *as, uint op, uint rlo, int i8) {
    assert(rlo < ASM_THUMB_REG_R8);
    asm_thumb_op16(as, ASM_THUMB_FORMAT_3_ENCODE(op, rlo, i8));
}
+
+static inline void asm_thumb_mov_rlo_i8(asm_thumb_t *as, uint rlo, int i8) { asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_MOV, rlo, i8); }
+static inline void asm_thumb_cmp_rlo_i8(asm_thumb_t *as, uint rlo, int i8) { asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_CMP, rlo, i8); }
+static inline void asm_thumb_add_rlo_i8(asm_thumb_t *as, uint rlo, int i8) { asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_ADD, rlo, i8); }
+static inline void asm_thumb_sub_rlo_i8(asm_thumb_t *as, uint rlo, int i8) { asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_SUB, rlo, i8); }
+
+// FORMAT 4: ALU operations
+
+#define ASM_THUMB_FORMAT_4_AND (0x4000)
+#define ASM_THUMB_FORMAT_4_EOR (0x4040)
+#define ASM_THUMB_FORMAT_4_LSL (0x4080)
+#define ASM_THUMB_FORMAT_4_LSR (0x40c0)
+#define ASM_THUMB_FORMAT_4_ASR (0x4100)
+#define ASM_THUMB_FORMAT_4_ADC (0x4140)
+#define ASM_THUMB_FORMAT_4_SBC (0x4180)
+#define ASM_THUMB_FORMAT_4_ROR (0x41c0)
+#define ASM_THUMB_FORMAT_4_TST (0x4200)
+#define ASM_THUMB_FORMAT_4_NEG (0x4240)
+#define ASM_THUMB_FORMAT_4_CMP (0x4280)
+#define ASM_THUMB_FORMAT_4_CMN (0x42c0)
+#define ASM_THUMB_FORMAT_4_ORR (0x4300)
+#define ASM_THUMB_FORMAT_4_MUL (0x4340)
+#define ASM_THUMB_FORMAT_4_BIC (0x4380)
+#define ASM_THUMB_FORMAT_4_MVN (0x43c0)
+
+void asm_thumb_format_4(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src);
+
+static inline void asm_thumb_cmp_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) { asm_thumb_format_4(as, ASM_THUMB_FORMAT_4_CMP, rlo_dest, rlo_src); }
+
+// FORMAT 9: load/store with immediate offset
+// For word transfers the offset must be aligned, and >>2
+
+// FORMAT 10: load/store halfword
+// The offset must be aligned, and >>1
+// The load is zero extended into the register
+
+#define ASM_THUMB_FORMAT_9_STR (0x6000)
+#define ASM_THUMB_FORMAT_9_LDR (0x6800)
+#define ASM_THUMB_FORMAT_9_WORD_TRANSFER (0x0000)
+#define ASM_THUMB_FORMAT_9_BYTE_TRANSFER (0x1000)
+
+#define ASM_THUMB_FORMAT_10_STRH (0x8000)
+#define ASM_THUMB_FORMAT_10_LDRH (0x8800)
+
+#define ASM_THUMB_FORMAT_9_10_ENCODE(op, rlo_dest, rlo_base, offset) \
+ ((op) | (((offset) << 6) & 0x07c0) | ((rlo_base) << 3) | (rlo_dest))
+
// Emit a format-9/10 load/store with immediate offset; `offset` is already
// scaled (words for word transfers, halfwords for halfword transfers).
static inline void asm_thumb_format_9_10(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_base, uint offset)
    { asm_thumb_op16(as, ASM_THUMB_FORMAT_9_10_ENCODE(op, rlo_dest, rlo_base, offset)); }
+
+static inline void asm_thumb_str_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint word_offset)
+ { asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, rlo_src, rlo_base, word_offset); }
+static inline void asm_thumb_strb_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint byte_offset)
+ { asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER, rlo_src, rlo_base, byte_offset); }
+static inline void asm_thumb_strh_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint byte_offset)
+ { asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_10_STRH, rlo_src, rlo_base, byte_offset); }
+static inline void asm_thumb_ldr_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint word_offset)
+ { asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, rlo_dest, rlo_base, word_offset); }
+static inline void asm_thumb_ldrb_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint byte_offset)
+ { asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER , rlo_dest, rlo_base, byte_offset); }
+static inline void asm_thumb_ldrh_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint byte_offset)
+ { asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_10_LDRH, rlo_dest, rlo_base, byte_offset); }
+
+// TODO convert these to above format style
+
+#define ASM_THUMB_OP_MOVW (0xf240)
+#define ASM_THUMB_OP_MOVT (0xf2c0)
+
+void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src);
+void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src);
+
+// these return true if the destination is in range, false otherwise
+bool asm_thumb_b_n_label(asm_thumb_t *as, uint label);
+bool asm_thumb_bcc_nw_label(asm_thumb_t *as, int cond, uint label, bool wide);
+bool asm_thumb_bl_label(asm_thumb_t *as, uint label);
+
+void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32_src); // convenience
+void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32_src); // convenience
+void asm_thumb_mov_reg_i32_aligned(asm_thumb_t *as, uint reg_dest, int i32); // convenience
+void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num_dest, uint rlo_src); // convenience
+void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num); // convenience
+void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num); // convenience
+
+void asm_thumb_b_label(asm_thumb_t *as, uint label); // convenience: picks narrow or wide branch
+void asm_thumb_bcc_label(asm_thumb_t *as, int cc, uint label); // convenience: picks narrow or wide branch
+void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp); // convenience
+
+#endif // __MICROPY_INCLUDED_PY_ASMTHUMB_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/asmx64.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,721 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_X64
+
+#include "py/asmx64.h"
+
+/* all offsets are measured in multiples of 8 bytes */
+#define WORD_SIZE (8)
+
+#define OPCODE_NOP (0x90)
+#define OPCODE_PUSH_R64 (0x50) /* +rq */
+#define OPCODE_PUSH_I64 (0x68)
+#define OPCODE_PUSH_M64 (0xff) /* /6 */
+#define OPCODE_POP_R64 (0x58) /* +rq */
+#define OPCODE_RET (0xc3)
+#define OPCODE_MOV_I8_TO_R8 (0xb0) /* +rb */
+#define OPCODE_MOV_I64_TO_R64 (0xb8) /* +rq */
+#define OPCODE_MOV_I32_TO_RM32 (0xc7)
+#define OPCODE_MOV_R8_TO_RM8 (0x88) /* /r */
+#define OPCODE_MOV_R64_TO_RM64 (0x89) /* /r */
+#define OPCODE_MOV_RM64_TO_R64 (0x8b) /* /r */
+#define OPCODE_MOVZX_RM8_TO_R64 (0xb6) /* 0x0f 0xb6/r */
+#define OPCODE_MOVZX_RM16_TO_R64 (0xb7) /* 0x0f 0xb7/r */
+#define OPCODE_LEA_MEM_TO_R64 (0x8d) /* /r */
+#define OPCODE_AND_R64_TO_RM64 (0x21) /* /r */
+#define OPCODE_OR_R64_TO_RM64 (0x09) /* /r */
+#define OPCODE_XOR_R64_TO_RM64 (0x31) /* /r */
+#define OPCODE_ADD_R64_TO_RM64 (0x01) /* /r */
+#define OPCODE_ADD_I32_TO_RM32 (0x81) /* /0 */
+#define OPCODE_ADD_I8_TO_RM32 (0x83) /* /0 */
+#define OPCODE_SUB_R64_FROM_RM64 (0x29)
+#define OPCODE_SUB_I32_FROM_RM64 (0x81) /* /5 */
+#define OPCODE_SUB_I8_FROM_RM64 (0x83) /* /5 */
+//#define OPCODE_SHL_RM32_BY_I8 (0xc1) /* /4 */
+//#define OPCODE_SHR_RM32_BY_I8 (0xc1) /* /5 */
+//#define OPCODE_SAR_RM32_BY_I8 (0xc1) /* /7 */
+#define OPCODE_SHL_RM64_CL (0xd3) /* /4 */
+#define OPCODE_SAR_RM64_CL (0xd3) /* /7 */
+//#define OPCODE_CMP_I32_WITH_RM32 (0x81) /* /7 */
+//#define OPCODE_CMP_I8_WITH_RM32 (0x83) /* /7 */
+#define OPCODE_CMP_R64_WITH_RM64 (0x39) /* /r */
+//#define OPCODE_CMP_RM32_WITH_R32 (0x3b)
+#define OPCODE_TEST_R8_WITH_RM8 (0x84) /* /r */
+#define OPCODE_JMP_REL8 (0xeb)
+#define OPCODE_JMP_REL32 (0xe9)
+#define OPCODE_JCC_REL8 (0x70) /* | jcc type */
+#define OPCODE_JCC_REL32_A (0x0f)
+#define OPCODE_JCC_REL32_B (0x80) /* | jcc type */
+#define OPCODE_SETCC_RM8_A (0x0f)
+#define OPCODE_SETCC_RM8_B (0x90) /* | jcc type, /0 */
+#define OPCODE_CALL_REL32 (0xe8)
+#define OPCODE_CALL_RM32 (0xff) /* /2 */
+#define OPCODE_LEAVE (0xc9)
+
+#define MODRM_R64(x) (((x) & 0x7) << 3)
+#define MODRM_RM_DISP0 (0x00)
+#define MODRM_RM_DISP8 (0x40)
+#define MODRM_RM_DISP32 (0x80)
+#define MODRM_RM_REG (0xc0)
+#define MODRM_RM_R64(x) ((x) & 0x7)
+
+#define OP_SIZE_PREFIX (0x66)
+
+#define REX_PREFIX (0x40)
+#define REX_W (0x08) // width
+#define REX_R (0x04) // register
+#define REX_X (0x02) // index
+#define REX_B (0x01) // base
+#define REX_W_FROM_R64(r64) ((r64) >> 0 & 0x08)
+#define REX_R_FROM_R64(r64) ((r64) >> 1 & 0x04)
+#define REX_X_FROM_R64(r64) ((r64) >> 2 & 0x02)
+#define REX_B_FROM_R64(r64) ((r64) >> 3 & 0x01)
+
+#define IMM32_L0(x) ((x) & 0xff)
+#define IMM32_L1(x) (((x) >> 8) & 0xff)
+#define IMM32_L2(x) (((x) >> 16) & 0xff)
+#define IMM32_L3(x) (((x) >> 24) & 0xff)
+#define IMM64_L4(x) (((x) >> 32) & 0xff)
+#define IMM64_L5(x) (((x) >> 40) & 0xff)
+#define IMM64_L6(x) (((x) >> 48) & 0xff)
+#define IMM64_L7(x) (((x) >> 56) & 0xff)
+
+#define UNSIGNED_FIT8(x) (((x) & 0xffffffffffffff00) == 0)
+#define UNSIGNED_FIT32(x) (((x) & 0xffffffff00000000) == 0)
+// True if x fits in a signed 8-bit immediate.  The whole expansion is
+// parenthesized so the macro composes safely with !, && etc. (the original
+// `(A) || (B)` form would mis-parse in e.g. `!SIGNED_FIT8(x)`).
+// NOTE(review): the 0xffffff80 mask examines only the low 32 bits of x.
+#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80))
+
+// Assembler state, shared across the compute and emit passes.
+struct _asm_x64_t {
+    uint pass;                  // current pass: ASM_X64_PASS_COMPUTE or _EMIT
+    mp_uint_t code_offset;      // current write position (== code size after compute pass)
+    mp_uint_t code_size;        // size of the allocated executable buffer
+    byte *code_base;            // executable buffer; NULL until the emit pass
+    byte dummy_data[8];         // scratch target for writes during the compute pass
+
+    mp_uint_t max_num_labels;   // capacity of label_offsets
+    mp_uint_t *label_offsets;   // per-label code offset; (mp_uint_t)-1 = unassigned
+    int num_locals;             // number of stack locals reserved by asm_x64_entry
+};
+
+// Allocate a new assembler able to track up to max_num_labels labels.
+// Caller owns the result and releases it with asm_x64_free().
+asm_x64_t *asm_x64_new(mp_uint_t max_num_labels) {
+    asm_x64_t *as;
+
+    as = m_new0(asm_x64_t, 1);
+    as->max_num_labels = max_num_labels;
+    as->label_offsets = m_new(mp_uint_t, max_num_labels);
+
+    return as;
+}
+
+// Free the assembler state; if free_code is true, also release the
+// emitted executable code buffer.
+void asm_x64_free(asm_x64_t *as, bool free_code) {
+    if (free_code) {
+        MP_PLAT_FREE_EXEC(as->code_base, as->code_size);
+    }
+    m_del(mp_uint_t, as->label_offsets, as->max_num_labels);
+    m_del_obj(asm_x64_t, as);
+}
+
+// Begin an assembler pass.  PASS_COMPUTE sizes the code and assigns label
+// offsets; PASS_EMIT allocates the executable buffer and writes real bytes.
+// Fix: the two pass checks are mutually exclusive, so chain them with
+// `else if` (the original `} if (` silently re-tested on every call).
+void asm_x64_start_pass(asm_x64_t *as, uint pass) {
+    if (pass == ASM_X64_PASS_COMPUTE) {
+        // reset all labels
+        memset(as->label_offsets, -1, as->max_num_labels * sizeof(mp_uint_t));
+    } else if (pass == ASM_X64_PASS_EMIT) {
+        // code_offset currently holds the size computed by the previous pass
+        MP_PLAT_ALLOC_EXEC(as->code_offset, (void**)&as->code_base, &as->code_size);
+        if (as->code_base == NULL) {
+            assert(0);
+        }
+        //printf("code_size: %u\n", as->code_size);
+    }
+    as->pass = pass;
+    as->code_offset = 0;
+}
+
+// Finish the current pass.  Currently a no-op; could verify that all
+// labels were resolved.
+void asm_x64_end_pass(asm_x64_t *as) {
+    // could check labels are resolved...
+    (void)as;
+}
+
+// All emit helpers funnel through here: reserve num_bytes_to_write bytes and
+// return the address to write them to.  During the compute pass only the
+// offset is advanced and a scratch buffer is handed back.
+STATIC byte *asm_x64_get_cur_to_write_bytes(asm_x64_t *as, int num_bytes_to_write) {
+    //printf("emit %d\n", num_bytes_to_write);
+    if (as->pass >= ASM_X64_PASS_EMIT) {
+        assert(as->code_offset + num_bytes_to_write <= as->code_size);
+        byte *dest = as->code_base + as->code_offset;
+        as->code_offset += num_bytes_to_write;
+        return dest;
+    }
+    // sizing pass: just count the bytes
+    as->code_offset += num_bytes_to_write;
+    return as->dummy_data;
+}
+
+// Current write offset within the code buffer.
+mp_uint_t asm_x64_get_code_pos(asm_x64_t *as) {
+    return as->code_offset;
+}
+
+// Size of the allocated executable buffer (valid after the emit pass starts).
+mp_uint_t asm_x64_get_code_size(asm_x64_t *as) {
+    return as->code_size;
+}
+
+// Base address of the emitted code (NULL before the emit pass).
+void *asm_x64_get_code(asm_x64_t *as) {
+    return as->code_base;
+}
+
+// Emit one raw byte.
+STATIC void asm_x64_write_byte_1(asm_x64_t *as, byte b1) {
+    byte* c = asm_x64_get_cur_to_write_bytes(as, 1);
+    c[0] = b1;
+}
+
+// Emit two raw bytes, in order.
+STATIC void asm_x64_write_byte_2(asm_x64_t *as, byte b1, byte b2) {
+    byte* c = asm_x64_get_cur_to_write_bytes(as, 2);
+    c[0] = b1;
+    c[1] = b2;
+}
+
+// Emit three raw bytes, in order.
+STATIC void asm_x64_write_byte_3(asm_x64_t *as, byte b1, byte b2, byte b3) {
+    byte* c = asm_x64_get_cur_to_write_bytes(as, 3);
+    c[0] = b1;
+    c[1] = b2;
+    c[2] = b3;
+}
+
+// Emit a 32-bit word, least-significant byte first (x86 is little endian).
+STATIC void asm_x64_write_word32(asm_x64_t *as, int w32) {
+    byte* c = asm_x64_get_cur_to_write_bytes(as, 4);
+    c[0] = IMM32_L0(w32);
+    c[1] = IMM32_L1(w32);
+    c[2] = IMM32_L2(w32);
+    c[3] = IMM32_L3(w32);
+}
+
+// Emit a 64-bit word, least-significant byte first.
+STATIC void asm_x64_write_word64(asm_x64_t *as, int64_t w64) {
+    byte* c = asm_x64_get_cur_to_write_bytes(as, 8);
+    c[0] = IMM32_L0(w64);
+    c[1] = IMM32_L1(w64);
+    c[2] = IMM32_L2(w64);
+    c[3] = IMM32_L3(w64);
+    c[4] = IMM64_L4(w64);
+    c[5] = IMM64_L5(w64);
+    c[6] = IMM64_L6(w64);
+    c[7] = IMM64_L7(w64);
+}
+
+// align must be a power of 2 (the round-up uses a mask of align-1)
+void asm_x64_align(asm_x64_t* as, mp_uint_t align) {
+    // TODO fill unused data with NOPs?
+    as->code_offset = (as->code_offset + align - 1) & (~(align - 1));
+}
+
+// Emit `bytesize` bytes of raw data taken from val, least-significant first.
+void asm_x64_data(asm_x64_t* as, mp_uint_t bytesize, mp_uint_t val) {
+    byte *dest = asm_x64_get_cur_to_write_bytes(as, bytesize);
+    // machine is little endian
+    for (mp_uint_t i = 0; i < bytesize; i++) {
+        dest[i] = val & 0xff;
+        val >>= 8;
+    }
+}
+
+/* unused
+STATIC void asm_x64_write_word32_to(asm_x64_t *as, int offset, int w32) {
+ byte* c;
+ assert(offset + 4 <= as->code_size);
+ c = as->code_base + offset;
+ c[0] = IMM32_L0(w32);
+ c[1] = IMM32_L1(w32);
+ c[2] = IMM32_L2(w32);
+ c[3] = IMM32_L3(w32);
+}
+*/
+
+// Emit the ModRM byte (plus displacement bytes) for a memory operand
+// [disp_r64 + disp_offset] with r64 in the ModRM reg field.  Chooses the
+// shortest displacement encoding: none, disp8, or disp32.
+STATIC void asm_x64_write_r64_disp(asm_x64_t *as, int r64, int disp_r64, int disp_offset) {
+    // rsp as a base register needs a SIB byte, which this routine does not emit
+    assert(disp_r64 != ASM_X64_REG_RSP);
+
+    if (disp_r64 == ASM_X64_REG_R12) {
+        // special case for r12; not fully implemented
+        // r12 also requires a SIB byte; 0x24 encodes base=r12 with no index
+        assert(SIGNED_FIT8(disp_offset));
+        asm_x64_write_byte_3(as, MODRM_R64(r64) | MODRM_RM_DISP8 | MODRM_RM_R64(disp_r64), 0x24, IMM32_L0(disp_offset));
+        return;
+    }
+
+    // rbp is excluded from the no-displacement form (mod=00 with rm=101 has
+    // a different meaning in x86 encoding), so it always gets a disp8/disp32
+    if (disp_offset == 0 && disp_r64 != ASM_X64_REG_RBP) {
+        asm_x64_write_byte_1(as, MODRM_R64(r64) | MODRM_RM_DISP0 | MODRM_RM_R64(disp_r64));
+    } else if (SIGNED_FIT8(disp_offset)) {
+        asm_x64_write_byte_2(as, MODRM_R64(r64) | MODRM_RM_DISP8 | MODRM_RM_R64(disp_r64), IMM32_L0(disp_offset));
+    } else {
+        asm_x64_write_byte_1(as, MODRM_R64(r64) | MODRM_RM_DISP32 | MODRM_RM_R64(disp_r64));
+        asm_x64_write_word32(as, disp_offset);
+    }
+}
+
+// Emit a generic 64-bit reg,reg instruction: REX.W prefix, opcode, and a
+// register-direct ModRM byte (src in reg field, dest in rm field).
+STATIC void asm_x64_generic_r64_r64(asm_x64_t *as, int dest_r64, int src_r64, int op) {
+    asm_x64_write_byte_3(as, REX_PREFIX | REX_W | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), op, MODRM_R64(src_r64) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
+}
+
+// nop
+void asm_x64_nop(asm_x64_t *as) {
+    asm_x64_write_byte_1(as, OPCODE_NOP);
+}
+
+// push src_r64; registers r8-r15 need a REX.B prefix.
+void asm_x64_push_r64(asm_x64_t *as, int src_r64) {
+    if (src_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_PUSH_R64 | src_r64);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_B, OPCODE_PUSH_R64 | (src_r64 & 7));
+    }
+}
+
+/*
+void asm_x64_push_i32(asm_x64_t *as, int src_i32) {
+ asm_x64_write_byte_1(as, OPCODE_PUSH_I64);
+ asm_x64_write_word32(as, src_i32); // will be sign extended to 64 bits
+}
+*/
+
+/*
+void asm_x64_push_disp(asm_x64_t *as, int src_r64, int src_offset) {
+ assert(src_r64 < 8);
+ asm_x64_write_byte_1(as, OPCODE_PUSH_M64);
+ asm_x64_write_r64_disp(as, 6, src_r64, src_offset);
+}
+*/
+
+// pop dest_r64; registers r8-r15 need a REX.B prefix.
+void asm_x64_pop_r64(asm_x64_t *as, int dest_r64) {
+    if (dest_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_POP_R64 | dest_r64);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_B, OPCODE_POP_R64 | (dest_r64 & 7));
+    }
+}
+
+// ret
+STATIC void asm_x64_ret(asm_x64_t *as) {
+    asm_x64_write_byte_1(as, OPCODE_RET);
+}
+
+// mov dest_r64, src_r64
+void asm_x64_mov_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_MOV_R64_TO_RM64);
+}
+
+// mov byte [dest_r64 + dest_disp], src_r64 (8-bit store); a REX prefix is
+// emitted only when an extended register is involved.
+void asm_x64_mov_r8_to_mem8(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+    if (src_r64 < 8 && dest_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_MOV_R8_TO_RM8);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), OPCODE_MOV_R8_TO_RM8);
+    }
+    asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
+}
+
+// mov word [dest_r64 + dest_disp], src_r64 (16-bit store); the 0x66
+// operand-size prefix turns the 32-bit mov into a 16-bit one.
+void asm_x64_mov_r16_to_mem16(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+    if (src_r64 < 8 && dest_r64 < 8) {
+        asm_x64_write_byte_2(as, OP_SIZE_PREFIX, OPCODE_MOV_R64_TO_RM64);
+    } else {
+        asm_x64_write_byte_3(as, OP_SIZE_PREFIX, REX_PREFIX | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), OPCODE_MOV_R64_TO_RM64);
+    }
+    asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
+}
+
+// mov dword [dest_r64 + dest_disp], src_r64 (32-bit store; no REX.W, so the
+// cpu's default 32-bit operand size applies).
+void asm_x64_mov_r32_to_mem32(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+    if (src_r64 < 8 && dest_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_MOV_R64_TO_RM64);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), OPCODE_MOV_R64_TO_RM64);
+    }
+    asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
+}
+
+// mov qword [dest_r64 + dest_disp], src_r64 (64-bit store).
+void asm_x64_mov_r64_to_mem64(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+    // use REX prefix for 64 bit operation
+    asm_x64_write_byte_2(as, REX_PREFIX | REX_W | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), OPCODE_MOV_R64_TO_RM64);
+    asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
+}
+
+// movzx dest_r64, byte [src_r64 + src_disp] -- zero-extending 8-bit load.
+// Base register must be rax..rdi (src_r64 < 8); dest may be extended.
+void asm_x64_mov_mem8_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+    assert(src_r64 < 8);
+    if (dest_r64 < 8) {
+        asm_x64_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM8_TO_R64);
+    } else {
+        asm_x64_write_byte_3(as, REX_PREFIX | REX_R, 0x0f, OPCODE_MOVZX_RM8_TO_R64);
+    }
+    asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+// movzx dest_r64, word [src_r64 + src_disp] -- zero-extending 16-bit load.
+// Base register must be rax..rdi (src_r64 < 8); dest may be extended.
+void asm_x64_mov_mem16_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+    assert(src_r64 < 8);
+    if (dest_r64 < 8) {
+        asm_x64_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM16_TO_R64);
+    } else {
+        asm_x64_write_byte_3(as, REX_PREFIX | REX_R, 0x0f, OPCODE_MOVZX_RM16_TO_R64);
+    }
+    asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+// mov dest_r64(32-bit), dword [src_r64 + src_disp]; a 32-bit mov implicitly
+// zero-extends into the full 64-bit register.
+void asm_x64_mov_mem32_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+    assert(src_r64 < 8);
+    if (dest_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_MOV_RM64_TO_R64);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_R, OPCODE_MOV_RM64_TO_R64);
+    }
+    asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+// mov dest_r64, qword [src_r64 + src_disp] (64-bit load).
+void asm_x64_mov_mem64_to_r64(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+    // use REX prefix for 64 bit operation
+    asm_x64_write_byte_2(as, REX_PREFIX | REX_W | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64), OPCODE_MOV_RM64_TO_R64);
+    asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+// lea dest_r64, [src_r64 + src_disp]; only low registers supported here.
+STATIC void asm_x64_lea_disp_to_r64(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+    // use REX prefix for 64 bit operation
+    assert(src_r64 < 8);
+    assert(dest_r64 < 8);
+    asm_x64_write_byte_2(as, REX_PREFIX | REX_W, OPCODE_LEA_MEM_TO_R64);
+    asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+/*
+void asm_x64_mov_i8_to_r8(asm_x64_t *as, int src_i8, int dest_r64) {
+ assert(dest_r64 < 8);
+ asm_x64_write_byte_2(as, OPCODE_MOV_I8_TO_R8 | dest_r64, src_i8);
+}
+*/
+
+// mov dest_r64, imm32 -- 5-byte encoding; the cpu zero-extends the 32-bit
+// immediate into the full 64-bit register.
+STATIC void asm_x64_mov_i32_to_r64(asm_x64_t *as, int src_i32, int dest_r64) {
+    // cpu defaults to i32 to r64, with zero extension
+    if (dest_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_MOV_I64_TO_R64 | dest_r64);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_B, OPCODE_MOV_I64_TO_R64 | (dest_r64 & 7));
+    }
+    asm_x64_write_word32(as, src_i32);
+}
+
+// mov dest_r64, imm64 -- full 10-byte encoding (REX.W + opcode + 8 bytes).
+void asm_x64_mov_i64_to_r64(asm_x64_t *as, int64_t src_i64, int dest_r64) {
+    // cpu defaults to i32 to r64
+    // to mov i64 to r64 need to use REX prefix
+    assert(dest_r64 < 8);
+    asm_x64_write_byte_2(as, REX_PREFIX | REX_W, OPCODE_MOV_I64_TO_R64 | dest_r64);
+    asm_x64_write_word64(as, src_i64);
+}
+
+// Load a 64-bit immediate, picking the short 5-byte mov-imm32 encoding
+// (cpu zero-extends) when the value fits in 32 unsigned bits.
+void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64) {
+    // TODO use movzx, movsx if possible
+    if (!UNSIGNED_FIT32(src_i64)) {
+        // needs the full 10-byte encoding
+        asm_x64_mov_i64_to_r64(as, src_i64, dest_r64);
+        return;
+    }
+    // 5-byte encoding
+    asm_x64_mov_i32_to_r64(as, src_i64 & 0xffffffff, dest_r64);
+}
+
+// src_i64 is stored as a full word in the code, and aligned to machine-word boundary
+// (pads with nops so a later patcher can rewrite the immediate atomically)
+void asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64) {
+    // mov instruction uses 2 bytes for the instruction, before the i64
+    while (((as->code_offset + 2) & (WORD_SIZE - 1)) != 0) {
+        asm_x64_nop(as);
+    }
+    asm_x64_mov_i64_to_r64(as, src_i64, dest_r64);
+}
+
+// and dest_r64, src_r64
+void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_AND_R64_TO_RM64);
+}
+
+// or dest_r64, src_r64
+void asm_x64_or_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_OR_R64_TO_RM64);
+}
+
+// xor dest_r64, src_r64
+void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_XOR_R64_TO_RM64);
+}
+
+// shl dest_r64, cl; the 4 is the /4 opcode extension placed in the
+// ModRM reg field, not a register number.
+void asm_x64_shl_r64_cl(asm_x64_t* as, int dest_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, 4, OPCODE_SHL_RM64_CL);
+}
+
+// sar dest_r64, cl; the 7 is the /7 opcode extension in the ModRM reg field.
+void asm_x64_sar_r64_cl(asm_x64_t* as, int dest_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, 7, OPCODE_SAR_RM64_CL);
+}
+
+// add dest_r64, src_r64
+void asm_x64_add_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_ADD_R64_TO_RM64);
+}
+
+// sub dest_r64, src_r64
+void asm_x64_sub_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_SUB_R64_FROM_RM64);
+}
+
+// imul dest_r64, src_r64 (signed multiply; dest is in the ModRM reg field
+// for this two-operand form, unlike the generic helper, so encode inline).
+void asm_x64_mul_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    // imul reg64, reg/mem64 -- 0x0f 0xaf /r
+    asm_x64_write_byte_1(as, REX_PREFIX | REX_W | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64));
+    asm_x64_write_byte_3(as, 0x0f, 0xaf, MODRM_R64(dest_r64) | MODRM_RM_REG | MODRM_RM_R64(src_r64));
+}
+
+/*
+void asm_x64_sub_i32_from_r32(asm_x64_t *as, int src_i32, int dest_r32) {
+ if (SIGNED_FIT8(src_i32)) {
+ // defaults to 32 bit operation
+ asm_x64_write_byte_2(as, OPCODE_SUB_I8_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
+ asm_x64_write_byte_1(as, src_i32 & 0xff);
+ } else {
+ // defaults to 32 bit operation
+ asm_x64_write_byte_2(as, OPCODE_SUB_I32_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
+ asm_x64_write_word32(as, src_i32);
+ }
+}
+*/
+
+// sub dest_r64, imm -- uses the short imm8 form when the value fits in a
+// signed byte, otherwise the imm32 form; /5 is the sub opcode extension.
+STATIC void asm_x64_sub_r64_i32(asm_x64_t *as, int dest_r64, int src_i32) {
+    assert(dest_r64 < 8);
+    if (SIGNED_FIT8(src_i32)) {
+        // use REX prefix for 64 bit operation
+        asm_x64_write_byte_3(as, REX_PREFIX | REX_W, OPCODE_SUB_I8_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
+        asm_x64_write_byte_1(as, src_i32 & 0xff);
+    } else {
+        // use REX prefix for 64 bit operation
+        asm_x64_write_byte_3(as, REX_PREFIX | REX_W, OPCODE_SUB_I32_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
+        asm_x64_write_word32(as, src_i32);
+    }
+}
+
+/*
+void asm_x64_shl_r32_by_imm(asm_x64_t *as, int r32, int imm) {
+ asm_x64_write_byte_2(as, OPCODE_SHL_RM32_BY_I8, MODRM_R64(4) | MODRM_RM_REG | MODRM_RM_R64(r32));
+ asm_x64_write_byte_1(as, imm);
+}
+
+void asm_x64_shr_r32_by_imm(asm_x64_t *as, int r32, int imm) {
+ asm_x64_write_byte_2(as, OPCODE_SHR_RM32_BY_I8, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(r32));
+ asm_x64_write_byte_1(as, imm);
+}
+
+void asm_x64_sar_r32_by_imm(asm_x64_t *as, int r32, int imm) {
+ asm_x64_write_byte_2(as, OPCODE_SAR_RM32_BY_I8, MODRM_R64(7) | MODRM_RM_REG | MODRM_RM_R64(r32));
+ asm_x64_write_byte_1(as, imm);
+}
+*/
+
+// cmp src_r64_b, src_r64_a -- sets flags for a subsequent jcc/setcc.
+void asm_x64_cmp_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b) {
+    asm_x64_generic_r64_r64(as, src_r64_b, src_r64_a, OPCODE_CMP_R64_WITH_RM64);
+}
+
+/*
+void asm_x64_cmp_i32_with_r32(asm_x64_t *as, int src_i32, int src_r32) {
+ if (SIGNED_FIT8(src_i32)) {
+ asm_x64_write_byte_2(as, OPCODE_CMP_I8_WITH_RM32, MODRM_R64(7) | MODRM_RM_REG | MODRM_RM_R64(src_r32));
+ asm_x64_write_byte_1(as, src_i32 & 0xff);
+ } else {
+ asm_x64_write_byte_2(as, OPCODE_CMP_I32_WITH_RM32, MODRM_R64(7) | MODRM_RM_REG | MODRM_RM_R64(src_r32));
+ asm_x64_write_word32(as, src_i32);
+ }
+}
+*/
+
+// test al, al -- currently restricted to rax/rax (no REX handling here).
+void asm_x64_test_r8_with_r8(asm_x64_t *as, int src_r64_a, int src_r64_b) {
+    // TODO implement for other registers
+    assert(src_r64_a == ASM_X64_REG_RAX);
+    assert(src_r64_b == ASM_X64_REG_RAX);
+    asm_x64_write_byte_2(as, OPCODE_TEST_R8_WITH_RM8, MODRM_R64(src_r64_a) | MODRM_RM_REG | MODRM_RM_R64(src_r64_b));
+}
+
+// setcc dest_r8 -- set the low byte of dest_r8 to 0/1 from the condition.
+void asm_x64_setcc_r8(asm_x64_t *as, int jcc_type, int dest_r8) {
+    assert(dest_r8 < 8);
+    asm_x64_write_byte_3(as, OPCODE_SETCC_RM8_A, OPCODE_SETCC_RM8_B | jcc_type, MODRM_R64(0) | MODRM_RM_REG | MODRM_RM_R64(dest_r8));
+}
+
+// Bind `label` to the current code offset.  On the compute pass the offset
+// is recorded; on the emit pass it is asserted unchanged, since a drift
+// between passes would invalidate all branch targets.
+void asm_x64_label_assign(asm_x64_t *as, mp_uint_t label) {
+    assert(label < as->max_num_labels);
+    if (as->pass < ASM_X64_PASS_EMIT) {
+        // assign label offset
+        assert(as->label_offsets[label] == (mp_uint_t)-1);
+        as->label_offsets[label] = as->code_offset;
+    } else {
+        // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT
+        //printf("l%d: (at %ld=%ld)\n", label, as->label_offsets[label], as->code_offset);
+        assert(as->label_offsets[label] == as->code_offset);
+    }
+}
+
+// Offset bound to `label`, or (mp_uint_t)-1 if not yet assigned this pass.
+STATIC mp_uint_t get_label_dest(asm_x64_t *as, mp_uint_t label) {
+    assert(label < as->max_num_labels);
+    return as->label_offsets[label];
+}
+
+// jmp label -- uses the 2-byte rel8 form for short backward jumps, else the
+// 5-byte rel32 form.  Forward jumps (dest unknown on the compute pass) are
+// always emitted as rel32 so both passes produce the same code size.
+void asm_x64_jmp_label(asm_x64_t *as, mp_uint_t label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->code_offset;
+    if (dest != (mp_uint_t)-1 && rel < 0) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 8 bit relative jump
+        rel -= 2;
+        if (SIGNED_FIT8(rel)) {
+            asm_x64_write_byte_2(as, OPCODE_JMP_REL8, rel & 0xff);
+        } else {
+            rel += 2;
+            goto large_jump;
+        }
+    } else {
+        // is a forwards jump, so need to assume it's large
+        large_jump:
+        rel -= 5;
+        asm_x64_write_byte_1(as, OPCODE_JMP_REL32);
+        asm_x64_write_word32(as, rel);
+    }
+}
+
+// jcc label -- conditional jump; same short/long strategy as
+// asm_x64_jmp_label, with the 6-byte 0x0f 0x8x rel32 form for long jumps.
+void asm_x64_jcc_label(asm_x64_t *as, int jcc_type, mp_uint_t label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->code_offset;
+    if (dest != (mp_uint_t)-1 && rel < 0) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 8 bit relative jump
+        rel -= 2;
+        if (SIGNED_FIT8(rel)) {
+            asm_x64_write_byte_2(as, OPCODE_JCC_REL8 | jcc_type, rel & 0xff);
+        } else {
+            rel += 2;
+            goto large_jump;
+        }
+    } else {
+        // is a forwards jump, so need to assume it's large
+        large_jump:
+        rel -= 6;
+        asm_x64_write_byte_2(as, OPCODE_JCC_REL32_A, OPCODE_JCC_REL32_B | jcc_type);
+        asm_x64_write_word32(as, rel);
+    }
+}
+
+// Emit the function prologue: set up the rbp frame, reserve num_locals
+// stack slots, and save the callee-save registers this emitter uses
+// (rbx, r12, r13 -- mirrored by asm_x64_exit in reverse order).
+void asm_x64_entry(asm_x64_t *as, int num_locals) {
+    asm_x64_push_r64(as, ASM_X64_REG_RBP);
+    asm_x64_mov_r64_r64(as, ASM_X64_REG_RBP, ASM_X64_REG_RSP);
+    if (num_locals < 0) {
+        num_locals = 0;
+    }
+    num_locals |= 1; // make it odd so stack is aligned on 16 byte boundary
+    asm_x64_sub_r64_i32(as, ASM_X64_REG_RSP, num_locals * WORD_SIZE);
+    asm_x64_push_r64(as, ASM_X64_REG_RBX);
+    asm_x64_push_r64(as, ASM_X64_REG_R12);
+    asm_x64_push_r64(as, ASM_X64_REG_R13);
+    as->num_locals = num_locals;
+}
+
+// Emit the function epilogue: restore callee-save registers (reverse of
+// asm_x64_entry), tear down the frame (leave) and return.
+void asm_x64_exit(asm_x64_t *as) {
+    asm_x64_pop_r64(as, ASM_X64_REG_R13);
+    asm_x64_pop_r64(as, ASM_X64_REG_R12);
+    asm_x64_pop_r64(as, ASM_X64_REG_RBX);
+    asm_x64_write_byte_1(as, OPCODE_LEAVE);
+    asm_x64_ret(as);
+}
+
+// locals:
+// - stored on the stack in ascending order
+// - numbered 0 through as->num_locals-1
+// - RBP points above the last local
+//
+// | RBP
+// v
+// l0 l1 l2 ... l(n-1)
+// ^ ^
+// | low address | high address in RAM
+//
+// Byte offset of stack local `local_num` relative to rbp (always negative
+// or zero; see the layout diagram above).
+STATIC int asm_x64_local_offset_from_ebp(asm_x64_t *as, int local_num) {
+    return (-as->num_locals + local_num) * WORD_SIZE;
+}
+
+// Load stack local src_local_num into dest_r64.
+void asm_x64_mov_local_to_r64(asm_x64_t *as, int src_local_num, int dest_r64) {
+    asm_x64_mov_mem64_to_r64(as, ASM_X64_REG_RBP, asm_x64_local_offset_from_ebp(as, src_local_num), dest_r64);
+}
+
+// Store src_r64 into stack local dest_local_num.
+void asm_x64_mov_r64_to_local(asm_x64_t *as, int src_r64, int dest_local_num) {
+    asm_x64_mov_r64_to_mem64(as, src_r64, ASM_X64_REG_RBP, asm_x64_local_offset_from_ebp(as, dest_local_num));
+}
+
+// Compute the address of stack local `local_num` into dest_r64.
+void asm_x64_mov_local_addr_to_r64(asm_x64_t *as, int local_num, int dest_r64) {
+    int disp = asm_x64_local_offset_from_ebp(as, local_num);
+    if (disp != 0) {
+        asm_x64_lea_disp_to_r64(as, ASM_X64_REG_RBP, disp, dest_r64);
+    } else {
+        // address is exactly rbp, so a plain register move suffices
+        asm_x64_mov_r64_r64(as, dest_r64, ASM_X64_REG_RBP);
+    }
+}
+
+/*
+void asm_x64_push_local(asm_x64_t *as, int local_num) {
+ asm_x64_push_disp(as, ASM_X64_REG_RBP, asm_x64_local_offset_from_ebp(as, local_num));
+}
+
+void asm_x64_push_local_addr(asm_x64_t *as, int local_num, int temp_r64) {
+ asm_x64_mov_r64_r64(as, temp_r64, ASM_X64_REG_RBP);
+ asm_x64_add_i32_to_r32(as, asm_x64_local_offset_from_ebp(as, local_num), temp_r64);
+ asm_x64_push_r64(as, temp_r64);
+}
+*/
+
+/*
+ can't use these because code might be relocated when resized
+
+void asm_x64_call(asm_x64_t *as, void* func) {
+ asm_x64_sub_i32_from_r32(as, 8, ASM_X64_REG_RSP);
+ asm_x64_write_byte_1(as, OPCODE_CALL_REL32);
+ asm_x64_write_word32(as, func - (void*)(as->code_cur + 4));
+ asm_x64_mov_r64_r64(as, ASM_X64_REG_RSP, ASM_X64_REG_RBP);
+}
+
+void asm_x64_call_i1(asm_x64_t *as, void* func, int i1) {
+ asm_x64_sub_i32_from_r32(as, 8, ASM_X64_REG_RSP);
+ asm_x64_sub_i32_from_r32(as, 12, ASM_X64_REG_RSP);
+ asm_x64_push_i32(as, i1);
+ asm_x64_write_byte_1(as, OPCODE_CALL_REL32);
+ asm_x64_write_word32(as, func - (void*)(as->code_cur + 4));
+ asm_x64_add_i32_to_r32(as, 16, ASM_X64_REG_RSP);
+ asm_x64_mov_r64_r64(as, ASM_X64_REG_RSP, ASM_X64_REG_RBP);
+}
+*/
+
+// call ptr, indirectly: load the absolute address into temp_r64 and emit
+// `call temp_r64` (/2 opcode extension).  Indirect because the target may be
+// further than 32 bits from the generated code.
+void asm_x64_call_ind(asm_x64_t *as, void *ptr, int temp_r64) {
+    assert(temp_r64 < 8);
+#ifdef __LP64__
+    asm_x64_mov_i64_to_r64_optimised(as, (int64_t)ptr, temp_r64);
+#else
+    // If we get here, sizeof(int) == sizeof(void*).
+    asm_x64_mov_i64_to_r64_optimised(as, (int64_t)(unsigned int)ptr, temp_r64);
+#endif
+    asm_x64_write_byte_2(as, OPCODE_CALL_RM32, MODRM_R64(2) | MODRM_RM_REG | MODRM_RM_R64(temp_r64));
+    // this reduces code size by 2 bytes per call, but doesn't seem to speed it up at all
+    // doesn't work anymore because calls are 64 bits away
+    /*
+    asm_x64_write_byte_1(as, OPCODE_CALL_REL32);
+    asm_x64_write_word32(as, ptr - (void*)(as->code_base + as->code_offset + 4));
+    */
+}
+
+#endif // MICROPY_EMIT_X64
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/asmx64.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,124 @@ +/* + * This file is part of the Micro Python project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013, 2014 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef __MICROPY_INCLUDED_PY_ASMX64_H__ +#define __MICROPY_INCLUDED_PY_ASMX64_H__ + +#include "py/mpconfig.h" +#include "py/misc.h" + +// AMD64 calling convention is: +// - args pass in: RDI, RSI, RDX, RCX, R08, R09 +// - return value in RAX +// - stack must be aligned on a 16-byte boundary before all calls +// - RAX, RCX, RDX, RSI, RDI, R08, R09, R10, R11 are caller-save +// - RBX, RBP, R12, R13, R14, R15 are callee-save + +// In the functions below, argument order follows x86 docs and generally +// the destination is the first argument. 
+// NOTE: this is a change from the old convention used in this file and +// some functions still use the old (reverse) convention. + +#define ASM_X64_PASS_COMPUTE (1) +#define ASM_X64_PASS_EMIT (2) + +#define ASM_X64_REG_RAX (0) +#define ASM_X64_REG_RCX (1) +#define ASM_X64_REG_RDX (2) +#define ASM_X64_REG_RBX (3) +#define ASM_X64_REG_RSP (4) +#define ASM_X64_REG_RBP (5) +#define ASM_X64_REG_RSI (6) +#define ASM_X64_REG_RDI (7) +#define ASM_X64_REG_R08 (8) +#define ASM_X64_REG_R09 (9) +#define ASM_X64_REG_R10 (10) +#define ASM_X64_REG_R11 (11) +#define ASM_X64_REG_R12 (12) +#define ASM_X64_REG_R13 (13) +#define ASM_X64_REG_R14 (14) +#define ASM_X64_REG_R15 (15) + +// condition codes, used for jcc and setcc (despite their j-name!) +#define ASM_X64_CC_JB (0x2) // below, unsigned +#define ASM_X64_CC_JZ (0x4) +#define ASM_X64_CC_JE (0x4) +#define ASM_X64_CC_JNZ (0x5) +#define ASM_X64_CC_JNE (0x5) +#define ASM_X64_CC_JL (0xc) // less, signed +#define ASM_X64_CC_JGE (0xd) // greater or equal, signed +#define ASM_X64_CC_JLE (0xe) // less or equal, signed +#define ASM_X64_CC_JG (0xf) // greater, signed + +typedef struct _asm_x64_t asm_x64_t; + +asm_x64_t* asm_x64_new(mp_uint_t max_num_labels); +void asm_x64_free(asm_x64_t* as, bool free_code); +void asm_x64_start_pass(asm_x64_t *as, uint pass); +void asm_x64_end_pass(asm_x64_t *as); +mp_uint_t asm_x64_get_code_pos(asm_x64_t *as); +mp_uint_t asm_x64_get_code_size(asm_x64_t* as); +void* asm_x64_get_code(asm_x64_t* as); + +void asm_x64_align(asm_x64_t *as, mp_uint_t align); +void asm_x64_data(asm_x64_t *as, mp_uint_t bytesize, mp_uint_t val); + +void asm_x64_nop(asm_x64_t* as); +void asm_x64_push_r64(asm_x64_t* as, int src_r64); +void asm_x64_pop_r64(asm_x64_t* as, int dest_r64); +void asm_x64_mov_r64_r64(asm_x64_t* as, int dest_r64, int src_r64); +void asm_x64_mov_i64_to_r64(asm_x64_t* as, int64_t src_i64, int dest_r64); +void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64); +void 
asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64); +void asm_x64_mov_r8_to_mem8(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp); +void asm_x64_mov_r16_to_mem16(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp); +void asm_x64_mov_r32_to_mem32(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp); +void asm_x64_mov_r64_to_mem64(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp); +void asm_x64_mov_mem8_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64); +void asm_x64_mov_mem16_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64); +void asm_x64_mov_mem32_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64); +void asm_x64_mov_mem64_to_r64(asm_x64_t *as, int src_r64, int src_disp, int dest_r64); +void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64); +void asm_x64_or_r64_r64(asm_x64_t *as, int dest_r64, int src_r64); +void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64); +void asm_x64_shl_r64_cl(asm_x64_t* as, int dest_r64); +void asm_x64_sar_r64_cl(asm_x64_t* as, int dest_r64); +void asm_x64_add_r64_r64(asm_x64_t* as, int dest_r64, int src_r64); +void asm_x64_sub_r64_r64(asm_x64_t* as, int dest_r64, int src_r64); +void asm_x64_mul_r64_r64(asm_x64_t* as, int dest_r64, int src_r64); +void asm_x64_cmp_r64_with_r64(asm_x64_t* as, int src_r64_a, int src_r64_b); +void asm_x64_test_r8_with_r8(asm_x64_t* as, int src_r64_a, int src_r64_b); +void asm_x64_setcc_r8(asm_x64_t* as, int jcc_type, int dest_r8); +void asm_x64_label_assign(asm_x64_t* as, mp_uint_t label); +void asm_x64_jmp_label(asm_x64_t* as, mp_uint_t label); +void asm_x64_jcc_label(asm_x64_t* as, int jcc_type, mp_uint_t label); +void asm_x64_entry(asm_x64_t* as, int num_locals); +void asm_x64_exit(asm_x64_t* as); +void asm_x64_mov_local_to_r64(asm_x64_t* as, int src_local_num, int dest_r64); +void asm_x64_mov_r64_to_local(asm_x64_t* as, int src_r64, int dest_local_num); +void 
asm_x64_mov_local_addr_to_r64(asm_x64_t* as, int local_num, int dest_r64); +void asm_x64_call_ind(asm_x64_t* as, void* ptr, int temp_r32); + +#endif // __MICROPY_INCLUDED_PY_ASMX64_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/asmx86.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,605 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_X86
+
+#include "py/asmx86.h"
+
+/* all offsets are measured in multiples of 4 bytes */
+#define WORD_SIZE (4)
+
+#define OPCODE_NOP (0x90)
+#define OPCODE_PUSH_R32 (0x50)
+//#define OPCODE_PUSH_I32 (0x68)
+//#define OPCODE_PUSH_M32 (0xff) /* /6 */
+#define OPCODE_POP_R32 (0x58)
+#define OPCODE_RET (0xc3)
+//#define OPCODE_MOV_I8_TO_R8 (0xb0) /* +rb */
+#define OPCODE_MOV_I32_TO_R32 (0xb8)
+//#define OPCODE_MOV_I32_TO_RM32 (0xc7)
+#define OPCODE_MOV_R8_TO_RM8 (0x88) /* /r */
+#define OPCODE_MOV_R32_TO_RM32 (0x89) /* /r */
+#define OPCODE_MOV_RM32_TO_R32 (0x8b) /* /r */
+#define OPCODE_MOVZX_RM8_TO_R32 (0xb6) /* 0x0f 0xb6/r */
+#define OPCODE_MOVZX_RM16_TO_R32 (0xb7) /* 0x0f 0xb7/r */
+#define OPCODE_LEA_MEM_TO_R32 (0x8d) /* /r */
+#define OPCODE_AND_R32_TO_RM32 (0x21) /* /r */
+#define OPCODE_OR_R32_TO_RM32 (0x09) /* /r */
+#define OPCODE_XOR_R32_TO_RM32 (0x31) /* /r */
+#define OPCODE_ADD_R32_TO_RM32 (0x01)
+#define OPCODE_ADD_I32_TO_RM32 (0x81) /* /0 */
+#define OPCODE_ADD_I8_TO_RM32 (0x83) /* /0 */
+#define OPCODE_SUB_R32_FROM_RM32 (0x29)
+#define OPCODE_SUB_I32_FROM_RM32 (0x81) /* /5 */
+#define OPCODE_SUB_I8_FROM_RM32 (0x83) /* /5 */
+//#define OPCODE_SHL_RM32_BY_I8 (0xc1) /* /4 */
+//#define OPCODE_SHR_RM32_BY_I8 (0xc1) /* /5 */
+//#define OPCODE_SAR_RM32_BY_I8 (0xc1) /* /7 */
+#define OPCODE_SHL_RM32_CL (0xd3) /* /4 */
+#define OPCODE_SAR_RM32_CL (0xd3) /* /7 */
+//#define OPCODE_CMP_I32_WITH_RM32 (0x81) /* /7 */
+//#define OPCODE_CMP_I8_WITH_RM32 (0x83) /* /7 */
+#define OPCODE_CMP_R32_WITH_RM32 (0x39)
+//#define OPCODE_CMP_RM32_WITH_R32 (0x3b)
+#define OPCODE_TEST_R8_WITH_RM8 (0x84) /* /r */
+#define OPCODE_JMP_REL8 (0xeb)
+#define OPCODE_JMP_REL32 (0xe9)
+#define OPCODE_JCC_REL8 (0x70) /* | jcc type */
+#define OPCODE_JCC_REL32_A (0x0f)
+#define OPCODE_JCC_REL32_B (0x80) /* | jcc type */
+#define OPCODE_SETCC_RM8_A (0x0f)
+#define OPCODE_SETCC_RM8_B (0x90) /* | jcc type, /0 */
+#define OPCODE_CALL_REL32 (0xe8)
+#define OPCODE_CALL_RM32 (0xff) /* /2 */
+#define OPCODE_LEAVE (0xc9)
+
+#define MODRM_R32(x) ((x) << 3)
+#define MODRM_RM_DISP0 (0x00)
+#define MODRM_RM_DISP8 (0x40)
+#define MODRM_RM_DISP32 (0x80)
+#define MODRM_RM_REG (0xc0)
+#define MODRM_RM_R32(x) (x)
+
+#define OP_SIZE_PREFIX (0x66)
+
+#define IMM32_L0(x) ((x) & 0xff)
+#define IMM32_L1(x) (((x) >> 8) & 0xff)
+#define IMM32_L2(x) (((x) >> 16) & 0xff)
+#define IMM32_L3(x) (((x) >> 24) & 0xff)
+
+// true iff x fits in a signed 8-bit value; fully parenthesized so the
+// macro expands safely inside larger expressions (e.g. !SIGNED_FIT8(x))
+#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80))
+
+// Assembler state, shared by the size-computing pass and the emit pass.
+struct _asm_x86_t {
+    uint pass;                // current pass: ASM_X86_PASS_COMPUTE or ASM_X86_PASS_EMIT
+    mp_uint_t code_offset;    // current write position in the code stream
+    mp_uint_t code_size;      // total size, as measured by the compute pass
+    byte *code_base;          // executable buffer; allocated at start of emit pass
+    byte dummy_data[8];       // scratch sink for writes during the compute pass
+
+    mp_uint_t max_num_labels; // capacity of label_offsets
+    mp_uint_t *label_offsets; // label -> code offset; (mp_uint_t)-1 if unassigned
+    int num_locals;           // stack locals reserved by asm_x86_entry
+};
+
+// Allocate a new assembler object able to track up to max_num_labels labels.
+asm_x86_t *asm_x86_new(mp_uint_t max_num_labels) {
+    asm_x86_t *as = m_new0(asm_x86_t, 1);
+    as->max_num_labels = max_num_labels;
+    as->label_offsets = m_new(mp_uint_t, max_num_labels);
+    return as;
+}
+
+// Free the assembler object; if free_code is true, also release the
+// executable code buffer allocated during the emit pass.
+void asm_x86_free(asm_x86_t *as, bool free_code) {
+    if (free_code) {
+        MP_PLAT_FREE_EXEC(as->code_base, as->code_size);
+    }
+    m_del(mp_uint_t, as->label_offsets, as->max_num_labels);
+    m_del_obj(asm_x86_t, as);
+}
+
+// Begin an assembler pass.  The compute pass measures code size and assigns
+// label offsets; the emit pass allocates the executable buffer (sized by the
+// preceding compute pass) and writes the actual machine code.
+void asm_x86_start_pass(asm_x86_t *as, mp_uint_t pass) {
+    if (pass == ASM_X86_PASS_COMPUTE) {
+        // reset all labels
+        memset(as->label_offsets, -1, as->max_num_labels * sizeof(mp_uint_t));
+    } else if (pass == ASM_X86_PASS_EMIT) {
+        // code_offset currently holds the size measured by the compute pass
+        MP_PLAT_ALLOC_EXEC(as->code_offset, (void**)&as->code_base, &as->code_size);
+        if (as->code_base == NULL) {
+            assert(0);
+        }
+    }
+    as->pass = pass;
+    as->code_offset = 0;
+}
+
+// Finish the current pass; nothing to do for x86.
+void asm_x86_end_pass(asm_x86_t *as) {
+    (void)as;
+}
+
+// all functions must go through this one to emit bytes
+// Return a pointer at which to write num_bytes_to_write bytes, advancing the
+// code offset.  During the compute pass a scratch buffer is returned so
+// callers can write unconditionally while only the size is tracked.
+// NOTE(review): the scratch buffer is 8 bytes; assumes no single write
+// requests more than that — TODO confirm for asm_x86_data callers.
+STATIC byte *asm_x86_get_cur_to_write_bytes(asm_x86_t *as, int num_bytes_to_write) {
+    //printf("emit %d\n", num_bytes_to_write);
+    if (as->pass < ASM_X86_PASS_EMIT) {
+        as->code_offset += num_bytes_to_write;
+        return as->dummy_data;
+    } else {
+        assert(as->code_offset + num_bytes_to_write <= as->code_size);
+        byte *c = as->code_base + as->code_offset;
+        as->code_offset += num_bytes_to_write;
+        return c;
+    }
+}
+
+// Current write offset within the code (equals total size after a pass).
+mp_uint_t asm_x86_get_code_pos(asm_x86_t *as) {
+    return as->code_offset;
+}
+
+// Total code size, as measured by the compute pass.
+mp_uint_t asm_x86_get_code_size(asm_x86_t *as) {
+    return as->code_size;
+}
+
+// Base address of the executable code buffer (valid during/after emit pass).
+void *asm_x86_get_code(asm_x86_t *as) {
+    return as->code_base;
+}
+
+// Emit one raw byte.
+STATIC void asm_x86_write_byte_1(asm_x86_t *as, byte b1) {
+    byte* c = asm_x86_get_cur_to_write_bytes(as, 1);
+    c[0] = b1;
+}
+
+// Emit two raw bytes.
+STATIC void asm_x86_write_byte_2(asm_x86_t *as, byte b1, byte b2) {
+    byte* c = asm_x86_get_cur_to_write_bytes(as, 2);
+    c[0] = b1;
+    c[1] = b2;
+}
+
+// Emit three raw bytes.
+STATIC void asm_x86_write_byte_3(asm_x86_t *as, byte b1, byte b2, byte b3) {
+    byte* c = asm_x86_get_cur_to_write_bytes(as, 3);
+    c[0] = b1;
+    c[1] = b2;
+    c[2] = b3;
+}
+
+// Emit a 32-bit word in little-endian order (x86 byte order).
+STATIC void asm_x86_write_word32(asm_x86_t *as, int w32) {
+    byte* c = asm_x86_get_cur_to_write_bytes(as, 4);
+    c[0] = IMM32_L0(w32);
+    c[1] = IMM32_L1(w32);
+    c[2] = IMM32_L2(w32);
+    c[3] = IMM32_L3(w32);
+}
+
+// align must be a multiple of 2 (a power of 2 in practice)
+// Advance the code position to the next 'align' boundary.  The skipped
+// bytes are filled with NOPs so the gap contains well-defined instructions
+// instead of whatever the allocator left there; the compute-pass offsets
+// are unchanged because each NOP advances the offset by exactly one byte.
+void asm_x86_align(asm_x86_t* as, mp_uint_t align) {
+    mp_uint_t aligned_offset = (as->code_offset + align - 1) & (~(align - 1));
+    while (as->code_offset < aligned_offset) {
+        asm_x86_write_byte_1(as, OPCODE_NOP);
+    }
+}
+
+// Emit 'val' into the code stream as 'bytesize' raw bytes, little endian.
+void asm_x86_data(asm_x86_t* as, mp_uint_t bytesize, mp_uint_t val) {
+    byte *c = asm_x86_get_cur_to_write_bytes(as, bytesize);
+    // machine is little endian
+    for (mp_uint_t i = 0; i < bytesize; i++) {
+        c[i] = (val >> (8 * i)) & 0xff;
+    }
+}
+
+// Emit the ModR/M (and displacement) bytes for a register+displacement
+// memory operand, choosing the shortest encoding: no displacement, 8-bit,
+// or 32-bit.  r32 goes in the reg field, disp_r32 in the r/m field.
+STATIC void asm_x86_write_r32_disp(asm_x86_t *as, int r32, int disp_r32, int disp_offset) {
+    // ESP in the r/m field would require a SIB byte, which is not emitted here
+    assert(disp_r32 != ASM_X86_REG_ESP);
+
+    // EBP with mod=00 means disp32-only addressing, so EBP always needs an
+    // explicit displacement even when the offset is zero
+    if (disp_offset == 0 && disp_r32 != ASM_X86_REG_EBP) {
+        asm_x86_write_byte_1(as, MODRM_R32(r32) | MODRM_RM_DISP0 | MODRM_RM_R32(disp_r32));
+    } else if (SIGNED_FIT8(disp_offset)) {
+        asm_x86_write_byte_2(as, MODRM_R32(r32) | MODRM_RM_DISP8 | MODRM_RM_R32(disp_r32), IMM32_L0(disp_offset));
+    } else {
+        asm_x86_write_byte_1(as, MODRM_R32(r32) | MODRM_RM_DISP32 | MODRM_RM_R32(disp_r32));
+        asm_x86_write_word32(as, disp_offset);
+    }
+}
+
+// Emit a two-byte "op r/m32, r32" style instruction with both operands in
+// registers (mod = 11).  Some callers pass an opcode extension (/digit)
+// instead of a real register in src_r32.
+STATIC void asm_x86_generic_r32_r32(asm_x86_t *as, int dest_r32, int src_r32, int op) {
+    asm_x86_write_byte_2(as, op, MODRM_R32(src_r32) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+}
+
+// nop
+STATIC void asm_x86_nop(asm_x86_t *as) {
+    asm_x86_write_byte_1(as, OPCODE_NOP);
+}
+
+// push src_r32 (register number encoded into the opcode byte)
+STATIC void asm_x86_push_r32(asm_x86_t *as, int src_r32) {
+    asm_x86_write_byte_1(as, OPCODE_PUSH_R32 | src_r32);
+}
+
+#if 0
+void asm_x86_push_i32(asm_x86_t *as, int src_i32) {
+ asm_x86_write_byte_1(as, OPCODE_PUSH_I32);
+ asm_x86_write_word32(as, src_i32);
+}
+
+void asm_x86_push_disp(asm_x86_t *as, int src_r32, int src_offset) {
+ asm_x86_write_byte_1(as, OPCODE_PUSH_M32);
+ asm_x86_write_r32_disp(as, 6, src_r32, src_offset);
+}
+#endif
+
+// pop dest_r32 (register number encoded into the opcode byte)
+STATIC void asm_x86_pop_r32(asm_x86_t *as, int dest_r32) {
+    asm_x86_write_byte_1(as, OPCODE_POP_R32 | dest_r32);
+}
+
+// ret
+STATIC void asm_x86_ret(asm_x86_t *as) {
+    asm_x86_write_byte_1(as, OPCODE_RET);
+}
+
+// mov dest_r32, src_r32
+void asm_x86_mov_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_MOV_R32_TO_RM32);
+}
+
+// mov byte [dest_r32 + dest_disp], src_r32 (8-bit store)
+void asm_x86_mov_r8_to_mem8(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+    asm_x86_write_byte_1(as, OPCODE_MOV_R8_TO_RM8);
+    asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
+}
+
+// mov word [dest_r32 + dest_disp], src_r32 (16-bit store via operand-size prefix)
+void asm_x86_mov_r16_to_mem16(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+    asm_x86_write_byte_2(as, OP_SIZE_PREFIX, OPCODE_MOV_R32_TO_RM32);
+    asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
+}
+
+// mov dword [dest_r32 + dest_disp], src_r32 (32-bit store)
+void asm_x86_mov_r32_to_mem32(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+    asm_x86_write_byte_1(as, OPCODE_MOV_R32_TO_RM32);
+    asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
+}
+
+// movzx dest_r32, byte [src_r32 + src_disp] (zero-extended 8-bit load)
+void asm_x86_mov_mem8_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+    asm_x86_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM8_TO_R32);
+    asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+// movzx dest_r32, word [src_r32 + src_disp] (zero-extended 16-bit load)
+void asm_x86_mov_mem16_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+    asm_x86_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM16_TO_R32);
+    asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+// mov dest_r32, dword [src_r32 + src_disp] (32-bit load)
+void asm_x86_mov_mem32_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+    asm_x86_write_byte_1(as, OPCODE_MOV_RM32_TO_R32);
+    asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+// lea dest_r32, [src_r32 + src_disp]
+STATIC void asm_x86_lea_disp_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+    asm_x86_write_byte_1(as, OPCODE_LEA_MEM_TO_R32);
+    asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+#if 0
+void asm_x86_mov_i8_to_r8(asm_x86_t *as, int src_i8, int dest_r32) {
+ asm_x86_write_byte_2(as, OPCODE_MOV_I8_TO_R8 | dest_r32, src_i8);
+}
+#endif
+
+// mov dest_r32, src_i32 (register number encoded into the opcode byte)
+void asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32) {
+    asm_x86_write_byte_1(as, OPCODE_MOV_I32_TO_R32 | dest_r32);
+    asm_x86_write_word32(as, src_i32);
+}
+
+// src_i32 is stored as a full word in the code, and aligned to machine-word boundary
+void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32) {
+    // mov instruction uses 1 byte for the instruction, before the i32
+    // pad with NOPs so that (offset + 1) lands on a word boundary
+    while (((as->code_offset + 1) & (WORD_SIZE - 1)) != 0) {
+        asm_x86_nop(as);
+    }
+    asm_x86_mov_i32_to_r32(as, src_i32, dest_r32);
+}
+
+// and dest_r32, src_r32
+void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_AND_R32_TO_RM32);
+}
+
+// or dest_r32, src_r32
+void asm_x86_or_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_OR_R32_TO_RM32);
+}
+
+// xor dest_r32, src_r32
+void asm_x86_xor_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_XOR_R32_TO_RM32);
+}
+
+// shl dest_r32, cl -- the 4 in the "src" slot is the /4 opcode extension
+void asm_x86_shl_r32_cl(asm_x86_t* as, int dest_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, 4, OPCODE_SHL_RM32_CL);
+}
+
+// sar dest_r32, cl -- the 7 in the "src" slot is the /7 opcode extension
+void asm_x86_sar_r32_cl(asm_x86_t* as, int dest_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, 7, OPCODE_SAR_RM32_CL);
+}
+
+// add dest_r32, src_r32
+void asm_x86_add_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_ADD_R32_TO_RM32);
+}
+
+// add dest_r32, src_i32 -- uses the short imm8 form when the value fits;
+// MODRM_R32(0) is the /0 opcode extension for ADD
+STATIC void asm_x86_add_i32_to_r32(asm_x86_t *as, int src_i32, int dest_r32) {
+    if (SIGNED_FIT8(src_i32)) {
+        asm_x86_write_byte_2(as, OPCODE_ADD_I8_TO_RM32, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_byte_1(as, src_i32 & 0xff);
+    } else {
+        asm_x86_write_byte_2(as, OPCODE_ADD_I32_TO_RM32, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_word32(as, src_i32);
+    }
+}
+
+// sub dest_r32, src_r32
+void asm_x86_sub_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_SUB_R32_FROM_RM32);
+}
+
+// sub dest_r32, src_i32 -- uses the short imm8 form when the value fits;
+// MODRM_R32(5) is the /5 opcode extension for SUB
+STATIC void asm_x86_sub_r32_i32(asm_x86_t *as, int dest_r32, int src_i32) {
+    if (SIGNED_FIT8(src_i32)) {
+        // defaults to 32 bit operation
+        asm_x86_write_byte_2(as, OPCODE_SUB_I8_FROM_RM32, MODRM_R32(5) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_byte_1(as, src_i32 & 0xff);
+    } else {
+        // defaults to 32 bit operation
+        asm_x86_write_byte_2(as, OPCODE_SUB_I32_FROM_RM32, MODRM_R32(5) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_word32(as, src_i32);
+    }
+}
+
+// imul dest_r32, src_r32 (signed multiply)
+void asm_x86_mul_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    // imul reg32, reg/mem32 -- 0x0f 0xaf /r
+    asm_x86_write_byte_3(as, 0x0f, 0xaf, MODRM_R32(dest_r32) | MODRM_RM_REG | MODRM_RM_R32(src_r32));
+}
+
+#if 0
+/* shifts not tested */
+void asm_x86_shl_r32_by_imm(asm_x86_t *as, int r32, int imm) {
+ asm_x86_write_byte_2(as, OPCODE_SHL_RM32_BY_I8, MODRM_R32(4) | MODRM_RM_REG | MODRM_RM_R32(r32));
+ asm_x86_write_byte_1(as, imm);
+}
+
+void asm_x86_shr_r32_by_imm(asm_x86_t *as, int r32, int imm) {
+ asm_x86_write_byte_2(as, OPCODE_SHR_RM32_BY_I8, MODRM_R32(5) | MODRM_RM_REG | MODRM_RM_R32(r32));
+ asm_x86_write_byte_1(as, imm);
+}
+
+void asm_x86_sar_r32_by_imm(asm_x86_t *as, int r32, int imm) {
+ asm_x86_write_byte_2(as, OPCODE_SAR_RM32_BY_I8, MODRM_R32(7) | MODRM_RM_REG | MODRM_RM_R32(r32));
+ asm_x86_write_byte_1(as, imm);
+}
+#endif
+
+// cmp src_r32_b, src_r32_a (sets flags for a subsequent jcc/setcc)
+void asm_x86_cmp_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b) {
+    asm_x86_write_byte_2(as, OPCODE_CMP_R32_WITH_RM32, MODRM_R32(src_r32_a) | MODRM_RM_REG | MODRM_RM_R32(src_r32_b));
+}
+
+#if 0
+void asm_x86_cmp_i32_with_r32(asm_x86_t *as, int src_i32, int src_r32) {
+ if (SIGNED_FIT8(src_i32)) {
+ asm_x86_write_byte_2(as, OPCODE_CMP_I8_WITH_RM32, MODRM_R32(7) | MODRM_RM_REG | MODRM_RM_R32(src_r32));
+ asm_x86_write_byte_1(as, src_i32 & 0xff);
+ } else {
+ asm_x86_write_byte_2(as, OPCODE_CMP_I32_WITH_RM32, MODRM_R32(7) | MODRM_RM_REG | MODRM_RM_R32(src_r32));
+ asm_x86_write_word32(as, src_i32);
+ }
+}
+#endif
+
+// test al, al -- byte-register AND that only sets flags; currently
+// restricted to EAX/AL because reg numbers 4-7 encode AH/CH/DH/BH, not
+// the low byte of ESP/EBP/ESI/EDI
+void asm_x86_test_r8_with_r8(asm_x86_t *as, int src_r32_a, int src_r32_b) {
+    // TODO implement for other registers
+    assert(src_r32_a == ASM_X86_REG_EAX);
+    assert(src_r32_b == ASM_X86_REG_EAX);
+    asm_x86_write_byte_2(as, OPCODE_TEST_R8_WITH_RM8, MODRM_R32(src_r32_a) | MODRM_RM_REG | MODRM_RM_R32(src_r32_b));
+}
+
+// setcc dest_r8 -- write 0/1 into a byte register based on a condition code
+void asm_x86_setcc_r8(asm_x86_t *as, mp_uint_t jcc_type, int dest_r8) {
+    asm_x86_write_byte_3(as, OPCODE_SETCC_RM8_A, OPCODE_SETCC_RM8_B | jcc_type, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r8));
+}
+
+// Bind 'label' to the current code position.  In the compute pass this
+// records the offset; in the emit pass it checks that the offset matches
+// what the compute pass recorded (i.e. code size did not change).
+void asm_x86_label_assign(asm_x86_t *as, mp_uint_t label) {
+    assert(label < as->max_num_labels);
+    if (as->pass < ASM_X86_PASS_EMIT) {
+        // assign label offset
+        assert(as->label_offsets[label] == (mp_uint_t)-1);
+        as->label_offsets[label] = as->code_offset;
+    } else {
+        // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT
+        //printf("l%d: (at %d=%ld)\n", label, as->label_offsets[label], as->code_offset);
+        assert(as->label_offsets[label] == as->code_offset);
+    }
+}
+
+// Look up a label's code offset; (mp_uint_t)-1 if not yet assigned
+// (a forward reference in the compute pass).
+STATIC mp_uint_t get_label_dest(asm_x86_t *as, mp_uint_t label) {
+    assert(label < as->max_num_labels);
+    return as->label_offsets[label];
+}
+
+// jmp to 'label', choosing the short rel8 form for backwards jumps that
+// fit, otherwise the rel32 form.  Forward jumps (dest still -1 or ahead of
+// the current offset) always use rel32 so both passes emit the same size.
+void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->code_offset;
+    if (dest != (mp_uint_t)-1 && rel < 0) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 8 bit relative jump
+        rel -= 2; // account for the 2-byte opcode+rel8 instruction itself
+        if (SIGNED_FIT8(rel)) {
+            asm_x86_write_byte_2(as, OPCODE_JMP_REL8, rel & 0xff);
+        } else {
+            rel += 2; // undo the rel8 adjustment before taking the rel32 path
+            goto large_jump;
+        }
+    } else {
+        // is a forwards jump, so need to assume it's large
+    large_jump:
+        rel -= 5; // account for the 5-byte opcode+rel32 instruction itself
+        asm_x86_write_byte_1(as, OPCODE_JMP_REL32);
+        asm_x86_write_word32(as, rel);
+    }
+}
+
+// Conditional jump to 'label' (jcc_type is one of ASM_X86_CC_*), using the
+// short rel8 form for backwards jumps that fit, otherwise 0x0f-prefixed
+// rel32.  Forward jumps always use the large form, as in asm_x86_jmp_label.
+void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->code_offset;
+    if (dest != (mp_uint_t)-1 && rel < 0) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 8 bit relative jump
+        rel -= 2; // account for the 2-byte opcode+rel8 instruction itself
+        if (SIGNED_FIT8(rel)) {
+            asm_x86_write_byte_2(as, OPCODE_JCC_REL8 | jcc_type, rel & 0xff);
+        } else {
+            rel += 2; // undo the rel8 adjustment before taking the rel32 path
+            goto large_jump;
+        }
+    } else {
+        // is a forwards jump, so need to assume it's large
+    large_jump:
+        rel -= 6; // account for the 6-byte 0x0f+opcode+rel32 instruction itself
+        asm_x86_write_byte_2(as, OPCODE_JCC_REL32_A, OPCODE_JCC_REL32_B | jcc_type);
+        asm_x86_write_word32(as, rel);
+    }
+}
+
+// Emit the function prologue: set up the EBP frame, reserve num_locals
+// stack words, then save the cdecl callee-saved registers EBX/ESI/EDI.
+void asm_x86_entry(asm_x86_t *as, mp_uint_t num_locals) {
+    asm_x86_push_r32(as, ASM_X86_REG_EBP);
+    asm_x86_mov_r32_r32(as, ASM_X86_REG_EBP, ASM_X86_REG_ESP);
+    if (num_locals > 0) {
+        asm_x86_sub_r32_i32(as, ASM_X86_REG_ESP, num_locals * WORD_SIZE);
+    }
+    asm_x86_push_r32(as, ASM_X86_REG_EBX);
+    asm_x86_push_r32(as, ASM_X86_REG_ESI);
+    asm_x86_push_r32(as, ASM_X86_REG_EDI);
+    // TODO align stack on 16-byte boundary
+    as->num_locals = num_locals;
+}
+
+// Emit the function epilogue: restore callee-saved registers in reverse
+// order, then LEAVE (mov esp, ebp; pop ebp) discards the locals, and ret.
+void asm_x86_exit(asm_x86_t *as) {
+    asm_x86_pop_r32(as, ASM_X86_REG_EDI);
+    asm_x86_pop_r32(as, ASM_X86_REG_ESI);
+    asm_x86_pop_r32(as, ASM_X86_REG_EBX);
+    asm_x86_write_byte_1(as, OPCODE_LEAVE);
+    asm_x86_ret(as);
+}
+
+#if 0
+void asm_x86_push_arg(asm_x86_t *as, int src_arg_num) {
+ asm_x86_push_disp(as, ASM_X86_REG_EBP, 2 * WORD_SIZE + src_arg_num * WORD_SIZE);
+}
+#endif
+
+// Load incoming cdecl argument src_arg_num into dest_r32.  Arguments sit
+// above the saved EBP and return address, hence the 2*WORD_SIZE base offset.
+void asm_x86_mov_arg_to_r32(asm_x86_t *as, int src_arg_num, int dest_r32) {
+    asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_EBP, 2 * WORD_SIZE + src_arg_num * WORD_SIZE, dest_r32);
+}
+
+#if 0
+void asm_x86_mov_r32_to_arg(asm_x86_t *as, int src_r32, int dest_arg_num) {
+ asm_x86_mov_r32_to_mem32(as, src_r32, ASM_X86_REG_EBP, 2 * WORD_SIZE + dest_arg_num * WORD_SIZE);
+}
+#endif
+
+// locals:
+//  - stored on the stack in ascending order
+//  - numbered 0 through as->num_locals-1
+//  - EBP points above the last local
+//
+//                          | EBP
+//                          v
+//  l0  l1  l2  ...  l(n-1)
+//  ^                ^
+//  | low address    | high address in RAM
+//
+// Return the (negative) EBP-relative byte offset of local 'local_num'.
+STATIC int asm_x86_local_offset_from_ebp(asm_x86_t *as, int local_num) {
+    return (-as->num_locals + local_num) * WORD_SIZE;
+}
+
+// Load stack local src_local_num into dest_r32.
+void asm_x86_mov_local_to_r32(asm_x86_t *as, int src_local_num, int dest_r32) {
+    asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_EBP, asm_x86_local_offset_from_ebp(as, src_local_num), dest_r32);
+}
+
+// Store src_r32 into stack local dest_local_num.
+void asm_x86_mov_r32_to_local(asm_x86_t *as, int src_r32, int dest_local_num) {
+    asm_x86_mov_r32_to_mem32(as, src_r32, ASM_X86_REG_EBP, asm_x86_local_offset_from_ebp(as, dest_local_num));
+}
+
+// Compute the address of stack local 'local_num' into dest_r32.
+void asm_x86_mov_local_addr_to_r32(asm_x86_t *as, int local_num, int dest_r32) {
+    int offset = asm_x86_local_offset_from_ebp(as, local_num);
+    if (offset != 0) {
+        asm_x86_lea_disp_to_r32(as, ASM_X86_REG_EBP, offset, dest_r32);
+    } else {
+        // zero offset: a plain register move is a shorter encoding than lea
+        asm_x86_mov_r32_r32(as, dest_r32, ASM_X86_REG_EBP);
+    }
+}
+
+#if 0
+void asm_x86_push_local(asm_x86_t *as, int local_num) {
+ asm_x86_push_disp(as, ASM_X86_REG_EBP, asm_x86_local_offset_from_ebp(as, local_num));
+}
+
+void asm_x86_push_local_addr(asm_x86_t *as, int local_num, int temp_r32)
+{
+ asm_x86_mov_r32_r32(as, temp_r32, ASM_X86_REG_EBP);
+ asm_x86_add_i32_to_r32(as, asm_x86_local_offset_from_ebp(as, local_num), temp_r32);
+ asm_x86_push_r32(as, temp_r32);
+}
+#endif
+
+// Emit an indirect call to 'ptr' with n_args arguments (cdecl): push the
+// argument registers in reverse order, load the target address into
+// temp_r32, "call r/m32" through it, then pop the arguments off the stack.
+void asm_x86_call_ind(asm_x86_t *as, void *ptr, mp_uint_t n_args, int temp_r32) {
+    // TODO align stack on 16-byte boundary before the call
+    assert(n_args <= 5);
+    if (n_args > 4) {
+        asm_x86_push_r32(as, ASM_X86_REG_ARG_5);
+    }
+    if (n_args > 3) {
+        asm_x86_push_r32(as, ASM_X86_REG_ARG_4);
+    }
+    if (n_args > 2) {
+        asm_x86_push_r32(as, ASM_X86_REG_ARG_3);
+    }
+    if (n_args > 1) {
+        asm_x86_push_r32(as, ASM_X86_REG_ARG_2);
+    }
+    if (n_args > 0) {
+        asm_x86_push_r32(as, ASM_X86_REG_ARG_1);
+    }
+#ifdef __LP64__
+    // We wouldn't run x86 code on an x64 machine. This is here to enable
+    // testing of the x86 emitter only.
+    asm_x86_mov_i32_to_r32(as, (int32_t)(int64_t)ptr, temp_r32);
+#else
+    // If we get here, sizeof(int) == sizeof(void*).
+    asm_x86_mov_i32_to_r32(as, (int32_t)ptr, temp_r32);
+#endif
+    // call [temp_r32] -- MODRM_R32(2) is the /2 opcode extension for CALL
+    asm_x86_write_byte_2(as, OPCODE_CALL_RM32, MODRM_R32(2) | MODRM_RM_REG | MODRM_RM_R32(temp_r32));
+    // this reduces code size by 2 bytes per call, but doesn't seem to speed it up at all
+    /*
+    asm_x86_write_byte_1(as, OPCODE_CALL_REL32);
+    asm_x86_write_word32(as, ptr - (void*)(as->code_base + as->code_offset + 4));
+    */
+
+    // the caller must clean up the stack
+    if (n_args > 0) {
+        asm_x86_add_i32_to_r32(as, WORD_SIZE * n_args, ASM_X86_REG_ESP);
+    }
+}
+
+#endif // MICROPY_EMIT_X86
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/asmx86.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,122 @@ +/* + * This file is part of the Micro Python project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2014 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef __MICROPY_INCLUDED_PY_ASMX86_H__ +#define __MICROPY_INCLUDED_PY_ASMX86_H__ + +#include "py/mpconfig.h" +#include "py/misc.h" + +// x86 cdecl calling convention is: +// - args passed on the stack in reverse order +// - return value in EAX +// - caller cleans up the stack after a call +// - stack must be aligned to 16-byte boundary before all calls +// - EAX, ECX, EDX are caller-save +// - EBX, ESI, EDI, EBP, ESP, EIP are callee-save + +// In the functions below, argument order follows x86 docs and generally +// the destination is the first argument. 
+// NOTE: this is a change from the old convention used in this file and +// some functions still use the old (reverse) convention. + +#define ASM_X86_PASS_COMPUTE (1) +#define ASM_X86_PASS_EMIT (2) + +#define ASM_X86_REG_EAX (0) +#define ASM_X86_REG_ECX (1) +#define ASM_X86_REG_EDX (2) +#define ASM_X86_REG_EBX (3) +#define ASM_X86_REG_ESP (4) +#define ASM_X86_REG_EBP (5) +#define ASM_X86_REG_ESI (6) +#define ASM_X86_REG_EDI (7) + +// x86 passes values on the stack, but the emitter is register based, so we need +// to define registers that can temporarily hold the function arguments. They +// need to be defined here so that asm_x86_call_ind can push them onto the stack +// before the call. +#define ASM_X86_REG_ARG_1 ASM_X86_REG_EAX +#define ASM_X86_REG_ARG_2 ASM_X86_REG_ECX +#define ASM_X86_REG_ARG_3 ASM_X86_REG_EDX +#define ASM_X86_REG_ARG_4 ASM_X86_REG_EBX +#define ASM_X86_REG_ARG_5 ASM_X86_REG_ESI + +// condition codes, used for jcc and setcc (despite their j-name!) +#define ASM_X86_CC_JB (0x2) // below, unsigned +#define ASM_X86_CC_JZ (0x4) +#define ASM_X86_CC_JE (0x4) +#define ASM_X86_CC_JNZ (0x5) +#define ASM_X86_CC_JNE (0x5) +#define ASM_X86_CC_JL (0xc) // less, signed +#define ASM_X86_CC_JGE (0xd) // greater or equal, signed +#define ASM_X86_CC_JLE (0xe) // less or equal, signed +#define ASM_X86_CC_JG (0xf) // greater, signed + +typedef struct _asm_x86_t asm_x86_t; + +asm_x86_t* asm_x86_new(mp_uint_t max_num_labels); +void asm_x86_free(asm_x86_t* as, bool free_code); +void asm_x86_start_pass(asm_x86_t *as, mp_uint_t pass); +void asm_x86_end_pass(asm_x86_t *as); +mp_uint_t asm_x86_get_code_pos(asm_x86_t *as); +mp_uint_t asm_x86_get_code_size(asm_x86_t* as); +void* asm_x86_get_code(asm_x86_t* as); + +void asm_x86_align(asm_x86_t *as, mp_uint_t align); +void asm_x86_data(asm_x86_t *as, mp_uint_t bytesize, mp_uint_t val); + +void asm_x86_mov_r32_r32(asm_x86_t* as, int dest_r32, int src_r32); +void asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int 
dest_r32); +void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32); +void asm_x86_mov_r8_to_mem8(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp); +void asm_x86_mov_r16_to_mem16(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp); +void asm_x86_mov_r32_to_mem32(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp); +void asm_x86_mov_mem8_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32); +void asm_x86_mov_mem16_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32); +void asm_x86_mov_mem32_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest_r32); +void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32); +void asm_x86_or_r32_r32(asm_x86_t *as, int dest_r32, int src_r32); +void asm_x86_xor_r32_r32(asm_x86_t *as, int dest_r32, int src_r32); +void asm_x86_shl_r32_cl(asm_x86_t* as, int dest_r32); +void asm_x86_sar_r32_cl(asm_x86_t* as, int dest_r32); +void asm_x86_add_r32_r32(asm_x86_t* as, int dest_r32, int src_r32); +void asm_x86_sub_r32_r32(asm_x86_t* as, int dest_r32, int src_r32); +void asm_x86_mul_r32_r32(asm_x86_t* as, int dest_r32, int src_r32); +void asm_x86_cmp_r32_with_r32(asm_x86_t* as, int src_r32_a, int src_r32_b); +void asm_x86_test_r8_with_r8(asm_x86_t* as, int src_r32_a, int src_r32_b); +void asm_x86_setcc_r8(asm_x86_t* as, mp_uint_t jcc_type, int dest_r8); +void asm_x86_label_assign(asm_x86_t* as, mp_uint_t label); +void asm_x86_jmp_label(asm_x86_t* as, mp_uint_t label); +void asm_x86_jcc_label(asm_x86_t* as, mp_uint_t jcc_type, mp_uint_t label); +void asm_x86_entry(asm_x86_t* as, mp_uint_t num_locals); +void asm_x86_exit(asm_x86_t* as); +void asm_x86_mov_arg_to_r32(asm_x86_t *as, int src_arg_num, int dest_r32); +void asm_x86_mov_local_to_r32(asm_x86_t* as, int src_local_num, int dest_r32); +void asm_x86_mov_r32_to_local(asm_x86_t* as, int src_r32, int dest_local_num); +void asm_x86_mov_local_addr_to_r32(asm_x86_t* as, int local_num, int dest_r32); +void 
asm_x86_call_ind(asm_x86_t* as, void* ptr, mp_uint_t n_args, int temp_r32); + +#endif // __MICROPY_INCLUDED_PY_ASMX86_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/bc.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,398 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/objfun.h"
+#include "py/runtime0.h"
+#include "py/bc0.h"
+#include "py/bc.h"
+
+#if 0 // print debugging info
+#define DEBUG_PRINT (1)
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+mp_uint_t mp_decode_uint(const byte **ptr) {
+ mp_uint_t unum = 0;
+ byte val;
+ const byte *p = *ptr;
+ do {
+ val = *p++;
+ unum = (unum << 7) | (val & 0x7f);
+ } while ((val & 0x80) != 0);
+ *ptr = p;
+ return unum;
+}
+
// Raise a TypeError reporting a positional-argument count mismatch for the
// bytecode function f (expected vs given counts).  The level of detail in
// the message is selected at compile time by MICROPY_ERROR_REPORTING; f is
// only consulted (for its name) in DETAILED mode.  Never returns: control
// leaves via nlr_raise / mp_arg_error_terse_mismatch.
STATIC NORETURN void fun_pos_args_mismatch(mp_obj_fun_bc_t *f, size_t expected, size_t given) {
#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
    // generic message, used also for other argument issues
    (void)f;
    (void)expected;
    (void)given;
    mp_arg_error_terse_mismatch();
#elif MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NORMAL
    (void)f;
    nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
        "function takes %d positional arguments but %d were given", expected, given));
#elif MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
    nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
        "%q() takes %d positional arguments but %d were given",
        mp_obj_fun_get_name(MP_OBJ_FROM_PTR(f)), expected, given));
#endif
}
+
#if DEBUG_PRINT
// Debug helper: print the address of the array a followed by its sz object
// pointers.  Only compiled in when DEBUG_PRINT is enabled above.
STATIC void dump_args(const mp_obj_t *a, size_t sz) {
    DEBUG_printf("%p: ", a);
    for (size_t i = 0; i < sz; i++) {
        DEBUG_printf("%p ", a[i]);
    }
    DEBUG_printf("\n");
}
#else
// debugging disabled: compile calls away to nothing
#define dump_args(...) (void)0
#endif
+
// On entry code_state should be allocated somewhere (stack/heap) and
// contain the following valid entries:
//    - code_state->ip should contain the offset in bytes from the start of
//      the bytecode chunk to just after n_state and n_exc_stack
//    - code_state->n_state should be set to the state size (locals plus stack)
//
// This function decodes the bytecode prelude, checks and copies the given
// positional and keyword arguments into the state array (which is filled
// from the top downwards), fills in defaults, wraps closed-over locals in
// cells, and leaves code_state->ip pointing at the first real opcode.
// Raises TypeError (via nlr_raise) on any argument mismatch.
// NOTE(review): kwargs appear to be passed as (name, value) pairs directly
// after the positional args in the args array — confirm against callers.
void mp_setup_code_state(mp_code_state *code_state, mp_obj_fun_bc_t *self, size_t n_args, size_t n_kw, const mp_obj_t *args) {
    // This function is pretty complicated. It's main aim is to be efficient in speed and RAM
    // usage for the common case of positional only args.
    size_t n_state = code_state->n_state;

    // ip comes in as an offset into bytecode, so turn it into a true pointer
    code_state->ip = self->bytecode + (size_t)code_state->ip;

    // store pointer to constant table
    code_state->const_table = self->const_table;

    #if MICROPY_STACKLESS
    code_state->prev = NULL;
    #endif

    // get params (the first 4 bytes of the prelude)
    size_t scope_flags = *code_state->ip++;
    size_t n_pos_args = *code_state->ip++;
    size_t n_kwonly_args = *code_state->ip++;
    size_t n_def_pos_args = *code_state->ip++;

    // value stack starts empty; exception stack lives just past state[n_state]
    code_state->sp = &code_state->state[0] - 1;
    code_state->exc_sp = (mp_exc_stack_t*)(code_state->state + n_state) - 1;

    // zero out the local stack to begin with
    memset(code_state->state, 0, n_state * sizeof(*code_state->state));

    // keyword (name, value) pairs follow the positional args
    const mp_obj_t *kwargs = args + n_args;

    // var_pos_kw_args points to the stack where the var-args tuple, and var-kw dict, should go (if they are needed)
    mp_obj_t *var_pos_kw_args = &code_state->state[n_state - 1 - n_pos_args - n_kwonly_args];

    // check positional arguments

    if (n_args > n_pos_args) {
        // given more than enough arguments
        if ((scope_flags & MP_SCOPE_FLAG_VARARGS) == 0) {
            fun_pos_args_mismatch(self, n_pos_args, n_args);
        }
        // put extra arguments in varargs tuple
        *var_pos_kw_args-- = mp_obj_new_tuple(n_args - n_pos_args, args + n_pos_args);
        n_args = n_pos_args;
    } else {
        if ((scope_flags & MP_SCOPE_FLAG_VARARGS) != 0) {
            DEBUG_printf("passing empty tuple as *args\n");
            *var_pos_kw_args-- = mp_const_empty_tuple;
        }
        // Apply processing and check below only if we don't have kwargs,
        // otherwise, kw handling code below has own extensive checks.
        if (n_kw == 0 && (scope_flags & MP_SCOPE_FLAG_DEFKWARGS) == 0) {
            if (n_args >= (size_t)(n_pos_args - n_def_pos_args)) {
                // given enough arguments, but may need to use some default arguments
                for (size_t i = n_args; i < n_pos_args; i++) {
                    code_state->state[n_state - 1 - i] = self->extra_args[i - (n_pos_args - n_def_pos_args)];
                }
            } else {
                fun_pos_args_mismatch(self, n_pos_args - n_def_pos_args, n_args);
            }
        }
    }

    // copy positional args into state (state is filled from the top down)
    for (size_t i = 0; i < n_args; i++) {
        code_state->state[n_state - 1 - i] = args[i];
    }

    // check keyword arguments

    if (n_kw != 0 || (scope_flags & MP_SCOPE_FLAG_DEFKWARGS) != 0) {
        DEBUG_printf("Initial args: ");
        dump_args(code_state->state + n_state - n_pos_args - n_kwonly_args, n_pos_args + n_kwonly_args);

        mp_obj_t dict = MP_OBJ_NULL;
        if ((scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) != 0) {
            dict = mp_obj_new_dict(n_kw); // TODO: better go conservative with 0?
            *var_pos_kw_args = dict;
        }

        // get pointer to arg_names array (qstr objects at start of const table)
        const mp_obj_t *arg_names = (const mp_obj_t*)code_state->const_table;

        for (size_t i = 0; i < n_kw; i++) {
            mp_obj_t wanted_arg_name = kwargs[2 * i];
            for (size_t j = 0; j < n_pos_args + n_kwonly_args; j++) {
                if (wanted_arg_name == arg_names[j]) {
                    // MP_OBJ_NULL slot means "not yet filled"; anything else is a duplicate
                    if (code_state->state[n_state - 1 - j] != MP_OBJ_NULL) {
                        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
                            "function got multiple values for argument '%q'", MP_OBJ_QSTR_VALUE(wanted_arg_name)));
                    }
                    code_state->state[n_state - 1 - j] = kwargs[2 * i + 1];
                    goto continue2;
                }
            }
            // Didn't find name match with positional args
            if ((scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) == 0) {
                nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "function does not take keyword arguments"));
            }
            mp_obj_dict_store(dict, kwargs[2 * i], kwargs[2 * i + 1]);
continue2:;
        }

        DEBUG_printf("Args with kws flattened: ");
        dump_args(code_state->state + n_state - n_pos_args - n_kwonly_args, n_pos_args + n_kwonly_args);

        // fill in defaults for positional args (walk defaults backwards)
        mp_obj_t *d = &code_state->state[n_state - n_pos_args];
        mp_obj_t *s = &self->extra_args[n_def_pos_args - 1];
        for (size_t i = n_def_pos_args; i > 0; i--, d++, s--) {
            if (*d == MP_OBJ_NULL) {
                *d = *s;
            }
        }

        DEBUG_printf("Args after filling default positional: ");
        dump_args(code_state->state + n_state - n_pos_args - n_kwonly_args, n_pos_args + n_kwonly_args);

        // Check that all mandatory positional args are specified
        while (d < &code_state->state[n_state]) {
            if (*d++ == MP_OBJ_NULL) {
                nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
                    "function missing required positional argument #%d", &code_state->state[n_state] - d));
            }
        }

        // Check that all mandatory keyword args are specified
        // Fill in default kw args if we have them
        for (size_t i = 0; i < n_kwonly_args; i++) {
            if (code_state->state[n_state - 1 - n_pos_args - i] == MP_OBJ_NULL) {
                mp_map_elem_t *elem = NULL;
                if ((scope_flags & MP_SCOPE_FLAG_DEFKWARGS) != 0) {
                    // default kw-only args live in a dict stored after the positional defaults
                    elem = mp_map_lookup(&((mp_obj_dict_t*)MP_OBJ_TO_PTR(self->extra_args[n_def_pos_args]))->map, arg_names[n_pos_args + i], MP_MAP_LOOKUP);
                }
                if (elem != NULL) {
                    code_state->state[n_state - 1 - n_pos_args - i] = elem->value;
                } else {
                    nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
                        "function missing required keyword argument '%q'", MP_OBJ_QSTR_VALUE(arg_names[n_pos_args + i])));
                }
            }
        }

    } else {
        // no keyword arguments given
        if (n_kwonly_args != 0) {
            nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
                "function missing keyword-only argument"));
        }
        if ((scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) != 0) {
            *var_pos_kw_args = mp_obj_new_dict(0);
        }
    }

    // get the ip and skip argument names
    const byte *ip = code_state->ip;

    // store pointer to code_info and jump over it
    {
        code_state->code_info = ip;
        const byte *ip2 = ip;
        size_t code_info_size = mp_decode_uint(&ip2);
        ip += code_info_size;
    }

    // bytecode prelude: initialise closed over variables
    // (list of local numbers terminated by the sentinel byte 255)
    size_t local_num;
    while ((local_num = *ip++) != 255) {
        code_state->state[n_state - 1 - local_num] =
            mp_obj_new_cell(code_state->state[n_state - 1 - local_num]);
    }

    // now that we skipped over the prelude, set the ip for the VM
    code_state->ip = ip;

    DEBUG_printf("Calling: n_pos_args=%d, n_kwonly_args=%d\n", n_pos_args, n_kwonly_args);
    dump_args(code_state->state + n_state - n_pos_args - n_kwonly_args, n_pos_args + n_kwonly_args);
    dump_args(code_state->state, n_state);
}
+
+#if MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE
+
+// The following table encodes the number of bytes that a specific opcode
+// takes up. There are 3 special opcodes that always have an extra byte:
+// MP_BC_MAKE_CLOSURE
+// MP_BC_MAKE_CLOSURE_DEFARGS
+// MP_BC_RAISE_VARARGS
+// There are 4 special opcodes that have an extra byte only when
+// MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE is enabled:
+// MP_BC_LOAD_NAME
+// MP_BC_LOAD_GLOBAL
+// MP_BC_LOAD_ATTR
+// MP_BC_STORE_ATTR
// Pack the 2-bit format codes of 4 consecutive opcodes into one byte:
// opcode's entry is opcode_format_table[opcode >> 2], and its 2-bit field
// within that entry is selected by (opcode & 3) (see mp_opcode_format).
#define OC4(a, b, c, d) (a | (b << 2) | (c << 4) | (d << 6))
#define U (0) // undefined opcode
#define B (MP_OPCODE_BYTE) // single byte
#define Q (MP_OPCODE_QSTR) // single byte plus 2-byte qstr
#define V (MP_OPCODE_VAR_UINT) // single byte plus variable encoded unsigned int
#define O (MP_OPCODE_OFFSET) // single byte plus 2-byte bytecode offset
STATIC const byte opcode_format_table[64] = {
    OC4(U, U, U, U), // 0x00-0x03
    OC4(U, U, U, U), // 0x04-0x07
    OC4(U, U, U, U), // 0x08-0x0b
    OC4(U, U, U, U), // 0x0c-0x0f
    OC4(B, B, B, U), // 0x10-0x13
    OC4(V, U, Q, V), // 0x14-0x17
    OC4(B, U, V, V), // 0x18-0x1b
    OC4(Q, Q, Q, Q), // 0x1c-0x1f
    OC4(B, B, V, V), // 0x20-0x23
    OC4(Q, Q, Q, B), // 0x24-0x27
    OC4(V, V, Q, Q), // 0x28-0x2b
    OC4(U, U, U, U), // 0x2c-0x2f
    OC4(B, B, B, B), // 0x30-0x33
    OC4(B, O, O, O), // 0x34-0x37
    OC4(O, O, U, U), // 0x38-0x3b
    OC4(U, O, B, O), // 0x3c-0x3f
    OC4(O, B, B, O), // 0x40-0x43
    OC4(B, B, O, U), // 0x44-0x47
    OC4(U, U, U, U), // 0x48-0x4b
    OC4(U, U, U, U), // 0x4c-0x4f
    OC4(V, V, V, V), // 0x50-0x53
    OC4(B, V, V, V), // 0x54-0x57
    OC4(V, V, V, B), // 0x58-0x5b
    OC4(B, B, B, U), // 0x5c-0x5f
    OC4(V, V, V, V), // 0x60-0x63
    OC4(V, V, V, V), // 0x64-0x67
    OC4(Q, Q, B, U), // 0x68-0x6b
    OC4(U, U, U, U), // 0x6c-0x6f

    // 0x70-0xfb are the *_MULTI opcode ranges: all single-byte
    OC4(B, B, B, B), // 0x70-0x73
    OC4(B, B, B, B), // 0x74-0x77
    OC4(B, B, B, B), // 0x78-0x7b
    OC4(B, B, B, B), // 0x7c-0x7f
    OC4(B, B, B, B), // 0x80-0x83
    OC4(B, B, B, B), // 0x84-0x87
    OC4(B, B, B, B), // 0x88-0x8b
    OC4(B, B, B, B), // 0x8c-0x8f
    OC4(B, B, B, B), // 0x90-0x93
    OC4(B, B, B, B), // 0x94-0x97
    OC4(B, B, B, B), // 0x98-0x9b
    OC4(B, B, B, B), // 0x9c-0x9f
    OC4(B, B, B, B), // 0xa0-0xa3
    OC4(B, B, B, B), // 0xa4-0xa7
    OC4(B, B, B, B), // 0xa8-0xab
    OC4(B, B, B, B), // 0xac-0xaf

    OC4(B, B, B, B), // 0xb0-0xb3
    OC4(B, B, B, B), // 0xb4-0xb7
    OC4(B, B, B, B), // 0xb8-0xbb
    OC4(B, B, B, B), // 0xbc-0xbf

    OC4(B, B, B, B), // 0xc0-0xc3
    OC4(B, B, B, B), // 0xc4-0xc7
    OC4(B, B, B, B), // 0xc8-0xcb
    OC4(B, B, B, B), // 0xcc-0xcf

    OC4(B, B, B, B), // 0xd0-0xd3
    OC4(B, B, B, B), // 0xd4-0xd7
    OC4(B, B, B, B), // 0xd8-0xdb
    OC4(B, B, B, B), // 0xdc-0xdf

    OC4(B, B, B, B), // 0xe0-0xe3
    OC4(B, B, B, B), // 0xe4-0xe7
    OC4(B, B, B, B), // 0xe8-0xeb
    OC4(B, B, B, B), // 0xec-0xef

    OC4(B, B, B, B), // 0xf0-0xf3
    OC4(B, B, B, B), // 0xf4-0xf7
    OC4(B, B, B, U), // 0xf8-0xfb
    OC4(U, U, U, U), // 0xfc-0xff
};
#undef OC4
#undef U
#undef B
#undef Q
#undef V
#undef O
+
+uint mp_opcode_format(const byte *ip, size_t *opcode_size) {
+ uint f = (opcode_format_table[*ip >> 2] >> (2 * (*ip & 3))) & 3;
+ const byte *ip_start = ip;
+ if (f == MP_OPCODE_QSTR) {
+ ip += 3;
+ } else {
+ int extra_byte = (
+ *ip == MP_BC_RAISE_VARARGS
+ || *ip == MP_BC_MAKE_CLOSURE
+ || *ip == MP_BC_MAKE_CLOSURE_DEFARGS
+ #if MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
+ || *ip == MP_BC_LOAD_NAME
+ || *ip == MP_BC_LOAD_GLOBAL
+ || *ip == MP_BC_LOAD_ATTR
+ || *ip == MP_BC_STORE_ATTR
+ #endif
+ );
+ ip += 1;
+ if (f == MP_OPCODE_VAR_UINT) {
+ while ((*ip++ & 0x80) != 0) {
+ }
+ } else if (f == MP_OPCODE_OFFSET) {
+ ip += 2;
+ }
+ ip += extra_byte;
+ }
+ *opcode_size = ip - ip_start;
+ return f;
+}
+
+#endif // MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/bc.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,118 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_BC_H__
+#define __MICROPY_INCLUDED_PY_BC_H__
+
+#include "py/runtime.h"
+#include "py/obj.h"
+
+// bytecode layout:
+//
+// n_state : var uint
+// n_exc_stack : var uint
+// scope_flags : byte
+// n_pos_args : byte number of arguments this function takes
+// n_kwonly_args : byte number of keyword-only arguments this function takes
+// n_def_pos_args : byte number of default positional arguments
+//
+// code_info_size : var uint | code_info_size counts bytes in this chunk
+// simple_name : var qstr |
+// source_file : var qstr |
+// <line number info> |
+// <word alignment padding> | only needed if bytecode contains pointers
+//
+// local_num0 : byte |
+// ... : byte |
+// local_numN : byte | N = num_cells
+// 255 : byte | end of list sentinel
+// <bytecode> |
+//
+//
+// constant table layout:
+//
+// argname0 : obj (qstr)
+// ... : obj (qstr)
+// argnameN : obj (qstr) N = num_pos_args + num_kwonly_args
+// const0 : obj
+// constN : obj
+
// Exception stack entry: one record per active SETUP_EXCEPT/SETUP_WITH/
// SETUP_FINALLY block in the VM.
typedef struct _mp_exc_stack {
    // bytecode address of the handler for this block
    const byte *handler;
    // saved value-stack pointer, with flags packed into its low bits
    // (see the MP_TAGPTR_* helpers):
    // bit 0 is saved currently_in_except_block value
    // bit 1 is whether the opcode was SETUP_WITH or SETUP_FINALLY
    mp_obj_t *val_sp;
    // Saved exception, valid if currently_in_except_block bit is 1
    mp_obj_base_t *prev_exc;
} mp_exc_stack_t;
+
// Per-call execution state for a bytecode function: registers for the VM
// plus the variable-length locals/stack area.
typedef struct _mp_code_state {
    // pointer to the code_info chunk within the bytecode
    const byte *code_info;
    // instruction pointer; mp_setup_code_state receives this as a byte
    // offset into the bytecode and converts it to a true pointer
    const byte *ip;
    const mp_uint_t *const_table;
    // value-stack pointer (stack occupies the top part of state[])
    mp_obj_t *sp;
    // exception-stack pointer, with flags in its low bits:
    // bit 0 is saved currently_in_except_block value
    mp_exc_stack_t *exc_sp;
    mp_obj_dict_t *old_globals;
    #if MICROPY_STACKLESS
    // link to caller's code state, for stackless call chaining
    struct _mp_code_state *prev;
    #endif
    // number of slots in state[] (locals plus value stack)
    size_t n_state;
    // Variable-length
    mp_obj_t state[0];
    // Variable-length, never accessed by name, only as (void*)(state + n_state)
    //mp_exc_stack_t exc_state[0];
} mp_code_state;
+
+mp_uint_t mp_decode_uint(const byte **ptr);
+
+mp_vm_return_kind_t mp_execute_bytecode(mp_code_state *code_state, volatile mp_obj_t inject_exc);
+mp_code_state *mp_obj_fun_bc_prepare_codestate(mp_obj_t func, size_t n_args, size_t n_kw, const mp_obj_t *args);
+struct _mp_obj_fun_bc_t;
+void mp_setup_code_state(mp_code_state *code_state, struct _mp_obj_fun_bc_t *self, size_t n_args, size_t n_kw, const mp_obj_t *args);
+void mp_bytecode_print(const void *descr, const byte *code, mp_uint_t len, const mp_uint_t *const_table);
+void mp_bytecode_print2(const byte *code, mp_uint_t len);
+const byte *mp_bytecode_print_str(const byte *ip);
+#define mp_bytecode_print_inst(code) mp_bytecode_print2(code, 1)
+
// Helper macros to access pointer with least significant bits holding flags.
// Only valid for pointers aligned to at least 4 bytes, so the bottom two
// bits are known to be zero and free for use as tags.
#define MP_TAGPTR_PTR(x) ((void*)((uintptr_t)(x) & ~((uintptr_t)3))) // strip both tag bits
#define MP_TAGPTR_TAG0(x) ((uintptr_t)(x) & 1)
#define MP_TAGPTR_TAG1(x) ((uintptr_t)(x) & 2)
#define MP_TAGPTR_MAKE(ptr, tag) ((void*)((uintptr_t)(ptr) | (tag)))
+
+#if MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE
+
+#define MP_OPCODE_BYTE (0)
+#define MP_OPCODE_QSTR (1)
+#define MP_OPCODE_VAR_UINT (2)
+#define MP_OPCODE_OFFSET (3)
+
+uint mp_opcode_format(const byte *ip, size_t *opcode_size);
+
+#endif
+
+#endif // __MICROPY_INCLUDED_PY_BC_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/bc0.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,119 @@ +/* + * This file is part of the Micro Python project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013, 2014 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef __MICROPY_INCLUDED_PY_BC0_H__ +#define __MICROPY_INCLUDED_PY_BC0_H__ + +// Micro Python byte-codes. +// The comment at the end of the line (if it exists) tells the arguments to the byte-code. 
+ +#define MP_BC_LOAD_CONST_FALSE (0x10) +#define MP_BC_LOAD_CONST_NONE (0x11) +#define MP_BC_LOAD_CONST_TRUE (0x12) +#define MP_BC_LOAD_CONST_SMALL_INT (0x14) // signed var-int +#define MP_BC_LOAD_CONST_STRING (0x16) // qstr +#define MP_BC_LOAD_CONST_OBJ (0x17) // ptr +#define MP_BC_LOAD_NULL (0x18) + +#define MP_BC_LOAD_FAST_N (0x1a) // uint +#define MP_BC_LOAD_DEREF (0x1b) // uint +#define MP_BC_LOAD_NAME (0x1c) // qstr +#define MP_BC_LOAD_GLOBAL (0x1d) // qstr +#define MP_BC_LOAD_ATTR (0x1e) // qstr +#define MP_BC_LOAD_METHOD (0x1f) // qstr +#define MP_BC_LOAD_BUILD_CLASS (0x20) +#define MP_BC_LOAD_SUBSCR (0x21) + +#define MP_BC_STORE_FAST_N (0x22) // uint +#define MP_BC_STORE_DEREF (0x23) // uint +#define MP_BC_STORE_NAME (0x24) // qstr +#define MP_BC_STORE_GLOBAL (0x25) // qstr +#define MP_BC_STORE_ATTR (0x26) // qstr +#define MP_BC_STORE_SUBSCR (0x27) + +#define MP_BC_DELETE_FAST (0x28) // uint +#define MP_BC_DELETE_DEREF (0x29) // uint +#define MP_BC_DELETE_NAME (0x2a) // qstr +#define MP_BC_DELETE_GLOBAL (0x2b) // qstr + +#define MP_BC_DUP_TOP (0x30) +#define MP_BC_DUP_TOP_TWO (0x31) +#define MP_BC_POP_TOP (0x32) +#define MP_BC_ROT_TWO (0x33) +#define MP_BC_ROT_THREE (0x34) + +#define MP_BC_JUMP (0x35) // rel byte code offset, 16-bit signed, in excess +#define MP_BC_POP_JUMP_IF_TRUE (0x36) // rel byte code offset, 16-bit signed, in excess +#define MP_BC_POP_JUMP_IF_FALSE (0x37) // rel byte code offset, 16-bit signed, in excess +#define MP_BC_JUMP_IF_TRUE_OR_POP (0x38) // rel byte code offset, 16-bit signed, in excess +#define MP_BC_JUMP_IF_FALSE_OR_POP (0x39) // rel byte code offset, 16-bit signed, in excess +#define MP_BC_SETUP_WITH (0x3d) // rel byte code offset, 16-bit unsigned +#define MP_BC_WITH_CLEANUP (0x3e) +#define MP_BC_SETUP_EXCEPT (0x3f) // rel byte code offset, 16-bit unsigned +#define MP_BC_SETUP_FINALLY (0x40) // rel byte code offset, 16-bit unsigned +#define MP_BC_END_FINALLY (0x41) +#define MP_BC_GET_ITER (0x42) +#define MP_BC_FOR_ITER 
(0x43) // rel byte code offset, 16-bit unsigned +#define MP_BC_POP_BLOCK (0x44) +#define MP_BC_POP_EXCEPT (0x45) +#define MP_BC_UNWIND_JUMP (0x46) // rel byte code offset, 16-bit signed, in excess; then a byte + +#define MP_BC_BUILD_TUPLE (0x50) // uint +#define MP_BC_BUILD_LIST (0x51) // uint +#define MP_BC_LIST_APPEND (0x52) // uint +#define MP_BC_BUILD_MAP (0x53) // uint +#define MP_BC_STORE_MAP (0x54) +#define MP_BC_MAP_ADD (0x55) // uint +#define MP_BC_BUILD_SET (0x56) // uint +#define MP_BC_SET_ADD (0x57) // uint +#define MP_BC_BUILD_SLICE (0x58) // uint +#define MP_BC_UNPACK_SEQUENCE (0x59) // uint +#define MP_BC_UNPACK_EX (0x5a) // uint + +#define MP_BC_RETURN_VALUE (0x5b) +#define MP_BC_RAISE_VARARGS (0x5c) // byte +#define MP_BC_YIELD_VALUE (0x5d) +#define MP_BC_YIELD_FROM (0x5e) + +#define MP_BC_MAKE_FUNCTION (0x60) // uint +#define MP_BC_MAKE_FUNCTION_DEFARGS (0x61) // uint +#define MP_BC_MAKE_CLOSURE (0x62) // uint +#define MP_BC_MAKE_CLOSURE_DEFARGS (0x63) // uint +#define MP_BC_CALL_FUNCTION (0x64) // uint +#define MP_BC_CALL_FUNCTION_VAR_KW (0x65) // uint +#define MP_BC_CALL_METHOD (0x66) // uint +#define MP_BC_CALL_METHOD_VAR_KW (0x67) // uint + +#define MP_BC_IMPORT_NAME (0x68) // qstr +#define MP_BC_IMPORT_FROM (0x69) // qstr +#define MP_BC_IMPORT_STAR (0x6a) + +#define MP_BC_LOAD_CONST_SMALL_INT_MULTI (0x70) // + N(64) +#define MP_BC_LOAD_FAST_MULTI (0xb0) // + N(16) +#define MP_BC_STORE_FAST_MULTI (0xc0) // + N(16) +#define MP_BC_UNARY_OP_MULTI (0xd0) // + op(7) +#define MP_BC_BINARY_OP_MULTI (0xd7) // + op(36) + +#endif // __MICROPY_INCLUDED_PY_BC0_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/binary.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,379 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/binary.h"
+#include "py/smallint.h"
+#include "py/objint.h"
+
+// Helpers to work with binary-encoded data
+
+#ifndef alignof
+#define alignof(type) offsetof(struct { char c; type t; }, t)
+#endif
+
+size_t mp_binary_get_size(char struct_type, char val_type, mp_uint_t *palign) {
+ size_t size = 0;
+ int align = 1;
+ switch (struct_type) {
+ case '<': case '>':
+ switch (val_type) {
+ case 'b': case 'B':
+ size = 1; break;
+ case 'h': case 'H':
+ size = 2; break;
+ case 'i': case 'I':
+ size = 4; break;
+ case 'l': case 'L':
+ size = 4; break;
+ case 'q': case 'Q':
+ size = 8; break;
+ case 'P': case 'O': case 'S':
+ size = sizeof(void*); break;
+ case 'f':
+ size = sizeof(float); break;
+ case 'd':
+ size = sizeof(double); break;
+ }
+ break;
+ case '@': {
+ // TODO:
+ // The simplest heuristic for alignment is to align by value
+ // size, but that doesn't work for "bigger than int" types,
+ // for example, long long may very well have long alignment
+ // So, we introduce separate alignment handling, but having
+ // formal support for that is different from actually supporting
+ // particular (or any) ABI.
+ switch (val_type) {
+ case BYTEARRAY_TYPECODE:
+ case 'b': case 'B':
+ align = size = 1; break;
+ case 'h': case 'H':
+ align = alignof(short);
+ size = sizeof(short); break;
+ case 'i': case 'I':
+ align = alignof(int);
+ size = sizeof(int); break;
+ case 'l': case 'L':
+ align = alignof(long);
+ size = sizeof(long); break;
+ case 'q': case 'Q':
+ align = alignof(long long);
+ size = sizeof(long long); break;
+ case 'P': case 'O': case 'S':
+ align = alignof(void*);
+ size = sizeof(void*); break;
+ case 'f':
+ align = alignof(float);
+ size = sizeof(float); break;
+ case 'd':
+ align = alignof(double);
+ size = sizeof(double); break;
+ }
+ }
+ }
+ if (palign != NULL) {
+ *palign = align;
+ }
+ return size;
+}
+
// Load element `index` of the C array p, interpreted according to typecode,
// and return it as a MicroPython object.  Narrow integer types are returned
// as small ints; wider types go through the mp_obj_new_int* constructors
// (which may allocate).
mp_obj_t mp_binary_get_val_array(char typecode, void *p, mp_uint_t index) {
    mp_int_t val = 0;
    switch (typecode) {
        case 'b':
            val = ((signed char*)p)[index];
            break;
        case BYTEARRAY_TYPECODE:
        case 'B':
            val = ((unsigned char*)p)[index];
            break;
        case 'h':
            val = ((short*)p)[index];
            break;
        case 'H':
            val = ((unsigned short*)p)[index];
            break;
        case 'i':
            return mp_obj_new_int(((int*)p)[index]);
        case 'I':
            return mp_obj_new_int_from_uint(((unsigned int*)p)[index]);
        case 'l':
            return mp_obj_new_int(((long*)p)[index]);
        case 'L':
            return mp_obj_new_int_from_uint(((unsigned long*)p)[index]);
        #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
        case 'q':
            return mp_obj_new_int_from_ll(((long long*)p)[index]);
        case 'Q':
            return mp_obj_new_int_from_ull(((unsigned long long*)p)[index]);
        #endif
#if MICROPY_PY_BUILTINS_FLOAT
        case 'f':
            return mp_obj_new_float(((float*)p)[index]);
        case 'd':
            return mp_obj_new_float(((double*)p)[index]);
#endif
        // Extension to CPython: array of objects
        case 'O':
            return ((mp_obj_t*)p)[index];
        // Extension to CPython: array of pointers
        case 'P':
            return mp_obj_new_int((mp_int_t)(uintptr_t)((void**)p)[index]);
    }
    // b/B/h/H (and unknown typecodes, as 0) fall through to here
    return MP_OBJ_NEW_SMALL_INT(val);
}
+
+// The long long type is guaranteed to hold at least 64 bits, and size is at
+// most 8 (for q and Q), so we will always be able to parse the given data
+// and fit it into a long long.
+long long mp_binary_get_int(mp_uint_t size, bool is_signed, bool big_endian, const byte *src) {
+ int delta;
+ if (!big_endian) {
+ delta = -1;
+ src += size - 1;
+ } else {
+ delta = 1;
+ }
+
+ long long val = 0;
+ if (is_signed && *src & 0x80) {
+ val = -1;
+ }
+ for (uint i = 0; i < size; i++) {
+ val <<= 8;
+ val |= *src;
+ src += delta;
+ }
+
+ return val;
+}
+
// Lowercase format characters (ASCII > 'Z') denote signed types.
#define is_signed(typecode) (typecode > 'Z')
// Decode one value of format val_type from the buffer at *ptr, using byte
// order/alignment mode struct_type ('<', '>' or '@'), and return it as a
// MicroPython object.  *ptr is advanced past the (possibly alignment-
// padded) value.
mp_obj_t mp_binary_get_val(char struct_type, char val_type, byte **ptr) {
    byte *p = *ptr;
    mp_uint_t align;

    size_t size = mp_binary_get_size(struct_type, val_type, &align);
    if (struct_type == '@') {
        // Make pointer aligned
        p = (byte*)MP_ALIGN(p, (size_t)align);
        // native mode resolves to the machine's actual byte order
        #if MP_ENDIANNESS_LITTLE
        struct_type = '<';
        #else
        struct_type = '>';
        #endif
    }
    *ptr = p + size;

    long long val = mp_binary_get_int(size, is_signed(val_type), (struct_type == '>'), p);

    if (val_type == 'O') {
        // raw object pointer stored in the buffer
        return (mp_obj_t)(mp_uint_t)val;
    } else if (val_type == 'S') {
        // pointer to a NUL-terminated C string
        const char *s_val = (const char*)(uintptr_t)(mp_uint_t)val;
        return mp_obj_new_str(s_val, strlen(s_val), false);
#if MICROPY_PY_BUILTINS_FLOAT
    } else if (val_type == 'f') {
        // reinterpret the raw bits as IEEE float via union type-punning
        union { uint32_t i; float f; } fpu = {val};
        return mp_obj_new_float(fpu.f);
    } else if (val_type == 'd') {
        union { uint64_t i; double f; } fpu = {val};
        return mp_obj_new_float(fpu.f);
#endif
    } else if (is_signed(val_type)) {
        // use a small int when the value fits, else a big int
        if ((long long)MP_SMALL_INT_MIN <= val && val <= (long long)MP_SMALL_INT_MAX) {
            return mp_obj_new_int((mp_int_t)val);
        } else {
            return mp_obj_new_int_from_ll(val);
        }
    } else {
        if ((unsigned long long)val <= (unsigned long long)MP_SMALL_INT_MAX) {
            return mp_obj_new_int_from_uint((mp_uint_t)val);
        } else {
            return mp_obj_new_int_from_ull(val);
        }
    }
}
+
// Write the least-significant val_sz bytes of val to dest in the requested
// byte order (big_endian true for big-endian output).
void mp_binary_set_int(mp_uint_t val_sz, bool big_endian, byte *dest, mp_uint_t val) {
    if (MP_ENDIANNESS_LITTLE && !big_endian) {
        // requested order matches native little-endian: direct copy of the
        // low-order bytes
        memcpy(dest, &val, val_sz);
    } else if (MP_ENDIANNESS_BIG && big_endian) {
        // only copy the least-significant val_sz bytes
        memcpy(dest, (byte*)&val + sizeof(mp_uint_t) - val_sz, val_sz);
    } else {
        // requested order is the reverse of native: copy bytes backwards,
        // starting just past the last significant byte of val
        const byte *src;
        if (MP_ENDIANNESS_LITTLE) {
            src = (const byte*)&val + val_sz;
        } else {
            src = (const byte*)&val + sizeof(mp_uint_t);
        }
        while (val_sz--) {
            *dest++ = *--src;
        }
    }
}
+
// Encode the object val_in into the buffer at *ptr as one value of format
// val_type, using byte order/alignment mode struct_type ('<', '>' or '@').
// *ptr is advanced past the (possibly alignment-padded) value.
void mp_binary_set_val(char struct_type, char val_type, mp_obj_t val_in, byte **ptr) {
    byte *p = *ptr;
    mp_uint_t align;

    size_t size = mp_binary_get_size(struct_type, val_type, &align);
    if (struct_type == '@') {
        // Make pointer aligned
        p = (byte*)MP_ALIGN(p, (size_t)align);
        // native mode resolves to the machine's actual byte order
        if (MP_ENDIANNESS_LITTLE) {
            struct_type = '<';
        } else {
            struct_type = '>';
        }
    }
    *ptr = p + size;

    mp_uint_t val;
    switch (val_type) {
        case 'O':
            // store the raw object pointer
            val = (mp_uint_t)val_in;
            break;
#if MICROPY_PY_BUILTINS_FLOAT
        case 'f': {
            // reinterpret the float's bits as an integer via union punning
            union { uint32_t i; float f; } fp_sp;
            fp_sp.f = mp_obj_get_float(val_in);
            val = fp_sp.i;
            break;
        }
        case 'd': {
            union { uint64_t i64; uint32_t i32[2]; double f; } fp_dp;
            fp_dp.f = mp_obj_get_float(val_in);
            if (BYTES_PER_WORD == 8) {
                val = fp_dp.i64;
            } else {
                // 32-bit word: write the first half here, leave the second
                // half in val for the common store below
                int be = struct_type == '>';
                mp_binary_set_int(sizeof(uint32_t), be, p, fp_dp.i32[MP_ENDIANNESS_BIG ^ be]);
                p += sizeof(uint32_t);
                val = fp_dp.i32[MP_ENDIANNESS_LITTLE ^ be];
            }
            break;
        }
#endif
        default:
            #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
            if (MP_OBJ_IS_TYPE(val_in, &mp_type_int)) {
                // big ints serialise themselves directly into the buffer
                mp_obj_int_to_bytes_impl(val_in, struct_type == '>', size, p);
                return;
            } else
            #endif
            {
                val = mp_obj_get_int(val_in);
                // sign extend if needed
                if (BYTES_PER_WORD < 8 && size > sizeof(val) && is_signed(val_type) && (mp_int_t)val < 0) {
                    memset(p + sizeof(val), 0xff, size - sizeof(val));
                }
            }
    }

    mp_binary_set_int(MIN((size_t)size, sizeof(val)), struct_type == '>', p, val);
}
+
// Store the object val_in into element `index` of the C array p, converting
// it according to typecode.  Non-float, non-object typecodes are delegated
// to mp_binary_set_val_array_from_int after extracting the integer value.
void mp_binary_set_val_array(char typecode, void *p, mp_uint_t index, mp_obj_t val_in) {
    switch (typecode) {
#if MICROPY_PY_BUILTINS_FLOAT
        case 'f':
            ((float*)p)[index] = mp_obj_get_float(val_in);
            break;
        case 'd':
            ((double*)p)[index] = mp_obj_get_float(val_in);
            break;
#endif
        // Extension to CPython: array of objects
        case 'O':
            ((mp_obj_t*)p)[index] = val_in;
            break;
        default:
            #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
            // 64-bit slots ('q'/'Q') can take a big int's bytes directly
            if ((typecode | 0x20) == 'q' && MP_OBJ_IS_TYPE(val_in, &mp_type_int)) {
                mp_obj_int_to_bytes_impl(val_in, MP_ENDIANNESS_BIG,
                    sizeof(long long), (byte*)&((long long*)p)[index]);
                return;
            }
            #endif
            mp_binary_set_val_array_from_int(typecode, p, index, mp_obj_get_int(val_in));
    }
}
+
+void mp_binary_set_val_array_from_int(char typecode, void *p, mp_uint_t index, mp_int_t val) {
+ switch (typecode) {
+ case 'b':
+ ((signed char*)p)[index] = val;
+ break;
+ case BYTEARRAY_TYPECODE:
+ case 'B':
+ ((unsigned char*)p)[index] = val;
+ break;
+ case 'h':
+ ((short*)p)[index] = val;
+ break;
+ case 'H':
+ ((unsigned short*)p)[index] = val;
+ break;
+ case 'i':
+ ((int*)p)[index] = val;
+ break;
+ case 'I':
+ ((unsigned int*)p)[index] = val;
+ break;
+ case 'l':
+ ((long*)p)[index] = val;
+ break;
+ case 'L':
+ ((unsigned long*)p)[index] = val;
+ break;
+ #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
+ case 'q':
+ ((long long*)p)[index] = val;
+ case 'Q':
+ ((unsigned long long*)p)[index] = val;
+ break;
+ #endif
+#if MICROPY_PY_BUILTINS_FLOAT
+ case 'f':
+ ((float*)p)[index] = val;
+ break;
+ case 'd':
+ ((double*)p)[index] = val;
+ break;
+#endif
+ // Extension to CPython: array of pointers
+ case 'P':
+ ((void**)p)[index] = (void*)(uintptr_t)val;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/binary.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,44 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
#ifndef __MICROPY_INCLUDED_PY_BINARY_H__
#define __MICROPY_INCLUDED_PY_BINARY_H__

#include "py/obj.h"

// Use special typecode to differentiate repr() of bytearray vs array.array('B')
// (underlyingly they're same).
#define BYTEARRAY_TYPECODE 0

// Size in bytes of one value of struct-format char `val_type` under byte-order/
// packing char `struct_type`; presumably *palign receives alignment — see py/binary.c.
size_t mp_binary_get_size(char struct_type, char val_type, mp_uint_t *palign);
// Load element `index` of array `p` (array.array typecode) as a Python object.
mp_obj_t mp_binary_get_val_array(char typecode, void *p, mp_uint_t index);
// Store Python object `val_in` into element `index` of array `p`.
void mp_binary_set_val_array(char typecode, void *p, mp_uint_t index, mp_obj_t val_in);
// Store plain integer `val` into element `index` of array `p`;
// unknown typecodes are ignored.
void mp_binary_set_val_array_from_int(char typecode, void *p, mp_uint_t index, mp_int_t val);
// Read one value from *ptr per the struct-format chars, advancing *ptr.
mp_obj_t mp_binary_get_val(char struct_type, char val_type, byte **ptr);
// Write `val_in` through *ptr per the struct-format chars, advancing *ptr.
void mp_binary_set_val(char struct_type, char val_type, mp_obj_t val_in, byte **ptr);
// Decode a `size`-byte integer from `src` with the given signedness/endianness.
long long mp_binary_get_int(mp_uint_t size, bool is_signed, bool big_endian, const byte *src);
// Encode the low `val_sz` bytes of `val` into `dest` with the given endianness.
void mp_binary_set_int(mp_uint_t val_sz, bool big_endian, byte *dest, mp_uint_t val);

#endif // __MICROPY_INCLUDED_PY_BINARY_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/builtin.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,113 @@ +/* + * This file is part of the Micro Python project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013, 2014 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef __MICROPY_INCLUDED_PY_BUILTIN_H__ +#define __MICROPY_INCLUDED_PY_BUILTIN_H__ + +#include "py/obj.h" + +mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args); +mp_obj_t mp_builtin_open(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs); +mp_obj_t mp_micropython_mem_info(size_t n_args, const mp_obj_t *args); + +MP_DECLARE_CONST_FUN_OBJ(mp_builtin___build_class___obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin___import___obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin___repl_print___obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_abs_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_all_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_any_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_bin_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_callable_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_compile_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_chr_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_dir_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_divmod_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_eval_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_exec_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_execfile_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_getattr_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_setattr_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_globals_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_hasattr_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_hash_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_hex_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_id_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_isinstance_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_issubclass_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_iter_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_len_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_list_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_locals_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_max_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_min_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_next_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_oct_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_ord_obj); 
+MP_DECLARE_CONST_FUN_OBJ(mp_builtin_pow_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_print_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_repr_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_round_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_sorted_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_builtin_sum_obj); + +MP_DECLARE_CONST_FUN_OBJ(mp_namedtuple_obj); + +MP_DECLARE_CONST_FUN_OBJ(mp_op_contains_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_op_getitem_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_op_setitem_obj); +MP_DECLARE_CONST_FUN_OBJ(mp_op_delitem_obj); + +extern const mp_obj_module_t mp_module___main__; +extern const mp_obj_module_t mp_module_builtins; +extern const mp_obj_module_t mp_module_array; +extern const mp_obj_module_t mp_module_collections; +extern const mp_obj_module_t mp_module_io; +extern const mp_obj_module_t mp_module_math; +extern const mp_obj_module_t mp_module_cmath; +extern const mp_obj_module_t mp_module_micropython; +extern const mp_obj_module_t mp_module_ustruct; +extern const mp_obj_module_t mp_module_sys; +extern const mp_obj_module_t mp_module_gc; + +extern const mp_obj_dict_t mp_module_builtins_globals; + +// extmod modules +extern const mp_obj_module_t mp_module_uctypes; +extern const mp_obj_module_t mp_module_uzlib; +extern const mp_obj_module_t mp_module_ujson; +extern const mp_obj_module_t mp_module_ure; +extern const mp_obj_module_t mp_module_uheapq; +extern const mp_obj_module_t mp_module_uhashlib; +extern const mp_obj_module_t mp_module_ubinascii; +extern const mp_obj_module_t mp_module_urandom; +extern const mp_obj_module_t mp_module_ussl; +extern const mp_obj_module_t mp_module_machine; +extern const mp_obj_module_t mp_module_lwip; +extern const mp_obj_module_t mp_module_websocket; + +// extmod functions +MP_DECLARE_CONST_FUN_OBJ(pyb_mount_obj); + +#endif // __MICROPY_INCLUDED_PY_BUILTIN_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/builtinevex.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,168 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+
+#include "py/nlr.h"
+#include "py/objfun.h"
+#include "py/compile.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+
+#if MICROPY_PY_BUILTINS_COMPILE
+
// A code object as returned by compile(): wraps the callable produced from
// the compiled source so eval()/exec() can run it later.
typedef struct _mp_obj_code_t {
    mp_obj_base_t base;
    mp_obj_t module_fun; // callable that executes the compiled code
} mp_obj_code_t;

// Python-level type of the object returned by compile().
STATIC const mp_obj_type_t mp_type_code = {
    { &mp_type_type },
    .name = MP_QSTR_code,
};
+
// Execute a compiled code object in the given globals/locals dicts,
// restoring the caller's context afterwards — on both the normal and the
// exception path.
STATIC mp_obj_t code_execute(mp_obj_code_t *self, mp_obj_dict_t *globals, mp_obj_dict_t *locals) {
    // save context and set new context
    mp_obj_dict_t *old_globals = mp_globals_get();
    mp_obj_dict_t *old_locals = mp_locals_get();
    mp_globals_set(globals);
    mp_locals_set(locals);

    // a bit of a hack: fun_bc will re-set globals, so need to make sure it's
    // the correct one
    if (MP_OBJ_IS_TYPE(self->module_fun, &mp_type_fun_bc)) {
        mp_obj_fun_bc_t *fun_bc = MP_OBJ_TO_PTR(self->module_fun);
        fun_bc->globals = globals;
    }

    // execute code
    nlr_buf_t nlr; // nlr = non-local return, MicroPython's setjmp-style exception mechanism
    if (nlr_push(&nlr) == 0) {
        mp_obj_t ret = mp_call_function_0(self->module_fun);
        nlr_pop();
        mp_globals_set(old_globals);
        mp_locals_set(old_locals);
        return ret;
    } else {
        // exception; restore context and re-raise same exception
        mp_globals_set(old_globals);
        mp_locals_set(old_locals);
        nlr_jump(nlr.ret_val);
    }
}
+
// compile(source, filename, mode, ...) -> code object.
// Extra CPython arguments (flags, dont_inherit, optimize) are accepted
// (up to 6 args total) but ignored.
STATIC mp_obj_t mp_builtin_compile(size_t n_args, const mp_obj_t *args) {
    (void)n_args;

    // get the source
    mp_uint_t str_len;
    const char *str = mp_obj_str_get_data(args[0], &str_len);

    // get the filename
    qstr filename = mp_obj_str_get_qstr(args[1]);

    // create the lexer
    mp_lexer_t *lex = mp_lexer_new_from_str_len(filename, str, str_len, 0);

    // get the compile mode; maps CPython's mode strings onto parse kinds
    qstr mode = mp_obj_str_get_qstr(args[2]);
    mp_parse_input_kind_t parse_input_kind;
    switch (mode) {
        case MP_QSTR_single: parse_input_kind = MP_PARSE_SINGLE_INPUT; break;
        case MP_QSTR_exec: parse_input_kind = MP_PARSE_FILE_INPUT; break;
        case MP_QSTR_eval: parse_input_kind = MP_PARSE_EVAL_INPUT; break;
        default:
            nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "bad compile mode"));
    }

    mp_obj_code_t *code = m_new_obj(mp_obj_code_t);
    code->base.type = &mp_type_code;
    // NOTE(review): globals/locals are passed as NULL here — confirm that
    // mp_parse_compile_execute only compiles (does not run) in that case.
    code->module_fun = mp_parse_compile_execute(lex, parse_input_kind, NULL, NULL);
    return MP_OBJ_FROM_PTR(code);
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_compile_obj, 3, 6, mp_builtin_compile);
+
+#endif // MICROPY_PY_BUILTINS_COMPILE
+
+#if MICROPY_PY_BUILTINS_EVAL_EXEC
+
// Shared implementation of eval/exec/execfile: work out the globals/locals
// context, then either run a pre-compiled code object or lex+compile+run
// the given source. args[0] is the source string, a code object, or (for
// execfile) a file name; optional args[1]/args[2] are globals and locals.
STATIC mp_obj_t eval_exec_helper(size_t n_args, const mp_obj_t *args, mp_parse_input_kind_t parse_input_kind) {
    // work out the context
    mp_obj_dict_t *globals = mp_globals_get();
    mp_obj_dict_t *locals = mp_locals_get();
    if (n_args > 1) {
        globals = MP_OBJ_TO_PTR(args[1]);
        if (n_args > 2) {
            locals = MP_OBJ_TO_PTR(args[2]);
        } else {
            // only globals given: it doubles as the locals dict
            locals = globals;
        }
    }

    #if MICROPY_PY_BUILTINS_COMPILE
    // a code object from compile() can be executed directly
    if (MP_OBJ_IS_TYPE(args[0], &mp_type_code)) {
        return code_execute(MP_OBJ_TO_PTR(args[0]), globals, locals);
    }
    #endif

    mp_uint_t str_len;
    const char *str = mp_obj_str_get_data(args[0], &str_len);

    // create the lexer
    // MP_PARSE_SINGLE_INPUT is used to indicate a file input
    mp_lexer_t *lex;
    if (MICROPY_PY_BUILTINS_EXECFILE && parse_input_kind == MP_PARSE_SINGLE_INPUT) {
        // execfile(): the "source" argument is actually a file name
        lex = mp_lexer_new_from_file(str);
        if (lex == NULL) {
            nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_OSError, "could not open file '%s'", str));
        }
        parse_input_kind = MP_PARSE_FILE_INPUT;
    } else {
        lex = mp_lexer_new_from_str_len(MP_QSTR__lt_string_gt_, str, str_len, 0);
    }

    return mp_parse_compile_execute(lex, parse_input_kind, globals, locals);
}
+
// eval(source[, globals[, locals]]): evaluate an expression and return its value.
STATIC mp_obj_t mp_builtin_eval(size_t n_args, const mp_obj_t *args) {
    return eval_exec_helper(n_args, args, MP_PARSE_EVAL_INPUT);
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_eval_obj, 1, 3, mp_builtin_eval);
+
// exec(source[, globals[, locals]]): execute statements (file-style input).
STATIC mp_obj_t mp_builtin_exec(size_t n_args, const mp_obj_t *args) {
    return eval_exec_helper(n_args, args, MP_PARSE_FILE_INPUT);
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_exec_obj, 1, 3, mp_builtin_exec);
+
+#endif // MICROPY_PY_BUILTINS_EVAL_EXEC
+
+#if MICROPY_PY_BUILTINS_EXECFILE
// execfile(filename[, globals[, locals]]): execute a script file.
STATIC mp_obj_t mp_builtin_execfile(size_t n_args, const mp_obj_t *args) {
    // MP_PARSE_SINGLE_INPUT is used to indicate a file input
    return eval_exec_helper(n_args, args, MP_PARSE_SINGLE_INPUT);
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_execfile_obj, 1, 3, mp_builtin_execfile);
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/builtinimport.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,477 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/compile.h"
+#include "py/objmodule.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/frozenmod.h"
+
+#if 0 // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+#define PATH_SEP_CHAR '/'
+
+#if MICROPY_MODULE_WEAK_LINKS
+STATIC const mp_rom_map_elem_t mp_builtin_module_weak_links_table[] = {
+ MICROPY_PORT_BUILTIN_MODULE_WEAK_LINKS
+};
+
+STATIC MP_DEFINE_CONST_MAP(mp_builtin_module_weak_links_map, mp_builtin_module_weak_links_table);
+#endif
+
+bool mp_obj_is_package(mp_obj_t module) {
+ mp_obj_t dest[2];
+ mp_load_method_maybe(module, MP_QSTR___path__, dest);
+ return dest[0] != MP_OBJ_NULL;
+}
+
// Stat `path` as an import target, trying in order: a directory (package),
// "<path>.py", and — if persistent bytecode loading is enabled —
// "<path>.mpy". On success `path` is left holding the variant that matched.
STATIC mp_import_stat_t stat_dir_or_file(vstr_t *path) {
    mp_import_stat_t stat = mp_import_stat(vstr_null_terminated_str(path));
    DEBUG_printf("stat %s: %d\n", vstr_str(path), stat);
    if (stat == MP_IMPORT_STAT_DIR) {
        return stat;
    }

    // not a directory: try the name with ".py" appended
    vstr_add_str(path, ".py");
    stat = mp_import_stat(vstr_null_terminated_str(path));
    if (stat == MP_IMPORT_STAT_FILE) {
        return stat;
    }

    #if MICROPY_PERSISTENT_CODE_LOAD
    // insert 'm' before the trailing "py": "<name>.py" -> "<name>.mpy"
    vstr_ins_byte(path, path->len - 2, 'm');
    stat = mp_import_stat(vstr_null_terminated_str(path));
    if (stat == MP_IMPORT_STAT_FILE) {
        return stat;
    }
    #endif

    return MP_IMPORT_STAT_NO_EXIST;
}
+
// Resolve a module file name against sys.path. With MICROPY_PY_SYS, each
// sys.path entry is tried as a prefix in order; without it (or when
// sys.path is empty) the name is used as-is. On success `dest` holds the
// resolved path. NOTE: the braces deliberately straddle the #if blocks so
// that a build without sys compiles to just the fallback branch.
STATIC mp_import_stat_t find_file(const char *file_str, uint file_len, vstr_t *dest) {
#if MICROPY_PY_SYS
    // extract the list of paths
    mp_uint_t path_num;
    mp_obj_t *path_items;
    mp_obj_list_get(mp_sys_path, &path_num, &path_items);

    if (path_num == 0) {
#endif
        // mp_sys_path is empty, so just use the given file name
        vstr_add_strn(dest, file_str, file_len);
        return stat_dir_or_file(dest);
#if MICROPY_PY_SYS
    } else {
        // go through each path looking for a directory or file
        for (mp_uint_t i = 0; i < path_num; i++) {
            vstr_reset(dest);
            mp_uint_t p_len;
            const char *p = mp_obj_str_get_data(path_items[i], &p_len);
            if (p_len > 0) {
                vstr_add_strn(dest, p, p_len);
                vstr_add_char(dest, PATH_SEP_CHAR);
            }
            vstr_add_strn(dest, file_str, file_len);
            mp_import_stat_t stat = stat_dir_or_file(dest);
            if (stat != MP_IMPORT_STAT_NO_EXIST) {
                return stat;
            }
        }

        // could not find a directory or file
        return MP_IMPORT_STAT_NO_EXIST;
    }
#endif
}
+
#if MICROPY_ENABLE_COMPILER
// Parse, compile and execute module source from `lex` inside `module_obj`'s
// own globals dict. `fname` is only used for the error message raised when
// the lexer could not be created.
STATIC void do_load_from_lexer(mp_obj_t module_obj, mp_lexer_t *lex, const char *fname) {

    if (lex == NULL) {
        // we verified the file exists using stat, but lexer could still fail
        if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
            nlr_raise(mp_obj_new_exception_msg(&mp_type_ImportError, "module not found"));
        } else {
            nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ImportError,
                "no module named '%s'", fname));
        }
    }

    #if MICROPY_PY___FILE__
    // record the source file name on the module as __file__
    qstr source_name = lex->source_name;
    mp_store_attr(module_obj, MP_QSTR___file__, MP_OBJ_NEW_QSTR(source_name));
    #endif

    // parse, compile and execute the module in its context
    mp_obj_dict_t *mod_globals = mp_obj_module_get_globals(module_obj);
    mp_parse_compile_execute(lex, MP_PARSE_FILE_INPUT, mod_globals, mod_globals);
}
#endif
+
#if MICROPY_PERSISTENT_CODE_LOAD
// Execute pre-compiled bytecode (`raw_code`, loaded from a .mpy file)
// inside `module_obj`'s globals dict, restoring the caller's context on
// both the normal and the exception path.
STATIC void do_execute_raw_code(mp_obj_t module_obj, mp_raw_code_t *raw_code) {
    #if MICROPY_PY___FILE__
    // TODO
    //qstr source_name = lex->source_name;
    //mp_store_attr(module_obj, MP_QSTR___file__, MP_OBJ_NEW_QSTR(source_name));
    #endif

    // execute the module in its context
    mp_obj_dict_t *mod_globals = mp_obj_module_get_globals(module_obj);

    // save context
    // volatile: these locals live across nlr_push (setjmp-like), so they
    // must keep their values on the longjmp-style exception path
    mp_obj_dict_t *volatile old_globals = mp_globals_get();
    mp_obj_dict_t *volatile old_locals = mp_locals_get();

    // set new context
    mp_globals_set(mod_globals);
    mp_locals_set(mod_globals);

    nlr_buf_t nlr;
    if (nlr_push(&nlr) == 0) {
        mp_obj_t module_fun = mp_make_function_from_raw_code(raw_code, MP_OBJ_NULL, MP_OBJ_NULL);
        mp_call_function_0(module_fun);

        // finish nlr block, restore context
        nlr_pop();
        mp_globals_set(old_globals);
        mp_locals_set(old_locals);
    } else {
        // exception; restore context and re-raise same exception
        mp_globals_set(old_globals);
        mp_locals_set(old_locals);
        nlr_jump(nlr.ret_val);
    }
}
#endif
+
// Load and execute the module in the file named by `file`, which ends in
// ".py" or ".mpy" (as produced by stat_dir_or_file).
STATIC void do_load(mp_obj_t module_obj, vstr_t *file) {
    // create the lexer
    char *file_str = vstr_null_terminated_str(file);

    #if MICROPY_PERSISTENT_CODE_LOAD
    // "<name>.mpy" has 'm' three chars from the end, "<name>.py" has '.'
    // there (assumes file->len >= 3 — true for any stat_dir_or_file result)
    if (file_str[file->len - 3] == 'm') {
        mp_raw_code_t *raw_code = mp_raw_code_load_file(file_str);
        do_execute_raw_code(module_obj, raw_code);
        return;
    }
    #endif

    #if MICROPY_ENABLE_COMPILER
    {
        mp_lexer_t *lex = mp_lexer_new_from_file(file_str);
        do_load_from_lexer(module_obj, lex, file_str);
    }
    #else
    // no compiler in this build: only pre-compiled modules can be imported
    nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ImportError,
        "script compilation not supported"));
    #endif
}
+
// Shorten the dotted name [start, *end) by one component: move *end back to
// the last '.' strictly before it, or to `start` if no dot remains.
STATIC void chop_component(const char *start, const char **end) {
    const char *scan;
    for (scan = *end; scan > start;) {
        if (*--scan == '.') {
            // found the separator: the name now ends just before it
            *end = scan;
            return;
        }
    }
    // no dot left: collapse to the beginning of the name
    *end = scan;
}
+
// __import__(name, globals=None, locals=None, fromlist=(), level=0)
// MicroPython's implementation of the builtin __import__. Resolves relative
// imports against the current module's __name__, then walks each dotted
// component of the name, loading packages/modules from frozen modules or
// the filesystem as needed. Returns the top-level package, or the leaf
// module when fromlist is non-empty. Magic: fromtuple == False marks a
// "-m"-style load whose __name__ is set to "__main__".
mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) {
#if DEBUG_PRINT
    DEBUG_printf("__import__:\n");
    for (mp_uint_t i = 0; i < n_args; i++) {
        DEBUG_printf(" ");
        mp_obj_print(args[i], PRINT_REPR);
        DEBUG_printf("\n");
    }
#endif

    mp_obj_t module_name = args[0];
    mp_obj_t fromtuple = mp_const_none;
    mp_int_t level = 0;
    if (n_args >= 4) {
        fromtuple = args[3];
        if (n_args >= 5) {
            level = MP_OBJ_SMALL_INT_VALUE(args[4]);
        }
    }

    mp_uint_t mod_len;
    const char *mod_str = mp_obj_str_get_data(module_name, &mod_len);

    if (level != 0) {
        // What we want to do here is to take name of current module,
        // chop <level> trailing components, and concatenate with passed-in
        // module name, thus resolving relative import name into absolute.
        // This even appears to be correct per
        // http://legacy.python.org/dev/peps/pep-0328/#relative-imports-and-name
        // "Relative imports use a module's __name__ attribute to determine that
        // module's position in the package hierarchy."
        level--;
        mp_obj_t this_name_q = mp_obj_dict_get(MP_OBJ_FROM_PTR(mp_globals_get()), MP_OBJ_NEW_QSTR(MP_QSTR___name__));
        assert(this_name_q != MP_OBJ_NULL);
        #if MICROPY_CPYTHON_COMPAT
        if (MP_OBJ_QSTR_VALUE(this_name_q) == MP_QSTR___main__) {
            // This is a module run by -m command-line switch, get its real name from backup attribute
            this_name_q = mp_obj_dict_get(MP_OBJ_FROM_PTR(mp_globals_get()), MP_OBJ_NEW_QSTR(MP_QSTR___main__));
        }
        #endif
        mp_map_t *globals_map = &mp_globals_get()->map;
        mp_map_elem_t *elem = mp_map_lookup(globals_map, MP_OBJ_NEW_QSTR(MP_QSTR___path__), MP_MAP_LOOKUP);
        // presence of __path__ means the current module is a package
        bool is_pkg = (elem != NULL);

#if DEBUG_PRINT
        DEBUG_printf("Current module/package: ");
        mp_obj_print(this_name_q, PRINT_REPR);
        DEBUG_printf(", is_package: %d", is_pkg);
        DEBUG_printf("\n");
#endif

        mp_uint_t this_name_l;
        const char *this_name = mp_obj_str_get_data(this_name_q, &this_name_l);

        const char *p = this_name + this_name_l;
        if (!is_pkg) {
            // We have module, but relative imports are anchored at package, so
            // go there.
            chop_component(this_name, &p);
        }

        uint dots_seen = 0;
        while (level--) {
            chop_component(this_name, &p);
            dots_seen++;
        }

        // NOTE(review): `level` is always -1 after the loop above, so this
        // branch and the `else if` below look unreachable — confirm intent.
        if (dots_seen == 0 && level >= 1) {
            // http://legacy.python.org/dev/peps/pep-0328/#relative-imports-and-name
            // "If the module's name does not contain any package information
            // (e.g. it is set to '__main__') then relative imports are
            // resolved as if the module were a top level module, regardless
            // of where the module is actually located on the file system."
            // Supposedly this if catches this condition and resolve it properly
            // TODO: But nobody knows for sure. This condition happens when
            // package's __init__.py does something like "import .submod". So,
            // maybe we should check for package here? But quote above doesn't
            // talk about packages, it talks about dot-less module names.
            DEBUG_printf("Warning: no dots in current module name and level>0\n");
            p = this_name + this_name_l;
        } else if (level != -1) {
            nlr_raise(mp_obj_new_exception_msg(&mp_type_ImportError, "Invalid relative import"));
        }

        // build "<base>.<name>" (or just "<base>" when name is empty)
        uint new_mod_l = (mod_len == 0 ? (size_t)(p - this_name) : (size_t)(p - this_name) + 1 + mod_len);
        char *new_mod = alloca(new_mod_l);
        memcpy(new_mod, this_name, p - this_name);
        if (mod_len != 0) {
            new_mod[p - this_name] = '.';
            memcpy(new_mod + (p - this_name) + 1, mod_str, mod_len);
        }

        qstr new_mod_q = qstr_from_strn(new_mod, new_mod_l);
        DEBUG_printf("Resolved base name for relative import: '%s'\n", qstr_str(new_mod_q));
        if (new_mod_q == MP_QSTR_) {
            // CPython raises SystemError
            nlr_raise(mp_obj_new_exception_msg(&mp_type_ImportError, "cannot perform relative import"));
        }
        module_name = MP_OBJ_NEW_QSTR(new_mod_q);
        mod_str = new_mod;
        mod_len = new_mod_l;
    }

    // check if module already exists
    qstr module_name_qstr = mp_obj_str_get_qstr(module_name);
    mp_obj_t module_obj = mp_module_get(module_name_qstr);
    if (module_obj != MP_OBJ_NULL) {
        DEBUG_printf("Module already loaded\n");
        // If it's not a package, return module right away
        char *p = strchr(mod_str, '.');
        if (p == NULL) {
            return module_obj;
        }
        // If fromlist is not empty, return leaf module
        if (fromtuple != mp_const_none) {
            return module_obj;
        }
        // Otherwise, we need to return top-level package
        qstr pkg_name = qstr_from_strn(mod_str, p - mod_str);
        return mp_module_get(pkg_name);
    }
    DEBUG_printf("Module not yet loaded\n");

    #if MICROPY_MODULE_FROZEN
    // modules frozen into the firmware take precedence over the filesystem
    mp_lexer_t *lex = mp_find_frozen_module(mod_str, mod_len);
    if (lex != NULL) {
        module_obj = mp_obj_new_module(module_name_qstr);
        // if args[3] (fromtuple) has magic value False, set up
        // this module for command-line "-m" option (set module's
        // name to __main__ instead of real name).
        // TODO: Duplicated below too.
        if (fromtuple == mp_const_false) {
            mp_obj_module_t *o = MP_OBJ_TO_PTR(module_obj);
            mp_obj_dict_store(MP_OBJ_FROM_PTR(o->globals), MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR___main__));
        }
        do_load_from_lexer(module_obj, lex, mod_str);
        return module_obj;
    }
    #endif

    // walk the dotted name one component at a time, loading each level
    uint last = 0;
    VSTR_FIXED(path, MICROPY_ALLOC_PATH_MAX)
    module_obj = MP_OBJ_NULL;
    mp_obj_t top_module_obj = MP_OBJ_NULL;
    mp_obj_t outer_module_obj = MP_OBJ_NULL;
    uint i;
    for (i = 1; i <= mod_len; i++) {
        if (i == mod_len || mod_str[i] == '.') {
            // create a qstr for the module name up to this depth
            qstr mod_name = qstr_from_strn(mod_str, i);
            DEBUG_printf("Processing module: %s\n", qstr_str(mod_name));
            DEBUG_printf("Previous path: =%.*s=\n", vstr_len(&path), vstr_str(&path));

            // find the file corresponding to the module name
            mp_import_stat_t stat;
            if (vstr_len(&path) == 0) {
                // first module in the dotted-name; search for a directory or file
                stat = find_file(mod_str, i, &path);
            } else {
                // latter module in the dotted-name; append to path
                vstr_add_char(&path, PATH_SEP_CHAR);
                vstr_add_strn(&path, mod_str + last, i - last);
                stat = stat_dir_or_file(&path);
            }
            DEBUG_printf("Current path: %.*s\n", vstr_len(&path), vstr_str(&path));

            if (stat == MP_IMPORT_STAT_NO_EXIST) {
                #if MICROPY_MODULE_WEAK_LINKS
                // check if there is a weak link to this module
                if (i == mod_len) {
                    mp_map_elem_t *el = mp_map_lookup((mp_map_t*)&mp_builtin_module_weak_links_map, MP_OBJ_NEW_QSTR(mod_name), MP_MAP_LOOKUP);
                    if (el == NULL) {
                        goto no_exist;
                    }
                    // found weak linked module
                    module_obj = el->value;
                } else {
                    no_exist:
                #else
                {
                #endif
                    // couldn't find the file, so fail
                    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
                        nlr_raise(mp_obj_new_exception_msg(&mp_type_ImportError, "module not found"));
                    } else {
                        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ImportError,
                            "no module named '%q'", mod_name));
                    }
                }
            } else {
                // found the file, so get the module
                module_obj = mp_module_get(mod_name);
            }

            if (module_obj == MP_OBJ_NULL) {
                // module not already loaded, so load it!

                module_obj = mp_obj_new_module(mod_name);

                // if args[3] (fromtuple) has magic value False, set up
                // this module for command-line "-m" option (set module's
                // name to __main__ instead of real name).
                if (i == mod_len && fromtuple == mp_const_false) {
                    mp_obj_module_t *o = MP_OBJ_TO_PTR(module_obj);
                    mp_obj_dict_store(MP_OBJ_FROM_PTR(o->globals), MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR___main__));
                    #if MICROPY_CPYTHON_COMPAT
                    // Store real name in "__main__" attribute. Chosen semi-randomly, to reuse existing qstr's.
                    mp_obj_dict_store(MP_OBJ_FROM_PTR(o->globals), MP_OBJ_NEW_QSTR(MP_QSTR___main__), MP_OBJ_NEW_QSTR(mod_name));
                    #endif
                }

                if (stat == MP_IMPORT_STAT_DIR) {
                    DEBUG_printf("%.*s is dir\n", vstr_len(&path), vstr_str(&path));
                    // https://docs.python.org/3/reference/import.html
                    // "Specifically, any module that contains a __path__ attribute is considered a package."
                    mp_store_attr(module_obj, MP_QSTR___path__, mp_obj_new_str(vstr_str(&path), vstr_len(&path), false));
                    vstr_add_char(&path, PATH_SEP_CHAR);
                    vstr_add_str(&path, "__init__.py");
                    if (mp_import_stat(vstr_null_terminated_str(&path)) != MP_IMPORT_STAT_FILE) {
                        vstr_cut_tail_bytes(&path, sizeof("/__init__.py") - 1); // cut off /__init__.py
                        mp_warning("%s is imported as namespace package", vstr_str(&path));
                    } else {
                        do_load(module_obj, &path);
                        vstr_cut_tail_bytes(&path, sizeof("/__init__.py") - 1); // cut off /__init__.py
                    }
                } else { // MP_IMPORT_STAT_FILE
                    do_load(module_obj, &path);
                    // TODO: We cannot just break here, at the very least, we must execute
                    // trailer code below. But otherwise if there're remaining components,
                    // that would be (??) object path within module, not modules path within FS.
                    // break;
                }
            }
            // link this module into its parent package as an attribute
            if (outer_module_obj != MP_OBJ_NULL) {
                qstr s = qstr_from_strn(mod_str + last, i - last);
                mp_store_attr(outer_module_obj, s, module_obj);
            }
            outer_module_obj = module_obj;
            if (top_module_obj == MP_OBJ_NULL) {
                top_module_obj = module_obj;
            }
            last = i + 1;
        }
    }

    if (i < mod_len) {
        // we loaded a package, now need to load objects from within that package
        // TODO
        assert(0);
    }

    // If fromlist is not empty, return leaf module
    if (fromtuple != mp_const_none) {
        return module_obj;
    }
    // Otherwise, we need to return top-level package
    return top_module_obj;
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin___import___obj, 1, 5, mp_builtin___import__);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/compile.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,3342 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2015 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/scope.h"
+#include "py/emit.h"
+#include "py/compile.h"
+#include "py/runtime.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+// TODO need to mangle __attr names
+
+// Kinds of parse nodes: one PN_xxx constant per grammar rule, generated by
+// X-macro expansion of py/grammar.h, plus a few special kinds appended after
+// the generated list for nodes that don't correspond to grammar rules.
+typedef enum {
+#define DEF_RULE(rule, comp, kind, ...) PN_##rule,
+#include "py/grammar.h"
+#undef DEF_RULE
+    PN_maximum_number_of,
+    PN_string, // special node for non-interned string
+    PN_bytes, // special node for non-interned bytes
+    PN_const_object, // special node for a constant, generic Python object
+} pn_kind_t;
+
+// NEED_METHOD_TABLE: when the native emitter is enabled there is more than
+// one emitter implementation, so emitter calls must be dispatched through a
+// method table; otherwise calls go directly to the bytecode emitter.
+#define NEED_METHOD_TABLE MICROPY_EMIT_NATIVE
+
+#if NEED_METHOD_TABLE
+
+// we need a method table to do the lookup for the emitter functions
+#define EMIT(fun) (comp->emit_method_table->fun(comp->emit))
+#define EMIT_ARG(fun, ...) (comp->emit_method_table->fun(comp->emit, __VA_ARGS__))
+#define EMIT_LOAD_FAST(qst, local_num) (comp->emit_method_table->load_id.fast(comp->emit, qst, local_num))
+#define EMIT_LOAD_GLOBAL(qst) (comp->emit_method_table->load_id.global(comp->emit, qst))
+
+#else
+
+// if we only have the bytecode emitter enabled then we can do a direct call to the functions
+#define EMIT(fun) (mp_emit_bc_##fun(comp->emit))
+#define EMIT_ARG(fun, ...) (mp_emit_bc_##fun(comp->emit, __VA_ARGS__))
+#define EMIT_LOAD_FAST(qst, local_num) (mp_emit_bc_load_fast(comp->emit, qst, local_num))
+#define EMIT_LOAD_GLOBAL(qst) (mp_emit_bc_load_global(comp->emit, qst))
+
+#endif
+
+// inline assembler emit calls always dispatch through their own method table
+#define EMIT_INLINE_ASM(fun) (comp->emit_inline_asm_method_table->fun(comp->emit_inline_asm))
+#define EMIT_INLINE_ASM_ARG(fun, ...) (comp->emit_inline_asm_method_table->fun(comp->emit_inline_asm, __VA_ARGS__))
+
+// elements in this struct are ordered to make it compact
+// Per-compilation state, threaded through every compile_xxx function below.
+typedef struct _compiler_t {
+    qstr source_file; // name of the file being compiled, passed to new scopes
+
+    uint8_t is_repl;
+    uint8_t pass; // holds enum type pass_kind_t
+    uint8_t func_arg_is_super; // used to compile special case of super() function call
+    uint8_t have_star; // set while compiling params once a *-param has been seen
+
+    // try to keep compiler clean from nlr
+    mp_obj_t compile_error; // set to an exception object if there's an error
+    size_t compile_error_line; // set to best guess of line of error
+
+    uint next_label; // next free jump-target label number
+
+    uint16_t num_dict_params; // count of keyword-only default params seen so far
+    uint16_t num_default_params; // count of positional default params seen so far
+
+    uint16_t break_label; // highest bit set indicates we are breaking out of a for loop
+    uint16_t continue_label;
+    uint16_t cur_except_level; // increased for SETUP_EXCEPT, SETUP_FINALLY; decreased for POP_BLOCK, POP_EXCEPT
+    uint16_t break_continue_except_level; // except level at entry to the innermost loop
+
+    scope_head *scope_head == NULL is never true here; see scope_new_and_link
+    scope_t *scope_head; // head of linked list of all scopes created this compile
+    scope_t *scope_cur; // scope currently being compiled
+
+    emit_t *emit; // current emitter
+    #if NEED_METHOD_TABLE
+    const emit_method_table_t *emit_method_table; // current emit method table
+    #endif
+
+    #if MICROPY_EMIT_INLINE_THUMB
+    emit_inline_asm_t *emit_inline_asm; // current emitter for inline asm
+    const emit_inline_asm_method_table_t *emit_inline_asm_method_table; // current emit method table for inline asm
+    #endif
+} compiler_t;
+
+STATIC void compile_error_set_line(compiler_t *comp, mp_parse_node_t pn) {
+ // if the line of the error is unknown then try to update it from the pn
+ if (comp->compile_error_line == 0 && MP_PARSE_NODE_IS_STRUCT(pn)) {
+ comp->compile_error_line = ((mp_parse_node_struct_t*)pn)->source_line;
+ }
+}
+
+// Record a SyntaxError for node pn with the given message.  Only the first
+// error of a compilation is kept; later ones are ignored.
+STATIC void compile_syntax_error(compiler_t *comp, mp_parse_node_t pn, const char *msg) {
+    if (comp->compile_error != MP_OBJ_NULL) {
+        return; // an earlier error takes precedence
+    }
+    comp->compile_error = mp_obj_new_exception_msg(&mp_type_SyntaxError, msg);
+    compile_error_set_line(comp, pn);
+}
+
+STATIC void compile_trailer_paren_helper(compiler_t *comp, mp_parse_node_t pn_arglist, bool is_method_call, int n_positional_extra);
+STATIC void compile_comprehension(compiler_t *comp, mp_parse_node_struct_t *pns, scope_kind_t kind);
+STATIC void compile_node(compiler_t *comp, mp_parse_node_t pn);
+
+// Allocate and return a fresh jump-target label number.
+STATIC uint comp_next_label(compiler_t *comp) {
+    uint label = comp->next_label;
+    comp->next_label = label + 1;
+    return label;
+}
+
+// Note entry into an exception-handling block (SETUP_EXCEPT/SETUP_FINALLY),
+// updating the scope's high-water mark of nested exception levels.
+STATIC void compile_increase_except_level(compiler_t *comp) {
+    comp->cur_except_level += 1;
+    scope_t *scope = comp->scope_cur;
+    if (scope->exc_stack_size < comp->cur_except_level) {
+        scope->exc_stack_size = comp->cur_except_level;
+    }
+}
+
+// Note exit from an exception-handling block (POP_BLOCK/POP_EXCEPT).
+STATIC void compile_decrease_except_level(compiler_t *comp) {
+    assert(comp->cur_except_level > 0);
+    --comp->cur_except_level;
+}
+
+// Create a new scope of the given kind, parent it to the current scope, and
+// append it to the compiler's singly-linked list of all scopes.
+STATIC scope_t *scope_new_and_link(compiler_t *comp, scope_kind_t kind, mp_parse_node_t pn, uint emit_options) {
+    scope_t *scope = scope_new(kind, pn, comp->source_file, emit_options);
+    scope->parent = comp->scope_cur;
+    scope->next = NULL;
+    // walk to the list tail (handles the empty-list case uniformly)
+    scope_t **tail = &comp->scope_head;
+    while (*tail != NULL) {
+        tail = &(*tail)->next;
+    }
+    *tail = scope;
+    return scope;
+}
+
+typedef void (*apply_list_fun_t)(compiler_t *comp, mp_parse_node_t pn);
+
+// Apply f to pn directly, or to each child when pn is a list node of kind
+// pn_list_kind; a NULL node is silently ignored.
+STATIC void apply_to_single_or_list(compiler_t *comp, mp_parse_node_t pn, pn_kind_t pn_list_kind, apply_list_fun_t f) {
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, pn_list_kind)) {
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+        int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+        for (int idx = 0; idx < n; idx++) {
+            f(comp, pns->nodes[idx]);
+        }
+        return;
+    }
+    if (!MP_PARSE_NODE_IS_NULL(pn)) {
+        f(comp, pn);
+    }
+}
+
+STATIC void compile_generic_all_nodes(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+ for (int i = 0; i < num_nodes; i++) {
+ compile_node(comp, pns->nodes[i]);
+ if (comp->compile_error != MP_OBJ_NULL) {
+ // add line info for the error in case it didn't have a line number
+ compile_error_set_line(comp, pns->nodes[i]);
+ return;
+ }
+ }
+}
+
+// Emit a load of identifier qst, or just record its use on the scope pass.
+STATIC void compile_load_id(compiler_t *comp, qstr qst) {
+    if (comp->pass != MP_PASS_SCOPE) {
+        #if NEED_METHOD_TABLE
+        mp_emit_common_id_op(comp->emit, &comp->emit_method_table->load_id, comp->scope_cur, qst);
+        #else
+        mp_emit_common_id_op(comp->emit, &mp_emit_bc_method_table_load_id_ops, comp->scope_cur, qst);
+        #endif
+    } else {
+        // first pass: only note that this id is loaded in the current scope
+        mp_emit_common_get_id_for_load(comp->scope_cur, qst);
+    }
+}
+
+// Emit a store to identifier qst, or just record the modification on the
+// scope pass.
+STATIC void compile_store_id(compiler_t *comp, qstr qst) {
+    if (comp->pass != MP_PASS_SCOPE) {
+        #if NEED_METHOD_TABLE
+        mp_emit_common_id_op(comp->emit, &comp->emit_method_table->store_id, comp->scope_cur, qst);
+        #else
+        mp_emit_common_id_op(comp->emit, &mp_emit_bc_method_table_store_id_ops, comp->scope_cur, qst);
+        #endif
+    } else {
+        // first pass: only note that this id is modified in the current scope
+        mp_emit_common_get_id_for_modification(comp->scope_cur, qst);
+    }
+}
+
+// Emit a delete of identifier qst, or just record the modification on the
+// scope pass (delete counts as a modification for scoping purposes).
+STATIC void compile_delete_id(compiler_t *comp, qstr qst) {
+    if (comp->pass != MP_PASS_SCOPE) {
+        #if NEED_METHOD_TABLE
+        mp_emit_common_id_op(comp->emit, &comp->emit_method_table->delete_id, comp->scope_cur, qst);
+        #else
+        mp_emit_common_id_op(comp->emit, &mp_emit_bc_method_table_delete_id_ops, comp->scope_cur, qst);
+        #endif
+    } else {
+        // first pass: only note that this id is modified in the current scope
+        mp_emit_common_get_id_for_modification(comp->scope_cur, qst);
+    }
+}
+
+// Build a tuple on the stack from an optional head node pn followed by the
+// children of an optional list node pns_list.
+STATIC void c_tuple(compiler_t *comp, mp_parse_node_t pn, mp_parse_node_struct_t *pns_list) {
+    int num_items = 0;
+    if (!MP_PARSE_NODE_IS_NULL(pn)) {
+        compile_node(comp, pn);
+        num_items++;
+    }
+    if (pns_list != NULL) {
+        int num_tail = MP_PARSE_NODE_STRUCT_NUM_NODES(pns_list);
+        for (int idx = 0; idx < num_tail; idx++) {
+            compile_node(comp, pns_list->nodes[idx]);
+        }
+        num_items += num_tail;
+    }
+    EMIT_ARG(build_tuple, num_items);
+}
+
+// Compile a simple tuple expression: no head node, all items in the list node.
+STATIC void compile_generic_tuple(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    c_tuple(comp, MP_PARSE_NODE_NULL, pns);
+}
+
+// True if pn is a compile-time-false constant: the keyword False, or a small
+// integer equal to zero.
+STATIC bool node_is_const_false(mp_parse_node_t pn) {
+    if (MP_PARSE_NODE_IS_TOKEN_KIND(pn, MP_TOKEN_KW_FALSE)) {
+        return true;
+    }
+    return MP_PARSE_NODE_IS_SMALL_INT(pn) && MP_PARSE_NODE_LEAF_SMALL_INT(pn) == 0;
+}
+
+// True if pn is a compile-time-true constant: the keyword True, or a non-zero
+// small integer.
+STATIC bool node_is_const_true(mp_parse_node_t pn) {
+    if (MP_PARSE_NODE_IS_TOKEN_KIND(pn, MP_TOKEN_KW_TRUE)) {
+        return true;
+    }
+    return MP_PARSE_NODE_IS_SMALL_INT(pn) && MP_PARSE_NODE_LEAF_SMALL_INT(pn) != 0;
+}
+
+// Compile a conditional jump for expression pn: jump to label when the truth
+// value of pn equals jump_if, otherwise fall through.  Constant conditions,
+// and/or chains, 'not', and parenthesised tuples are special-cased so no
+// boolean needs to be materialised on the stack.  NOTE: the or_test and
+// and_test cases share code via gotos between the two if-branches (which
+// logic applies depends on jump_if), so the labelled code must read only
+// pns/n/jump_if, all valid at the goto sites.
+STATIC void c_if_cond(compiler_t *comp, mp_parse_node_t pn, bool jump_if, int label) {
+    if (node_is_const_false(pn)) {
+        // constant false: jump only when jumping-on-false
+        if (jump_if == false) {
+            EMIT_ARG(jump, label);
+        }
+        return;
+    } else if (node_is_const_true(pn)) {
+        // constant true: jump only when jumping-on-true
+        if (jump_if == true) {
+            EMIT_ARG(jump, label);
+        }
+        return;
+    } else if (MP_PARSE_NODE_IS_STRUCT(pn)) {
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+        int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+        if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_or_test) {
+            if (jump_if == false) {
+            and_or_logic1:;
+                // all but the last operand short-circuit to a local label;
+                // only the last operand decides the jump to 'label'
+                uint label2 = comp_next_label(comp);
+                for (int i = 0; i < n - 1; i++) {
+                    c_if_cond(comp, pns->nodes[i], !jump_if, label2);
+                }
+                c_if_cond(comp, pns->nodes[n - 1], jump_if, label);
+                EMIT_ARG(label_assign, label2);
+            } else {
+            and_or_logic2:
+                // any operand matching jump_if jumps straight to 'label'
+                for (int i = 0; i < n; i++) {
+                    c_if_cond(comp, pns->nodes[i], jump_if, label);
+                }
+            }
+            return;
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_and_test) {
+            // 'and' is the dual of 'or': reuse the logic above with the
+            // branches swapped
+            if (jump_if == false) {
+                goto and_or_logic2;
+            } else {
+                goto and_or_logic1;
+            }
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_not_test_2) {
+            // 'not x': compile x with the jump sense inverted
+            c_if_cond(comp, pns->nodes[0], !jump_if, label);
+            return;
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_atom_paren) {
+            // cond is something in parenthesis
+            if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+                // empty tuple, acts as false for the condition
+                if (jump_if == false) {
+                    EMIT_ARG(jump, label);
+                }
+            } else {
+                assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp));
+                // non-empty tuple, acts as true for the condition
+                if (jump_if == true) {
+                    EMIT_ARG(jump, label);
+                }
+            }
+            return;
+        }
+    }
+
+    // nothing special, fall back to default compiling for node and jump
+    compile_node(comp, pn);
+    EMIT_ARG(pop_jump_if, jump_if, label);
+}
+
+typedef enum { ASSIGN_STORE, ASSIGN_AUG_LOAD, ASSIGN_AUG_STORE } assign_kind_t;
+STATIC void c_assign(compiler_t *comp, mp_parse_node_t pn, assign_kind_t kind);
+
+// Compile an assignment whose LHS is a power node, ie a subscript a[b] or an
+// attribute a.b, possibly after a chain of other trailers.  assign_kind
+// selects a plain store, or the load/store half of an augmented assignment;
+// for ASSIGN_AUG_STORE the base (and index) are already on the stack, having
+// been left there by the earlier ASSIGN_AUG_LOAD of the same target.
+STATIC void c_assign_power(compiler_t *comp, mp_parse_node_struct_t *pns, assign_kind_t assign_kind) {
+    if (assign_kind != ASSIGN_AUG_STORE) {
+        compile_node(comp, pns->nodes[0]);
+    }
+
+    if (MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])) {
+        mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t*)pns->nodes[1];
+        if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_power_trailers) {
+            // several trailers: compile all but the last, which is the
+            // actual assignment target
+            int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns1);
+            if (assign_kind != ASSIGN_AUG_STORE) {
+                for (int i = 0; i < n - 1; i++) {
+                    compile_node(comp, pns1->nodes[i]);
+                }
+            }
+            assert(MP_PARSE_NODE_IS_STRUCT(pns1->nodes[n - 1]));
+            pns1 = (mp_parse_node_struct_t*)pns1->nodes[n - 1];
+        }
+        if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_bracket) {
+            // subscript target: base[index]
+            if (assign_kind == ASSIGN_AUG_STORE) {
+                EMIT(rot_three);
+                EMIT(store_subscr);
+            } else {
+                compile_node(comp, pns1->nodes[0]);
+                if (assign_kind == ASSIGN_AUG_LOAD) {
+                    // keep base+index on the stack for the later AUG_STORE
+                    EMIT(dup_top_two);
+                    EMIT(load_subscr);
+                } else {
+                    EMIT(store_subscr);
+                }
+            }
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_period) {
+            // attribute target: base.attr
+            assert(MP_PARSE_NODE_IS_ID(pns1->nodes[0]));
+            if (assign_kind == ASSIGN_AUG_LOAD) {
+                // keep base on the stack for the later AUG_STORE
+                EMIT(dup_top);
+                EMIT_ARG(load_attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]));
+            } else {
+                if (assign_kind == ASSIGN_AUG_STORE) {
+                    EMIT(rot_two);
+                }
+                EMIT_ARG(store_attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]));
+            }
+        } else {
+            goto cannot_assign;
+        }
+    } else {
+        goto cannot_assign;
+    }
+
+    // a non-empty nodes[2] (presumably the '**' exponent part of the power
+    // node — TODO confirm against the grammar) cannot be assigned to
+    if (!MP_PARSE_NODE_IS_NULL(pns->nodes[2])) {
+        goto cannot_assign;
+    }
+
+    return;
+
+cannot_assign:
+    compile_syntax_error(comp, (mp_parse_node_t)pns, "can't assign to expression");
+}
+
+// we need to allow for a caller passing in 1 initial node (node_head) followed by an array of nodes (nodes_tail)
+// Compile unpacking assignment to a sequence of targets.  At most one starred
+// target (*x) is allowed; it collects the excess items via unpack_ex.
+// (uint)-1 serves as the "no star seen yet" sentinel for have_star_index.
+STATIC void c_assign_tuple(compiler_t *comp, mp_parse_node_t node_head, uint num_tail, mp_parse_node_t *nodes_tail) {
+    uint num_head = (node_head == MP_PARSE_NODE_NULL) ? 0 : 1;
+
+    // look for star expression
+    uint have_star_index = -1;
+    if (num_head != 0 && MP_PARSE_NODE_IS_STRUCT_KIND(node_head, PN_star_expr)) {
+        EMIT_ARG(unpack_ex, 0, num_tail);
+        have_star_index = 0;
+    }
+    for (uint i = 0; i < num_tail; i++) {
+        if (MP_PARSE_NODE_IS_STRUCT_KIND(nodes_tail[i], PN_star_expr)) {
+            if (have_star_index == (uint)-1) {
+                // star found: unpack with (before, after) counts
+                EMIT_ARG(unpack_ex, num_head + i, num_tail - i - 1);
+                have_star_index = num_head + i;
+            } else {
+                compile_syntax_error(comp, nodes_tail[i], "multiple *x in assignment");
+                return;
+            }
+        }
+    }
+    if (have_star_index == (uint)-1) {
+        // no star: plain fixed-length unpack
+        EMIT_ARG(unpack_sequence, num_head + num_tail);
+    }
+    // assign to each target in order; a starred target assigns to its inner node
+    if (num_head != 0) {
+        if (0 == have_star_index) {
+            c_assign(comp, ((mp_parse_node_struct_t*)node_head)->nodes[0], ASSIGN_STORE);
+        } else {
+            c_assign(comp, node_head, ASSIGN_STORE);
+        }
+    }
+    for (uint i = 0; i < num_tail; i++) {
+        if (num_head + i == have_star_index) {
+            c_assign(comp, ((mp_parse_node_struct_t*)nodes_tail[i])->nodes[0], ASSIGN_STORE);
+        } else {
+            c_assign(comp, nodes_tail[i], ASSIGN_STORE);
+        }
+    }
+}
+
+// assigns top of stack to pn
+// Dispatches on the kind of assignment target: plain identifier, subscript or
+// attribute (power node), or tuple/list unpacking (possibly nested in parens
+// or brackets).  assign_kind distinguishes a plain store from the two phases
+// of an augmented assignment; unpacking targets only allow ASSIGN_STORE.
+STATIC void c_assign(compiler_t *comp, mp_parse_node_t pn, assign_kind_t assign_kind) {
+    assert(!MP_PARSE_NODE_IS_NULL(pn));
+    if (MP_PARSE_NODE_IS_LEAF(pn)) {
+        if (MP_PARSE_NODE_IS_ID(pn)) {
+            qstr arg = MP_PARSE_NODE_LEAF_ARG(pn);
+            switch (assign_kind) {
+                case ASSIGN_STORE:
+                case ASSIGN_AUG_STORE:
+                    compile_store_id(comp, arg);
+                    break;
+                case ASSIGN_AUG_LOAD:
+                default:
+                    compile_load_id(comp, arg);
+                    break;
+            }
+        } else {
+            compile_syntax_error(comp, pn, "can't assign to literal");
+            return;
+        }
+    } else {
+        // pn must be a struct
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+        switch (MP_PARSE_NODE_STRUCT_KIND(pns)) {
+            case PN_power:
+                // lhs is an index or attribute
+                c_assign_power(comp, pns, assign_kind);
+                break;
+
+            case PN_testlist_star_expr:
+            case PN_exprlist:
+                // lhs is a tuple
+                if (assign_kind != ASSIGN_STORE) {
+                    goto bad_aug;
+                }
+                c_assign_tuple(comp, MP_PARSE_NODE_NULL, MP_PARSE_NODE_STRUCT_NUM_NODES(pns), pns->nodes);
+                break;
+
+            case PN_atom_paren:
+                // lhs is something in parenthesis
+                if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+                    // empty tuple
+                    goto cannot_assign;
+                } else {
+                    assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp));
+                    if (assign_kind != ASSIGN_STORE) {
+                        goto bad_aug;
+                    }
+                    pns = (mp_parse_node_struct_t*)pns->nodes[0];
+                    goto testlist_comp;
+                }
+                break;
+
+            case PN_atom_bracket:
+                // lhs is something in brackets
+                if (assign_kind != ASSIGN_STORE) {
+                    goto bad_aug;
+                }
+                if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+                    // empty list, assignment allowed
+                    c_assign_tuple(comp, MP_PARSE_NODE_NULL, 0, NULL);
+                } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp)) {
+                    pns = (mp_parse_node_struct_t*)pns->nodes[0];
+                    goto testlist_comp;
+                } else {
+                    // brackets around 1 item
+                    c_assign_tuple(comp, pns->nodes[0], 0, NULL);
+                }
+                break;
+
+            default:
+                goto cannot_assign;
+        }
+        return;
+
+        testlist_comp:
+        // lhs is a sequence
+        if (MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])) {
+            mp_parse_node_struct_t *pns2 = (mp_parse_node_struct_t*)pns->nodes[1];
+            if (MP_PARSE_NODE_STRUCT_KIND(pns2) == PN_testlist_comp_3b) {
+                // sequence of one item, with trailing comma
+                assert(MP_PARSE_NODE_IS_NULL(pns2->nodes[0]));
+                c_assign_tuple(comp, pns->nodes[0], 0, NULL);
+            } else if (MP_PARSE_NODE_STRUCT_KIND(pns2) == PN_testlist_comp_3c) {
+                // sequence of many items
+                uint n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns2);
+                c_assign_tuple(comp, pns->nodes[0], n, pns2->nodes);
+            } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_comp_for) {
+                // NOTE(review): this tests pns, not pns2, unlike the two
+                // branches above — looks inconsistent; confirm intent
+                // TODO can we ever get here? can it be compiled?
+                goto cannot_assign;
+            } else {
+                // sequence with 2 items
+                goto sequence_with_2_items;
+            }
+        } else {
+            // sequence with 2 items
+            sequence_with_2_items:
+            c_assign_tuple(comp, MP_PARSE_NODE_NULL, 2, pns->nodes);
+        }
+        return;
+    }
+    return;
+
+    cannot_assign:
+    compile_syntax_error(comp, pn, "can't assign to expression");
+    return;
+
+    bad_aug:
+    compile_syntax_error(comp, pn, "illegal expression for augmented assignment");
+}
+
+// stuff for lambda and comprehensions and generators:
+// if n_pos_defaults > 0 then there is a tuple on the stack with the positional defaults
+// if n_kw_defaults > 0 then there is a dictionary on the stack with the keyword defaults
+// if both exist, the tuple is above the dictionary (ie the first pop gets the tuple)
+// Emits make_function (or make_closure when this_scope has free variables)
+// for this_scope, first loading any cells it closes over from the current scope.
+STATIC void close_over_variables_etc(compiler_t *comp, scope_t *this_scope, int n_pos_defaults, int n_kw_defaults) {
+    assert(n_pos_defaults >= 0);
+    assert(n_kw_defaults >= 0);
+
+    // set flags
+    if (n_kw_defaults > 0) {
+        this_scope->scope_flags |= MP_SCOPE_FLAG_DEFKWARGS;
+    }
+    this_scope->num_def_pos_args = n_pos_defaults;
+
+    // make closed over variables, if any
+    // ensure they are closed over in the order defined in the outer scope (mainly to agree with CPython)
+    int nfree = 0;
+    if (comp->scope_cur->kind != SCOPE_MODULE) {
+        for (int i = 0; i < comp->scope_cur->id_info_len; i++) {
+            id_info_t *id = &comp->scope_cur->id_info[i];
+            if (id->kind == ID_INFO_KIND_CELL || id->kind == ID_INFO_KIND_FREE) {
+                // load this cell only if the inner scope uses it as a free var
+                for (int j = 0; j < this_scope->id_info_len; j++) {
+                    id_info_t *id2 = &this_scope->id_info[j];
+                    if (id2->kind == ID_INFO_KIND_FREE && id->qst == id2->qst) {
+                        // in Micro Python we load closures using LOAD_FAST
+                        EMIT_LOAD_FAST(id->qst, id->local_num);
+                        nfree += 1;
+                    }
+                }
+            }
+        }
+    }
+
+    // make the function/closure
+    if (nfree == 0) {
+        EMIT_ARG(make_function, this_scope, n_pos_defaults, n_kw_defaults);
+    } else {
+        EMIT_ARG(make_closure, this_scope, nfree, n_pos_defaults, n_kw_defaults);
+    }
+}
+
+// Compile one parameter of a function/lambda definition (default-argument
+// phase).  Updates comp->have_star / num_default_params / num_dict_params and
+// emits code to evaluate default values: positional defaults are left on the
+// stack (later gathered into a tuple), keyword-only defaults go into a map.
+STATIC void compile_funcdef_lambdef_param(compiler_t *comp, mp_parse_node_t pn) {
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_typedargslist_star)
+        || MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_varargslist_star)) {
+        comp->have_star = true;
+        /* don't need to distinguish bare from named star
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+        if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+            // bare star
+        } else {
+            // named star
+        }
+        */
+
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_typedargslist_dbl_star)
+        || MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_varargslist_dbl_star)) {
+        // named double star
+        // TODO do we need to do anything with this?
+
+    } else {
+        mp_parse_node_t pn_id;
+        mp_parse_node_t pn_colon;
+        mp_parse_node_t pn_equal;
+        if (MP_PARSE_NODE_IS_ID(pn)) {
+            // this parameter is just an id
+
+            pn_id = pn;
+            pn_colon = MP_PARSE_NODE_NULL;
+            pn_equal = MP_PARSE_NODE_NULL;
+
+        } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_typedargslist_name)) {
+            // this parameter has a colon and/or equal specifier
+
+            mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+            pn_id = pns->nodes[0];
+            pn_colon = pns->nodes[1];
+            pn_equal = pns->nodes[2];
+
+        } else {
+            assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_varargslist_name)); // should be
+            // this parameter has an equal specifier
+            // NOTE(review): pn_colon is left unassigned on this path; it is
+            // only consumed by the (void) cast at the end — confirm benign
+
+            mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+            pn_id = pns->nodes[0];
+            pn_equal = pns->nodes[1];
+        }
+
+        if (MP_PARSE_NODE_IS_NULL(pn_equal)) {
+            // this parameter does not have a default value
+
+            // check for non-default parameters given after default parameters (allowed by parser, but not syntactically valid)
+            if (!comp->have_star && comp->num_default_params != 0) {
+                compile_syntax_error(comp, pn, "non-default argument follows default argument");
+                return;
+            }
+
+        } else {
+            // this parameter has a default value
+            // in CPython, None (and True, False?) as default parameters are loaded with LOAD_NAME; don't understand why
+
+            if (comp->have_star) {
+                comp->num_dict_params += 1;
+                // in Micro Python we put the default dict parameters into a dictionary using the bytecode
+                if (comp->num_dict_params == 1) {
+                    // in Micro Python we put the default positional parameters into a tuple using the bytecode
+                    // we need to do this here before we start building the map for the default keywords
+                    if (comp->num_default_params > 0) {
+                        EMIT_ARG(build_tuple, comp->num_default_params);
+                    } else {
+                        EMIT(load_null); // sentinel indicating empty default positional args
+                    }
+                    // first default dict param, so make the map
+                    EMIT_ARG(build_map, 0);
+                }
+
+                // compile value then key, then store it to the dict
+                compile_node(comp, pn_equal);
+                EMIT_ARG(load_const_str, MP_PARSE_NODE_LEAF_ARG(pn_id));
+                EMIT(store_map);
+            } else {
+                comp->num_default_params += 1;
+                compile_node(comp, pn_equal);
+            }
+        }
+
+        // TODO pn_colon not implemented
+        (void)pn_colon;
+    }
+}
+
+// Compile the parameter defaults of a function/lambda (pn_params, a single
+// node or a list of kind pn_list_kind) and then emit the code that creates
+// the function object for 'scope'.
+STATIC void compile_funcdef_lambdef(compiler_t *comp, scope_t *scope, mp_parse_node_t pn_params, pn_kind_t pn_list_kind) {
+    // When we call compile_funcdef_lambdef_param below it can compile an arbitrary
+    // expression for default arguments, which may contain a lambda.  The lambda will
+    // call here in a nested way, so we must save and restore the relevant state.
+    bool orig_have_star = comp->have_star;
+    uint16_t orig_num_dict_params = comp->num_dict_params;
+    uint16_t orig_num_default_params = comp->num_default_params;
+
+    // compile default parameters
+    comp->have_star = false;
+    comp->num_dict_params = 0;
+    comp->num_default_params = 0;
+    apply_to_single_or_list(comp, pn_params, pn_list_kind, compile_funcdef_lambdef_param);
+
+    if (comp->compile_error != MP_OBJ_NULL) {
+        return;
+    }
+
+    // in Micro Python we put the default positional parameters into a tuple using the bytecode
+    // the default keywords args may have already made the tuple; if not, do it now
+    if (comp->num_default_params > 0 && comp->num_dict_params == 0) {
+        EMIT_ARG(build_tuple, comp->num_default_params);
+        EMIT(load_null); // sentinel indicating empty default keyword args
+    }
+
+    // make the function
+    close_over_variables_etc(comp, scope, comp->num_default_params, comp->num_dict_params);
+
+    // restore state
+    comp->have_star = orig_have_star;
+    comp->num_dict_params = orig_num_dict_params;
+    comp->num_default_params = orig_num_default_params;
+}
+
+// Compile a function definition; leaves the function object on the stack and
+// returns its name (the 'f' in "def f(...):").
+STATIC qstr compile_funcdef_helper(compiler_t *comp, mp_parse_node_struct_t *pns, uint emit_options) {
+    if (comp->pass == MP_PASS_SCOPE) {
+        // first pass: create the function's scope and stash it in the parse
+        // node so subsequent passes can retrieve it
+        pns->nodes[4] = (mp_parse_node_t)scope_new_and_link(comp, SCOPE_FUNCTION, (mp_parse_node_t)pns, emit_options);
+    }
+
+    // retrieve the scope stashed on the first pass
+    scope_t *fscope = (scope_t*)pns->nodes[4];
+
+    // compile the parameter list and emit the function-object creation
+    compile_funcdef_lambdef(comp, fscope, pns->nodes[1], PN_typedargslist);
+
+    return fscope->simple_name;
+}
+
+// leaves class object on stack
+// returns class name
+// Emits a build-class sequence: load the build-class function, the closure
+// for the class body, and the class name, then call it with any parent
+// classes as extra positional arguments.
+STATIC qstr compile_classdef_helper(compiler_t *comp, mp_parse_node_struct_t *pns, uint emit_options) {
+    if (comp->pass == MP_PASS_SCOPE) {
+        // create a new scope for this class
+        scope_t *s = scope_new_and_link(comp, SCOPE_CLASS, (mp_parse_node_t)pns, emit_options);
+        // store the class scope so the compiling function can use it at each pass
+        pns->nodes[3] = (mp_parse_node_t)s;
+    }
+
+    EMIT(load_build_class);
+
+    // scope for this class
+    scope_t *cscope = (scope_t*)pns->nodes[3];
+
+    // compile the class
+    close_over_variables_etc(comp, cscope, 0, 0);
+
+    // get its name
+    EMIT_ARG(load_const_str, cscope->simple_name);
+
+    // nodes[1] has parent classes, if any
+    // empty parenthesis (eg class C():) gets here as an empty PN_classdef_2 and needs special handling
+    mp_parse_node_t parents = pns->nodes[1];
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(parents, PN_classdef_2)) {
+        parents = MP_PARSE_NODE_NULL;
+    }
+    // reset the super()-call special case before compiling the parent list
+    comp->func_arg_is_super = false;
+    // call with 2 extra args already pushed (the closure and the name)
+    compile_trailer_paren_helper(comp, parents, false, 2);
+
+    // return its name (the 'C' in class C(...):")
+    return cscope->simple_name;
+}
+
+// returns true if it was a built-in decorator (even if the built-in had an error)
+// Built-in decorators are @micropython.bytecode/native/viper/asm_thumb; they
+// are consumed at compile time to select the emitter via *emit_options
+// rather than being applied at runtime.
+STATIC bool compile_built_in_decorator(compiler_t *comp, int name_len, mp_parse_node_t *name_nodes, uint *emit_options) {
+    if (MP_PARSE_NODE_LEAF_ARG(name_nodes[0]) != MP_QSTR_micropython) {
+        // not @micropython.xxx: caller compiles it as a normal decorator
+        return false;
+    }
+
+    // must be exactly "micropython.<attr>"
+    if (name_len != 2) {
+        compile_syntax_error(comp, name_nodes[0], "invalid micropython decorator");
+        return true;
+    }
+
+    qstr attr = MP_PARSE_NODE_LEAF_ARG(name_nodes[1]);
+    if (attr == MP_QSTR_bytecode) {
+        *emit_options = MP_EMIT_OPT_BYTECODE;
+#if MICROPY_EMIT_NATIVE
+    } else if (attr == MP_QSTR_native) {
+        *emit_options = MP_EMIT_OPT_NATIVE_PYTHON;
+    } else if (attr == MP_QSTR_viper) {
+        *emit_options = MP_EMIT_OPT_VIPER;
+#endif
+#if MICROPY_EMIT_INLINE_THUMB
+    } else if (attr == MP_QSTR_asm_thumb) {
+        *emit_options = MP_EMIT_OPT_ASM_THUMB;
+#endif
+    } else {
+        // unknown attr, or one compiled out by the #if's above
+        compile_syntax_error(comp, name_nodes[1], "invalid micropython decorator");
+    }
+
+    return true;
+}
+
+// Compile a decorated funcdef/classdef: evaluate each non-built-in decorator
+// in order, compile the body, then apply the decorators (innermost last) and
+// store the result under the function/class name.  @micropython.xxx
+// decorators are consumed at compile time to select emit options.
+STATIC void compile_decorated(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // get the list of decorators
+    mp_parse_node_t *nodes;
+    int n = mp_parse_node_extract_list(&pns->nodes[0], PN_decorators, &nodes);
+
+    // inherit emit options for this function/class definition
+    uint emit_options = comp->scope_cur->emit_options;
+
+    // compile each decorator
+    int num_built_in_decorators = 0;
+    for (int i = 0; i < n; i++) {
+        assert(MP_PARSE_NODE_IS_STRUCT_KIND(nodes[i], PN_decorator)); // should be
+        mp_parse_node_struct_t *pns_decorator = (mp_parse_node_struct_t*)nodes[i];
+
+        // nodes[0] contains the decorator function, which is a dotted name
+        mp_parse_node_t *name_nodes;
+        int name_len = mp_parse_node_extract_list(&pns_decorator->nodes[0], PN_dotted_name, &name_nodes);
+
+        // check for built-in decorators
+        if (compile_built_in_decorator(comp, name_len, name_nodes, &emit_options)) {
+            // this was a built-in
+            num_built_in_decorators += 1;
+
+        } else {
+            // not a built-in, compile normally
+
+            // compile the decorator function
+            compile_node(comp, name_nodes[0]);
+            for (int j = 1; j < name_len; j++) {
+                assert(MP_PARSE_NODE_IS_ID(name_nodes[j])); // should be
+                EMIT_ARG(load_attr, MP_PARSE_NODE_LEAF_ARG(name_nodes[j]));
+            }
+
+            // nodes[1] contains arguments to the decorator function, if any
+            if (!MP_PARSE_NODE_IS_NULL(pns_decorator->nodes[1])) {
+                // call the decorator function with the arguments in nodes[1]
+                comp->func_arg_is_super = false;
+                compile_node(comp, pns_decorator->nodes[1]);
+            }
+        }
+    }
+
+    // compile the body (funcdef or classdef) and get its name
+    mp_parse_node_struct_t *pns_body = (mp_parse_node_struct_t*)pns->nodes[1];
+    qstr body_name = 0;
+    if (MP_PARSE_NODE_STRUCT_KIND(pns_body) == PN_funcdef) {
+        body_name = compile_funcdef_helper(comp, pns_body, emit_options);
+    } else {
+        assert(MP_PARSE_NODE_STRUCT_KIND(pns_body) == PN_classdef); // should be
+        body_name = compile_classdef_helper(comp, pns_body, emit_options);
+    }
+
+    // call each decorator
+    // (built-in decorators pushed nothing, so call one fewer per built-in)
+    for (int i = 0; i < n - num_built_in_decorators; i++) {
+        EMIT_ARG(call_function, 1, 0, 0);
+    }
+
+    // store func/class object into name
+    compile_store_id(comp, body_name);
+}
+
+// Compile "def f(...): ..." and bind the resulting function object to f,
+// inheriting the current scope's emit options.
+STATIC void compile_funcdef(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    qstr func_name = compile_funcdef_helper(comp, pns, comp->scope_cur->emit_options);
+    compile_store_id(comp, func_name);
+}
+
+// Compile deletion of one target: an identifier, a subscript/attribute
+// (power node), or a parenthesised sequence of targets, which is deleted
+// element by element via recursion.
+STATIC void c_del_stmt(compiler_t *comp, mp_parse_node_t pn) {
+    if (MP_PARSE_NODE_IS_ID(pn)) {
+        compile_delete_id(comp, MP_PARSE_NODE_LEAF_ARG(pn));
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_power)) {
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+
+        compile_node(comp, pns->nodes[0]); // base of the power node
+
+        if (MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])) {
+            mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t*)pns->nodes[1];
+            if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_power_trailers) {
+                // several trailers: compile all but the last, which is the
+                // actual deletion target
+                int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns1);
+                for (int i = 0; i < n - 1; i++) {
+                    compile_node(comp, pns1->nodes[i]);
+                }
+                assert(MP_PARSE_NODE_IS_STRUCT(pns1->nodes[n - 1]));
+                pns1 = (mp_parse_node_struct_t*)pns1->nodes[n - 1];
+            }
+            if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_bracket) {
+                // del base[index]
+                compile_node(comp, pns1->nodes[0]);
+                EMIT(delete_subscr);
+            } else if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_period) {
+                // del base.attr
+                assert(MP_PARSE_NODE_IS_ID(pns1->nodes[0]));
+                EMIT_ARG(delete_attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]));
+            } else {
+                goto cannot_delete;
+            }
+        } else {
+            goto cannot_delete;
+        }
+
+        // a non-empty nodes[2] (exponent part) cannot be deleted
+        if (!MP_PARSE_NODE_IS_NULL(pns->nodes[2])) {
+            goto cannot_delete;
+        }
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_atom_paren)) {
+        pn = ((mp_parse_node_struct_t*)pn)->nodes[0];
+        if (MP_PARSE_NODE_IS_NULL(pn)) {
+            goto cannot_delete;
+        } else {
+            assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_testlist_comp));
+            mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+            // TODO perhaps factorise testlist_comp code with other uses of PN_testlist_comp
+
+            if (MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])) {
+                mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t*)pns->nodes[1];
+                if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_testlist_comp_3b) {
+                    // sequence of one item, with trailing comma
+                    assert(MP_PARSE_NODE_IS_NULL(pns1->nodes[0]));
+                    c_del_stmt(comp, pns->nodes[0]);
+                } else if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_testlist_comp_3c) {
+                    // sequence of many items
+                    int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns1);
+                    c_del_stmt(comp, pns->nodes[0]);
+                    for (int i = 0; i < n; i++) {
+                        c_del_stmt(comp, pns1->nodes[i]);
+                    }
+                } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_comp_for) {
+                    // NOTE(review): this tests pns, not pns1, unlike the two
+                    // branches above — looks inconsistent; confirm intent
+                    // TODO not implemented; can't del comprehension? can we get here?
+                    goto cannot_delete;
+                } else {
+                    // sequence with 2 items
+                    goto sequence_with_2_items;
+                }
+            } else {
+                // sequence with 2 items
+                sequence_with_2_items:
+                c_del_stmt(comp, pns->nodes[0]);
+                c_del_stmt(comp, pns->nodes[1]);
+            }
+        }
+    } else {
+        // some arbitrary statment that we can't delete (eg del 1)
+        goto cannot_delete;
+    }
+
+    return;
+
+cannot_delete:
+    compile_syntax_error(comp, (mp_parse_node_t)pn, "can't delete expression");
+}
+
+// Compile "del x" / "del x, y, ...": delete each target in turn.
+STATIC void compile_del_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    apply_to_single_or_list(comp, pns->nodes[0], PN_exprlist, c_del_stmt);
+}
+
+STATIC void compile_break_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ if (comp->break_label == 0) {
+ compile_syntax_error(comp, (mp_parse_node_t)pns, "'break' outside loop");
+ }
+ assert(comp->cur_except_level >= comp->break_continue_except_level);
+ EMIT_ARG(break_loop, comp->break_label, comp->cur_except_level - comp->break_continue_except_level);
+}
+
+STATIC void compile_continue_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ if (comp->continue_label == 0) {
+ compile_syntax_error(comp, (mp_parse_node_t)pns, "'continue' outside loop");
+ }
+ assert(comp->cur_except_level >= comp->break_continue_except_level);
+ EMIT_ARG(continue_loop, comp->continue_label, comp->cur_except_level - comp->break_continue_except_level);
+}
+
+// Compile a return statement; a syntax error outside a function scope.
+// 'return' with no value returns None; 'return a if c else b' is
+// special-cased so each arm returns directly, matching CPython's optimisation.
+STATIC void compile_return_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (comp->scope_cur->kind != SCOPE_FUNCTION) {
+        compile_syntax_error(comp, (mp_parse_node_t)pns, "'return' outside function");
+        return;
+    }
+    if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+        // no argument to 'return', so return None
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_test_if_expr)) {
+        // special case when returning an if-expression; to match CPython optimisation
+        mp_parse_node_struct_t *pns_test_if_expr = (mp_parse_node_struct_t*)pns->nodes[0];
+        mp_parse_node_struct_t *pns_test_if_else = (mp_parse_node_struct_t*)pns_test_if_expr->nodes[1];
+
+        uint l_fail = comp_next_label(comp);
+        c_if_cond(comp, pns_test_if_else->nodes[0], false, l_fail); // condition
+        compile_node(comp, pns_test_if_expr->nodes[0]); // success value
+        EMIT(return_value);
+        EMIT_ARG(label_assign, l_fail);
+        compile_node(comp, pns_test_if_else->nodes[1]); // failure value
+    } else {
+        compile_node(comp, pns->nodes[0]);
+    }
+    EMIT(return_value);
+}
+
+// Compile a yield used as a statement: evaluate the yield expression and
+// discard the value left on the stack.
+STATIC void compile_yield_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_node(comp, pns->nodes[0]);
+    EMIT(pop_top);
+}
+
+// Compile a `raise` statement in its three forms:
+//   raise            -> raise_varargs 0 (re-raise current exception)
+//   raise x          -> raise_varargs 1
+//   raise x from y   -> raise_varargs 2
+STATIC void compile_raise_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    mp_parse_node_t pn_arg = pns->nodes[0];
+    if (MP_PARSE_NODE_IS_NULL(pn_arg)) {
+        // bare raise
+        EMIT_ARG(raise_varargs, 0);
+        return;
+    }
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(pn_arg, PN_raise_stmt_arg)) {
+        // raise x from y: push the exception then its cause
+        mp_parse_node_struct_t *pns_arg = (mp_parse_node_struct_t*)pn_arg;
+        compile_node(comp, pns_arg->nodes[0]);
+        compile_node(comp, pns_arg->nodes[1]);
+        EMIT_ARG(raise_varargs, 2);
+        return;
+    }
+    // raise x: push just the exception object
+    compile_node(comp, pn_arg);
+    EMIT_ARG(raise_varargs, 1);
+}
+
+// Emit code to import a (possibly dotted) module name and leave the module
+// object on the stack.  On return, *q_base holds the base of the name:
+//   eg a     -> q_base=a
+//      a.b.c -> q_base=a
+// unless the node is of the form "x as y", in which case *q_base is y.
+STATIC void do_import_name(compiler_t *comp, mp_parse_node_t pn, qstr *q_base) {
+    bool is_as = false;
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_dotted_as_name)) {
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+        // a name of the form x as y; unwrap it
+        *q_base = MP_PARSE_NODE_LEAF_ARG(pns->nodes[1]);
+        pn = pns->nodes[0];
+        is_as = true;
+    }
+    if (MP_PARSE_NODE_IS_NULL(pn)) {
+        // empty name (eg, from . import x)
+        *q_base = MP_QSTR_;
+        EMIT_ARG(import_name, MP_QSTR_); // import the empty string
+    } else if (MP_PARSE_NODE_IS_ID(pn)) {
+        // just a simple name
+        qstr q_full = MP_PARSE_NODE_LEAF_ARG(pn);
+        if (!is_as) {
+            *q_base = q_full;
+        }
+        EMIT_ARG(import_name, q_full);
+    } else {
+        assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_dotted_name)); // should be
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+        {
+            // a name of the form a.b.c
+            if (!is_as) {
+                *q_base = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+            }
+            // build the full dotted name "a.b.c" as a single qstr:
+            // total length is the component lengths plus n-1 separating dots
+            int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+            int len = n - 1;
+            for (int i = 0; i < n; i++) {
+                len += qstr_len(MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]));
+            }
+            byte *q_ptr;
+            byte *str_dest = qstr_build_start(len, &q_ptr);
+            for (int i = 0; i < n; i++) {
+                if (i > 0) {
+                    *str_dest++ = '.';
+                }
+                size_t str_src_len;
+                const byte *str_src = qstr_data(MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]), &str_src_len);
+                memcpy(str_dest, str_src, str_src_len);
+                str_dest += str_src_len;
+            }
+            qstr q_full = qstr_build_end(q_ptr);
+            EMIT_ARG(import_name, q_full);
+            if (is_as) {
+                // for "import a.b.c as d" we need the c module, so walk the
+                // attribute chain from the top-level module a
+                for (int i = 1; i < n; i++) {
+                    EMIT_ARG(load_attr, MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]));
+                }
+            }
+        }
+    }
+}
+
+// Compile one dotted_as_name element of an `import` statement: import the
+// module and store it under its base (or `as`) name.
+STATIC void compile_dotted_as_name(compiler_t *comp, mp_parse_node_t pn) {
+    EMIT_ARG(load_const_small_int, 0); // level 0 import
+    EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); // not importing from anything
+    qstr q_base;
+    do_import_name(comp, pn, &q_base);
+    compile_store_id(comp, q_base);
+}
+
+// Compile `import a, b.c as d, ...` by applying compile_dotted_as_name to
+// each element of the (possibly single-element) dotted_as_names list.
+STATIC void compile_import_name(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    apply_to_single_or_list(comp, pns->nodes[0], PN_dotted_as_names, compile_dotted_as_name);
+}
+
+// Compile `from <module> import ...`.  Handles relative imports (leading
+// dots contribute to the import level), `from x import *`, and explicit
+// name lists with optional `as` renaming.
+STATIC void compile_import_from(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    mp_parse_node_t pn_import_source = pns->nodes[0];
+
+    // extract the preceding .'s (if any) for a relative import, to compute the import level
+    uint import_level = 0;
+    do {
+        mp_parse_node_t pn_rel;
+        if (MP_PARSE_NODE_IS_TOKEN(pn_import_source) || MP_PARSE_NODE_IS_STRUCT_KIND(pn_import_source, PN_one_or_more_period_or_ellipsis)) {
+            // This covers relative imports with dots only like "from .. import"
+            pn_rel = pn_import_source;
+            pn_import_source = MP_PARSE_NODE_NULL;
+        } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn_import_source, PN_import_from_2b)) {
+            // This covers relative imports starting with dot(s) like "from .foo import"
+            mp_parse_node_struct_t *pns_2b = (mp_parse_node_struct_t*)pn_import_source;
+            pn_rel = pns_2b->nodes[0];
+            pn_import_source = pns_2b->nodes[1];
+            assert(!MP_PARSE_NODE_IS_NULL(pn_import_source)); // should not be
+        } else {
+            // Not a relative import
+            break;
+        }
+
+        // get the list of . and/or ...'s
+        mp_parse_node_t *nodes;
+        int n = mp_parse_node_extract_list(&pn_rel, PN_one_or_more_period_or_ellipsis, &nodes);
+
+        // count the total number of .'s
+        for (int i = 0; i < n; i++) {
+            if (MP_PARSE_NODE_IS_TOKEN_KIND(nodes[i], MP_TOKEN_DEL_PERIOD)) {
+                import_level++;
+            } else {
+                // should be an MP_TOKEN_ELLIPSIS
+                import_level += 3;
+            }
+        }
+    } while (0);
+
+    if (MP_PARSE_NODE_IS_TOKEN_KIND(pns->nodes[1], MP_TOKEN_OP_STAR)) {
+        // from x import *
+        EMIT_ARG(load_const_small_int, import_level);
+
+        // build the "fromlist" tuple
+        EMIT_ARG(load_const_str, MP_QSTR__star_);
+        EMIT_ARG(build_tuple, 1);
+
+        // do the import
+        qstr dummy_q;
+        do_import_name(comp, pn_import_source, &dummy_q);
+        EMIT(import_star);
+
+    } else {
+        // from x import a [as b], c [as d], ...
+        EMIT_ARG(load_const_small_int, import_level);
+
+        // build the "fromlist" tuple
+        mp_parse_node_t *pn_nodes;
+        int n = mp_parse_node_extract_list(&pns->nodes[1], PN_import_as_names, &pn_nodes);
+        for (int i = 0; i < n; i++) {
+            assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn_nodes[i], PN_import_as_name));
+            mp_parse_node_struct_t *pns3 = (mp_parse_node_struct_t*)pn_nodes[i];
+            qstr id2 = MP_PARSE_NODE_LEAF_ARG(pns3->nodes[0]); // should be id
+            EMIT_ARG(load_const_str, id2);
+        }
+        EMIT_ARG(build_tuple, n);
+
+        // do the import, then store each imported name (under its `as`
+        // name if one was given)
+        qstr dummy_q;
+        do_import_name(comp, pn_import_source, &dummy_q);
+        for (int i = 0; i < n; i++) {
+            assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn_nodes[i], PN_import_as_name));
+            mp_parse_node_struct_t *pns3 = (mp_parse_node_struct_t*)pn_nodes[i];
+            qstr id2 = MP_PARSE_NODE_LEAF_ARG(pns3->nodes[0]); // should be id
+            EMIT_ARG(import_from, id2);
+            if (MP_PARSE_NODE_IS_NULL(pns3->nodes[1])) {
+                compile_store_id(comp, id2);
+            } else {
+                compile_store_id(comp, MP_PARSE_NODE_LEAF_ARG(pns3->nodes[1]));
+            }
+        }
+        EMIT(pop_top); // discard the module object
+    }
+}
+
+// Declare qst as an explicit global in the current scope (during the scope
+// pass).  It is an error if the identifier already exists in this scope
+// with a different kind.
+STATIC void compile_declare_global(compiler_t *comp, mp_parse_node_t pn, qstr qst) {
+    bool added;
+    id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, qst, &added);
+    if (!added && id_info->kind != ID_INFO_KIND_GLOBAL_EXPLICIT) {
+        compile_syntax_error(comp, pn, "identifier redefined as global");
+        return;
+    }
+    id_info->kind = ID_INFO_KIND_GLOBAL_EXPLICIT;
+
+    // if the id exists in the global scope, set its kind to EXPLICIT_GLOBAL
+    id_info = scope_find_global(comp->scope_cur, qst);
+    if (id_info != NULL) {
+        id_info->kind = ID_INFO_KIND_GLOBAL_EXPLICIT;
+    }
+}
+
+// Compile a `global` declaration statement.  Only has an effect during the
+// scope pass; later passes do nothing.
+STATIC void compile_global_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (comp->pass != MP_PASS_SCOPE) {
+        return;
+    }
+    mp_parse_node_t *pn_names;
+    int num_names = mp_parse_node_extract_list(&pns->nodes[0], PN_name_list, &pn_names);
+    for (int j = 0; j < num_names; j++) {
+        compile_declare_global(comp, (mp_parse_node_t)pns, MP_PARSE_NODE_LEAF_ARG(pn_names[j]));
+    }
+}
+
+// Declare qst as nonlocal (free) in the current scope (during the scope
+// pass).  A binding for the name must exist in some parent scope as a
+// local, cell or free variable; that parent binding is then closed over.
+STATIC void compile_declare_nonlocal(compiler_t *comp, mp_parse_node_t pn, qstr qst) {
+    bool added;
+    id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, qst, &added);
+    if (!added && id_info->kind != ID_INFO_KIND_FREE) {
+        compile_syntax_error(comp, pn, "identifier redefined as nonlocal");
+        return;
+    }
+    id_info_t *id_info2 = scope_find_local_in_parent(comp->scope_cur, qst);
+    if (id_info2 == NULL || !(id_info2->kind == ID_INFO_KIND_LOCAL || id_info2->kind == ID_INFO_KIND_CELL || id_info2->kind == ID_INFO_KIND_FREE)) {
+        compile_syntax_error(comp, pn, "no binding for nonlocal found");
+        return;
+    }
+    id_info->kind = ID_INFO_KIND_FREE;
+    scope_close_over_in_parents(comp->scope_cur, qst);
+}
+
+// Compile a `nonlocal` declaration statement.  Only has an effect during
+// the scope pass, and is invalid at module level.
+STATIC void compile_nonlocal_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (comp->pass == MP_PASS_SCOPE) {
+        if (comp->scope_cur->kind == SCOPE_MODULE) {
+            compile_syntax_error(comp, (mp_parse_node_t)pns, "can't declare nonlocal in outer code");
+            return;
+        }
+        mp_parse_node_t *nodes;
+        int n = mp_parse_node_extract_list(&pns->nodes[0], PN_name_list, &nodes);
+        for (int i = 0; i < n; i++) {
+            compile_declare_nonlocal(comp, (mp_parse_node_t)pns, MP_PARSE_NODE_LEAF_ARG(nodes[i]));
+        }
+    }
+}
+
+// Compile an `assert` statement: if the condition is false, raise
+// AssertionError, calling it with the assertion message if one is given.
+STATIC void compile_assert_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    uint l_end = comp_next_label(comp);
+    c_if_cond(comp, pns->nodes[0], true, l_end); // skip the raise when condition is true
+    EMIT_LOAD_GLOBAL(MP_QSTR_AssertionError); // we load_global instead of load_id, to be consistent with CPython
+    if (!MP_PARSE_NODE_IS_NULL(pns->nodes[1])) {
+        // assertion message: call AssertionError with it as the sole argument
+        compile_node(comp, pns->nodes[1]);
+        EMIT_ARG(call_function, 1, 0, 0);
+    }
+    EMIT_ARG(raise_varargs, 1);
+    EMIT_ARG(label_assign, l_end);
+}
+
+// Compile an if/elif/else statement.  nodes: [0]=condition, [1]=if block,
+// [2]=elif list (may be null), [3]=else block (may be null).  Constant
+// true/false conditions are folded away where possible.
+STATIC void compile_if_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // TODO proper and/or short circuiting
+
+    uint l_end = comp_next_label(comp);
+
+    // optimisation: don't emit anything when "if False"
+    if (!node_is_const_false(pns->nodes[0])) {
+        uint l_fail = comp_next_label(comp);
+        c_if_cond(comp, pns->nodes[0], false, l_fail); // if condition
+
+        compile_node(comp, pns->nodes[1]); // if block
+
+        // optimisation: skip everything else when "if True"
+        if (node_is_const_true(pns->nodes[0])) {
+            goto done;
+        }
+
+        if (
+            // optimisation: don't jump over non-existent elif/else blocks
+            !(MP_PARSE_NODE_IS_NULL(pns->nodes[2]) && MP_PARSE_NODE_IS_NULL(pns->nodes[3]))
+            // optimisation: don't jump if last instruction was return
+            && !EMIT(last_emit_was_return_value)
+            ) {
+            // jump over elif/else blocks
+            EMIT_ARG(jump, l_end);
+        }
+
+        EMIT_ARG(label_assign, l_fail);
+    }
+
+    // compile elif blocks (if any)
+    mp_parse_node_t *pn_elif;
+    int n_elif = mp_parse_node_extract_list(&pns->nodes[2], PN_if_stmt_elif_list, &pn_elif);
+    for (int i = 0; i < n_elif; i++) {
+        assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn_elif[i], PN_if_stmt_elif)); // should be
+        mp_parse_node_struct_t *pns_elif = (mp_parse_node_struct_t*)pn_elif[i];
+
+        // optimisation: don't emit anything when "if False"
+        if (!node_is_const_false(pns_elif->nodes[0])) {
+            uint l_fail = comp_next_label(comp);
+            c_if_cond(comp, pns_elif->nodes[0], false, l_fail); // elif condition
+
+            compile_node(comp, pns_elif->nodes[1]); // elif block
+
+            // optimisation: skip everything else when "elif True"
+            if (node_is_const_true(pns_elif->nodes[0])) {
+                goto done;
+            }
+
+            // optimisation: don't jump if last instruction was return
+            if (!EMIT(last_emit_was_return_value)) {
+                EMIT_ARG(jump, l_end);
+            }
+            EMIT_ARG(label_assign, l_fail);
+        }
+    }
+
+    // compile else block
+    compile_node(comp, pns->nodes[3]); // can be null
+
+done:
+    EMIT_ARG(label_assign, l_end);
+}
+
+// Save the current break/continue state and allocate fresh labels for a new
+// loop.  Declares locals old_break_label / old_continue_label /
+// old_break_continue_except_level (saved state) and break_label /
+// continue_label (the new loop's labels).  Must be paired with
+// END_BREAK_CONTINUE_BLOCK in the same C scope.
+#define START_BREAK_CONTINUE_BLOCK \
+    uint16_t old_break_label = comp->break_label; \
+    uint16_t old_continue_label = comp->continue_label; \
+    uint16_t old_break_continue_except_level = comp->break_continue_except_level; \
+    uint break_label = comp_next_label(comp); \
+    uint continue_label = comp_next_label(comp); \
+    comp->break_label = break_label; \
+    comp->continue_label = continue_label; \
+    comp->break_continue_except_level = comp->cur_except_level;
+
+// Restore the break/continue state saved by START_BREAK_CONTINUE_BLOCK so
+// break/continue once again target the enclosing loop (if any).
+#define END_BREAK_CONTINUE_BLOCK \
+    comp->break_label = old_break_label; \
+    comp->continue_label = old_continue_label; \
+    comp->break_continue_except_level = old_break_continue_except_level;
+
+// Compile a while loop.  Layout of the emitted code:
+//         jump continue      (skipped when the condition is constant True)
+//   top:  <body>
+//   continue: if <cond> goto top
+//         <else block>
+//   break:
+STATIC void compile_while_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    START_BREAK_CONTINUE_BLOCK
+
+    if (!node_is_const_false(pns->nodes[0])) { // optimisation: don't emit anything for "while False"
+        uint top_label = comp_next_label(comp);
+        if (!node_is_const_true(pns->nodes[0])) { // optimisation: don't jump to cond for "while True"
+            EMIT_ARG(jump, continue_label);
+        }
+        EMIT_ARG(label_assign, top_label);
+        compile_node(comp, pns->nodes[1]); // body
+        EMIT_ARG(label_assign, continue_label);
+        c_if_cond(comp, pns->nodes[0], true, top_label); // condition
+    }
+
+    // break/continue apply to outer loop (if any) in the else block
+    END_BREAK_CONTINUE_BLOCK
+
+    compile_node(comp, pns->nodes[2]); // else
+
+    EMIT_ARG(label_assign, break_label);
+}
+
+// This function compiles an optimised for-loop of the form:
+//      for <var> in range(<start>, <end>, <step>):
+//          <body>
+//      else:
+//          <else>
+// <var> must be an identifier and <step> must be a small-int.
+//
+// Semantics of for-loop require:
+// - final failing value should not be stored in the loop variable
+// - if the loop never runs, the loop variable should never be assigned
+// - assignments to <var>, <end> or <step> in the body do not alter the loop
+//   (<step> is a constant for us, so no need to worry about it changing)
+//
+// If <end> is a small-int, then the stack during the for-loop contains just
+// the current value of <var>.  Otherwise, the stack contains <end> then the
+// current value of <var>.
+STATIC void compile_for_stmt_optimised_range(compiler_t *comp, mp_parse_node_t pn_var, mp_parse_node_t pn_start, mp_parse_node_t pn_end, mp_parse_node_t pn_step, mp_parse_node_t pn_body, mp_parse_node_t pn_else) {
+    START_BREAK_CONTINUE_BLOCK
+
+    uint top_label = comp_next_label(comp);
+    uint entry_label = comp_next_label(comp);
+
+    // put the end value on the stack if it's not a small-int constant
+    bool end_on_stack = !MP_PARSE_NODE_IS_SMALL_INT(pn_end);
+    if (end_on_stack) {
+        compile_node(comp, pn_end);
+    }
+
+    // compile: start
+    compile_node(comp, pn_start);
+
+    // jump straight to the loop condition; the body is entered only after
+    // the condition first succeeds
+    EMIT_ARG(jump, entry_label);
+    EMIT_ARG(label_assign, top_label);
+
+    // duplicate next value and store it to var
+    EMIT(dup_top);
+    c_assign(comp, pn_var, ASSIGN_STORE);
+
+    // compile body
+    compile_node(comp, pn_body);
+
+    EMIT_ARG(label_assign, continue_label);
+
+    // compile: var + step
+    compile_node(comp, pn_step);
+    EMIT_ARG(binary_op, MP_BINARY_OP_INPLACE_ADD);
+
+    EMIT_ARG(label_assign, entry_label);
+
+    // compile: if var <cond> end: goto top
+    if (end_on_stack) {
+        EMIT(dup_top_two);
+        EMIT(rot_two);
+    } else {
+        EMIT(dup_top);
+        compile_node(comp, pn_end);
+    }
+    // comparison direction depends on the sign of the constant step
+    assert(MP_PARSE_NODE_IS_SMALL_INT(pn_step));
+    if (MP_PARSE_NODE_LEAF_SMALL_INT(pn_step) >= 0) {
+        EMIT_ARG(binary_op, MP_BINARY_OP_LESS);
+    } else {
+        EMIT_ARG(binary_op, MP_BINARY_OP_MORE);
+    }
+    EMIT_ARG(pop_jump_if, true, top_label);
+
+    // break/continue apply to outer loop (if any) in the else block
+    END_BREAK_CONTINUE_BLOCK
+
+    compile_node(comp, pn_else);
+
+    EMIT_ARG(label_assign, break_label);
+
+    // discard final value of var that failed the loop condition
+    EMIT(pop_top);
+
+    // discard <end> value if it's on the stack
+    if (end_on_stack) {
+        EMIT(pop_top);
+    }
+}
+
+// Compile a for statement.  nodes: [0]=target, [1]=iterable, [2]=body,
+// [3]=else block.  A `for <id> in range(...)` with compilable arguments is
+// dispatched to compile_for_stmt_optimised_range; otherwise a generic
+// iterator loop is emitted.
+STATIC void compile_for_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // this bit optimises: for <x> in range(...), turning it into an explicitly incremented variable
+    // this is actually slower, but uses no heap memory
+    // for viper it will be much, much faster
+    if (/*comp->scope_cur->emit_options == MP_EMIT_OPT_VIPER &&*/ MP_PARSE_NODE_IS_ID(pns->nodes[0]) && MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_power)) {
+        mp_parse_node_struct_t *pns_it = (mp_parse_node_struct_t*)pns->nodes[1];
+        // the iterable must be exactly a call of the name `range`
+        if (MP_PARSE_NODE_IS_ID(pns_it->nodes[0])
+            && MP_PARSE_NODE_LEAF_ARG(pns_it->nodes[0]) == MP_QSTR_range
+            && MP_PARSE_NODE_IS_STRUCT_KIND(pns_it->nodes[1], PN_trailer_paren)
+            && MP_PARSE_NODE_IS_NULL(pns_it->nodes[2])) {
+            mp_parse_node_t pn_range_args = ((mp_parse_node_struct_t*)pns_it->nodes[1])->nodes[0];
+            mp_parse_node_t *args;
+            int n_args = mp_parse_node_extract_list(&pn_range_args, PN_arglist, &args);
+            mp_parse_node_t pn_range_start;
+            mp_parse_node_t pn_range_end;
+            mp_parse_node_t pn_range_step;
+            bool optimize = false;
+            if (1 <= n_args && n_args <= 3) {
+                optimize = true;
+                // fill in implicit start=0 and step=1 for the 1- and 2-arg forms
+                if (n_args == 1) {
+                    pn_range_start = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, 0);
+                    pn_range_end = args[0];
+                    pn_range_step = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, 1);
+                } else if (n_args == 2) {
+                    pn_range_start = args[0];
+                    pn_range_end = args[1];
+                    pn_range_step = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, 1);
+                } else {
+                    pn_range_start = args[0];
+                    pn_range_end = args[1];
+                    pn_range_step = args[2];
+                    // We need to know sign of step. This is possible only if it's constant
+                    if (!MP_PARSE_NODE_IS_SMALL_INT(pn_range_step)) {
+                        optimize = false;
+                    }
+                }
+                // arguments must be able to be compiled as standard expressions
+                // (ie not *args, **kwargs or keyword arguments)
+                if (optimize && MP_PARSE_NODE_IS_STRUCT(pn_range_start)) {
+                    int k = MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t*)pn_range_start);
+                    if (k == PN_arglist_star || k == PN_arglist_dbl_star || k == PN_argument) {
+                        optimize = false;
+                    }
+                }
+                if (optimize && MP_PARSE_NODE_IS_STRUCT(pn_range_end)) {
+                    int k = MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t*)pn_range_end);
+                    if (k == PN_arglist_star || k == PN_arglist_dbl_star || k == PN_argument) {
+                        optimize = false;
+                    }
+                }
+            }
+            if (optimize) {
+                compile_for_stmt_optimised_range(comp, pns->nodes[0], pn_range_start, pn_range_end, pn_range_step, pns->nodes[2], pns->nodes[3]);
+                return;
+            }
+        }
+    }
+
+    // generic for loop over an iterator
+    START_BREAK_CONTINUE_BLOCK
+    comp->break_label |= MP_EMIT_BREAK_FROM_FOR;
+
+    uint pop_label = comp_next_label(comp);
+
+    compile_node(comp, pns->nodes[1]); // iterator
+    EMIT(get_iter);
+    EMIT_ARG(label_assign, continue_label);
+    EMIT_ARG(for_iter, pop_label);
+    c_assign(comp, pns->nodes[0], ASSIGN_STORE); // variable
+    compile_node(comp, pns->nodes[2]); // body
+    if (!EMIT(last_emit_was_return_value)) {
+        EMIT_ARG(jump, continue_label);
+    }
+    EMIT_ARG(label_assign, pop_label);
+    EMIT(for_iter_end);
+
+    // break/continue apply to outer loop (if any) in the else block
+    END_BREAK_CONTINUE_BLOCK
+
+    compile_node(comp, pns->nodes[3]); // else (not tested)
+
+    EMIT_ARG(label_assign, break_label);
+}
+
+// Compile a try/except[/else] construct (the finally part, if any, is
+// handled by compile_try_finally which wraps this).  pn_excepts is the
+// array of n_except except-clause nodes; pn_else may be null.
+STATIC void compile_try_except(compiler_t *comp, mp_parse_node_t pn_body, int n_except, mp_parse_node_t *pn_excepts, mp_parse_node_t pn_else) {
+    // setup code
+    uint l1 = comp_next_label(comp);
+    uint success_label = comp_next_label(comp);
+
+    EMIT_ARG(setup_except, l1);
+    compile_increase_except_level(comp);
+
+    compile_node(comp, pn_body); // body
+    EMIT(pop_block);
+    EMIT_ARG(jump, success_label); // jump over exception handler
+
+    EMIT_ARG(label_assign, l1); // start of exception handler
+    EMIT(start_except_handler);
+
+    uint l2 = comp_next_label(comp); // label for code following the whole try statement
+
+    for (int i = 0; i < n_except; i++) {
+        assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn_excepts[i], PN_try_stmt_except)); // should be
+        mp_parse_node_struct_t *pns_except = (mp_parse_node_struct_t*)pn_excepts[i];
+
+        qstr qstr_exception_local = 0; // non-zero when this clause is `except E as name`
+        uint end_finally_label = comp_next_label(comp);
+
+        if (MP_PARSE_NODE_IS_NULL(pns_except->nodes[0])) {
+            // this is a catch all exception handler
+            if (i + 1 != n_except) {
+                compile_syntax_error(comp, pn_excepts[i], "default 'except:' must be last");
+                compile_decrease_except_level(comp);
+                return;
+            }
+        } else {
+            // this exception handler requires a match to a certain type of exception
+            mp_parse_node_t pns_exception_expr = pns_except->nodes[0];
+            if (MP_PARSE_NODE_IS_STRUCT(pns_exception_expr)) {
+                mp_parse_node_struct_t *pns3 = (mp_parse_node_struct_t*)pns_exception_expr;
+                if (MP_PARSE_NODE_STRUCT_KIND(pns3) == PN_try_stmt_as_name) {
+                    // handler binds the exception to a local
+                    pns_exception_expr = pns3->nodes[0];
+                    qstr_exception_local = MP_PARSE_NODE_LEAF_ARG(pns3->nodes[1]);
+                }
+            }
+            // test the raised exception against this clause's type; on no
+            // match, fall through to the next clause
+            EMIT(dup_top);
+            compile_node(comp, pns_exception_expr);
+            EMIT_ARG(binary_op, MP_BINARY_OP_EXCEPTION_MATCH);
+            EMIT_ARG(pop_jump_if, false, end_finally_label);
+        }
+
+        // discard the first of the 3 exception items on the stack
+        EMIT(pop_top);
+
+        // second item: the exception value; store it or discard it
+        if (qstr_exception_local == 0) {
+            EMIT(pop_top);
+        } else {
+            compile_store_id(comp, qstr_exception_local);
+        }
+
+        // discard the third exception item
+        EMIT(pop_top);
+
+        uint l3 = 0;
+        if (qstr_exception_local != 0) {
+            // `except E as name`: wrap the handler body in a finally that
+            // unbinds and deletes the local, to match CPython semantics
+            l3 = comp_next_label(comp);
+            EMIT_ARG(setup_finally, l3);
+            compile_increase_except_level(comp);
+        }
+        compile_node(comp, pns_except->nodes[1]);
+        if (qstr_exception_local != 0) {
+            EMIT(pop_block);
+        }
+        EMIT(pop_except);
+        if (qstr_exception_local != 0) {
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+            EMIT_ARG(label_assign, l3);
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+            compile_store_id(comp, qstr_exception_local);
+            compile_delete_id(comp, qstr_exception_local);
+
+            compile_decrease_except_level(comp);
+            EMIT(end_finally);
+        }
+        EMIT_ARG(jump, l2);
+        EMIT_ARG(label_assign, end_finally_label);
+        EMIT_ARG(adjust_stack_size, 3); // stack adjust for the 3 exception items
+    }
+
+    compile_decrease_except_level(comp);
+    EMIT(end_finally);
+    EMIT(end_except_handler);
+
+    EMIT_ARG(label_assign, success_label);
+    compile_node(comp, pn_else); // else block, can be null
+    EMIT_ARG(label_assign, l2);
+}
+
+// Compile a try/finally construct; when n_except is non-zero the protected
+// body is itself a try/except[/else], delegated to compile_try_except.
+STATIC void compile_try_finally(compiler_t *comp, mp_parse_node_t pn_body, int n_except, mp_parse_node_t *pn_except, mp_parse_node_t pn_else, mp_parse_node_t pn_finally) {
+    uint l_finally_block = comp_next_label(comp);
+
+    EMIT_ARG(setup_finally, l_finally_block);
+    compile_increase_except_level(comp);
+
+    if (n_except == 0) {
+        assert(MP_PARSE_NODE_IS_NULL(pn_else)); // can't have `else` without `except`
+        EMIT_ARG(adjust_stack_size, 3); // stack adjust for possible UNWIND_JUMP state
+        compile_node(comp, pn_body);
+        EMIT_ARG(adjust_stack_size, -3);
+    } else {
+        compile_try_except(comp, pn_body, n_except, pn_except, pn_else);
+    }
+    EMIT(pop_block);
+    EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+    EMIT_ARG(label_assign, l_finally_block);
+    compile_node(comp, pn_finally);
+
+    compile_decrease_except_level(comp);
+    EMIT(end_finally);
+}
+
+// Compile a try statement, dispatching on the shape of the clause list to
+// try-finally, try-except(-else)(-finally) or plain try-except.
+STATIC void compile_try_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should be
+    {
+        mp_parse_node_struct_t *pns2 = (mp_parse_node_struct_t*)pns->nodes[1];
+        if (MP_PARSE_NODE_STRUCT_KIND(pns2) == PN_try_stmt_finally) {
+            // just try-finally
+            compile_try_finally(comp, pns->nodes[0], 0, NULL, MP_PARSE_NODE_NULL, pns2->nodes[0]);
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns2) == PN_try_stmt_except_and_more) {
+            // try-except and possibly else and/or finally
+            mp_parse_node_t *pn_excepts;
+            int n_except = mp_parse_node_extract_list(&pns2->nodes[0], PN_try_stmt_except_list, &pn_excepts);
+            if (MP_PARSE_NODE_IS_NULL(pns2->nodes[2])) {
+                // no finally
+                compile_try_except(comp, pns->nodes[0], n_except, pn_excepts, pns2->nodes[1]);
+            } else {
+                // have finally
+                compile_try_finally(comp, pns->nodes[0], n_except, pn_excepts, pns2->nodes[1], ((mp_parse_node_struct_t*)pns2->nodes[2])->nodes[0]);
+            }
+        } else {
+            // just try-except
+            mp_parse_node_t *pn_excepts;
+            int n_except = mp_parse_node_extract_list(&pns->nodes[1], PN_try_stmt_except_list, &pn_excepts);
+            compile_try_except(comp, pns->nodes[0], n_except, pn_excepts, MP_PARSE_NODE_NULL);
+        }
+    }
+}
+
+// Recursively compile the n context managers of a with statement, nesting
+// each subsequent manager (and finally the body) inside the previous one's
+// setup/cleanup pair.
+STATIC void compile_with_stmt_helper(compiler_t *comp, int n, mp_parse_node_t *nodes, mp_parse_node_t body) {
+    if (n == 0) {
+        // no more pre-bits, compile the body of the with
+        compile_node(comp, body);
+    } else {
+        uint l_end = comp_next_label(comp);
+        if (MICROPY_EMIT_NATIVE && comp->scope_cur->emit_options != MP_EMIT_OPT_BYTECODE) {
+            // we need to allocate an extra label for the native emitter
+            // it will use l_end+1 as an auxiliary label
+            comp_next_label(comp);
+        }
+        if (MP_PARSE_NODE_IS_STRUCT_KIND(nodes[0], PN_with_item)) {
+            // this pre-bit is of the form "a as b"
+            mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)nodes[0];
+            compile_node(comp, pns->nodes[0]);
+            EMIT_ARG(setup_with, l_end);
+            c_assign(comp, pns->nodes[1], ASSIGN_STORE);
+        } else {
+            // this pre-bit is just an expression
+            compile_node(comp, nodes[0]);
+            EMIT_ARG(setup_with, l_end);
+            EMIT(pop_top); // discard the __enter__ result
+        }
+        compile_increase_except_level(comp);
+        // compile additional pre-bits and the body
+        compile_with_stmt_helper(comp, n - 1, nodes + 1, body);
+        // finish this with block
+        EMIT_ARG(with_cleanup, l_end);
+        compile_decrease_except_level(comp);
+        EMIT(end_finally);
+    }
+}
+
+// Compile a with statement: extract the list of context managers and
+// compile them nested via compile_with_stmt_helper.
+STATIC void compile_with_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // get the nodes for the pre-bit of the with (the a as b, c as d, ... bit)
+    mp_parse_node_t *nodes;
+    int n = mp_parse_node_extract_list(&pns->nodes[0], PN_with_stmt_list, &nodes);
+    assert(n > 0);
+
+    // compile in a nested fashion
+    compile_with_stmt_helper(comp, n, nodes, pns->nodes[1]);
+}
+
+// Compile an expression statement: a bare expression, an augmented
+// assignment (`x += y`), a chained assignment (`a = b = c`), or a plain
+// assignment (with special-cased 2- and 3-element tuple-to-tuple forms).
+STATIC void compile_expr_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (MP_PARSE_NODE_IS_NULL(pns->nodes[1])) {
+        if (comp->is_repl && comp->scope_cur->kind == SCOPE_MODULE) {
+            // for REPL, evaluate then print the expression
+            compile_load_id(comp, MP_QSTR___repl_print__);
+            compile_node(comp, pns->nodes[0]);
+            EMIT_ARG(call_function, 1, 0, 0);
+            EMIT(pop_top);
+
+        } else {
+            // for non-REPL, evaluate then discard the expression
+            if ((MP_PARSE_NODE_IS_LEAF(pns->nodes[0]) && !MP_PARSE_NODE_IS_ID(pns->nodes[0]))
+                || MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_string)
+                || MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_bytes)
+                || MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_const_object)) {
+                // do nothing with a lonely constant
+            } else {
+                compile_node(comp, pns->nodes[0]); // just an expression
+                EMIT(pop_top); // discard last result since this is a statement and leaves nothing on the stack
+            }
+        }
+    } else if (MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])) {
+        mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t*)pns->nodes[1];
+        int kind = MP_PARSE_NODE_STRUCT_KIND(pns1);
+        if (kind == PN_expr_stmt_augassign) {
+            // augmented assignment: load lhs, compute, store back
+            c_assign(comp, pns->nodes[0], ASSIGN_AUG_LOAD); // lhs load for aug assign
+            compile_node(comp, pns1->nodes[1]); // rhs
+            assert(MP_PARSE_NODE_IS_TOKEN(pns1->nodes[0]));
+            mp_binary_op_t op;
+            switch (MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0])) {
+                case MP_TOKEN_DEL_PIPE_EQUAL: op = MP_BINARY_OP_INPLACE_OR; break;
+                case MP_TOKEN_DEL_CARET_EQUAL: op = MP_BINARY_OP_INPLACE_XOR; break;
+                case MP_TOKEN_DEL_AMPERSAND_EQUAL: op = MP_BINARY_OP_INPLACE_AND; break;
+                case MP_TOKEN_DEL_DBL_LESS_EQUAL: op = MP_BINARY_OP_INPLACE_LSHIFT; break;
+                case MP_TOKEN_DEL_DBL_MORE_EQUAL: op = MP_BINARY_OP_INPLACE_RSHIFT; break;
+                case MP_TOKEN_DEL_PLUS_EQUAL: op = MP_BINARY_OP_INPLACE_ADD; break;
+                case MP_TOKEN_DEL_MINUS_EQUAL: op = MP_BINARY_OP_INPLACE_SUBTRACT; break;
+                case MP_TOKEN_DEL_STAR_EQUAL: op = MP_BINARY_OP_INPLACE_MULTIPLY; break;
+                case MP_TOKEN_DEL_DBL_SLASH_EQUAL: op = MP_BINARY_OP_INPLACE_FLOOR_DIVIDE; break;
+                case MP_TOKEN_DEL_SLASH_EQUAL: op = MP_BINARY_OP_INPLACE_TRUE_DIVIDE; break;
+                case MP_TOKEN_DEL_PERCENT_EQUAL: op = MP_BINARY_OP_INPLACE_MODULO; break;
+                case MP_TOKEN_DEL_DBL_STAR_EQUAL: default: op = MP_BINARY_OP_INPLACE_POWER; break;
+            }
+            EMIT_ARG(binary_op, op);
+            c_assign(comp, pns->nodes[0], ASSIGN_AUG_STORE); // lhs store for aug assign
+        } else if (kind == PN_expr_stmt_assign_list) {
+            // chained assignment a = b = ... = rhs
+            int rhs = MP_PARSE_NODE_STRUCT_NUM_NODES(pns1) - 1;
+            compile_node(comp, pns1->nodes[rhs]); // rhs
+            // following CPython, we store left-most first
+            if (rhs > 0) {
+                EMIT(dup_top);
+            }
+            c_assign(comp, pns->nodes[0], ASSIGN_STORE); // lhs store
+            for (int i = 0; i < rhs; i++) {
+                if (i + 1 < rhs) {
+                    EMIT(dup_top);
+                }
+                c_assign(comp, pns1->nodes[i], ASSIGN_STORE); // middle store
+            }
+        } else {
+        plain_assign:
+            if (MICROPY_COMP_DOUBLE_TUPLE_ASSIGN
+                && MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_testlist_star_expr)
+                && MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_star_expr)
+                && MP_PARSE_NODE_STRUCT_NUM_NODES((mp_parse_node_struct_t*)pns->nodes[1]) == 2
+                && MP_PARSE_NODE_STRUCT_NUM_NODES((mp_parse_node_struct_t*)pns->nodes[0]) == 2) {
+                // optimisation for a, b = c, d
+                mp_parse_node_struct_t *pns10 = (mp_parse_node_struct_t*)pns->nodes[1];
+                mp_parse_node_struct_t *pns0 = (mp_parse_node_struct_t*)pns->nodes[0];
+                if (MP_PARSE_NODE_IS_STRUCT_KIND(pns0->nodes[0], PN_star_expr)
+                    || MP_PARSE_NODE_IS_STRUCT_KIND(pns0->nodes[1], PN_star_expr)) {
+                    // can't optimise when it's a star expression on the lhs
+                    goto no_optimisation;
+                }
+                compile_node(comp, pns10->nodes[0]); // rhs
+                compile_node(comp, pns10->nodes[1]); // rhs
+                EMIT(rot_two);
+                c_assign(comp, pns0->nodes[0], ASSIGN_STORE); // lhs store
+                c_assign(comp, pns0->nodes[1], ASSIGN_STORE); // lhs store
+            } else if (MICROPY_COMP_TRIPLE_TUPLE_ASSIGN
+                && MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_testlist_star_expr)
+                && MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_star_expr)
+                && MP_PARSE_NODE_STRUCT_NUM_NODES((mp_parse_node_struct_t*)pns->nodes[1]) == 3
+                && MP_PARSE_NODE_STRUCT_NUM_NODES((mp_parse_node_struct_t*)pns->nodes[0]) == 3) {
+                // optimisation for a, b, c = d, e, f
+                mp_parse_node_struct_t *pns10 = (mp_parse_node_struct_t*)pns->nodes[1];
+                mp_parse_node_struct_t *pns0 = (mp_parse_node_struct_t*)pns->nodes[0];
+                if (MP_PARSE_NODE_IS_STRUCT_KIND(pns0->nodes[0], PN_star_expr)
+                    || MP_PARSE_NODE_IS_STRUCT_KIND(pns0->nodes[1], PN_star_expr)
+                    || MP_PARSE_NODE_IS_STRUCT_KIND(pns0->nodes[2], PN_star_expr)) {
+                    // can't optimise when it's a star expression on the lhs
+                    goto no_optimisation;
+                }
+                compile_node(comp, pns10->nodes[0]); // rhs
+                compile_node(comp, pns10->nodes[1]); // rhs
+                compile_node(comp, pns10->nodes[2]); // rhs
+                EMIT(rot_three);
+                EMIT(rot_two);
+                c_assign(comp, pns0->nodes[0], ASSIGN_STORE); // lhs store
+                c_assign(comp, pns0->nodes[1], ASSIGN_STORE); // lhs store
+                c_assign(comp, pns0->nodes[2], ASSIGN_STORE); // lhs store
+            } else {
+            no_optimisation:
+                compile_node(comp, pns->nodes[1]); // rhs
+                c_assign(comp, pns->nodes[0], ASSIGN_STORE); // lhs store
+            }
+        }
+    } else {
+        goto plain_assign;
+    }
+}
+
+// Compile a left-associative chain of the same binary operation
+// (eg a | b | c): compile the first operand, then for each remaining
+// operand compile it and emit the operator.
+STATIC void c_binary_op(compiler_t *comp, mp_parse_node_struct_t *pns, mp_binary_op_t binary_op) {
+    int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    compile_node(comp, pns->nodes[0]);
+    for (int i = 1; i < n; i++) {
+        compile_node(comp, pns->nodes[i]);
+        EMIT_ARG(binary_op, binary_op);
+    }
+}
+
+// Compile a conditional expression `a if c else b`: evaluate the condition,
+// then exactly one of the two value branches, leaving one value on the stack.
+STATIC void compile_test_if_expr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_test_if_else));
+    mp_parse_node_struct_t *pns_test_if_else = (mp_parse_node_struct_t*)pns->nodes[1];
+
+    uint l_fail = comp_next_label(comp);
+    uint l_end = comp_next_label(comp);
+    c_if_cond(comp, pns_test_if_else->nodes[0], false, l_fail); // condition
+    compile_node(comp, pns->nodes[0]); // success value
+    EMIT_ARG(jump, l_end);
+    EMIT_ARG(label_assign, l_fail);
+    EMIT_ARG(adjust_stack_size, -1); // adjust stack size: only one branch's value is pushed
+    compile_node(comp, pns_test_if_else->nodes[1]); // failure value
+    EMIT_ARG(label_assign, l_end);
+}
+
+// Compile a lambda expression.  A new scope is created during the scope
+// pass and cached in nodes[2] so later passes can reuse it.
+STATIC void compile_lambdef(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (comp->pass == MP_PASS_SCOPE) {
+        // create a new scope for this lambda
+        scope_t *s = scope_new_and_link(comp, SCOPE_LAMBDA, (mp_parse_node_t)pns, comp->scope_cur->emit_options);
+        // store the lambda scope so the compiling function (this one) can use it at each pass
+        pns->nodes[2] = (mp_parse_node_t)s;
+    }
+
+    // get the scope for this lambda
+    scope_t *this_scope = (scope_t*)pns->nodes[2];
+
+    // compile the lambda definition
+    compile_funcdef_lambdef(comp, this_scope, pns->nodes[0], PN_varargslist);
+}
+
+// Shared helper for `or` (cond=true) and `and` (cond=false) expressions.
+// Each operand except the last conditionally jumps to the end with its
+// value kept on the stack, giving short-circuit evaluation.
+STATIC void compile_or_and_test(compiler_t *comp, mp_parse_node_struct_t *pns, bool cond) {
+    uint l_end = comp_next_label(comp);
+    int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    int last = num_nodes - 1;
+    for (int i = 0; i <= last; i++) {
+        compile_node(comp, pns->nodes[i]);
+        if (i != last) {
+            EMIT_ARG(jump_if_or_pop, cond, l_end);
+        }
+    }
+    EMIT_ARG(label_assign, l_end);
+}
+
+// Compile an `or` expression: short-circuits on the first true operand.
+STATIC void compile_or_test(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_or_and_test(comp, pns, true);
+}
+
+// Compile an 'and' chain: short-circuits on the first falsy operand.
+STATIC void compile_and_test(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_or_and_test(comp, pns, false);
+}
+
+// Compile 'not x': evaluate the operand, then apply boolean negation.
+STATIC void compile_not_test_2(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_node(comp, pns->nodes[0]);
+    EMIT_ARG(unary_op, MP_UNARY_OP_NOT);
+}
+
+// Compile a comparison, possibly chained (a < b < c).  For a chain the
+// middle operand is duplicated so it can be the left operand of the next
+// comparison, and evaluation short-circuits to l_fail as soon as one
+// comparison is false (Python chained-comparison semantics).
+STATIC void compile_comparison(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    compile_node(comp, pns->nodes[0]);
+    // nodes alternate operand/operator, so more than 3 nodes means a chain
+    bool multi = (num_nodes > 3);
+    uint l_fail = 0;
+    if (multi) {
+        l_fail = comp_next_label(comp);
+    }
+    for (int i = 1; i + 1 < num_nodes; i += 2) {
+        compile_node(comp, pns->nodes[i + 1]);
+        if (i + 2 < num_nodes) {
+            // not the last comparison: keep a copy of the right operand
+            // below the pair being compared, for the next comparison
+            EMIT(dup_top);
+            EMIT(rot_three);
+        }
+        if (MP_PARSE_NODE_IS_TOKEN(pns->nodes[i])) {
+            // operator is a simple token: <, >, ==, <=, >=, !=, in
+            mp_binary_op_t op;
+            switch (MP_PARSE_NODE_LEAF_ARG(pns->nodes[i])) {
+                case MP_TOKEN_OP_LESS: op = MP_BINARY_OP_LESS; break;
+                case MP_TOKEN_OP_MORE: op = MP_BINARY_OP_MORE; break;
+                case MP_TOKEN_OP_DBL_EQUAL: op = MP_BINARY_OP_EQUAL; break;
+                case MP_TOKEN_OP_LESS_EQUAL: op = MP_BINARY_OP_LESS_EQUAL; break;
+                case MP_TOKEN_OP_MORE_EQUAL: op = MP_BINARY_OP_MORE_EQUAL; break;
+                case MP_TOKEN_OP_NOT_EQUAL: op = MP_BINARY_OP_NOT_EQUAL; break;
+                case MP_TOKEN_KW_IN: default: op = MP_BINARY_OP_IN; break;
+            }
+            EMIT_ARG(binary_op, op);
+        } else {
+            // operator is a compound rule: 'not in', 'is', 'is not'
+            assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[i])); // should be
+            mp_parse_node_struct_t *pns2 = (mp_parse_node_struct_t*)pns->nodes[i];
+            int kind = MP_PARSE_NODE_STRUCT_KIND(pns2);
+            if (kind == PN_comp_op_not_in) {
+                EMIT_ARG(binary_op, MP_BINARY_OP_NOT_IN);
+            } else {
+                assert(kind == PN_comp_op_is); // should be
+                // a null sub-node distinguishes plain 'is' from 'is not'
+                if (MP_PARSE_NODE_IS_NULL(pns2->nodes[0])) {
+                    EMIT_ARG(binary_op, MP_BINARY_OP_IS);
+                } else {
+                    EMIT_ARG(binary_op, MP_BINARY_OP_IS_NOT);
+                }
+            }
+        }
+        if (i + 2 < num_nodes) {
+            // bail out of the chain as soon as a comparison is false
+            EMIT_ARG(jump_if_or_pop, false, l_fail);
+        }
+    }
+    if (multi) {
+        // on the failure path a duplicated operand is still on the stack
+        // under the (false) result; rotate it up and discard it
+        uint l_end = comp_next_label(comp);
+        EMIT_ARG(jump, l_end);
+        EMIT_ARG(label_assign, l_fail);
+        EMIT_ARG(adjust_stack_size, 1);
+        EMIT(rot_two);
+        EMIT(pop_top);
+        EMIT_ARG(label_assign, l_end);
+    }
+}
+
+// A bare starred expression is only valid as an assignment target;
+// encountering it as an ordinary expression is a syntax error.
+STATIC void compile_star_expr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_syntax_error(comp, (mp_parse_node_t)pns, "*x must be assignment target");
+}
+
+// Compile a bitwise-or chain: a | b | c ...
+STATIC void compile_expr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    c_binary_op(comp, pns, MP_BINARY_OP_OR);
+}
+
+// Compile a bitwise-xor chain: a ^ b ^ c ...
+STATIC void compile_xor_expr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    c_binary_op(comp, pns, MP_BINARY_OP_XOR);
+}
+
+// Compile a bitwise-and chain: a & b & c ...
+STATIC void compile_and_expr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    c_binary_op(comp, pns, MP_BINARY_OP_AND);
+}
+
+// Compile a left-associative chain of shift operations (<< and >>).
+STATIC void compile_shift_expr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    compile_node(comp, pns->nodes[0]);
+    // nodes alternate operator/operand after the first operand
+    for (int i = 1; i + 1 < n; i += 2) {
+        compile_node(comp, pns->nodes[i + 1]);
+        mp_binary_op_t op;
+        if (MP_PARSE_NODE_IS_TOKEN_KIND(pns->nodes[i], MP_TOKEN_OP_DBL_LESS)) {
+            op = MP_BINARY_OP_LSHIFT;
+        } else {
+            assert(MP_PARSE_NODE_IS_TOKEN_KIND(pns->nodes[i], MP_TOKEN_OP_DBL_MORE)); // only other possibility
+            op = MP_BINARY_OP_RSHIFT;
+        }
+        EMIT_ARG(binary_op, op);
+    }
+}
+
+// Compile a left-associative chain of additive operations (+ and -).
+STATIC void compile_arith_expr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    compile_node(comp, pns->nodes[0]);
+    // nodes alternate operator/operand after the first operand
+    for (int i = 1; i + 1 < n; i += 2) {
+        compile_node(comp, pns->nodes[i + 1]);
+        mp_binary_op_t op;
+        if (MP_PARSE_NODE_IS_TOKEN_KIND(pns->nodes[i], MP_TOKEN_OP_PLUS)) {
+            op = MP_BINARY_OP_ADD;
+        } else {
+            assert(MP_PARSE_NODE_IS_TOKEN_KIND(pns->nodes[i], MP_TOKEN_OP_MINUS)); // only other possibility
+            op = MP_BINARY_OP_SUBTRACT;
+        }
+        EMIT_ARG(binary_op, op);
+    }
+}
+
+// Compile a left-associative chain of multiplicative ops (*, //, /, %).
+STATIC void compile_term(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    compile_node(comp, pns->nodes[0]);
+    // nodes alternate operator/operand after the first operand
+    for (int i = 1; i + 1 < n; i += 2) {
+        compile_node(comp, pns->nodes[i + 1]);
+        mp_parse_node_t pn_op = pns->nodes[i];
+        mp_binary_op_t op;
+        if (MP_PARSE_NODE_IS_TOKEN_KIND(pn_op, MP_TOKEN_OP_STAR)) {
+            op = MP_BINARY_OP_MULTIPLY;
+        } else if (MP_PARSE_NODE_IS_TOKEN_KIND(pn_op, MP_TOKEN_OP_DBL_SLASH)) {
+            op = MP_BINARY_OP_FLOOR_DIVIDE;
+        } else if (MP_PARSE_NODE_IS_TOKEN_KIND(pn_op, MP_TOKEN_OP_SLASH)) {
+            op = MP_BINARY_OP_TRUE_DIVIDE;
+        } else {
+            assert(MP_PARSE_NODE_IS_TOKEN_KIND(pn_op, MP_TOKEN_OP_PERCENT)); // only other possibility
+            op = MP_BINARY_OP_MODULO;
+        }
+        EMIT_ARG(binary_op, op);
+    }
+}
+
+// Compile a unary prefix operator: +x, -x or ~x.
+STATIC void compile_factor_2(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_node(comp, pns->nodes[1]); // the operand
+    mp_parse_node_t pn_op = pns->nodes[0];
+    mp_unary_op_t op;
+    if (MP_PARSE_NODE_IS_TOKEN_KIND(pn_op, MP_TOKEN_OP_PLUS)) {
+        op = MP_UNARY_OP_POSITIVE;
+    } else if (MP_PARSE_NODE_IS_TOKEN_KIND(pn_op, MP_TOKEN_OP_MINUS)) {
+        op = MP_UNARY_OP_NEGATIVE;
+    } else {
+        assert(MP_PARSE_NODE_IS_TOKEN_KIND(pn_op, MP_TOKEN_OP_TILDE)); // only other possibility
+        op = MP_UNARY_OP_INVERT;
+    }
+    EMIT_ARG(unary_op, op);
+}
+
+// Compile a power node.  Also records whether the base is the bare
+// identifier 'super', so a following call trailer can special-case it.
+STATIC void compile_power(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // detect the special super() pattern before compiling the children
+    comp->func_arg_is_super = MP_PARSE_NODE_IS_ID(pns->nodes[0])
+        && MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]) == MP_QSTR_super;
+
+    compile_generic_all_nodes(comp, pns);
+
+    // a non-null third node means an explicit ** exponent was present
+    if (!MP_PARSE_NODE_IS_NULL(pns->nodes[2])) {
+        EMIT_ARG(binary_op, MP_BINARY_OP_POWER);
+    }
+}
+
+// Compile the argument list of a function or method call and emit the call
+// opcode.  The callable (plus self, for a method call) is already on the
+// stack.  n_positional_extra counts positional args already pushed by the
+// caller of this helper.
+STATIC void compile_trailer_paren_helper(compiler_t *comp, mp_parse_node_t pn_arglist, bool is_method_call, int n_positional_extra) {
+    // function to call is on top of stack
+
+    // this is to handle special super() call
+    if (MP_PARSE_NODE_IS_NULL(pn_arglist) && comp->func_arg_is_super && comp->scope_cur->kind == SCOPE_FUNCTION) {
+        compile_load_id(comp, MP_QSTR___class__);
+        // look for first argument to function (assumes it's "self")
+        for (int i = 0; i < comp->scope_cur->id_info_len; i++) {
+            if (comp->scope_cur->id_info[i].flags & ID_FLAG_IS_PARAM) {
+                // first argument found; load it and call super
+                EMIT_LOAD_FAST(MP_QSTR_, comp->scope_cur->id_info[i].local_num);
+                EMIT_ARG(call_function, 2, 0, 0);
+                return;
+            }
+        }
+        compile_syntax_error(comp, MP_PARSE_NODE_NULL, "super() call cannot find self"); // really a TypeError
+        return;
+    }
+
+    // get the list of arguments
+    mp_parse_node_t *args;
+    int n_args = mp_parse_node_extract_list(&pn_arglist, PN_arglist, &args);
+
+    // compile the arguments
+    // Rather than calling compile_node on the list, we go through the list of args
+    // explicitly here so that we can count the number of arguments and give sensible
+    // error messages.
+    int n_positional = n_positional_extra;
+    uint n_keyword = 0;
+    uint star_flags = 0;
+    mp_parse_node_struct_t *star_args_node = NULL, *dblstar_args_node = NULL;
+    for (int i = 0; i < n_args; i++) {
+        if (MP_PARSE_NODE_IS_STRUCT(args[i])) {
+            mp_parse_node_struct_t *pns_arg = (mp_parse_node_struct_t*)args[i];
+            if (MP_PARSE_NODE_STRUCT_KIND(pns_arg) == PN_arglist_star) {
+                // a *args argument; at most one allowed per call
+                if (star_flags & MP_EMIT_STAR_FLAG_SINGLE) {
+                    compile_syntax_error(comp, (mp_parse_node_t)pns_arg, "can't have multiple *x");
+                    return;
+                }
+                star_flags |= MP_EMIT_STAR_FLAG_SINGLE;
+                star_args_node = pns_arg;
+            } else if (MP_PARSE_NODE_STRUCT_KIND(pns_arg) == PN_arglist_dbl_star) {
+                // a **kwargs argument; at most one allowed per call
+                if (star_flags & MP_EMIT_STAR_FLAG_DOUBLE) {
+                    compile_syntax_error(comp, (mp_parse_node_t)pns_arg, "can't have multiple **x");
+                    return;
+                }
+                star_flags |= MP_EMIT_STAR_FLAG_DOUBLE;
+                dblstar_args_node = pns_arg;
+            } else if (MP_PARSE_NODE_STRUCT_KIND(pns_arg) == PN_argument) {
+                if (!MP_PARSE_NODE_IS_STRUCT_KIND(pns_arg->nodes[1], PN_comp_for)) {
+                    // a keyword argument: push its name as a string, then the value
+                    if (!MP_PARSE_NODE_IS_ID(pns_arg->nodes[0])) {
+                        compile_syntax_error(comp, (mp_parse_node_t)pns_arg, "LHS of keyword arg must be an id");
+                        return;
+                    }
+                    EMIT_ARG(load_const_str, MP_PARSE_NODE_LEAF_ARG(pns_arg->nodes[0]));
+                    compile_node(comp, pns_arg->nodes[1]);
+                    n_keyword += 1;
+                } else {
+                    // a bare generator expression passed as the sole argument
+                    compile_comprehension(comp, pns_arg, SCOPE_GEN_EXPR);
+                    n_positional++;
+                }
+            } else {
+                goto normal_argument;
+            }
+        } else {
+            normal_argument:
+            // plain positional argument; must precede all keyword arguments
+            if (n_keyword > 0) {
+                compile_syntax_error(comp, args[i], "non-keyword arg after keyword arg");
+                return;
+            }
+            compile_node(comp, args[i]);
+            n_positional++;
+        }
+    }
+
+    // compile the star/double-star arguments if we had them
+    // if we had one but not the other then we load "null" as a place holder
+    if (star_flags != 0) {
+        if (star_args_node == NULL) {
+            EMIT(load_null);
+        } else {
+            compile_node(comp, star_args_node->nodes[0]);
+        }
+        if (dblstar_args_node == NULL) {
+            EMIT(load_null);
+        } else {
+            compile_node(comp, dblstar_args_node->nodes[0]);
+        }
+    }
+
+    // emit the function/method call
+    if (is_method_call) {
+        EMIT_ARG(call_method, n_positional, n_keyword, star_flags);
+    } else {
+        EMIT_ARG(call_function, n_positional, n_keyword, star_flags);
+    }
+}
+
+// Compile the trailers of a power node (attribute access, subscripts and
+// calls).  A period trailer immediately followed by a paren trailer is
+// fused into a single method call.
+STATIC void compile_power_trailers(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    for (int i = 0; i < n; i++) {
+        bool fuse_method_call = i + 1 < n
+            && MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[i], PN_trailer_period)
+            && MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[i + 1], PN_trailer_paren);
+        if (fuse_method_call) {
+            // optimisation for method calls a.f(...), following PyPy
+            mp_parse_node_struct_t *pns_period = (mp_parse_node_struct_t*)pns->nodes[i];
+            mp_parse_node_struct_t *pns_paren = (mp_parse_node_struct_t*)pns->nodes[i + 1];
+            EMIT_ARG(load_method, MP_PARSE_NODE_LEAF_ARG(pns_period->nodes[0])); // get the method
+            compile_trailer_paren_helper(comp, pns_paren->nodes[0], true, 0);
+            i += 1; // the paren trailer was consumed by the fused call
+        } else {
+            compile_node(comp, pns->nodes[i]);
+        }
+        // the super() special case only applies to the first trailer
+        comp->func_arg_is_super = false;
+    }
+}
+
+// Compile implicit concatenation of adjacent string/bytes literals.
+// Pass 1 over the nodes validates the kinds and totals the byte length;
+// pass 2 (emit pass only) copies the pieces into one buffer and loads the
+// resulting str/bytes object as a constant.
+STATIC void compile_atom_string(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // a list of strings
+
+    // check type of list (string or bytes) and count total number of bytes
+    int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    size_t n_bytes = 0;
+    int string_kind = MP_PARSE_NODE_NULL;
+    for (int i = 0; i < n; i++) {
+        int pn_kind;
+        if (MP_PARSE_NODE_IS_LEAF(pns->nodes[i])) {
+            // leaf literal: the data is interned as a qstr
+            pn_kind = MP_PARSE_NODE_LEAF_KIND(pns->nodes[i]);
+            assert(pn_kind == MP_PARSE_NODE_STRING || pn_kind == MP_PARSE_NODE_BYTES);
+            n_bytes += qstr_len(MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]));
+        } else {
+            // struct literal: nodes[0] is the data pointer, nodes[1] the length
+            assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[i]));
+            mp_parse_node_struct_t *pns_string = (mp_parse_node_struct_t*)pns->nodes[i];
+            if (MP_PARSE_NODE_STRUCT_KIND(pns_string) == PN_string) {
+                pn_kind = MP_PARSE_NODE_STRING;
+            } else {
+                assert(MP_PARSE_NODE_STRUCT_KIND(pns_string) == PN_bytes);
+                pn_kind = MP_PARSE_NODE_BYTES;
+            }
+            n_bytes += pns_string->nodes[1];
+        }
+        if (i == 0) {
+            // first piece fixes the kind for the whole concatenation
+            string_kind = pn_kind;
+        } else if (pn_kind != string_kind) {
+            compile_syntax_error(comp, (mp_parse_node_t)pns, "cannot mix bytes and nonbytes literals");
+            return;
+        }
+    }
+
+    // if we are not in the last pass, just load a dummy object
+    if (comp->pass != MP_PASS_EMIT) {
+        EMIT_ARG(load_const_obj, mp_const_none);
+        return;
+    }
+
+    // concatenate string/bytes
+    vstr_t vstr;
+    vstr_init_len(&vstr, n_bytes);
+    byte *s_dest = (byte*)vstr.buf;
+    for (int i = 0; i < n; i++) {
+        if (MP_PARSE_NODE_IS_LEAF(pns->nodes[i])) {
+            size_t s_len;
+            const byte *s = qstr_data(MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]), &s_len);
+            memcpy(s_dest, s, s_len);
+            s_dest += s_len;
+        } else {
+            mp_parse_node_struct_t *pns_string = (mp_parse_node_struct_t*)pns->nodes[i];
+            memcpy(s_dest, (const char*)pns_string->nodes[0], pns_string->nodes[1]);
+            s_dest += pns_string->nodes[1];
+        }
+    }
+
+    // load the object
+    EMIT_ARG(load_const_obj, mp_obj_new_str_from_vstr(string_kind == MP_PARSE_NODE_STRING ? &mp_type_str : &mp_type_bytes, &vstr));
+}
+
+// Compile a comprehension (list/dict/set/generator) of the given scope
+// kind.  pns needs to have 2 nodes: first is the lhs of the comprehension,
+// second is the PN_comp_for node.
+STATIC void compile_comprehension(compiler_t *comp, mp_parse_node_struct_t *pns, scope_kind_t kind) {
+    assert(MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 2);
+    assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_comp_for));
+    mp_parse_node_struct_t *pns_comp_for = (mp_parse_node_struct_t*)pns->nodes[1];
+
+    if (comp->pass == MP_PASS_SCOPE) {
+        // first pass: make the comprehension's scope and stash it in the
+        // parse tree so that later passes retrieve the same scope object
+        pns_comp_for->nodes[3] = (mp_parse_node_t)scope_new_and_link(comp, kind, (mp_parse_node_t)pns, comp->scope_cur->emit_options);
+    }
+
+    // retrieve the scope stashed during the scope pass
+    scope_t *comp_scope = (scope_t*)pns_comp_for->nodes[3];
+
+    // make the function object implementing the comprehension body
+    close_over_variables_etc(comp, comp_scope, 0, 0);
+
+    // evaluate the outermost iterable and invoke the comprehension function
+    // with its iterator as the single argument
+    compile_node(comp, pns_comp_for->nodes[1]);
+    EMIT(get_iter);
+    EMIT_ARG(call_function, 1, 0, 0);
+}
+
+// Compile a parenthesised atom: the empty tuple (), a parenthesised
+// expression/tuple, or a generator expression.
+STATIC void compile_atom_paren(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+        // an empty tuple
+        c_tuple(comp, MP_PARSE_NODE_NULL, NULL);
+    } else {
+        assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp));
+        // NOTE: pns is re-pointed at the inner testlist_comp node here
+        pns = (mp_parse_node_struct_t*)pns->nodes[0];
+        assert(!MP_PARSE_NODE_IS_NULL(pns->nodes[1]));
+        if (MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])) {
+            mp_parse_node_struct_t *pns2 = (mp_parse_node_struct_t*)pns->nodes[1];
+            if (MP_PARSE_NODE_STRUCT_KIND(pns2) == PN_testlist_comp_3b) {
+                // tuple of one item, with trailing comma
+                assert(MP_PARSE_NODE_IS_NULL(pns2->nodes[0]));
+                c_tuple(comp, pns->nodes[0], NULL);
+            } else if (MP_PARSE_NODE_STRUCT_KIND(pns2) == PN_testlist_comp_3c) {
+                // tuple of many items
+                c_tuple(comp, pns->nodes[0], pns2);
+            } else if (MP_PARSE_NODE_STRUCT_KIND(pns2) == PN_comp_for) {
+                // generator expression
+                compile_comprehension(comp, pns, SCOPE_GEN_EXPR);
+            } else {
+                // tuple with 2 items
+                goto tuple_with_2_items;
+            }
+        } else {
+            // tuple with 2 items
+            tuple_with_2_items:
+            c_tuple(comp, MP_PARSE_NODE_NULL, pns);
+        }
+    }
+}
+
+// Compile a bracketed atom: an (possibly empty) list literal or a list
+// comprehension.
+STATIC void compile_atom_bracket(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+        // empty list
+        EMIT_ARG(build_list, 0);
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp)) {
+        mp_parse_node_struct_t *pns2 = (mp_parse_node_struct_t*)pns->nodes[0];
+        if (MP_PARSE_NODE_IS_STRUCT(pns2->nodes[1])) {
+            mp_parse_node_struct_t *pns3 = (mp_parse_node_struct_t*)pns2->nodes[1];
+            if (MP_PARSE_NODE_STRUCT_KIND(pns3) == PN_testlist_comp_3b) {
+                // list of one item, with trailing comma
+                assert(MP_PARSE_NODE_IS_NULL(pns3->nodes[0]));
+                compile_node(comp, pns2->nodes[0]);
+                EMIT_ARG(build_list, 1);
+            } else if (MP_PARSE_NODE_STRUCT_KIND(pns3) == PN_testlist_comp_3c) {
+                // list of many items: first element, then the rest from pns3
+                compile_node(comp, pns2->nodes[0]);
+                compile_generic_all_nodes(comp, pns3);
+                EMIT_ARG(build_list, 1 + MP_PARSE_NODE_STRUCT_NUM_NODES(pns3));
+            } else if (MP_PARSE_NODE_STRUCT_KIND(pns3) == PN_comp_for) {
+                // list comprehension
+                compile_comprehension(comp, pns2, SCOPE_LIST_COMP);
+            } else {
+                // list with 2 items
+                goto list_with_2_items;
+            }
+        } else {
+            // list with 2 items
+            list_with_2_items:
+            compile_node(comp, pns2->nodes[0]);
+            compile_node(comp, pns2->nodes[1]);
+            EMIT_ARG(build_list, 2);
+        }
+    } else {
+        // list with 1 item
+        compile_node(comp, pns->nodes[0]);
+        EMIT_ARG(build_list, 1);
+    }
+}
+
+// Compile a braced atom: an empty dict, a dict or set literal, or a
+// dict/set comprehension.  Whether it is a dict or a set is decided by
+// the first element (key:value item => dict, plain value => set).
+STATIC void compile_atom_brace(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    mp_parse_node_t pn = pns->nodes[0];
+    if (MP_PARSE_NODE_IS_NULL(pn)) {
+        // empty dict
+        EMIT_ARG(build_map, 0);
+    } else if (MP_PARSE_NODE_IS_STRUCT(pn)) {
+        // NOTE: pns is re-pointed at the inner node here
+        pns = (mp_parse_node_struct_t*)pn;
+        if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_dictorsetmaker_item) {
+            // dict with one element
+            EMIT_ARG(build_map, 1);
+            compile_node(comp, pn);
+            EMIT(store_map);
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_dictorsetmaker) {
+            assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should succeed
+            mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t*)pns->nodes[1];
+            if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_dictorsetmaker_list) {
+                // dict/set with multiple elements
+
+                // get tail elements (2nd, 3rd, ...)
+                mp_parse_node_t *nodes;
+                int n = mp_parse_node_extract_list(&pns1->nodes[0], PN_dictorsetmaker_list2, &nodes);
+
+                // first element sets whether it's a dict or set
+                bool is_dict;
+                if (!MICROPY_PY_BUILTINS_SET || MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_dictorsetmaker_item)) {
+                    // a dictionary
+                    EMIT_ARG(build_map, 1 + n);
+                    compile_node(comp, pns->nodes[0]);
+                    EMIT(store_map);
+                    is_dict = true;
+                } else {
+                    // a set
+                    compile_node(comp, pns->nodes[0]); // 1st value of set
+                    is_dict = false;
+                }
+
+                // process rest of elements; each must match the kind chosen
+                // by the first element, otherwise it's a syntax error
+                for (int i = 0; i < n; i++) {
+                    mp_parse_node_t pn_i = nodes[i];
+                    bool is_key_value = MP_PARSE_NODE_IS_STRUCT_KIND(pn_i, PN_dictorsetmaker_item);
+                    compile_node(comp, pn_i);
+                    if (is_dict) {
+                        if (!is_key_value) {
+                            compile_syntax_error(comp, (mp_parse_node_t)pns, "expecting key:value for dictionary");
+                            return;
+                        }
+                        EMIT(store_map);
+                    } else {
+                        if (is_key_value) {
+                            compile_syntax_error(comp, (mp_parse_node_t)pns, "expecting just a value for set");
+                            return;
+                        }
+                    }
+                }
+
+                #if MICROPY_PY_BUILTINS_SET
+                // if it's a set, build it
+                if (!is_dict) {
+                    EMIT_ARG(build_set, 1 + n);
+                }
+                #endif
+            } else {
+                assert(MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_comp_for); // should be
+                // dict/set comprehension
+                if (!MICROPY_PY_BUILTINS_SET || MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_dictorsetmaker_item)) {
+                    // a dictionary comprehension
+                    compile_comprehension(comp, pns, SCOPE_DICT_COMP);
+                } else {
+                    // a set comprehension
+                    compile_comprehension(comp, pns, SCOPE_SET_COMP);
+                }
+            }
+        } else {
+            // set with one element
+            goto set_with_one_element;
+        }
+    } else {
+        // set with one element
+        set_with_one_element:
+        #if MICROPY_PY_BUILTINS_SET
+        compile_node(comp, pn);
+        EMIT_ARG(build_set, 1);
+        #else
+        assert(0);
+        #endif
+    }
+}
+
+// Compile a call trailer '(...)': the callable is already on the stack.
+STATIC void compile_trailer_paren(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_trailer_paren_helper(comp, pns->nodes[0], false, 0);
+}
+
+// Compile a subscript trailer '[...]': the object whose index we want is
+// already on the top of the stack.
+STATIC void compile_trailer_bracket(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_node(comp, pns->nodes[0]); // push the index expression
+    EMIT(load_subscr);
+}
+
+// Compile an attribute trailer '.name': the object whose attribute we want
+// is already on the top of the stack.
+STATIC void compile_trailer_period(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    EMIT_ARG(load_attr, MP_PARSE_NODE_LEAF_ARG(pns->nodes[0])); // attribute to get
+}
+
+#if MICROPY_PY_BUILTINS_SLICE
+// Compile the remainder of a slice after the (already pushed) start value.
+// Handles the stop value and optional step, emitting build_slice with 2 or
+// 3 components.  '?' in the comments below stands for the already-compiled
+// start part.  NOTE: both pns and pn are re-pointed as the node is peeled.
+STATIC void compile_subscript_3_helper(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_3); // should always be
+    mp_parse_node_t pn = pns->nodes[0];
+    if (MP_PARSE_NODE_IS_NULL(pn)) {
+        // [?:] -- no stop, no step: stop defaults to None
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        EMIT_ARG(build_slice, 2);
+    } else if (MP_PARSE_NODE_IS_STRUCT(pn)) {
+        pns = (mp_parse_node_struct_t*)pn;
+        if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_3c) {
+            // no stop value, but a second colon is present
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+            pn = pns->nodes[0];
+            if (MP_PARSE_NODE_IS_NULL(pn)) {
+                // [?::]
+                EMIT_ARG(build_slice, 2);
+            } else {
+                // [?::x]
+                compile_node(comp, pn);
+                EMIT_ARG(build_slice, 3);
+            }
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_3d) {
+            // stop value followed by a slice-op (second colon)
+            compile_node(comp, pns->nodes[0]);
+            assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should always be
+            pns = (mp_parse_node_struct_t*)pns->nodes[1];
+            assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_sliceop); // should always be
+            if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+                // [?:x:]
+                EMIT_ARG(build_slice, 2);
+            } else {
+                // [?:x:x]
+                compile_node(comp, pns->nodes[0]);
+                EMIT_ARG(build_slice, 3);
+            }
+        } else {
+            // [?:x] -- stop value is itself a struct node
+            compile_node(comp, pn);
+            EMIT_ARG(build_slice, 2);
+        }
+    } else {
+        // [?:x] -- stop value is a leaf node
+        compile_node(comp, pn);
+        EMIT_ARG(build_slice, 2);
+    }
+}
+
+// Compile a slice with an explicit start expression: 'x:...'.
+STATIC void compile_subscript_2(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_node(comp, pns->nodes[0]); // start of slice
+    assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should always be
+    compile_subscript_3_helper(comp, (mp_parse_node_struct_t*)pns->nodes[1]);
+}
+
+// Compile a slice with no explicit start (':...'): start defaults to None.
+STATIC void compile_subscript_3(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); // implicit start value
+    compile_subscript_3_helper(comp, pns);
+}
+#endif // MICROPY_PY_BUILTINS_SLICE
+
+// Compile a dict key:value pair.  The emitter's store_map expects the
+// value below the key, so the value is compiled first, then the key.
+STATIC void compile_dictorsetmaker_item(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_node(comp, pns->nodes[1]); // value
+    compile_node(comp, pns->nodes[0]); // key
+}
+
+// Compile a class definition, then bind the resulting class object to the
+// class's name in the current scope.
+STATIC void compile_classdef(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    qstr class_name = compile_classdef_helper(comp, pns, comp->scope_cur->emit_options);
+    compile_store_id(comp, class_name);
+}
+
+// Compile 'yield', 'yield x' and 'yield from x'.  Only legal inside a
+// function or lambda body.
+STATIC void compile_yield_expr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (comp->scope_cur->kind != SCOPE_FUNCTION && comp->scope_cur->kind != SCOPE_LAMBDA) {
+        compile_syntax_error(comp, (mp_parse_node_t)pns, "'yield' outside function");
+        return;
+    }
+    if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+        // bare 'yield' yields None
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        EMIT(yield_value);
+        return;
+    }
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_yield_arg_from)) {
+        // 'yield from x': delegate to the sub-iterator
+        pns = (mp_parse_node_struct_t*)pns->nodes[0];
+        compile_node(comp, pns->nodes[0]);
+        EMIT(get_iter);
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        EMIT(yield_from);
+        return;
+    }
+    // 'yield x'
+    compile_node(comp, pns->nodes[0]);
+    EMIT(yield_value);
+}
+
+// Compile a single string literal node.  The str object itself is created
+// only on the final (emit) pass; earlier passes load a placeholder.
+STATIC void compile_string(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (comp->pass == MP_PASS_EMIT) {
+        EMIT_ARG(load_const_obj, mp_obj_new_str((const char*)pns->nodes[0], pns->nodes[1], false));
+    } else {
+        // placeholder keeps constant/stack accounting consistent across passes
+        EMIT_ARG(load_const_obj, mp_const_none);
+    }
+}
+
+// Compile a single bytes literal node.  The bytes object itself is created
+// only on the final (emit) pass; earlier passes load a placeholder.
+STATIC void compile_bytes(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (comp->pass == MP_PASS_EMIT) {
+        EMIT_ARG(load_const_obj, mp_obj_new_bytes((const byte*)pns->nodes[0], pns->nodes[1]));
+    } else {
+        // placeholder keeps constant/stack accounting consistent across passes
+        EMIT_ARG(load_const_obj, mp_const_none);
+    }
+}
+
+// Load a constant object that the parser stored directly in the parse node
+// (e.g. the result of constant folding).
+STATIC void compile_const_object(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    #if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+    // nodes are 32-bit pointers, but need to extract 64-bit object
+    EMIT_ARG(load_const_obj, (uint64_t)pns->nodes[0] | ((uint64_t)pns->nodes[1] << 32));
+    #else
+    EMIT_ARG(load_const_obj, (mp_obj_t)pns->nodes[0]);
+    #endif
+}
+
+// Dispatch table of compile functions, indexed by parse-node struct kind.
+// The grammar-rule entries are generated from py/grammar.h via the
+// DEF_RULE macro: 'nc' expands to NULL for rules without a compile
+// function, 'c(f)' expands to compile_f.  The trailing explicit entries
+// handle the extra struct kinds (string, bytes, const object) that are
+// not grammar rules; their order must match the kind numbering.
+typedef void (*compile_function_t)(compiler_t*, mp_parse_node_struct_t*);
+STATIC compile_function_t compile_function[] = {
+#define nc NULL
+#define c(f) compile_##f
+#define DEF_RULE(rule, comp, kind, ...) comp,
+#include "py/grammar.h"
+#undef nc
+#undef c
+#undef DEF_RULE
+    NULL,
+    compile_string,
+    compile_bytes,
+    compile_const_object,
+};
+
+// Central compile dispatcher: compile any parse node.  Null nodes compile
+// to nothing; small-int and leaf nodes are emitted inline; struct nodes
+// are dispatched through the compile_function table by their kind.
+STATIC void compile_node(compiler_t *comp, mp_parse_node_t pn) {
+    if (MP_PARSE_NODE_IS_NULL(pn)) {
+        // pass
+    } else if (MP_PARSE_NODE_IS_SMALL_INT(pn)) {
+        mp_int_t arg = MP_PARSE_NODE_LEAF_SMALL_INT(pn);
+        #if MICROPY_DYNAMIC_COMPILER
+        // the target runtime may have a narrower small-int than the host;
+        // check the value still fits when sign-extended from that width
+        mp_uint_t sign_mask = -(1 << (mp_dynamic_compiler.small_int_bits - 1));
+        if ((arg & sign_mask) == 0 || (arg & sign_mask) == sign_mask) {
+            // integer fits in target runtime's small-int
+            EMIT_ARG(load_const_small_int, arg);
+        } else {
+            // integer doesn't fit, so create a multi-precision int object
+            // (but only create the actual object on the last pass)
+            if (comp->pass != MP_PASS_EMIT) {
+                EMIT_ARG(load_const_obj, mp_const_none);
+            } else {
+                EMIT_ARG(load_const_obj, mp_obj_new_int_from_ll(arg));
+            }
+        }
+        #else
+        EMIT_ARG(load_const_small_int, arg);
+        #endif
+    } else if (MP_PARSE_NODE_IS_LEAF(pn)) {
+        uintptr_t arg = MP_PARSE_NODE_LEAF_ARG(pn);
+        switch (MP_PARSE_NODE_LEAF_KIND(pn)) {
+            case MP_PARSE_NODE_ID: compile_load_id(comp, arg); break;
+            case MP_PARSE_NODE_STRING: EMIT_ARG(load_const_str, arg); break;
+            case MP_PARSE_NODE_BYTES:
+                // only create and load the actual bytes object on the last pass
+                if (comp->pass != MP_PASS_EMIT) {
+                    EMIT_ARG(load_const_obj, mp_const_none);
+                } else {
+                    size_t len;
+                    const byte *data = qstr_data(arg, &len);
+                    EMIT_ARG(load_const_obj, mp_obj_new_bytes(data, len));
+                }
+                break;
+            case MP_PARSE_NODE_TOKEN: default:
+                if (arg == MP_TOKEN_NEWLINE) {
+                    // this can occur when file_input lets through a NEWLINE (eg if file starts with a newline)
+                    // or when single_input lets through a NEWLINE (user enters a blank line)
+                    // do nothing
+                } else {
+                    EMIT_ARG(load_const_tok, arg);
+                }
+                break;
+        }
+    } else {
+        // struct node: record the source line then dispatch by kind
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+        EMIT_ARG(set_source_line, pns->source_line);
+        compile_function_t f = compile_function[MP_PARSE_NODE_STRUCT_KIND(pns)];
+        if (f == NULL) {
+            // no compile function registered for this kind: internal error
+#if MICROPY_DEBUG_PRINTERS
+            printf("node %u cannot be compiled\n", (uint)MP_PARSE_NODE_STRUCT_KIND(pns));
+            mp_parse_node_print(pn, 0);
+#endif
+            compile_syntax_error(comp, pn, "internal compiler error");
+        } else {
+            f(comp, pns);
+        }
+    }
+}
+
+// Process one parameter of a funcdef or lambda during the scope pass:
+// classify it (plain, keyword-only, *args, **kwargs), update the counters
+// and flags on the current scope, and register its name as a local.  The
+// three pn_* kind arguments select between the typed (funcdef) and untyped
+// (lambda) grammar rules.
+STATIC void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn, pn_kind_t pn_name, pn_kind_t pn_star, pn_kind_t pn_dbl_star) {
+    // check that **kw is last
+    if ((comp->scope_cur->scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) != 0) {
+        compile_syntax_error(comp, pn, "invalid syntax");
+        return;
+    }
+
+    qstr param_name = MP_QSTR_NULL;
+    uint param_flag = ID_FLAG_IS_PARAM;
+    if (MP_PARSE_NODE_IS_ID(pn)) {
+        // bare identifier parameter
+        param_name = MP_PARSE_NODE_LEAF_ARG(pn);
+        if (comp->have_star) {
+            // comes after a star, so counts as a keyword-only parameter
+            comp->scope_cur->num_kwonly_args += 1;
+        } else {
+            // comes before a star, so counts as a positional parameter
+            comp->scope_cur->num_pos_args += 1;
+        }
+    } else {
+        assert(MP_PARSE_NODE_IS_STRUCT(pn));
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+        if (MP_PARSE_NODE_STRUCT_KIND(pns) == pn_name) {
+            // named parameter (with possible annotation/default)
+            param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+            if (comp->have_star) {
+                // comes after a star, so counts as a keyword-only parameter
+                comp->scope_cur->num_kwonly_args += 1;
+            } else {
+                // comes before a star, so counts as a positional parameter
+                comp->scope_cur->num_pos_args += 1;
+            }
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == pn_star) {
+            if (comp->have_star) {
+                // more than one star
+                compile_syntax_error(comp, pn, "invalid syntax");
+                return;
+            }
+            comp->have_star = true;
+            param_flag = ID_FLAG_IS_PARAM | ID_FLAG_IS_STAR_PARAM;
+            if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+                // bare star
+                // TODO see http://www.python.org/dev/peps/pep-3102/
+                //assert(comp->scope_cur->num_dict_params == 0);
+            } else if (MP_PARSE_NODE_IS_ID(pns->nodes[0])) {
+                // named star
+                comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_VARARGS;
+                param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+            } else {
+                assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_tfpdef)); // should be
+                // named star with possible annotation
+                comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_VARARGS;
+                pns = (mp_parse_node_struct_t*)pns->nodes[0];
+                param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+            }
+        } else {
+            assert(MP_PARSE_NODE_STRUCT_KIND(pns) == pn_dbl_star); // should be
+            // **kwargs parameter
+            param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+            param_flag = ID_FLAG_IS_PARAM | ID_FLAG_IS_DBL_STAR_PARAM;
+            comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_VARKEYWORDS;
+        }
+    }
+
+    if (param_name != MP_QSTR_NULL) {
+        // register the parameter as a local; a duplicate name is an error
+        bool added;
+        id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, param_name, &added);
+        if (!added) {
+            compile_syntax_error(comp, pn, "name reused for argument");
+            return;
+        }
+        id_info->kind = ID_INFO_KIND_LOCAL;
+        id_info->flags = param_flag;
+    }
+}
+
+// Process one funcdef parameter (typed grammar rules).
+STATIC void compile_scope_func_param(compiler_t *comp, mp_parse_node_t pn) {
+    compile_scope_func_lambda_param(comp, pn, PN_typedargslist_name, PN_typedargslist_star, PN_typedargslist_dbl_star);
+}
+
+// Process one lambda parameter (untyped grammar rules).
+STATIC void compile_scope_lambda_param(compiler_t *comp, mp_parse_node_t pn) {
+    compile_scope_func_lambda_param(comp, pn, PN_varargslist_name, PN_varargslist_star, PN_varargslist_dbl_star);
+}
+
+#if MICROPY_EMIT_NATIVE
+// For the native emitter: extract a parameter's annotation (if any) and
+// pass it to the emitter as the parameter's native type.  Only identifier
+// annotations are accepted.  NOTE: pns may be re-pointed at a nested
+// tfpdef node for the named-star case.
+STATIC void compile_scope_func_annotations(compiler_t *comp, mp_parse_node_t pn) {
+    if (!MP_PARSE_NODE_IS_STRUCT(pn)) {
+        // no annotation
+        return;
+    }
+
+    mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+    if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_typedargslist_name) {
+        // named parameter with possible annotation
+        // fallthrough
+    } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_typedargslist_star) {
+        if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_tfpdef)) {
+            // named star with possible annotation
+            pns = (mp_parse_node_struct_t*)pns->nodes[0];
+            // fallthrough
+        } else {
+            // no annotation
+            return;
+        }
+    } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_typedargslist_dbl_star) {
+        // double star with possible annotation
+        // fallthrough
+    } else {
+        // no annotation
+        return;
+    }
+
+    // nodes[0] is the parameter name, nodes[1] its annotation (may be null)
+    mp_parse_node_t pn_annotation = pns->nodes[1];
+
+    if (!MP_PARSE_NODE_IS_NULL(pn_annotation)) {
+        // the parameter was registered as a local during the scope pass
+        qstr param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+        id_info_t *id_info = scope_find(comp->scope_cur, param_name);
+        assert(id_info != NULL);
+
+        if (MP_PARSE_NODE_IS_ID(pn_annotation)) {
+            qstr arg_type = MP_PARSE_NODE_LEAF_ARG(pn_annotation);
+            EMIT_ARG(set_native_type, MP_EMIT_NATIVE_TYPE_ARG, id_info->local_num, arg_type);
+        } else {
+            compile_syntax_error(comp, pn_annotation, "parameter annotation must be an identifier");
+        }
+    }
+}
+#endif // MICROPY_EMIT_NATIVE
+
+// Compile one level of a comprehension's for-loop.  Nested 'if' clauses
+// are handled iteratively (the tail_recursion goto); a nested 'for'
+// recurses with an incremented for_depth.  At the innermost level the
+// result expression is appended to the list/dict/set being built, or
+// yielded for a generator expression.
+STATIC void compile_scope_comp_iter(compiler_t *comp, mp_parse_node_struct_t *pns_comp_for, mp_parse_node_t pn_inner_expr, int for_depth) {
+    uint l_top = comp_next_label(comp);
+    uint l_end = comp_next_label(comp);
+    EMIT_ARG(label_assign, l_top);
+    EMIT_ARG(for_iter, l_end);
+    // bind the loop variable(s) for this level
+    c_assign(comp, pns_comp_for->nodes[0], ASSIGN_STORE);
+    mp_parse_node_t pn_iter = pns_comp_for->nodes[2];
+
+    tail_recursion:
+    if (MP_PARSE_NODE_IS_NULL(pn_iter)) {
+        // no more nested if/for; compile inner expression
+        compile_node(comp, pn_inner_expr);
+        // for_depth + 2 is the stack offset of the object being built
+        if (comp->scope_cur->kind == SCOPE_LIST_COMP) {
+            EMIT_ARG(list_append, for_depth + 2);
+        } else if (comp->scope_cur->kind == SCOPE_DICT_COMP) {
+            EMIT_ARG(map_add, for_depth + 2);
+        #if MICROPY_PY_BUILTINS_SET
+        } else if (comp->scope_cur->kind == SCOPE_SET_COMP) {
+            EMIT_ARG(set_add, for_depth + 2);
+        #endif
+        } else {
+            // generator expression: yield the value and discard the result
+            EMIT(yield_value);
+            EMIT(pop_top);
+        }
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn_iter, PN_comp_if)) {
+        // if condition: a false condition continues with the next item
+        mp_parse_node_struct_t *pns_comp_if = (mp_parse_node_struct_t*)pn_iter;
+        c_if_cond(comp, pns_comp_if->nodes[0], false, l_top);
+        pn_iter = pns_comp_if->nodes[1];
+        goto tail_recursion;
+    } else {
+        assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn_iter, PN_comp_for)); // should be
+        // for loop
+        mp_parse_node_struct_t *pns_comp_for2 = (mp_parse_node_struct_t*)pn_iter;
+        compile_node(comp, pns_comp_for2->nodes[1]);
+        EMIT(get_iter);
+        compile_scope_comp_iter(comp, pns_comp_for2, pn_inner_expr, for_depth + 1);
+    }
+
+    EMIT_ARG(jump, l_top);
+    EMIT_ARG(label_assign, l_end);
+    EMIT(for_iter_end);
+}
+
+// If the scope's first statement is a string literal, compile it and store
+// it as __doc__ (only when MICROPY_ENABLE_DOC_STRING is enabled).
+STATIC void check_for_doc_string(compiler_t *comp, mp_parse_node_t pn) {
+#if MICROPY_ENABLE_DOC_STRING
+    // see http://www.python.org/dev/peps/pep-0257/
+
+    // look for the first statement
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_expr_stmt)) {
+        // a statement; fall through
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_file_input_2)) {
+        // file input; find the first non-newline node
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+        int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+        for (int i = 0; i < num_nodes; i++) {
+            pn = pns->nodes[i];
+            if (!(MP_PARSE_NODE_IS_LEAF(pn) && MP_PARSE_NODE_LEAF_KIND(pn) == MP_PARSE_NODE_TOKEN && MP_PARSE_NODE_LEAF_ARG(pn) == MP_TOKEN_NEWLINE)) {
+                // not a newline, so this is the first statement; finish search
+                break;
+            }
+        }
+        // if we didn't find a non-newline then it's okay to fall through; pn will be a newline and so doc-string test below will fail gracefully
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_suite_block_stmts)) {
+        // a list of statements; get the first one
+        pn = ((mp_parse_node_struct_t*)pn)->nodes[0];
+    } else {
+        return;
+    }
+
+    // check the first statement for a doc string
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_expr_stmt)) {
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+        // accept either an interned string leaf or a PN_string struct node
+        if ((MP_PARSE_NODE_IS_LEAF(pns->nodes[0])
+                && MP_PARSE_NODE_LEAF_KIND(pns->nodes[0]) == MP_PARSE_NODE_STRING)
+            || MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_string)) {
+            // compile the doc string
+            compile_node(comp, pns->nodes[0]);
+            // store the doc string
+            compile_store_id(comp, MP_QSTR___doc__);
+        }
+    }
+#else
+    (void)comp;
+    (void)pn;
+#endif
+}
+
+// Run one compiler pass over a single scope.  Dispatches on the scope kind:
+// module (or eval input), function, lambda, comprehension (list/dict/set/gen)
+// or class.  On MP_PASS_SCOPE it also registers parameters/ids and resets the
+// scope's stack sizes; later passes drive the selected emitter via EMIT/EMIT_ARG.
+STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
+    comp->pass = pass;
+    comp->scope_cur = scope;
+    comp->next_label = 1;
+    EMIT_ARG(start_pass, pass, scope);
+
+    if (comp->pass == MP_PASS_SCOPE) {
+        // reset maximum stack sizes in scope
+        // they will be computed in this first pass
+        scope->stack_size = 0;
+        scope->exc_stack_size = 0;
+    }
+
+    // compile
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(scope->pn, PN_eval_input)) {
+        assert(scope->kind == SCOPE_MODULE);
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)scope->pn;
+        compile_node(comp, pns->nodes[0]); // compile the expression
+        EMIT(return_value);
+    } else if (scope->kind == SCOPE_MODULE) {
+        // REPL input is not scanned for a doc string
+        if (!comp->is_repl) {
+            check_for_doc_string(comp, scope->pn);
+        }
+        compile_node(comp, scope->pn);
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        EMIT(return_value);
+    } else if (scope->kind == SCOPE_FUNCTION) {
+        assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)scope->pn;
+        assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_funcdef);
+
+        // work out number of parameters, keywords and default parameters, and add them to the id_info array
+        // must be done before compiling the body so that arguments are numbered first (for LOAD_FAST etc)
+        if (comp->pass == MP_PASS_SCOPE) {
+            comp->have_star = false;
+            apply_to_single_or_list(comp, pns->nodes[1], PN_typedargslist, compile_scope_func_param);
+        }
+        #if MICROPY_EMIT_NATIVE
+        else if (scope->emit_options == MP_EMIT_OPT_VIPER) {
+            // compile annotations; only needed on latter compiler passes
+            // only needed for viper emitter
+
+            // argument annotations
+            apply_to_single_or_list(comp, pns->nodes[1], PN_typedargslist, compile_scope_func_annotations);
+
+            // pns->nodes[2] is return/whole function annotation
+            mp_parse_node_t pn_annotation = pns->nodes[2];
+            if (!MP_PARSE_NODE_IS_NULL(pn_annotation)) {
+                // nodes[2] can be null or a test-expr
+                if (MP_PARSE_NODE_IS_ID(pn_annotation)) {
+                    qstr ret_type = MP_PARSE_NODE_LEAF_ARG(pn_annotation);
+                    EMIT_ARG(set_native_type, MP_EMIT_NATIVE_TYPE_RETURN, 0, ret_type);
+                } else {
+                    compile_syntax_error(comp, pn_annotation, "return annotation must be an identifier");
+                }
+            }
+        }
+        #endif // MICROPY_EMIT_NATIVE
+
+        compile_node(comp, pns->nodes[3]); // 3 is function body
+        // emit return if it wasn't the last opcode
+        if (!EMIT(last_emit_was_return_value)) {
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+            EMIT(return_value);
+        }
+    } else if (scope->kind == SCOPE_LAMBDA) {
+        assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)scope->pn;
+        assert(MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 3);
+
+        // work out number of parameters, keywords and default parameters, and add them to the id_info array
+        // must be done before compiling the body so that arguments are numbered first (for LOAD_FAST etc)
+        if (comp->pass == MP_PASS_SCOPE) {
+            comp->have_star = false;
+            apply_to_single_or_list(comp, pns->nodes[0], PN_varargslist, compile_scope_lambda_param);
+        }
+
+        compile_node(comp, pns->nodes[1]); // 1 is lambda body
+
+        // if the lambda is a generator, then we return None, not the result of the expression of the lambda
+        if (scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
+            EMIT(pop_top);
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        }
+        EMIT(return_value);
+    } else if (scope->kind == SCOPE_LIST_COMP || scope->kind == SCOPE_DICT_COMP || scope->kind == SCOPE_SET_COMP || scope->kind == SCOPE_GEN_EXPR) {
+        // a bit of a hack at the moment
+
+        assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)scope->pn;
+        assert(MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 2);
+        assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_comp_for));
+        mp_parse_node_struct_t *pns_comp_for = (mp_parse_node_struct_t*)pns->nodes[1];
+
+        // We need a unique name for the comprehension argument (the iterator).
+        // CPython uses .0, but we should be able to use anything that won't
+        // clash with a user defined variable.  Best to use an existing qstr,
+        // so we use the blank qstr.
+        qstr qstr_arg = MP_QSTR_;
+        if (comp->pass == MP_PASS_SCOPE) {
+            bool added;
+            id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, qstr_arg, &added);
+            assert(added);
+            id_info->kind = ID_INFO_KIND_LOCAL;
+            scope->num_pos_args = 1;
+        }
+
+        // build the empty result container (generators have no container)
+        if (scope->kind == SCOPE_LIST_COMP) {
+            EMIT_ARG(build_list, 0);
+        } else if (scope->kind == SCOPE_DICT_COMP) {
+            EMIT_ARG(build_map, 0);
+        #if MICROPY_PY_BUILTINS_SET
+        } else if (scope->kind == SCOPE_SET_COMP) {
+            EMIT_ARG(build_set, 0);
+        #endif
+        }
+
+        compile_load_id(comp, qstr_arg);
+        compile_scope_comp_iter(comp, pns_comp_for, pns->nodes[0], 0);
+
+        if (scope->kind == SCOPE_GEN_EXPR) {
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        }
+        EMIT(return_value);
+    } else {
+        assert(scope->kind == SCOPE_CLASS);
+        assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)scope->pn;
+        assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_classdef);
+
+        if (comp->pass == MP_PASS_SCOPE) {
+            bool added;
+            id_info_t *id_info = scope_find_or_add_id(scope, MP_QSTR___class__, &added);
+            assert(added);
+            id_info->kind = ID_INFO_KIND_LOCAL;
+        }
+
+        // set up __module__ and __qualname__ in the class namespace
+        compile_load_id(comp, MP_QSTR___name__);
+        compile_store_id(comp, MP_QSTR___module__);
+        EMIT_ARG(load_const_str, MP_PARSE_NODE_LEAF_ARG(pns->nodes[0])); // 0 is class name
+        compile_store_id(comp, MP_QSTR___qualname__);
+
+        check_for_doc_string(comp, pns->nodes[2]);
+        compile_node(comp, pns->nodes[2]); // 2 is class body
+
+        id_info_t *id = scope_find(scope, MP_QSTR___class__);
+        assert(id != NULL);
+        if (id->kind == ID_INFO_KIND_LOCAL) {
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        } else {
+            EMIT_LOAD_FAST(MP_QSTR___class__, id->local_num);
+        }
+        EMIT(return_value);
+    }
+
+    EMIT(end_pass);
+
+    // make sure we match all the exception levels
+    assert(comp->cur_except_level == 0);
+}
+
+#if MICROPY_EMIT_INLINE_THUMB
+// requires 3 passes: SCOPE, CODE_SIZE, EMIT
+// Compile a function whose body is inline (thumb) assembler.  Each statement
+// of the body must look like a call op(args...); the pseudo-ops 'label',
+// 'align' and 'data' are handled here, everything else is passed to the
+// inline-assembler emitter as an instruction.
+STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
+    comp->pass = pass;
+    comp->scope_cur = scope;
+    comp->next_label = 1;
+
+    if (scope->kind != SCOPE_FUNCTION) {
+        compile_syntax_error(comp, MP_PARSE_NODE_NULL, "inline assembler must be a function");
+        return;
+    }
+
+    if (comp->pass > MP_PASS_SCOPE) {
+        EMIT_INLINE_ASM_ARG(start_pass, comp->pass, comp->scope_cur, &comp->compile_error);
+    }
+
+    // get the function definition parse node
+    assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+    mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)scope->pn;
+    assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_funcdef);
+
+    //qstr f_id = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]); // function name
+
+    // parameters are in pns->nodes[1]
+    if (comp->pass == MP_PASS_CODE_SIZE) {
+        mp_parse_node_t *pn_params;
+        int n_params = mp_parse_node_extract_list(&pns->nodes[1], PN_typedargslist, &pn_params);
+        scope->num_pos_args = EMIT_INLINE_ASM_ARG(count_params, n_params, pn_params);
+        if (comp->compile_error != MP_OBJ_NULL) {
+            goto inline_asm_error;
+        }
+    }
+
+    // pns->nodes[2] is function return annotation
+    // map it to a native return type; default is int
+    mp_uint_t type_sig = MP_NATIVE_TYPE_INT;
+    mp_parse_node_t pn_annotation = pns->nodes[2];
+    if (!MP_PARSE_NODE_IS_NULL(pn_annotation)) {
+        // nodes[2] can be null or a test-expr
+        if (MP_PARSE_NODE_IS_ID(pn_annotation)) {
+            qstr ret_type = MP_PARSE_NODE_LEAF_ARG(pn_annotation);
+            switch (ret_type) {
+                case MP_QSTR_object: type_sig = MP_NATIVE_TYPE_OBJ; break;
+                case MP_QSTR_bool: type_sig = MP_NATIVE_TYPE_BOOL; break;
+                case MP_QSTR_int: type_sig = MP_NATIVE_TYPE_INT; break;
+                case MP_QSTR_uint: type_sig = MP_NATIVE_TYPE_UINT; break;
+                default: compile_syntax_error(comp, pn_annotation, "unknown type"); return;
+            }
+        } else {
+            compile_syntax_error(comp, pn_annotation, "return annotation must be an identifier");
+        }
+    }
+
+    mp_parse_node_t pn_body = pns->nodes[3]; // body
+    mp_parse_node_t *nodes;
+    int num = mp_parse_node_extract_list(&pn_body, PN_suite_block_stmts, &nodes);
+
+    for (int i = 0; i < num; i++) {
+        assert(MP_PARSE_NODE_IS_STRUCT(nodes[i]));
+        mp_parse_node_struct_t *pns2 = (mp_parse_node_struct_t*)nodes[i];
+        if (MP_PARSE_NODE_STRUCT_KIND(pns2) == PN_pass_stmt) {
+            // no instructions
+            continue;
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns2) != PN_expr_stmt) {
+            // not an instruction; error
+            not_an_instruction:
+            compile_syntax_error(comp, nodes[i], "expecting an assembler instruction");
+            return;
+        }
+
+        // check structure of parse node: must be a call of the form name(...)
+        assert(MP_PARSE_NODE_IS_STRUCT(pns2->nodes[0]));
+        if (!MP_PARSE_NODE_IS_NULL(pns2->nodes[1])) {
+            goto not_an_instruction;
+        }
+        pns2 = (mp_parse_node_struct_t*)pns2->nodes[0];
+        if (MP_PARSE_NODE_STRUCT_KIND(pns2) != PN_power) {
+            goto not_an_instruction;
+        }
+        if (!MP_PARSE_NODE_IS_ID(pns2->nodes[0])) {
+            goto not_an_instruction;
+        }
+        if (!MP_PARSE_NODE_IS_STRUCT_KIND(pns2->nodes[1], PN_trailer_paren)) {
+            goto not_an_instruction;
+        }
+        assert(MP_PARSE_NODE_IS_NULL(pns2->nodes[2]));
+
+        // parse node looks like an instruction
+        // get instruction name and args
+        qstr op = MP_PARSE_NODE_LEAF_ARG(pns2->nodes[0]);
+        pns2 = (mp_parse_node_struct_t*)pns2->nodes[1]; // PN_trailer_paren
+        mp_parse_node_t *pn_arg;
+        int n_args = mp_parse_node_extract_list(&pns2->nodes[0], PN_arglist, &pn_arg);
+
+        // emit instructions
+        if (op == MP_QSTR_label) {
+            if (!(n_args == 1 && MP_PARSE_NODE_IS_ID(pn_arg[0]))) {
+                compile_syntax_error(comp, nodes[i], "'label' requires 1 argument");
+                return;
+            }
+            uint lab = comp_next_label(comp);
+            if (pass > MP_PASS_SCOPE) {
+                if (!EMIT_INLINE_ASM_ARG(label, lab, MP_PARSE_NODE_LEAF_ARG(pn_arg[0]))) {
+                    compile_syntax_error(comp, nodes[i], "label redefined");
+                    return;
+                }
+            }
+        } else if (op == MP_QSTR_align) {
+            if (!(n_args == 1 && MP_PARSE_NODE_IS_SMALL_INT(pn_arg[0]))) {
+                compile_syntax_error(comp, nodes[i], "'align' requires 1 argument");
+                return;
+            }
+            if (pass > MP_PASS_SCOPE) {
+                EMIT_INLINE_ASM_ARG(align, MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[0]));
+            }
+        } else if (op == MP_QSTR_data) {
+            // data(bytesize, v1, v2, ...) emits raw data words of given size
+            if (!(n_args >= 2 && MP_PARSE_NODE_IS_SMALL_INT(pn_arg[0]))) {
+                compile_syntax_error(comp, nodes[i], "'data' requires at least 2 arguments");
+                return;
+            }
+            if (pass > MP_PASS_SCOPE) {
+                mp_int_t bytesize = MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[0]);
+                for (uint j = 1; j < n_args; j++) {
+                    if (!MP_PARSE_NODE_IS_SMALL_INT(pn_arg[j])) {
+                        compile_syntax_error(comp, nodes[i], "'data' requires integer arguments");
+                        return;
+                    }
+                    EMIT_INLINE_ASM_ARG(data, bytesize, MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[j]));
+                }
+            }
+        } else {
+            if (pass > MP_PASS_SCOPE) {
+                EMIT_INLINE_ASM_ARG(op, op, n_args, pn_arg);
+            }
+        }
+
+        if (comp->compile_error != MP_OBJ_NULL) {
+            pns = pns2; // this is the parse node that had the error
+            goto inline_asm_error;
+        }
+    }
+
+    if (comp->pass > MP_PASS_SCOPE) {
+        EMIT_INLINE_ASM_ARG(end_pass, type_sig);
+    }
+
+    if (comp->compile_error != MP_OBJ_NULL) {
+        // inline assembler had an error; set line for its exception
+    inline_asm_error:
+        comp->compile_error_line = pns->source_line;
+    }
+}
+#endif
+
+// Post-process a scope after the SCOPE pass: reorder the *x parameter,
+// promote implicit globals to explicit, and assign local_num slots to
+// locals, cell vars and free vars (frees are numbered to match the parent
+// scope's order and placed before all other locals).
+STATIC void scope_compute_things(scope_t *scope) {
+    // in Micro Python we put the *x parameter after all other parameters (except **y)
+    if (scope->scope_flags & MP_SCOPE_FLAG_VARARGS) {
+        id_info_t *id_param = NULL;
+        for (int i = scope->id_info_len - 1; i >= 0; i--) {
+            id_info_t *id = &scope->id_info[i];
+            if (id->flags & ID_FLAG_IS_STAR_PARAM) {
+                if (id_param != NULL) {
+                    // swap star param with last param
+                    id_info_t temp = *id_param; *id_param = *id; *id = temp;
+                }
+                break;
+            } else if (id_param == NULL && id->flags == ID_FLAG_IS_PARAM) {
+                // note: == (not &) so only a plain param with no other flags qualifies
+                id_param = id;
+            }
+        }
+    }
+
+    // in functions, turn implicit globals into explicit globals
+    // compute the index of each local
+    scope->num_locals = 0;
+    for (int i = 0; i < scope->id_info_len; i++) {
+        id_info_t *id = &scope->id_info[i];
+        if (scope->kind == SCOPE_CLASS && id->qst == MP_QSTR___class__) {
+            // __class__ is not counted as a local; if it's used then it becomes a ID_INFO_KIND_CELL
+            continue;
+        }
+        if (scope->kind >= SCOPE_FUNCTION && scope->kind <= SCOPE_GEN_EXPR && id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) {
+            id->kind = ID_INFO_KIND_GLOBAL_EXPLICIT;
+        }
+        // params always count for 1 local, even if they are a cell
+        if (id->kind == ID_INFO_KIND_LOCAL || (id->flags & ID_FLAG_IS_PARAM)) {
+            id->local_num = scope->num_locals++;
+        }
+    }
+
+    // compute the index of cell vars
+    for (int i = 0; i < scope->id_info_len; i++) {
+        id_info_t *id = &scope->id_info[i];
+        // in Micro Python the cells come right after the fast locals
+        // parameters are not counted here, since they remain at the start
+        // of the locals, even if they are cell vars
+        if (id->kind == ID_INFO_KIND_CELL && !(id->flags & ID_FLAG_IS_PARAM)) {
+            id->local_num = scope->num_locals;
+            scope->num_locals += 1;
+        }
+    }
+
+    // compute the index of free vars
+    // make sure they are in the order of the parent scope
+    if (scope->parent != NULL) {
+        int num_free = 0;
+        for (int i = 0; i < scope->parent->id_info_len; i++) {
+            id_info_t *id = &scope->parent->id_info[i];
+            if (id->kind == ID_INFO_KIND_CELL || id->kind == ID_INFO_KIND_FREE) {
+                for (int j = 0; j < scope->id_info_len; j++) {
+                    id_info_t *id2 = &scope->id_info[j];
+                    if (id2->kind == ID_INFO_KIND_FREE && id->qst == id2->qst) {
+                        assert(!(id2->flags & ID_FLAG_IS_PARAM)); // free vars should not be params
+                        // in Micro Python the frees come first, before the params
+                        id2->local_num = num_free;
+                        num_free += 1;
+                    }
+                }
+            }
+        }
+        // in Micro Python shift all other locals after the free locals
+        if (num_free > 0) {
+            for (int i = 0; i < scope->id_info_len; i++) {
+                id_info_t *id = &scope->id_info[i];
+                if (id->kind != ID_INFO_KIND_FREE || (id->flags & ID_FLAG_IS_PARAM)) {
+                    id->local_num += num_free;
+                }
+            }
+            scope->num_pos_args += num_free; // free vars are counted as params for passing them into the function
+            scope->num_locals += num_free;
+        }
+    }
+}
+
+#if !MICROPY_PERSISTENT_CODE_SAVE
+STATIC
+#endif
+// Compile a whole parse tree to raw code.  First runs MP_PASS_SCOPE over all
+// scopes (collecting the max label count), computes id/local numbering per
+// scope, then runs the stack-size/code-size/emit passes with the emitter
+// chosen by each scope's emit_options.  Frees the emitters, the parse tree
+// and the scopes before returning; raises the compile error (if any) via nlr.
+mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_file, uint emit_opt, bool is_repl) {
+    // put compiler state on the stack, it's relatively small
+    compiler_t comp_state = {0};
+    compiler_t *comp = &comp_state;
+
+    comp->source_file = source_file;
+    comp->is_repl = is_repl;
+
+    // create the module scope
+    scope_t *module_scope = scope_new_and_link(comp, SCOPE_MODULE, parse_tree->root, emit_opt);
+
+    // create standard emitter; it's used at least for MP_PASS_SCOPE
+    emit_t *emit_bc = emit_bc_new();
+
+    // compile pass 1
+    comp->emit = emit_bc;
+    #if MICROPY_EMIT_NATIVE
+    comp->emit_method_table = &emit_bc_method_table;
+    #endif
+    uint max_num_labels = 0;
+    for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) {
+        if (false) {
+#if MICROPY_EMIT_INLINE_THUMB
+        } else if (s->emit_options == MP_EMIT_OPT_ASM_THUMB) {
+            compile_scope_inline_asm(comp, s, MP_PASS_SCOPE);
+#endif
+        } else {
+            compile_scope(comp, s, MP_PASS_SCOPE);
+        }
+
+        // update maximum number of labels needed
+        if (comp->next_label > max_num_labels) {
+            max_num_labels = comp->next_label;
+        }
+    }
+
+    // compute some things related to scope and identifiers
+    for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) {
+        scope_compute_things(s);
+    }
+
+    // set max number of labels now that it's calculated
+    emit_bc_set_max_num_labels(emit_bc, max_num_labels);
+
+    // compile pass 2 and 3
+#if MICROPY_EMIT_NATIVE
+    emit_t *emit_native = NULL;
+#endif
+#if MICROPY_EMIT_INLINE_THUMB
+    emit_inline_asm_t *emit_inline_thumb = NULL;
+#endif
+    for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) {
+        if (false) {
+            // dummy
+
+#if MICROPY_EMIT_INLINE_THUMB
+        } else if (s->emit_options == MP_EMIT_OPT_ASM_THUMB) {
+            // inline assembly for thumb
+            // emitter is created lazily, only if some scope actually needs it
+            if (emit_inline_thumb == NULL) {
+                emit_inline_thumb = emit_inline_thumb_new(max_num_labels);
+            }
+            comp->emit = NULL;
+            comp->emit_inline_asm = emit_inline_thumb;
+            comp->emit_inline_asm_method_table = &emit_inline_thumb_method_table;
+            compile_scope_inline_asm(comp, s, MP_PASS_CODE_SIZE);
+            if (comp->compile_error == MP_OBJ_NULL) {
+                compile_scope_inline_asm(comp, s, MP_PASS_EMIT);
+            }
+#endif
+
+        } else {
+
+            // choose the emit type
+
+            switch (s->emit_options) {
+
+#if MICROPY_EMIT_NATIVE
+                case MP_EMIT_OPT_NATIVE_PYTHON:
+                case MP_EMIT_OPT_VIPER:
+#if MICROPY_EMIT_X64
+                    if (emit_native == NULL) {
+                        emit_native = emit_native_x64_new(&comp->compile_error, max_num_labels);
+                    }
+                    comp->emit_method_table = &emit_native_x64_method_table;
+#elif MICROPY_EMIT_X86
+                    if (emit_native == NULL) {
+                        emit_native = emit_native_x86_new(&comp->compile_error, max_num_labels);
+                    }
+                    comp->emit_method_table = &emit_native_x86_method_table;
+#elif MICROPY_EMIT_THUMB
+                    if (emit_native == NULL) {
+                        emit_native = emit_native_thumb_new(&comp->compile_error, max_num_labels);
+                    }
+                    comp->emit_method_table = &emit_native_thumb_method_table;
+#elif MICROPY_EMIT_ARM
+                    if (emit_native == NULL) {
+                        emit_native = emit_native_arm_new(&comp->compile_error, max_num_labels);
+                    }
+                    comp->emit_method_table = &emit_native_arm_method_table;
+#endif
+                    comp->emit = emit_native;
+                    EMIT_ARG(set_native_type, MP_EMIT_NATIVE_TYPE_ENABLE, s->emit_options == MP_EMIT_OPT_VIPER, 0);
+                    break;
+#endif // MICROPY_EMIT_NATIVE
+
+                default:
+                    comp->emit = emit_bc;
+                    #if MICROPY_EMIT_NATIVE
+                    comp->emit_method_table = &emit_bc_method_table;
+                    #endif
+                    break;
+            }
+
+            // need a pass to compute stack size
+            compile_scope(comp, s, MP_PASS_STACK_SIZE);
+
+            // second last pass: compute code size
+            if (comp->compile_error == MP_OBJ_NULL) {
+                compile_scope(comp, s, MP_PASS_CODE_SIZE);
+            }
+
+            // final pass: emit code
+            if (comp->compile_error == MP_OBJ_NULL) {
+                compile_scope(comp, s, MP_PASS_EMIT);
+            }
+        }
+    }
+
+    if (comp->compile_error != MP_OBJ_NULL) {
+        // if there is no line number for the error then use the line
+        // number for the start of this scope
+        compile_error_set_line(comp, comp->scope_cur->pn);
+        // add a traceback to the exception using relevant source info
+        mp_obj_exception_add_traceback(comp->compile_error, comp->source_file,
+            comp->compile_error_line, comp->scope_cur->simple_name);
+    }
+
+    // free the emitters
+
+    emit_bc_free(emit_bc);
+#if MICROPY_EMIT_NATIVE
+    if (emit_native != NULL) {
+#if MICROPY_EMIT_X64
+        emit_native_x64_free(emit_native);
+#elif MICROPY_EMIT_X86
+        emit_native_x86_free(emit_native);
+#elif MICROPY_EMIT_THUMB
+        emit_native_thumb_free(emit_native);
+#elif MICROPY_EMIT_ARM
+        emit_native_arm_free(emit_native);
+#endif
+    }
+#endif
+#if MICROPY_EMIT_INLINE_THUMB
+    if (emit_inline_thumb != NULL) {
+        emit_inline_thumb_free(emit_inline_thumb);
+    }
+#endif
+
+    // free the parse tree
+    mp_parse_tree_clear(parse_tree);
+
+    // free the scopes
+    // (the raw code of the module scope is the result, and survives)
+    mp_raw_code_t *outer_raw_code = module_scope->raw_code;
+    for (scope_t *s = module_scope; s;) {
+        scope_t *next = s->next;
+        scope_free(s);
+        s = next;
+    }
+
+    if (comp->compile_error != MP_OBJ_NULL) {
+        nlr_raise(comp->compile_error);
+    } else {
+        return outer_raw_code;
+    }
+}
+
+// Compile a parse tree and wrap the resulting raw code of the module scope
+// in a callable function object.  Raises on compile error (via
+// mp_compile_to_raw_code); the parse tree is consumed either way.
+mp_obj_t mp_compile(mp_parse_tree_t *parse_tree, qstr source_file, uint emit_opt, bool is_repl) {
+    mp_raw_code_t *rc = mp_compile_to_raw_code(parse_tree, source_file, emit_opt, is_repl);
+    // return function that executes the outer module
+    return mp_make_function_from_raw_code(rc, MP_OBJ_NULL, MP_OBJ_NULL);
+}
+
+#endif // MICROPY_ENABLE_COMPILER
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/compile.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,54 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_COMPILE_H__
+#define __MICROPY_INCLUDED_PY_COMPILE_H__
+
+#include "py/lexer.h"
+#include "py/parse.h"
+#include "py/emitglue.h"
+
+// These must fit in 8 bits; see scope.h
+// Per-scope code-emitter options (scope_t::emit_options).
+enum {
+    MP_EMIT_OPT_NONE,           // default emitter
+    MP_EMIT_OPT_BYTECODE,       // force bytecode emitter
+    MP_EMIT_OPT_NATIVE_PYTHON,  // native machine-code emitter (untyped)
+    MP_EMIT_OPT_VIPER,          // native emitter with type annotations (viper)
+    MP_EMIT_OPT_ASM_THUMB,      // body is inline thumb assembler
+};
+
+// the compiler will raise an exception if an error occurred
+// the compiler will clear the parse tree before it returns
+mp_obj_t mp_compile(mp_parse_tree_t *parse_tree, qstr source_file, uint emit_opt, bool is_repl);
+
+#if MICROPY_PERSISTENT_CODE_SAVE
+// this has the same semantics as mp_compile
+mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_file, uint emit_opt, bool is_repl);
+#endif
+
+// this is implemented in runtime.c
+mp_obj_t mp_parse_compile_execute(mp_lexer_t *lex, mp_parse_input_kind_t parse_input_kind, mp_obj_dict_t *globals, mp_obj_dict_t *locals);
+
+#endif // __MICROPY_INCLUDED_PY_COMPILE_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/emit.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,290 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef __MICROPY_INCLUDED_PY_EMIT_H__
+#define __MICROPY_INCLUDED_PY_EMIT_H__
+
+#include "py/lexer.h"
+#include "py/scope.h"
+#include "py/runtime0.h"
+
+/* Notes on passes:
+ * We don't know exactly the opcodes in pass 1 because they depend on the
+ * closing over of variables (LOAD_CLOSURE, BUILD_TUPLE, MAKE_CLOSURE), which
+ * depends on determining the scope of variables in each function, and this
+ * is not known until the end of pass 1.
+ * As a consequence, we don't know the maximum stack size until the end of pass 2.
+ * This is problematic for some emitters (x64) since they need to know the maximum
+ * stack size to compile the entry to the function, and this affects code size.
+ */
+
+// Compiler passes; each scope is run through these in increasing order.
+typedef enum {
+    MP_PASS_SCOPE = 1, // work out id's and their kind, and number of labels
+    MP_PASS_STACK_SIZE = 2, // work out maximum stack size
+    MP_PASS_CODE_SIZE = 3, // work out code size and label offsets
+    MP_PASS_EMIT = 4, // emit code
+} pass_kind_t;
+
+// flags describing star-arguments in a call (f(*a) / f(**k))
+// NOTE(review): semantics inferred from names -- confirm against emitbc
+#define MP_EMIT_STAR_FLAG_SINGLE (0x01)
+#define MP_EMIT_STAR_FLAG_DOUBLE (0x02)
+
+// flag ORed into the unwind-jump argument for break/continue out of a for
+// NOTE(review): usage not visible here -- confirm against compile.c/emitbc
+#define MP_EMIT_BREAK_FROM_FOR (0x8000)
+
+// op argument for the set_native_type() emit method (see compile.c usage)
+#define MP_EMIT_NATIVE_TYPE_ENABLE (0)
+#define MP_EMIT_NATIVE_TYPE_RETURN (1)
+#define MP_EMIT_NATIVE_TYPE_ARG (2)
+
+typedef struct _emit_t emit_t;
+
+// Sub-table of emitter methods for one identifier operation (load, store or
+// delete), with one entry per kind of identifier access.
+typedef struct _mp_emit_method_table_id_ops_t {
+    void (*fast)(emit_t *emit, qstr qst, mp_uint_t local_num);   // numbered fast local
+    void (*deref)(emit_t *emit, qstr qst, mp_uint_t local_num);  // cell/free variable
+    void (*name)(emit_t *emit, qstr qst);                        // by-name lookup
+    void (*global)(emit_t *emit, qstr qst);                      // explicit global
+} mp_emit_method_table_id_ops_t;
+
+// Virtual method table implemented by each code emitter (bytecode, native,
+// cpython).  The compiler drives all code generation through these function
+// pointers (the EMIT/EMIT_ARG macros in compile.c dispatch into this table).
+typedef struct _emit_method_table_t {
+    void (*set_native_type)(emit_t *emit, mp_uint_t op, mp_uint_t arg1, qstr arg2);
+    void (*start_pass)(emit_t *emit, pass_kind_t pass, scope_t *scope);
+    void (*end_pass)(emit_t *emit);
+    bool (*last_emit_was_return_value)(emit_t *emit);
+    void (*adjust_stack_size)(emit_t *emit, mp_int_t delta);
+    void (*set_source_line)(emit_t *emit, mp_uint_t line);
+
+    // identifier access, split by operation then by access kind
+    mp_emit_method_table_id_ops_t load_id;
+    mp_emit_method_table_id_ops_t store_id;
+    mp_emit_method_table_id_ops_t delete_id;
+
+    void (*label_assign)(emit_t *emit, mp_uint_t l);
+    void (*import_name)(emit_t *emit, qstr qst);
+    void (*import_from)(emit_t *emit, qstr qst);
+    void (*import_star)(emit_t *emit);
+    void (*load_const_tok)(emit_t *emit, mp_token_kind_t tok);
+    void (*load_const_small_int)(emit_t *emit, mp_int_t arg);
+    void (*load_const_str)(emit_t *emit, qstr qst);
+    void (*load_const_obj)(emit_t *emit, mp_obj_t obj);
+    void (*load_null)(emit_t *emit);
+    void (*load_attr)(emit_t *emit, qstr qst);
+    void (*load_method)(emit_t *emit, qstr qst);
+    void (*load_build_class)(emit_t *emit);
+    void (*load_subscr)(emit_t *emit);
+    void (*store_attr)(emit_t *emit, qstr qst);
+    void (*store_subscr)(emit_t *emit);
+    void (*delete_attr)(emit_t *emit, qstr qst);
+    void (*delete_subscr)(emit_t *emit);
+    void (*dup_top)(emit_t *emit);
+    void (*dup_top_two)(emit_t *emit);
+    void (*pop_top)(emit_t *emit);
+    void (*rot_two)(emit_t *emit);
+    void (*rot_three)(emit_t *emit);
+    void (*jump)(emit_t *emit, mp_uint_t label);
+    void (*pop_jump_if)(emit_t *emit, bool cond, mp_uint_t label);
+    void (*jump_if_or_pop)(emit_t *emit, bool cond, mp_uint_t label);
+    void (*break_loop)(emit_t *emit, mp_uint_t label, mp_uint_t except_depth);
+    void (*continue_loop)(emit_t *emit, mp_uint_t label, mp_uint_t except_depth);
+    void (*setup_with)(emit_t *emit, mp_uint_t label);
+    void (*with_cleanup)(emit_t *emit, mp_uint_t label);
+    void (*setup_except)(emit_t *emit, mp_uint_t label);
+    void (*setup_finally)(emit_t *emit, mp_uint_t label);
+    void (*end_finally)(emit_t *emit);
+    void (*get_iter)(emit_t *emit);
+    void (*for_iter)(emit_t *emit, mp_uint_t label);
+    void (*for_iter_end)(emit_t *emit);
+    void (*pop_block)(emit_t *emit);
+    void (*pop_except)(emit_t *emit);
+    void (*unary_op)(emit_t *emit, mp_unary_op_t op);
+    void (*binary_op)(emit_t *emit, mp_binary_op_t op);
+    void (*build_tuple)(emit_t *emit, mp_uint_t n_args);
+    void (*build_list)(emit_t *emit, mp_uint_t n_args);
+    void (*list_append)(emit_t *emit, mp_uint_t list_stack_index);
+    void (*build_map)(emit_t *emit, mp_uint_t n_args);
+    void (*store_map)(emit_t *emit);
+    void (*map_add)(emit_t *emit, mp_uint_t map_stack_index);
+    #if MICROPY_PY_BUILTINS_SET
+    void (*build_set)(emit_t *emit, mp_uint_t n_args);
+    void (*set_add)(emit_t *emit, mp_uint_t set_stack_index);
+    #endif
+    #if MICROPY_PY_BUILTINS_SLICE
+    void (*build_slice)(emit_t *emit, mp_uint_t n_args);
+    #endif
+    void (*unpack_sequence)(emit_t *emit, mp_uint_t n_args);
+    void (*unpack_ex)(emit_t *emit, mp_uint_t n_left, mp_uint_t n_right);
+    void (*make_function)(emit_t *emit, scope_t *scope, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults);
+    void (*make_closure)(emit_t *emit, scope_t *scope, mp_uint_t n_closed_over, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults);
+    void (*call_function)(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags);
+    void (*call_method)(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags);
+    void (*return_value)(emit_t *emit);
+    void (*raise_varargs)(emit_t *emit, mp_uint_t n_args);
+    void (*yield_value)(emit_t *emit);
+    void (*yield_from)(emit_t *emit);
+
+    // these methods are used to control entry to/exit from an exception handler
+    // they may or may not emit code
+    void (*start_except_handler)(emit_t *emit);
+    void (*end_except_handler)(emit_t *emit);
+} emit_method_table_t;
+
+void mp_emit_common_get_id_for_load(scope_t *scope, qstr qst);
+void mp_emit_common_get_id_for_modification(scope_t *scope, qstr qst);
+void mp_emit_common_id_op(emit_t *emit, const mp_emit_method_table_id_ops_t *emit_method_table, scope_t *scope, qstr qst);
+
+extern const emit_method_table_t emit_cpython_method_table;
+extern const emit_method_table_t emit_bc_method_table;
+extern const emit_method_table_t emit_native_x64_method_table;
+extern const emit_method_table_t emit_native_x86_method_table;
+extern const emit_method_table_t emit_native_thumb_method_table;
+extern const emit_method_table_t emit_native_arm_method_table;
+
+extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_load_id_ops;
+extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_store_id_ops;
+extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_delete_id_ops;
+
+emit_t *emit_cpython_new(void);
+emit_t *emit_bc_new(void);
+emit_t *emit_native_x64_new(mp_obj_t *error_slot, mp_uint_t max_num_labels);
+emit_t *emit_native_x86_new(mp_obj_t *error_slot, mp_uint_t max_num_labels);
+emit_t *emit_native_thumb_new(mp_obj_t *error_slot, mp_uint_t max_num_labels);
+emit_t *emit_native_arm_new(mp_obj_t *error_slot, mp_uint_t max_num_labels);
+
+void emit_cpython_set_max_num_labels(emit_t* emit, mp_uint_t max_num_labels);
+void emit_bc_set_max_num_labels(emit_t* emit, mp_uint_t max_num_labels);
+
+void emit_cpython_free(emit_t *emit);
+void emit_bc_free(emit_t *emit);
+void emit_native_x64_free(emit_t *emit);
+void emit_native_x86_free(emit_t *emit);
+void emit_native_thumb_free(emit_t *emit);
+void emit_native_arm_free(emit_t *emit);
+
+void mp_emit_bc_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope);
+void mp_emit_bc_end_pass(emit_t *emit);
+bool mp_emit_bc_last_emit_was_return_value(emit_t *emit);
+void mp_emit_bc_adjust_stack_size(emit_t *emit, mp_int_t delta);
+void mp_emit_bc_set_source_line(emit_t *emit, mp_uint_t line);
+
+void mp_emit_bc_load_fast(emit_t *emit, qstr qst, mp_uint_t local_num);
+void mp_emit_bc_load_deref(emit_t *emit, qstr qst, mp_uint_t local_num);
+void mp_emit_bc_load_name(emit_t *emit, qstr qst);
+void mp_emit_bc_load_global(emit_t *emit, qstr qst);
+void mp_emit_bc_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num);
+void mp_emit_bc_store_deref(emit_t *emit, qstr qst, mp_uint_t local_num);
+void mp_emit_bc_store_name(emit_t *emit, qstr qst);
+void mp_emit_bc_store_global(emit_t *emit, qstr qst);
+void mp_emit_bc_delete_fast(emit_t *emit, qstr qst, mp_uint_t local_num);
+void mp_emit_bc_delete_deref(emit_t *emit, qstr qst, mp_uint_t local_num);
+void mp_emit_bc_delete_name(emit_t *emit, qstr qst);
+void mp_emit_bc_delete_global(emit_t *emit, qstr qst);
+
+void mp_emit_bc_label_assign(emit_t *emit, mp_uint_t l);
+void mp_emit_bc_import_name(emit_t *emit, qstr qst);
+void mp_emit_bc_import_from(emit_t *emit, qstr qst);
+void mp_emit_bc_import_star(emit_t *emit);
+void mp_emit_bc_load_const_tok(emit_t *emit, mp_token_kind_t tok);
+void mp_emit_bc_load_const_small_int(emit_t *emit, mp_int_t arg);
+void mp_emit_bc_load_const_str(emit_t *emit, qstr qst);
+void mp_emit_bc_load_const_obj(emit_t *emit, mp_obj_t obj);
+void mp_emit_bc_load_null(emit_t *emit);
+void mp_emit_bc_load_attr(emit_t *emit, qstr qst);
+void mp_emit_bc_load_method(emit_t *emit, qstr qst);
+void mp_emit_bc_load_build_class(emit_t *emit);
+void mp_emit_bc_load_subscr(emit_t *emit);
+void mp_emit_bc_store_attr(emit_t *emit, qstr qst);
+void mp_emit_bc_store_subscr(emit_t *emit);
+void mp_emit_bc_delete_attr(emit_t *emit, qstr qst);
+void mp_emit_bc_delete_subscr(emit_t *emit);
+void mp_emit_bc_dup_top(emit_t *emit);
+void mp_emit_bc_dup_top_two(emit_t *emit);
+void mp_emit_bc_pop_top(emit_t *emit);
+void mp_emit_bc_rot_two(emit_t *emit);
+void mp_emit_bc_rot_three(emit_t *emit);
+void mp_emit_bc_jump(emit_t *emit, mp_uint_t label);
+void mp_emit_bc_pop_jump_if(emit_t *emit, bool cond, mp_uint_t label);
+void mp_emit_bc_jump_if_or_pop(emit_t *emit, bool cond, mp_uint_t label);
+void mp_emit_bc_unwind_jump(emit_t *emit, mp_uint_t label, mp_uint_t except_depth);
+#define mp_emit_bc_break_loop mp_emit_bc_unwind_jump
+#define mp_emit_bc_continue_loop mp_emit_bc_unwind_jump
+void mp_emit_bc_setup_with(emit_t *emit, mp_uint_t label);
+void mp_emit_bc_with_cleanup(emit_t *emit, mp_uint_t label);
+void mp_emit_bc_setup_except(emit_t *emit, mp_uint_t label);
+void mp_emit_bc_setup_finally(emit_t *emit, mp_uint_t label);
+void mp_emit_bc_end_finally(emit_t *emit);
+void mp_emit_bc_get_iter(emit_t *emit);
+void mp_emit_bc_for_iter(emit_t *emit, mp_uint_t label);
+void mp_emit_bc_for_iter_end(emit_t *emit);
+void mp_emit_bc_pop_block(emit_t *emit);
+void mp_emit_bc_pop_except(emit_t *emit);
+void mp_emit_bc_unary_op(emit_t *emit, mp_unary_op_t op);
+void mp_emit_bc_binary_op(emit_t *emit, mp_binary_op_t op);
+void mp_emit_bc_build_tuple(emit_t *emit, mp_uint_t n_args);
+void mp_emit_bc_build_list(emit_t *emit, mp_uint_t n_args);
+void mp_emit_bc_list_append(emit_t *emit, mp_uint_t list_stack_index);
+void mp_emit_bc_build_map(emit_t *emit, mp_uint_t n_args);
+void mp_emit_bc_store_map(emit_t *emit);
+void mp_emit_bc_map_add(emit_t *emit, mp_uint_t map_stack_index);
+#if MICROPY_PY_BUILTINS_SET
+void mp_emit_bc_build_set(emit_t *emit, mp_uint_t n_args);
+void mp_emit_bc_set_add(emit_t *emit, mp_uint_t set_stack_index);
+#endif
+#if MICROPY_PY_BUILTINS_SLICE
+void mp_emit_bc_build_slice(emit_t *emit, mp_uint_t n_args);
+#endif
+void mp_emit_bc_unpack_sequence(emit_t *emit, mp_uint_t n_args);
+void mp_emit_bc_unpack_ex(emit_t *emit, mp_uint_t n_left, mp_uint_t n_right);
+void mp_emit_bc_make_function(emit_t *emit, scope_t *scope, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults);
+void mp_emit_bc_make_closure(emit_t *emit, scope_t *scope, mp_uint_t n_closed_over, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults);
+void mp_emit_bc_call_function(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags);
+void mp_emit_bc_call_method(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags);
+void mp_emit_bc_return_value(emit_t *emit);
+void mp_emit_bc_raise_varargs(emit_t *emit, mp_uint_t n_args);
+void mp_emit_bc_yield_value(emit_t *emit);
+void mp_emit_bc_yield_from(emit_t *emit);
+void mp_emit_bc_start_except_handler(emit_t *emit);
+void mp_emit_bc_end_except_handler(emit_t *emit);
+
+typedef struct _emit_inline_asm_t emit_inline_asm_t;
+
+// Method table implemented by each inline-assembler emitter.
+typedef struct _emit_inline_asm_method_table_t {
+    void (*start_pass)(emit_inline_asm_t *emit, pass_kind_t pass, scope_t *scope, mp_obj_t *error_slot); // begin one compiler pass; errors reported via error_slot
+    void (*end_pass)(emit_inline_asm_t *emit, mp_uint_t type_sig); // finish the pass; type_sig presumably encodes arg/return types -- confirm with emitter impl
+    mp_uint_t (*count_params)(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params); // validate and count declared parameters
+    bool (*label)(emit_inline_asm_t *emit, mp_uint_t label_num, qstr label_id); // define a label; returns success
+    void (*align)(emit_inline_asm_t *emit, mp_uint_t align); // handle an "align" directive
+    void (*data)(emit_inline_asm_t *emit, mp_uint_t bytesize, mp_uint_t val); // handle a "data" directive
+    void (*op)(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args); // assemble one instruction
+} emit_inline_asm_method_table_t;
+
+extern const emit_inline_asm_method_table_t emit_inline_thumb_method_table;
+
+emit_inline_asm_t *emit_inline_thumb_new(mp_uint_t max_num_labels);
+void emit_inline_thumb_free(emit_inline_asm_t *emit);
+
+#if MICROPY_WARNINGS
+void mp_emitter_warning(pass_kind_t pass, const char *msg);
+#else
+#define mp_emitter_warning(pass, msg)
+#endif
+
+#endif // __MICROPY_INCLUDED_PY_EMIT_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/emitbc.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,1079 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/mpstate.h"
+#include "py/emit.h"
+#include "py/bc0.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+#define BYTES_FOR_INT ((BYTES_PER_WORD * 8 + 6) / 7)
+#define DUMMY_DATA_SIZE (BYTES_FOR_INT)
+
+// State for the bytecode emitter. The same emit_t is reused across the
+// compiler's multiple passes (scope / stack-size / code-size / emit).
+struct _emit_t {
+    // Accessed as mp_obj_t, so must be aligned as such, and we rely on the
+    // memory allocator returning a suitably aligned pointer.
+    // Should work for cases when mp_obj_t is 64-bit on a 32-bit machine.
+    byte dummy_data[DUMMY_DATA_SIZE]; // scratch target for writes during sizing passes
+
+    pass_kind_t pass : 8; // current compiler pass
+    mp_uint_t last_emit_was_return_value : 8; // true right after RETURN_VALUE was emitted
+
+    int stack_size; // current modelled Python stack depth
+
+    scope_t *scope; // scope being compiled
+
+    mp_uint_t last_source_line_offset; // bytecode offset of last line-number entry
+    mp_uint_t last_source_line; // last source line recorded in the line-number map
+
+    mp_uint_t max_num_labels; // size of label_offsets
+    mp_uint_t *label_offsets; // bytecode offset per label; (mp_uint_t)-1 = unassigned
+
+    size_t code_info_offset; // current write offset within the code-info section
+    size_t code_info_size; // total code-info size (known after MP_PASS_CODE_SIZE)
+    size_t bytecode_offset; // current write offset within the bytecode section
+    size_t bytecode_size; // total bytecode size (known after MP_PASS_CODE_SIZE)
+    byte *code_base; // stores both byte code and code info
+
+    #if MICROPY_PERSISTENT_CODE
+    uint16_t ct_cur_obj; // const-table index for next constant object
+    uint16_t ct_num_obj; // total constant objects (fixed after code-size pass)
+    uint16_t ct_cur_raw_code; // const-table index for next raw code entry
+    #endif
+    mp_uint_t *const_table; // arg names, then const objects / raw codes
+};
+
+// Allocate a fresh, zero-initialised bytecode emitter.
+emit_t *emit_bc_new(void) {
+    return m_new0(emit_t, 1);
+}
+
+// Record the label count and allocate the per-label offset table.
+void emit_bc_set_max_num_labels(emit_t *emit, mp_uint_t max_num_labels) {
+    mp_uint_t *offsets = m_new(mp_uint_t, max_num_labels);
+    emit->max_num_labels = max_num_labels;
+    emit->label_offsets = offsets;
+}
+
+// Free the label-offset table, then the emitter itself.
+void emit_bc_free(emit_t *emit) {
+    m_del(mp_uint_t, emit->label_offsets, emit->max_num_labels);
+    m_del_obj(emit_t, emit);
+}
+
+// Allocator callback: reserve nbytes in an output stream and return where to
+// write them (a dummy buffer during the sizing passes).
+typedef byte *(*emit_allocator_t)(emit_t *emit, int nbytes);
+
+// Write an unsigned integer as a variable-length quantity: 7 bits per byte,
+// most-significant group first, high bit set on every byte except the last.
+STATIC void emit_write_uint(emit_t *emit, emit_allocator_t allocator, mp_uint_t val) {
+    // We store each 7 bits in a separate byte, and that's how many bytes needed
+    byte buf[BYTES_FOR_INT];
+    byte *p = buf + sizeof(buf);
+    // We encode in little-endian order, but store in big-endian, to help decoding
+    do {
+        *--p = val & 0x7f;
+        val >>= 7;
+    } while (val != 0);
+    byte *c = allocator(emit, buf + sizeof(buf) - p);
+    while (p != buf + sizeof(buf) - 1) {
+        *c++ = *p++ | 0x80; // continuation bytes carry the top bit
+    }
+    *c = *p; // final byte: top bit clear
+}
+
+// all functions must go through this one to emit code info
+STATIC byte *emit_get_cur_to_write_code_info(emit_t *emit, int num_bytes_to_write) {
+ //printf("emit %d\n", num_bytes_to_write);
+ if (emit->pass < MP_PASS_EMIT) {
+ emit->code_info_offset += num_bytes_to_write;
+ return emit->dummy_data;
+ } else {
+ assert(emit->code_info_offset + num_bytes_to_write <= emit->code_info_size);
+ byte *c = emit->code_base + emit->code_info_offset;
+ emit->code_info_offset += num_bytes_to_write;
+ return c;
+ }
+}
+
+// Write a single byte into the code-info section.
+STATIC void emit_write_code_info_byte(emit_t* emit, byte val) {
+    *emit_get_cur_to_write_code_info(emit, 1) = val;
+}
+
+// Write a variable-length uint into the code-info section.
+STATIC void emit_write_code_info_uint(emit_t* emit, mp_uint_t val) {
+    emit_write_uint(emit, emit_get_cur_to_write_code_info, val);
+}
+
+// Write a qstr into the code-info section: a fixed 16-bit little-endian
+// value for persistent code, else a variable-length uint.
+STATIC void emit_write_code_info_qstr(emit_t *emit, qstr qst) {
+    #if MICROPY_PERSISTENT_CODE
+    assert((qst >> 16) == 0); // persistent format only holds 16-bit qstrs
+    byte *c = emit_get_cur_to_write_code_info(emit, 2);
+    c[0] = qst;
+    c[1] = qst >> 8;
+    #else
+    emit_write_uint(emit, emit_get_cur_to_write_code_info, qst);
+    #endif
+}
+
+#if MICROPY_ENABLE_SOURCE_LINE
+// Append entries to the line-number map: skip the given number of bytecode
+// bytes and source lines, splitting into multiple entries when the deltas
+// exceed the per-entry field widths.
+STATIC void emit_write_code_info_bytes_lines(emit_t *emit, mp_uint_t bytes_to_skip, mp_uint_t lines_to_skip) {
+    assert(bytes_to_skip > 0 || lines_to_skip > 0);
+    //printf("  %d %d\n", bytes_to_skip, lines_to_skip);
+    while (bytes_to_skip > 0 || lines_to_skip > 0) {
+        mp_uint_t b, l;
+        if (lines_to_skip <= 6) {
+            // use 0b0LLBBBBB encoding
+            b = MIN(bytes_to_skip, 0x1f);
+            l = MIN(lines_to_skip, 0x3);
+            *emit_get_cur_to_write_code_info(emit, 1) = b | (l << 5);
+        } else {
+            // use 0b1LLLBBBB 0bLLLLLLLL encoding (l's LSB in second byte)
+            b = MIN(bytes_to_skip, 0xf);
+            l = MIN(lines_to_skip, 0x7ff);
+            byte *ci = emit_get_cur_to_write_code_info(emit, 2);
+            ci[0] = 0x80 | b | ((l >> 4) & 0x70);
+            ci[1] = l;
+        }
+        bytes_to_skip -= b;
+        lines_to_skip -= l;
+    }
+}
+#endif
+
+// all functions must go through this one to emit byte code
+// all functions must go through this one to emit byte code
+// On sizing passes this only advances the offset and returns a dummy buffer;
+// on the emit pass it returns the real location (after the code-info section).
+STATIC byte *emit_get_cur_to_write_bytecode(emit_t *emit, int num_bytes_to_write) {
+    //printf("emit %d\n", num_bytes_to_write);
+    if (emit->pass < MP_PASS_EMIT) {
+        emit->bytecode_offset += num_bytes_to_write;
+        return emit->dummy_data;
+    } else {
+        assert(emit->bytecode_offset + num_bytes_to_write <= emit->bytecode_size);
+        byte *c = emit->code_base + emit->code_info_size + emit->bytecode_offset;
+        emit->bytecode_offset += num_bytes_to_write;
+        return c;
+    }
+}
+
+// Write a single bytecode byte.
+STATIC void emit_write_bytecode_byte(emit_t *emit, byte b1) {
+    *emit_get_cur_to_write_bytecode(emit, 1) = b1;
+}
+
+// Write an opcode byte followed by one literal byte argument.
+// (The previous assert((b2 & (~0xff)) == 0) was removed: b2 is already a
+// byte, so the condition was a tautology.)
+STATIC void emit_write_bytecode_byte_byte(emit_t* emit, byte b1, byte b2) {
+    byte *c = emit_get_cur_to_write_bytecode(emit, 2);
+    c[0] = b1;
+    c[1] = b2;
+}
+
+// Similar to emit_write_bytecode_uint(), just some extra handling to encode sign
+STATIC void emit_write_bytecode_byte_int(emit_t *emit, byte b1, mp_int_t num) {
+ emit_write_bytecode_byte(emit, b1);
+
+ // We store each 7 bits in a separate byte, and that's how many bytes needed
+ byte buf[BYTES_FOR_INT];
+ byte *p = buf + sizeof(buf);
+ // We encode in little-ending order, but store in big-endian, to help decoding
+ do {
+ *--p = num & 0x7f;
+ num >>= 7;
+ } while (num != 0 && num != -1);
+ // Make sure that highest bit we stored (mask 0x40) matches sign
+ // of the number. If not, store extra byte just to encode sign
+ if (num == -1 && (*p & 0x40) == 0) {
+ *--p = 0x7f;
+ } else if (num == 0 && (*p & 0x40) != 0) {
+ *--p = 0;
+ }
+
+ byte *c = emit_get_cur_to_write_bytecode(emit, buf + sizeof(buf) - p);
+ while (p != buf + sizeof(buf) - 1) {
+ *c++ = *p++ | 0x80;
+ }
+ *c = *p;
+}
+
+// Write an opcode byte followed by a variable-length uint argument.
+STATIC void emit_write_bytecode_byte_uint(emit_t *emit, byte b, mp_uint_t val) {
+    emit_write_bytecode_byte(emit, b);
+    emit_write_uint(emit, emit_get_cur_to_write_bytecode, val);
+}
+
+#if MICROPY_PERSISTENT_CODE
+// Write opcode b whose argument is const-table index n; on the emit pass the
+// table slot itself is filled with value c.
+STATIC void emit_write_bytecode_byte_const(emit_t *emit, byte b, mp_uint_t n, mp_uint_t c) {
+    if (emit->pass == MP_PASS_EMIT) {
+        emit->const_table[n] = c;
+    }
+    emit_write_bytecode_byte_uint(emit, b, n);
+}
+#endif
+
+// Write an opcode byte followed by a qstr argument: fixed 16-bit
+// little-endian for persistent code, else a variable-length uint.
+STATIC void emit_write_bytecode_byte_qstr(emit_t* emit, byte b, qstr qst) {
+    #if MICROPY_PERSISTENT_CODE
+    assert((qst >> 16) == 0); // persistent format only holds 16-bit qstrs
+    byte *c = emit_get_cur_to_write_bytecode(emit, 3);
+    c[0] = b;
+    c[1] = qst;
+    c[2] = qst >> 8;
+    #else
+    emit_write_bytecode_byte_uint(emit, b, qst);
+    #endif
+}
+
+// Write an opcode that references a constant object: via the const table
+// for persistent code, otherwise stored inline (pointer-aligned for the GC).
+STATIC void emit_write_bytecode_byte_obj(emit_t *emit, byte b, mp_obj_t obj) {
+    #if MICROPY_PERSISTENT_CODE
+    // const-table layout: arg-name qstrs first, then constant objects
+    emit_write_bytecode_byte_const(emit, b,
+        emit->scope->num_pos_args + emit->scope->num_kwonly_args
+        + emit->ct_cur_obj++, (mp_uint_t)obj);
+    #else
+    // aligns the pointer so it is friendly to GC
+    emit_write_bytecode_byte(emit, b);
+    emit->bytecode_offset = (size_t)MP_ALIGN(emit->bytecode_offset, sizeof(mp_obj_t));
+    mp_obj_t *c = (mp_obj_t*)emit_get_cur_to_write_bytecode(emit, sizeof(mp_obj_t));
+    // Verify that c is already uint-aligned
+    assert(c == MP_ALIGN(c, sizeof(mp_obj_t)));
+    *c = obj;
+    #endif
+}
+
+// Write an opcode that references a child raw-code object: via the const
+// table for persistent code (after arg names and const objects), otherwise
+// stored inline (pointer-aligned for the GC).
+STATIC void emit_write_bytecode_byte_raw_code(emit_t *emit, byte b, mp_raw_code_t *rc) {
+    #if MICROPY_PERSISTENT_CODE
+    emit_write_bytecode_byte_const(emit, b,
+        emit->scope->num_pos_args + emit->scope->num_kwonly_args
+        + emit->ct_num_obj + emit->ct_cur_raw_code++, (mp_uint_t)(uintptr_t)rc);
+    #else
+    // aligns the pointer so it is friendly to GC
+    emit_write_bytecode_byte(emit, b);
+    emit->bytecode_offset = (size_t)MP_ALIGN(emit->bytecode_offset, sizeof(void*));
+    void **c = (void**)emit_get_cur_to_write_bytecode(emit, sizeof(void*));
+    // Verify that c is already uint-aligned
+    assert(c == MP_ALIGN(c, sizeof(void*)));
+    *c = rc;
+    #endif
+}
+
+// unsigned labels are relative to ip following this instruction, stored as 16 bits
+STATIC void emit_write_bytecode_byte_unsigned_label(emit_t *emit, byte b1, mp_uint_t label) {
+    mp_uint_t bytecode_offset;
+    if (emit->pass < MP_PASS_EMIT) {
+        // label offsets aren't known yet; write a placeholder of the same size
+        bytecode_offset = 0;
+    } else {
+        // -3 accounts for the 3 bytes of this instruction itself
+        bytecode_offset = emit->label_offsets[label] - emit->bytecode_offset - 3;
+    }
+    byte *c = emit_get_cur_to_write_bytecode(emit, 3);
+    c[0] = b1;
+    c[1] = bytecode_offset;
+    c[2] = bytecode_offset >> 8;
+}
+
+// signed labels are relative to ip following this instruction, stored as 16 bits, in excess
+STATIC void emit_write_bytecode_byte_signed_label(emit_t *emit, byte b1, mp_uint_t label) {
+    int bytecode_offset;
+    if (emit->pass < MP_PASS_EMIT) {
+        // label offsets aren't known yet; write a placeholder of the same size
+        bytecode_offset = 0;
+    } else {
+        // excess-0x8000 encoding keeps the stored 16-bit value unsigned
+        bytecode_offset = emit->label_offsets[label] - emit->bytecode_offset - 3 + 0x8000;
+    }
+    byte *c = emit_get_cur_to_write_bytecode(emit, 3);
+    c[0] = b1;
+    c[1] = bytecode_offset;
+    c[2] = bytecode_offset >> 8;
+}
+
+#if MICROPY_EMIT_NATIVE
+// Intentional no-op for the bytecode emitter: native type annotations only
+// affect native code emitters; presumably this exists to fill the shared
+// emit method-table slot -- confirm against py/emit.h users.
+STATIC void mp_emit_bc_set_native_type(emit_t *emit, mp_uint_t op, mp_uint_t arg1, qstr arg2) {
+    (void)emit;
+    (void)op;
+    (void)arg1;
+    (void)arg2;
+}
+#endif
+
+// Begin one compiler pass over scope: reset per-pass state and write the
+// code-info header and bytecode prelude. Offsets accumulated on the sizing
+// passes determine the buffer sizes allocated in mp_emit_bc_end_pass().
+void mp_emit_bc_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) {
+    emit->pass = pass;
+    emit->stack_size = 0;
+    emit->last_emit_was_return_value = false;
+    emit->scope = scope;
+    emit->last_source_line_offset = 0;
+    emit->last_source_line = 1;
+    if (pass < MP_PASS_EMIT) {
+        // mark all labels unassigned so double-assignment can be asserted
+        memset(emit->label_offsets, -1, emit->max_num_labels * sizeof(mp_uint_t));
+    }
+    emit->bytecode_offset = 0;
+    emit->code_info_offset = 0;
+
+    // Write local state size and exception stack size.
+    {
+        mp_uint_t n_state = scope->num_locals + scope->stack_size;
+        if (n_state == 0) {
+            // Need at least 1 entry in the state, in the case an exception is
+            // propagated through this function, the exception is returned in
+            // the highest slot in the state (fastn[0], see vm.c).
+            n_state = 1;
+        }
+        emit_write_code_info_uint(emit, n_state);
+        emit_write_code_info_uint(emit, scope->exc_stack_size);
+    }
+
+    // Write scope flags and number of arguments.
+    // TODO check that num args all fit in a byte
+    emit_write_code_info_byte(emit, emit->scope->scope_flags);
+    emit_write_code_info_byte(emit, emit->scope->num_pos_args);
+    emit_write_code_info_byte(emit, emit->scope->num_kwonly_args);
+    emit_write_code_info_byte(emit, emit->scope->num_def_pos_args);
+
+    // Write size of the rest of the code info.  We don't know how big this
+    // variable uint will be on the MP_PASS_CODE_SIZE pass so we reserve 2 bytes
+    // for it and hope that is enough!  TODO assert this or something.
+    if (pass == MP_PASS_EMIT) {
+        emit_write_code_info_uint(emit, emit->code_info_size - emit->code_info_offset);
+    } else  {
+        emit_get_cur_to_write_code_info(emit, 2);
+    }
+
+    // Write the name and source file of this function.
+    emit_write_code_info_qstr(emit, scope->simple_name);
+    emit_write_code_info_qstr(emit, scope->source_file);
+
+    // bytecode prelude: initialise closed over variables
+    for (int i = 0; i < scope->id_info_len; i++) {
+        id_info_t *id = &scope->id_info[i];
+        if (id->kind == ID_INFO_KIND_CELL) {
+            assert(id->local_num < 255); // 255 is the sentinel below
+            emit_write_bytecode_byte(emit, id->local_num); // write the local which should be converted to a cell
+        }
+    }
+    emit_write_bytecode_byte(emit, 255); // end of list sentinel
+
+    #if MICROPY_PERSISTENT_CODE
+    emit->ct_cur_obj = 0;
+    emit->ct_cur_raw_code = 0;
+    #endif
+
+    if (pass == MP_PASS_EMIT) {
+        // Write argument names (needed to resolve positional args passed as
+        // keywords). We store them as full word-sized objects for efficient access
+        // in mp_setup_code_state this is the start of the prelude and is guaranteed
+        // to be aligned on a word boundary.
+
+        // For a given argument position (indexed by i) we need to find the
+        // corresponding id_info which is a parameter, as it has the correct
+        // qstr name to use as the argument name.  Note that it's not a simple
+        // 1-1 mapping (ie i!=j in general) because of possible closed-over
+        // variables.  In the case that the argument i has no corresponding
+        // parameter we use "*" as its name (since no argument can ever be named
+        // "*").  We could use a blank qstr but "*" is better for debugging.
+        // Note: there is some wasted RAM here for the case of storing a qstr
+        // for each closed-over variable, and maybe there is a better way to do
+        // it, but that would require changes to mp_setup_code_state.
+        for (int i = 0; i < scope->num_pos_args + scope->num_kwonly_args; i++) {
+            qstr qst = MP_QSTR__star_;
+            for (int j = 0; j < scope->id_info_len; ++j) {
+                id_info_t *id = &scope->id_info[j];
+                if ((id->flags & ID_FLAG_IS_PARAM) && id->local_num == i) {
+                    qst = id->qst;
+                    break;
+                }
+            }
+            emit->const_table[i] = (mp_uint_t)MP_OBJ_NEW_QSTR(qst);
+        }
+    }
+}
+
+// Finish the current pass: after MP_PASS_CODE_SIZE allocate the final code
+// and const-table buffers; after MP_PASS_EMIT hand the finished bytecode to
+// the emit glue.
+void mp_emit_bc_end_pass(emit_t *emit) {
+    if (emit->pass == MP_PASS_SCOPE) {
+        return;
+    }
+
+    // check stack is back to zero size
+    if (emit->stack_size != 0) {
+        mp_printf(&mp_plat_print, "ERROR: stack size not back to zero; got %d\n", emit->stack_size);
+    }
+
+    emit_write_code_info_byte(emit, 0); // end of line number info
+
+    #if MICROPY_PERSISTENT_CODE
+    // object count must be stable between the code-size and emit passes
+    assert(emit->pass <= MP_PASS_STACK_SIZE || (emit->ct_num_obj == emit->ct_cur_obj));
+    emit->ct_num_obj = emit->ct_cur_obj;
+    #endif
+
+    if (emit->pass == MP_PASS_CODE_SIZE) {
+        #if !MICROPY_PERSISTENT_CODE
+        // so bytecode is aligned
+        emit->code_info_offset = (size_t)MP_ALIGN(emit->code_info_offset, sizeof(mp_uint_t));
+        #endif
+
+        // calculate size of total code-info + bytecode, in bytes
+        emit->code_info_size = emit->code_info_offset;
+        emit->bytecode_size = emit->bytecode_offset;
+        emit->code_base = m_new0(byte, emit->code_info_size + emit->bytecode_size);
+
+        #if MICROPY_PERSISTENT_CODE
+        // const table: arg-name qstrs, then const objects, then raw codes
+        emit->const_table = m_new0(mp_uint_t,
+            emit->scope->num_pos_args + emit->scope->num_kwonly_args
+            + emit->ct_cur_obj + emit->ct_cur_raw_code);
+        #else
+        // const table holds only the arg-name qstrs
+        emit->const_table = m_new0(mp_uint_t,
+            emit->scope->num_pos_args + emit->scope->num_kwonly_args);
+        #endif
+
+    } else if (emit->pass == MP_PASS_EMIT) {
+        mp_emit_glue_assign_bytecode(emit->scope->raw_code, emit->code_base,
+            emit->code_info_size + emit->bytecode_size,
+            emit->const_table,
+            #if MICROPY_PERSISTENT_CODE_SAVE
+            emit->ct_cur_obj, emit->ct_cur_raw_code,
+            #endif
+            emit->scope->scope_flags);
+    }
+}
+
+// Report whether the most recently emitted opcode was RETURN_VALUE.
+bool mp_emit_bc_last_emit_was_return_value(emit_t *emit) {
+    return emit->last_emit_was_return_value != 0;
+}
+
+// Apply an out-of-band adjustment to the modelled stack depth.
+void mp_emit_bc_adjust_stack_size(emit_t *emit, mp_int_t delta) {
+    emit->stack_size += delta;
+}
+
+// Record that subsequent bytecode corresponds to the given source line, by
+// appending byte/line deltas to the line-number map. Line numbers are
+// omitted entirely when compiling with -O3.
+void mp_emit_bc_set_source_line(emit_t *emit, mp_uint_t source_line) {
+    //printf("source: line %d -> %d  offset %d -> %d\n", emit->last_source_line, source_line, emit->last_source_line_offset, emit->bytecode_offset);
+#if MICROPY_ENABLE_SOURCE_LINE
+    if (MP_STATE_VM(mp_optimise_value) >= 3) {
+        // If we compile with -O3, don't store line numbers.
+        return;
+    }
+    if (source_line > emit->last_source_line) {
+        mp_uint_t bytes_to_skip = emit->bytecode_offset - emit->last_source_line_offset;
+        mp_uint_t lines_to_skip = source_line - emit->last_source_line;
+        emit_write_code_info_bytes_lines(emit, bytes_to_skip, lines_to_skip);
+        emit->last_source_line_offset = emit->bytecode_offset;
+        emit->last_source_line = source_line;
+    }
+#else
+    (void)emit;
+    (void)source_line;
+#endif
+}
+
+// Common bookkeeping before emitting an opcode: apply the stack delta, track
+// the scope's stack high-water mark, and clear last_emit_was_return_value.
+// Does nothing on the scope pass.
+STATIC void emit_bc_pre(emit_t *emit, mp_int_t stack_size_delta) {
+    if (emit->pass == MP_PASS_SCOPE) {
+        return;
+    }
+    assert((mp_int_t)emit->stack_size + stack_size_delta >= 0); // stack must never underflow
+    emit->stack_size += stack_size_delta;
+    if (emit->stack_size > emit->scope->stack_size) {
+        emit->scope->stack_size = emit->stack_size;
+    }
+    emit->last_emit_was_return_value = false;
+}
+
+// Bind label l to the current bytecode offset. On sizing passes the offset
+// is recorded; on the emit pass it is verified to be unchanged.
+void mp_emit_bc_label_assign(emit_t *emit, mp_uint_t l) {
+    emit_bc_pre(emit, 0);
+    if (emit->pass == MP_PASS_SCOPE) {
+        return;
+    }
+    assert(l < emit->max_num_labels);
+    if (emit->pass < MP_PASS_EMIT) {
+        // assign label offset
+        assert(emit->label_offsets[l] == (mp_uint_t)-1); // each label assigned exactly once
+        emit->label_offsets[l] = emit->bytecode_offset;
+    } else {
+        // ensure label offset has not changed from MP_PASS_CODE_SIZE to MP_PASS_EMIT
+        //printf("l%d: (at %d vs %d)\n", l, emit->bytecode_offset, emit->label_offsets[l]);
+        assert(emit->label_offsets[l] == emit->bytecode_offset);
+    }
+}
+
+// MP_BC_IMPORT_NAME <qstr>: net stack effect -1.
+void mp_emit_bc_import_name(emit_t *emit, qstr qst) {
+    emit_bc_pre(emit, -1);
+    emit_write_bytecode_byte_qstr(emit, MP_BC_IMPORT_NAME, qst);
+}
+
+// MP_BC_IMPORT_FROM <qstr>: net stack effect +1.
+void mp_emit_bc_import_from(emit_t *emit, qstr qst) {
+    emit_bc_pre(emit, 1);
+    emit_write_bytecode_byte_qstr(emit, MP_BC_IMPORT_FROM, qst);
+}
+
+// MP_BC_IMPORT_STAR: net stack effect -1.
+void mp_emit_bc_import_star(emit_t *emit) {
+    emit_bc_pre(emit, -1);
+    emit_write_bytecode_byte(emit, MP_BC_IMPORT_STAR);
+}
+
+// Push the constant denoted by a keyword token (False/None/True/...);
+// net stack effect +1.
+void mp_emit_bc_load_const_tok(emit_t *emit, mp_token_kind_t tok) {
+    emit_bc_pre(emit, 1);
+    switch (tok) {
+        case MP_TOKEN_KW_FALSE: emit_write_bytecode_byte(emit, MP_BC_LOAD_CONST_FALSE); break;
+        case MP_TOKEN_KW_NONE: emit_write_bytecode_byte(emit, MP_BC_LOAD_CONST_NONE); break;
+        case MP_TOKEN_KW_TRUE: emit_write_bytecode_byte(emit, MP_BC_LOAD_CONST_TRUE); break;
+        // the default branch jumps back here after asserting, so every path
+        // through the switch emits something (helps flow analysis)
+        no_other_choice:
+        case MP_TOKEN_ELLIPSIS: emit_write_bytecode_byte_obj(emit, MP_BC_LOAD_CONST_OBJ, MP_OBJ_FROM_PTR(&mp_const_ellipsis_obj)); break;
+        default: assert(0); goto no_other_choice; // to help flow control analysis
+    }
+}
+
+// Push a small-int constant (net +1). Values in [-16, 47] fit into the
+// compact single-byte MULTI opcode; anything else uses the general form.
+void mp_emit_bc_load_const_small_int(emit_t *emit, mp_int_t arg) {
+    emit_bc_pre(emit, 1);
+    if (arg < -16 || arg > 47) {
+        emit_write_bytecode_byte_int(emit, MP_BC_LOAD_CONST_SMALL_INT, arg);
+    } else {
+        emit_write_bytecode_byte(emit, MP_BC_LOAD_CONST_SMALL_INT_MULTI + 16 + arg);
+    }
+}
+
+// MP_BC_LOAD_CONST_STRING <qstr>: push an interned string (net +1).
+void mp_emit_bc_load_const_str(emit_t *emit, qstr qst) {
+    emit_bc_pre(emit, 1);
+    emit_write_bytecode_byte_qstr(emit, MP_BC_LOAD_CONST_STRING, qst);
+}
+
+// MP_BC_LOAD_CONST_OBJ: push an arbitrary constant object (net +1).
+void mp_emit_bc_load_const_obj(emit_t *emit, mp_obj_t obj) {
+    emit_bc_pre(emit, 1);
+    emit_write_bytecode_byte_obj(emit, MP_BC_LOAD_CONST_OBJ, obj);
+}
+
+// MP_BC_LOAD_NULL: push the sentinel NULL value (net +1).
+// Fixed: removed the stray ';' that followed the closing brace -- an empty
+// declaration at file scope is not valid ISO C.
+void mp_emit_bc_load_null(emit_t *emit) {
+    emit_bc_pre(emit, 1);
+    emit_write_bytecode_byte(emit, MP_BC_LOAD_NULL);
+}
+
+// Push local variable slot local_num (net +1); slots 0-15 use the compact
+// single-byte MULTI opcode. qst is unused by the bytecode emitter.
+// Fixed: removed assert(local_num >= 0) -- local_num is unsigned, so the
+// assertion was a tautology (and trips -Wtype-limits).
+void mp_emit_bc_load_fast(emit_t *emit, qstr qst, mp_uint_t local_num) {
+    (void)qst;
+    emit_bc_pre(emit, 1);
+    if (local_num <= 15) {
+        emit_write_bytecode_byte(emit, MP_BC_LOAD_FAST_MULTI + local_num);
+    } else {
+        emit_write_bytecode_byte_uint(emit, MP_BC_LOAD_FAST_N, local_num);
+    }
+}
+
+// MP_BC_LOAD_DEREF <n>: push the value of closed-over cell n (net +1).
+void mp_emit_bc_load_deref(emit_t *emit, qstr qst, mp_uint_t local_num) {
+    (void)qst;
+    emit_bc_pre(emit, 1);
+    emit_write_bytecode_byte_uint(emit, MP_BC_LOAD_DEREF, local_num);
+}
+
+// MP_BC_LOAD_NAME <qstr> (net +1); the extra trailing byte is the map-lookup
+// cache slot when that optimisation is enabled.
+// Fixed: removed the leftover `(void)qst;` -- qst is in fact used below.
+void mp_emit_bc_load_name(emit_t *emit, qstr qst) {
+    emit_bc_pre(emit, 1);
+    emit_write_bytecode_byte_qstr(emit, MP_BC_LOAD_NAME, qst);
+    if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) {
+        emit_write_bytecode_byte(emit, 0);
+    }
+}
+
+// MP_BC_LOAD_GLOBAL <qstr> (net +1); the extra trailing byte is the
+// map-lookup cache slot when that optimisation is enabled.
+// Fixed: removed the leftover `(void)qst;` -- qst is in fact used below.
+void mp_emit_bc_load_global(emit_t *emit, qstr qst) {
+    emit_bc_pre(emit, 1);
+    emit_write_bytecode_byte_qstr(emit, MP_BC_LOAD_GLOBAL, qst);
+    if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) {
+        emit_write_bytecode_byte(emit, 0);
+    }
+}
+
+// MP_BC_LOAD_ATTR <qstr>: replace TOS with its attribute (net 0); extra byte
+// is the map-lookup cache slot when that optimisation is enabled.
+void mp_emit_bc_load_attr(emit_t *emit, qstr qst) {
+    emit_bc_pre(emit, 0);
+    emit_write_bytecode_byte_qstr(emit, MP_BC_LOAD_ATTR, qst);
+    if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) {
+        emit_write_bytecode_byte(emit, 0);
+    }
+}
+
+// MP_BC_LOAD_METHOD <qstr>: net stack effect +1.
+void mp_emit_bc_load_method(emit_t *emit, qstr qst) {
+    emit_bc_pre(emit, 1);
+    emit_write_bytecode_byte_qstr(emit, MP_BC_LOAD_METHOD, qst);
+}
+
+// MP_BC_LOAD_BUILD_CLASS: net stack effect +1.
+void mp_emit_bc_load_build_class(emit_t *emit) {
+    emit_bc_pre(emit, 1);
+    emit_write_bytecode_byte(emit, MP_BC_LOAD_BUILD_CLASS);
+}
+
+// MP_BC_LOAD_SUBSCR: pops index and object, pushes result (net -1).
+void mp_emit_bc_load_subscr(emit_t *emit) {
+    emit_bc_pre(emit, -1);
+    emit_write_bytecode_byte(emit, MP_BC_LOAD_SUBSCR);
+}
+
+// Pop TOS into local variable slot local_num (net -1); slots 0-15 use the
+// compact single-byte MULTI opcode. qst is unused by the bytecode emitter.
+// Fixed: removed assert(local_num >= 0) -- local_num is unsigned, so the
+// assertion was a tautology (and trips -Wtype-limits).
+void mp_emit_bc_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num) {
+    (void)qst;
+    emit_bc_pre(emit, -1);
+    if (local_num <= 15) {
+        emit_write_bytecode_byte(emit, MP_BC_STORE_FAST_MULTI + local_num);
+    } else {
+        emit_write_bytecode_byte_uint(emit, MP_BC_STORE_FAST_N, local_num);
+    }
+}
+
+// MP_BC_STORE_DEREF <n>: pop TOS into closed-over cell n (net -1).
+void mp_emit_bc_store_deref(emit_t *emit, qstr qst, mp_uint_t local_num) {
+    (void)qst;
+    emit_bc_pre(emit, -1);
+    emit_write_bytecode_byte_uint(emit, MP_BC_STORE_DEREF, local_num);
+}
+
+// MP_BC_STORE_NAME <qstr>: net stack effect -1.
+void mp_emit_bc_store_name(emit_t *emit, qstr qst) {
+    emit_bc_pre(emit, -1);
+    emit_write_bytecode_byte_qstr(emit, MP_BC_STORE_NAME, qst);
+}
+
+// MP_BC_STORE_GLOBAL <qstr>: net stack effect -1.
+void mp_emit_bc_store_global(emit_t *emit, qstr qst) {
+    emit_bc_pre(emit, -1);
+    emit_write_bytecode_byte_qstr(emit, MP_BC_STORE_GLOBAL, qst);
+}
+
+// MP_BC_STORE_ATTR <qstr>: pops value and object (net -2); extra byte is the
+// map-lookup cache slot when that optimisation is enabled.
+void mp_emit_bc_store_attr(emit_t *emit, qstr qst) {
+    emit_bc_pre(emit, -2);
+    emit_write_bytecode_byte_qstr(emit, MP_BC_STORE_ATTR, qst);
+    if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) {
+        emit_write_bytecode_byte(emit, 0);
+    }
+}
+
+// MP_BC_STORE_SUBSCR: pops value, object and index (net -3).
+void mp_emit_bc_store_subscr(emit_t *emit) {
+    emit_bc_pre(emit, -3);
+    emit_write_bytecode_byte(emit, MP_BC_STORE_SUBSCR);
+}
+
+// MP_BC_DELETE_FAST <n>: delete local variable slot n (net stack effect 0).
+// Fixed for consistency: every other emit op calls emit_bc_pre() first
+// (pass bookkeeping; clears last_emit_was_return_value) -- these two did not.
+void mp_emit_bc_delete_fast(emit_t *emit, qstr qst, mp_uint_t local_num) {
+    (void)qst;
+    emit_bc_pre(emit, 0);
+    emit_write_bytecode_byte_uint(emit, MP_BC_DELETE_FAST, local_num);
+}
+
+// MP_BC_DELETE_DEREF <n>: delete closed-over cell n (net stack effect 0).
+void mp_emit_bc_delete_deref(emit_t *emit, qstr qst, mp_uint_t local_num) {
+    (void)qst;
+    emit_bc_pre(emit, 0);
+    emit_write_bytecode_byte_uint(emit, MP_BC_DELETE_DEREF, local_num);
+}
+
+// MP_BC_DELETE_NAME <qstr>: net stack effect 0.
+void mp_emit_bc_delete_name(emit_t *emit, qstr qst) {
+    emit_bc_pre(emit, 0);
+    emit_write_bytecode_byte_qstr(emit, MP_BC_DELETE_NAME, qst);
+}
+
+// MP_BC_DELETE_GLOBAL <qstr>: net stack effect 0.
+void mp_emit_bc_delete_global(emit_t *emit, qstr qst) {
+    emit_bc_pre(emit, 0);
+    emit_write_bytecode_byte_qstr(emit, MP_BC_DELETE_GLOBAL, qst);
+}
+
+// Attribute delete has no dedicated opcode: storing the NULL sentinel to an
+// attribute deletes it.
+void mp_emit_bc_delete_attr(emit_t *emit, qstr qst) {
+    mp_emit_bc_load_null(emit);
+    mp_emit_bc_rot_two(emit);
+    mp_emit_bc_store_attr(emit, qst);
+}
+
+// Subscript delete likewise: store the NULL sentinel at [index].
+void mp_emit_bc_delete_subscr(emit_t *emit) {
+    mp_emit_bc_load_null(emit);
+    mp_emit_bc_rot_three(emit);
+    mp_emit_bc_store_subscr(emit);
+}
+
+// MP_BC_DUP_TOP: duplicate TOS (net +1).
+void mp_emit_bc_dup_top(emit_t *emit) {
+    emit_bc_pre(emit, 1);
+    emit_write_bytecode_byte(emit, MP_BC_DUP_TOP);
+}
+
+// MP_BC_DUP_TOP_TWO: duplicate the top two values (net +2).
+void mp_emit_bc_dup_top_two(emit_t *emit) {
+    emit_bc_pre(emit, 2);
+    emit_write_bytecode_byte(emit, MP_BC_DUP_TOP_TWO);
+}
+
+// MP_BC_POP_TOP: discard TOS (net -1).
+void mp_emit_bc_pop_top(emit_t *emit) {
+    emit_bc_pre(emit, -1);
+    emit_write_bytecode_byte(emit, MP_BC_POP_TOP);
+}
+
+// MP_BC_ROT_TWO: swap the top two values (net 0).
+void mp_emit_bc_rot_two(emit_t *emit) {
+    emit_bc_pre(emit, 0);
+    emit_write_bytecode_byte(emit, MP_BC_ROT_TWO);
+}
+
+// MP_BC_ROT_THREE: rotate the top three values (net 0).
+void mp_emit_bc_rot_three(emit_t *emit) {
+    emit_bc_pre(emit, 0);
+    emit_write_bytecode_byte(emit, MP_BC_ROT_THREE);
+}
+
+// MP_BC_JUMP <rel16>: unconditional jump to label (net 0).
+void mp_emit_bc_jump(emit_t *emit, mp_uint_t label) {
+    emit_bc_pre(emit, 0);
+    emit_write_bytecode_byte_signed_label(emit, MP_BC_JUMP, label);
+}
+
+// Pop TOS and jump to label when it matches cond (net -1).
+void mp_emit_bc_pop_jump_if(emit_t *emit, bool cond, mp_uint_t label) {
+    emit_bc_pre(emit, -1);
+    byte op = cond ? MP_BC_POP_JUMP_IF_TRUE : MP_BC_POP_JUMP_IF_FALSE;
+    emit_write_bytecode_byte_signed_label(emit, op, label);
+}
+
+// Jump to label when TOS matches cond (leaving it), else pop it; worst-case
+// net stack effect -1.
+void mp_emit_bc_jump_if_or_pop(emit_t *emit, bool cond, mp_uint_t label) {
+    emit_bc_pre(emit, -1);
+    byte op = cond ? MP_BC_JUMP_IF_TRUE_OR_POP : MP_BC_JUMP_IF_FALSE_OR_POP;
+    emit_write_bytecode_byte_signed_label(emit, op, label);
+}
+
+// Jump out of except_depth nested exception handlers to label. The label's
+// MP_EMIT_BREAK_FROM_FOR flag bit marks a break out of a for loop.
+void mp_emit_bc_unwind_jump(emit_t *emit, mp_uint_t label, mp_uint_t except_depth) {
+    if (except_depth == 0) {
+        // no handlers to unwind: a plain jump suffices
+        emit_bc_pre(emit, 0);
+        if (label & MP_EMIT_BREAK_FROM_FOR) {
+            // need to pop the iterator if we are breaking out of a for loop
+            emit_write_bytecode_byte(emit, MP_BC_POP_TOP);
+        }
+        emit_write_bytecode_byte_signed_label(emit, MP_BC_JUMP, label & ~MP_EMIT_BREAK_FROM_FOR);
+    } else {
+        // UNWIND_JUMP's extra byte: bit 0x80 = breaking from for, low bits = depth
+        emit_write_bytecode_byte_signed_label(emit, MP_BC_UNWIND_JUMP, label & ~MP_EMIT_BREAK_FROM_FOR);
+        emit_write_bytecode_byte(emit, ((label & MP_EMIT_BREAK_FROM_FOR) ? 0x80 : 0) | except_depth);
+    }
+}
+
+// MP_BC_SETUP_WITH <label>: enter a with block; reserves 4 stack slots for
+// the cleanup handler.
+void mp_emit_bc_setup_with(emit_t *emit, mp_uint_t label) {
+    // TODO We can probably optimise the amount of needed stack space, since
+    // we don't actually need 4 slots during the entire with block, only in
+    // the cleanup handler in certain cases.  It needs some thinking.
+    emit_bc_pre(emit, 4);
+    emit_write_bytecode_byte_unsigned_label(emit, MP_BC_SETUP_WITH, label);
+}
+
+// End of a with block: pop the block, push None, then WITH_CLEANUP releases
+// the 4 slots reserved by setup_with.
+void mp_emit_bc_with_cleanup(emit_t *emit, mp_uint_t label) {
+    mp_emit_bc_pop_block(emit);
+    mp_emit_bc_load_const_tok(emit, MP_TOKEN_KW_NONE);
+    mp_emit_bc_label_assign(emit, label);
+    emit_bc_pre(emit, -4);
+    emit_write_bytecode_byte(emit, MP_BC_WITH_CLEANUP);
+}
+
+// MP_BC_SETUP_EXCEPT <label>: begin a try block (net 0).
+void mp_emit_bc_setup_except(emit_t *emit, mp_uint_t label) {
+    emit_bc_pre(emit, 0);
+    emit_write_bytecode_byte_unsigned_label(emit, MP_BC_SETUP_EXCEPT, label);
+}
+
+// MP_BC_SETUP_FINALLY <label>: begin a try/finally block (net 0).
+void mp_emit_bc_setup_finally(emit_t *emit, mp_uint_t label) {
+    emit_bc_pre(emit, 0);
+    emit_write_bytecode_byte_unsigned_label(emit, MP_BC_SETUP_FINALLY, label);
+}
+
+// MP_BC_END_FINALLY: finish a finally handler (net -1).
+void mp_emit_bc_end_finally(emit_t *emit) {
+    emit_bc_pre(emit, -1);
+    emit_write_bytecode_byte(emit, MP_BC_END_FINALLY);
+}
+
+// MP_BC_GET_ITER: replace TOS with its iterator (net 0).
+void mp_emit_bc_get_iter(emit_t *emit) {
+    emit_bc_pre(emit, 0);
+    emit_write_bytecode_byte(emit, MP_BC_GET_ITER);
+}
+
+// MP_BC_FOR_ITER <label>: push the next item, or jump to label when the
+// iterator is exhausted (net +1 on the non-exhausted path).
+void mp_emit_bc_for_iter(emit_t *emit, mp_uint_t label) {
+    emit_bc_pre(emit, 1);
+    emit_write_bytecode_byte_unsigned_label(emit, MP_BC_FOR_ITER, label);
+}
+
+// No opcode: just account for the iterator popped when the loop ends.
+void mp_emit_bc_for_iter_end(emit_t *emit) {
+    emit_bc_pre(emit, -1);
+}
+
+// MP_BC_POP_BLOCK: leave the innermost block (net 0).
+void mp_emit_bc_pop_block(emit_t *emit) {
+    emit_bc_pre(emit, 0);
+    emit_write_bytecode_byte(emit, MP_BC_POP_BLOCK);
+}
+
+// MP_BC_POP_EXCEPT: leave the innermost exception handler (net 0).
+void mp_emit_bc_pop_except(emit_t *emit) {
+    emit_bc_pre(emit, 0);
+    emit_write_bytecode_byte(emit, MP_BC_POP_EXCEPT);
+}
+
+// Unary operators share one opcode block, indexed by op (net 0).
+void mp_emit_bc_unary_op(emit_t *emit, mp_unary_op_t op) {
+    emit_bc_pre(emit, 0);
+    emit_write_bytecode_byte(emit, MP_BC_UNARY_OP_MULTI + op);
+}
+
+// Binary operators share one opcode block, indexed by op (net -1).
+// "not in" and "is not" have no opcodes of their own: emit the positive op
+// followed by unary NOT.
+void mp_emit_bc_binary_op(emit_t *emit, mp_binary_op_t op) {
+    bool invert = false;
+    if (op == MP_BINARY_OP_NOT_IN) {
+        invert = true;
+        op = MP_BINARY_OP_IN;
+    } else if (op == MP_BINARY_OP_IS_NOT) {
+        invert = true;
+        op = MP_BINARY_OP_IS;
+    }
+    emit_bc_pre(emit, -1);
+    emit_write_bytecode_byte(emit, MP_BC_BINARY_OP_MULTI + op);
+    if (invert) {
+        emit_bc_pre(emit, 0);
+        emit_write_bytecode_byte(emit, MP_BC_UNARY_OP_MULTI + MP_UNARY_OP_NOT);
+    }
+}
+
+// BUILD_TUPLE consumes n_args items and pushes one tuple.
+void mp_emit_bc_build_tuple(emit_t *emit, mp_uint_t n_args) {
+    emit_bc_pre(emit, 1 - n_args);
+    emit_write_bytecode_byte_uint(emit, MP_BC_BUILD_TUPLE, n_args);
+}
+
+// BUILD_LIST consumes n_args items and pushes one list.
+void mp_emit_bc_build_list(emit_t *emit, mp_uint_t n_args) {
+    emit_bc_pre(emit, 1 - n_args);
+    emit_write_bytecode_byte_uint(emit, MP_BC_BUILD_LIST, n_args);
+}
+
+// Append TOS to the list at the given stack index (list comprehensions).
+void mp_emit_bc_list_append(emit_t *emit, mp_uint_t list_stack_index) {
+    emit_bc_pre(emit, -1);
+    emit_write_bytecode_byte_uint(emit, MP_BC_LIST_APPEND, list_stack_index);
+}
+
+// BUILD_MAP pushes a new (empty) map; n_args is a size hint argument.
+void mp_emit_bc_build_map(emit_t *emit, mp_uint_t n_args) {
+    emit_bc_pre(emit, 1);
+    emit_write_bytecode_byte_uint(emit, MP_BC_BUILD_MAP, n_args);
+}
+
+// STORE_MAP pops a key/value pair into the map below them.
+void mp_emit_bc_store_map(emit_t *emit) {
+    emit_bc_pre(emit, -2);
+    emit_write_bytecode_byte(emit, MP_BC_STORE_MAP);
+}
+
+// Pop a key/value pair into the map at the given stack index
+// (dict comprehensions).
+void mp_emit_bc_map_add(emit_t *emit, mp_uint_t map_stack_index) {
+    emit_bc_pre(emit, -2);
+    emit_write_bytecode_byte_uint(emit, MP_BC_MAP_ADD, map_stack_index);
+}
+
+#if MICROPY_PY_BUILTINS_SET
+// BUILD_SET consumes n_args items and pushes one set.
+void mp_emit_bc_build_set(emit_t *emit, mp_uint_t n_args) {
+    emit_bc_pre(emit, 1 - n_args);
+    emit_write_bytecode_byte_uint(emit, MP_BC_BUILD_SET, n_args);
+}
+
+// Add TOS to the set at the given stack index (set comprehensions).
+void mp_emit_bc_set_add(emit_t *emit, mp_uint_t set_stack_index) {
+    emit_bc_pre(emit, -1);
+    emit_write_bytecode_byte_uint(emit, MP_BC_SET_ADD, set_stack_index);
+}
+#endif
+
+#if MICROPY_PY_BUILTINS_SLICE
+// BUILD_SLICE consumes n_args items (2 or 3) and pushes one slice.
+void mp_emit_bc_build_slice(emit_t *emit, mp_uint_t n_args) {
+    emit_bc_pre(emit, 1 - n_args);
+    emit_write_bytecode_byte_uint(emit, MP_BC_BUILD_SLICE, n_args);
+}
+#endif
+
+// UNPACK_SEQUENCE pops one sequence and pushes its n_args items.
+void mp_emit_bc_unpack_sequence(emit_t *emit, mp_uint_t n_args) {
+    emit_bc_pre(emit, -1 + n_args);
+    emit_write_bytecode_byte_uint(emit, MP_BC_UNPACK_SEQUENCE, n_args);
+}
+
+// UNPACK_EX (starred assignment): pops one sequence, pushes n_left
+// items, the starred list, and n_right items.  The two counts are
+// packed into a single uint argument (n_right in the high byte).
+void mp_emit_bc_unpack_ex(emit_t *emit, mp_uint_t n_left, mp_uint_t n_right) {
+    emit_bc_pre(emit, -1 + n_left + n_right + 1);
+    emit_write_bytecode_byte_uint(emit, MP_BC_UNPACK_EX, n_left | (n_right << 8));
+}
+
+// Emit MAKE_FUNCTION: with no defaults it just pushes the new function
+// (+1); with defaults, the DEFARGS variant consumes the two default
+// containers already on the stack and pushes the function (-1 net).
+void mp_emit_bc_make_function(emit_t *emit, scope_t *scope, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults) {
+    if (n_pos_defaults == 0 && n_kw_defaults == 0) {
+        emit_bc_pre(emit, 1);
+        emit_write_bytecode_byte_raw_code(emit, MP_BC_MAKE_FUNCTION, scope->raw_code);
+    } else {
+        emit_bc_pre(emit, -1);
+        emit_write_bytecode_byte_raw_code(emit, MP_BC_MAKE_FUNCTION_DEFARGS, scope->raw_code);
+    }
+}
+
+// Emit MAKE_CLOSURE: consumes the n_closed_over cell variables (plus the
+// two default containers for the DEFARGS variant) and pushes the new
+// closure.  n_closed_over is encoded as a single trailing byte, hence
+// the <= 255 restriction.
+void mp_emit_bc_make_closure(emit_t *emit, scope_t *scope, mp_uint_t n_closed_over, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults) {
+    if (n_pos_defaults == 0 && n_kw_defaults == 0) {
+        emit_bc_pre(emit, -n_closed_over + 1);
+        emit_write_bytecode_byte_raw_code(emit, MP_BC_MAKE_CLOSURE, scope->raw_code);
+        emit_write_bytecode_byte(emit, n_closed_over);
+    } else {
+        assert(n_closed_over <= 255);
+        emit_bc_pre(emit, -2 - n_closed_over + 1);
+        emit_write_bytecode_byte_raw_code(emit, MP_BC_MAKE_CLOSURE_DEFARGS, scope->raw_code);
+        emit_write_bytecode_byte(emit, n_closed_over);
+    }
+}
+
+// Common helper for CALL_FUNCTION/CALL_METHOD.  stack_adj is the extra
+// adjustment of the caller (-1 for methods: self is also consumed).
+// When star_flags is set the *args/**kwargs variant (bytecode_base + 1)
+// is used, which consumes 2 more stack items.  Positional and keyword
+// counts are packed into one uint (keywords in the high byte).
+STATIC void emit_bc_call_function_method_helper(emit_t *emit, mp_int_t stack_adj, mp_uint_t bytecode_base, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) {
+    if (star_flags) {
+        emit_bc_pre(emit, stack_adj - (mp_int_t)n_positional - 2 * (mp_int_t)n_keyword - 2);
+        emit_write_bytecode_byte_uint(emit, bytecode_base + 1, (n_keyword << 8) | n_positional); // TODO make it 2 separate uints?
+    } else {
+        emit_bc_pre(emit, stack_adj - (mp_int_t)n_positional - 2 * (mp_int_t)n_keyword);
+        emit_write_bytecode_byte_uint(emit, bytecode_base, (n_keyword << 8) | n_positional); // TODO make it 2 separate uints?
+    }
+}
+
+void mp_emit_bc_call_function(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) {
+    emit_bc_call_function_method_helper(emit, 0, MP_BC_CALL_FUNCTION, n_positional, n_keyword, star_flags);
+}
+
+void mp_emit_bc_call_method(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) {
+    emit_bc_call_function_method_helper(emit, -1, MP_BC_CALL_METHOD, n_positional, n_keyword, star_flags);
+}
+
+// RETURN_VALUE pops the return value; the flag lets the compiler elide a
+// redundant trailing "return None".
+void mp_emit_bc_return_value(emit_t *emit) {
+    emit_bc_pre(emit, -1);
+    emit->last_emit_was_return_value = true;
+    emit_write_bytecode_byte(emit, MP_BC_RETURN_VALUE);
+}
+
+// RAISE_VARARGS pops n_args items (0 = re-raise, 1 = exc, 2 = exc+cause).
+void mp_emit_bc_raise_varargs(emit_t *emit, mp_uint_t n_args) {
+    assert(0 <= n_args && n_args <= 2);
+    emit_bc_pre(emit, -n_args);
+    emit_write_bytecode_byte_byte(emit, MP_BC_RAISE_VARARGS, n_args);
+}
+
+// yield/yield-from mark the enclosing scope as a generator as a side
+// effect of being emitted.
+void mp_emit_bc_yield_value(emit_t *emit) {
+    emit_bc_pre(emit, 0);
+    emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR;
+    emit_write_bytecode_byte(emit, MP_BC_YIELD_VALUE);
+}
+
+void mp_emit_bc_yield_from(emit_t *emit) {
+    emit_bc_pre(emit, -1);
+    emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR;
+    emit_write_bytecode_byte(emit, MP_BC_YIELD_FROM);
+}
+
+// Bracket an except handler: no opcodes, only bookkeeping of the extra
+// stack slots the handler occupies while active.
+void mp_emit_bc_start_except_handler(emit_t *emit) {
+    mp_emit_bc_adjust_stack_size(emit, 6); // stack adjust for the 3 exception items, +3 for possible UNWIND_JUMP state
+}
+
+void mp_emit_bc_end_except_handler(emit_t *emit) {
+    mp_emit_bc_adjust_stack_size(emit, -5); // stack adjust
+}
+
+// When native emitters exist the bytecode emitter is dispatched through
+// a full virtual method table; otherwise only the three small id-op
+// tables are needed and calls are made directly (saves ROM).
+#if MICROPY_EMIT_NATIVE
+const emit_method_table_t emit_bc_method_table = {
+    mp_emit_bc_set_native_type,
+    mp_emit_bc_start_pass,
+    mp_emit_bc_end_pass,
+    mp_emit_bc_last_emit_was_return_value,
+    mp_emit_bc_adjust_stack_size,
+    mp_emit_bc_set_source_line,
+
+    {
+        mp_emit_bc_load_fast,
+        mp_emit_bc_load_deref,
+        mp_emit_bc_load_name,
+        mp_emit_bc_load_global,
+    },
+    {
+        mp_emit_bc_store_fast,
+        mp_emit_bc_store_deref,
+        mp_emit_bc_store_name,
+        mp_emit_bc_store_global,
+    },
+    {
+        mp_emit_bc_delete_fast,
+        mp_emit_bc_delete_deref,
+        mp_emit_bc_delete_name,
+        mp_emit_bc_delete_global,
+    },
+
+    mp_emit_bc_label_assign,
+    mp_emit_bc_import_name,
+    mp_emit_bc_import_from,
+    mp_emit_bc_import_star,
+    mp_emit_bc_load_const_tok,
+    mp_emit_bc_load_const_small_int,
+    mp_emit_bc_load_const_str,
+    mp_emit_bc_load_const_obj,
+    mp_emit_bc_load_null,
+    mp_emit_bc_load_attr,
+    mp_emit_bc_load_method,
+    mp_emit_bc_load_build_class,
+    mp_emit_bc_load_subscr,
+    mp_emit_bc_store_attr,
+    mp_emit_bc_store_subscr,
+    mp_emit_bc_delete_attr,
+    mp_emit_bc_delete_subscr,
+    mp_emit_bc_dup_top,
+    mp_emit_bc_dup_top_two,
+    mp_emit_bc_pop_top,
+    mp_emit_bc_rot_two,
+    mp_emit_bc_rot_three,
+    mp_emit_bc_jump,
+    mp_emit_bc_pop_jump_if,
+    mp_emit_bc_jump_if_or_pop,
+    // two consecutive slots deliberately share unwind_jump (presumably
+    // the break-loop and continue-loop entries -- confirm against emit.h)
+    mp_emit_bc_unwind_jump,
+    mp_emit_bc_unwind_jump,
+    mp_emit_bc_setup_with,
+    mp_emit_bc_with_cleanup,
+    mp_emit_bc_setup_except,
+    mp_emit_bc_setup_finally,
+    mp_emit_bc_end_finally,
+    mp_emit_bc_get_iter,
+    mp_emit_bc_for_iter,
+    mp_emit_bc_for_iter_end,
+    mp_emit_bc_pop_block,
+    mp_emit_bc_pop_except,
+    mp_emit_bc_unary_op,
+    mp_emit_bc_binary_op,
+    mp_emit_bc_build_tuple,
+    mp_emit_bc_build_list,
+    mp_emit_bc_list_append,
+    mp_emit_bc_build_map,
+    mp_emit_bc_store_map,
+    mp_emit_bc_map_add,
+    #if MICROPY_PY_BUILTINS_SET
+    mp_emit_bc_build_set,
+    mp_emit_bc_set_add,
+    #endif
+    #if MICROPY_PY_BUILTINS_SLICE
+    mp_emit_bc_build_slice,
+    #endif
+    mp_emit_bc_unpack_sequence,
+    mp_emit_bc_unpack_ex,
+    mp_emit_bc_make_function,
+    mp_emit_bc_make_closure,
+    mp_emit_bc_call_function,
+    mp_emit_bc_call_method,
+    mp_emit_bc_return_value,
+    mp_emit_bc_raise_varargs,
+    mp_emit_bc_yield_value,
+    mp_emit_bc_yield_from,
+
+    mp_emit_bc_start_except_handler,
+    mp_emit_bc_end_except_handler,
+};
+#else
+// Dispatch tables for identifier load/store/delete, indexed by the kind
+// of access (fast local, deref cell/free, name, global).
+const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_load_id_ops = {
+    mp_emit_bc_load_fast,
+    mp_emit_bc_load_deref,
+    mp_emit_bc_load_name,
+    mp_emit_bc_load_global,
+};
+
+const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_store_id_ops = {
+    mp_emit_bc_store_fast,
+    mp_emit_bc_store_deref,
+    mp_emit_bc_store_name,
+    mp_emit_bc_store_global,
+};
+
+const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_delete_id_ops = {
+    mp_emit_bc_delete_fast,
+    mp_emit_bc_delete_deref,
+    mp_emit_bc_delete_name,
+    mp_emit_bc_delete_global,
+};
+#endif
+
+#endif //MICROPY_ENABLE_COMPILER
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/emitcommon.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,83 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "py/emit.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+// Resolve an identifier that is being read in the given scope.  If the
+// name is new to this scope, classify it: a local/cell/free variable in
+// an enclosing scope makes it a free variable here (and closes it over
+// in all parent scopes); otherwise it is an implicit global.
+void mp_emit_common_get_id_for_load(scope_t *scope, qstr qst) {
+    // name adding/lookup
+    bool added;
+    id_info_t *id = scope_find_or_add_id(scope, qst, &added);
+    if (added) {
+        id_info_t *id2 = scope_find_local_in_parent(scope, qst);
+        if (id2 != NULL && (id2->kind == ID_INFO_KIND_LOCAL || id2->kind == ID_INFO_KIND_CELL || id2->kind == ID_INFO_KIND_FREE)) {
+            id->kind = ID_INFO_KIND_FREE;
+            scope_close_over_in_parents(scope, qst);
+        } else {
+            id->kind = ID_INFO_KIND_GLOBAL_IMPLICIT;
+        }
+    }
+}
+
+// Resolve an identifier that is being assigned/deleted.  A new name is a
+// global at module/class level and a local inside functions; a name that
+// was previously classified as an implicit global is rebound to a local
+// when assigned inside a function-like scope.
+void mp_emit_common_get_id_for_modification(scope_t *scope, qstr qst) {
+    // name adding/lookup
+    bool added;
+    id_info_t *id = scope_find_or_add_id(scope, qst, &added);
+    if (added) {
+        if (scope->kind == SCOPE_MODULE || scope->kind == SCOPE_CLASS) {
+            id->kind = ID_INFO_KIND_GLOBAL_IMPLICIT;
+        } else {
+            id->kind = ID_INFO_KIND_LOCAL;
+        }
+    } else if (scope->kind >= SCOPE_FUNCTION && scope->kind <= SCOPE_GEN_EXPR && id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) {
+        // rebind as a local variable
+        id->kind = ID_INFO_KIND_LOCAL;
+    }
+}
+
+// Emit a load/store/delete for the given identifier by dispatching to
+// the backend routine that matches the identifier's kind.  Assumes the
+// compiler pass is greater than 1, i.e. every identifier is already
+// registered in the scope.
+void mp_emit_common_id_op(emit_t *emit, const mp_emit_method_table_id_ops_t *emit_method_table, scope_t *scope, qstr qst) {
+    id_info_t *id = scope_find(scope, qst);
+    assert(id != NULL);
+
+    switch (id->kind) {
+        case ID_INFO_KIND_GLOBAL_IMPLICIT:
+            emit_method_table->name(emit, qst);
+            break;
+        case ID_INFO_KIND_GLOBAL_EXPLICIT:
+            emit_method_table->global(emit, qst);
+            break;
+        case ID_INFO_KIND_LOCAL:
+            emit_method_table->fast(emit, qst, id->local_num);
+            break;
+        default:
+            // remaining kinds are closed-over variables
+            assert(id->kind == ID_INFO_KIND_CELL || id->kind == ID_INFO_KIND_FREE);
+            emit_method_table->deref(emit, qst, id->local_num);
+            break;
+    }
+}
+
+#endif // MICROPY_ENABLE_COMPILER
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/emitglue.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,663 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+// This code glues the code emitters to the runtime.
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/emitglue.h"
+#include "py/runtime0.h"
+#include "py/bc.h"
+
+#if 0 // print debugging info
+#define DEBUG_PRINT (1)
+#define WRITE_CODE (1)
+#define DEBUG_printf DEBUG_printf
+#define DEBUG_OP_printf(...) DEBUG_printf(__VA_ARGS__)
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#define DEBUG_OP_printf(...) (void)0
+#endif
+
+#if MICROPY_DEBUG_PRINTERS
+mp_uint_t mp_verbose_flag = 0;
+#endif
+
+// Internal representation of a compiled code unit, filled in by one of
+// the mp_emit_glue_assign_* functions and turned into a callable by
+// mp_make_function_from_raw_code.
+struct _mp_raw_code_t {
+    mp_raw_code_kind_t kind : 3;
+    mp_uint_t scope_flags : 7;
+    mp_uint_t n_pos_args : 11;
+    union {
+        struct {
+            const byte *bytecode;
+            const mp_uint_t *const_table;
+            #if MICROPY_PERSISTENT_CODE_SAVE
+            // extra metadata kept only when .mpy saving is enabled
+            mp_uint_t bc_len;
+            uint16_t n_obj;
+            uint16_t n_raw_code;
+            #endif
+        } u_byte;
+        struct {
+            void *fun_data;
+            const mp_uint_t *const_table;
+            mp_uint_t type_sig; // for viper, compressed as 2-bit types; ret is MSB, then arg0, arg1, etc
+        } u_native;
+    } data;
+};
+
+// Allocate a zeroed raw-code object; kind stays MP_CODE_RESERVED until
+// one of the assign functions below populates it.
+mp_raw_code_t *mp_emit_glue_new_raw_code(void) {
+    mp_raw_code_t *rc = m_new0(mp_raw_code_t, 1);
+    rc->kind = MP_CODE_RESERVED;
+    return rc;
+}
+
+// Attach compiled bytecode (plus its constant table and scope flags) to
+// a raw-code object, and optionally dump it for debugging/verbose mode.
+void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, const byte *code, mp_uint_t len,
+    const mp_uint_t *const_table,
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    uint16_t n_obj, uint16_t n_raw_code,
+    #endif
+    mp_uint_t scope_flags) {
+
+    rc->kind = MP_CODE_BYTECODE;
+    rc->scope_flags = scope_flags;
+    rc->data.u_byte.bytecode = code;
+    rc->data.u_byte.const_table = const_table;
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    rc->data.u_byte.bc_len = len;
+    rc->data.u_byte.n_obj = n_obj;
+    rc->data.u_byte.n_raw_code = n_raw_code;
+    #endif
+
+#ifdef DEBUG_PRINT
+    DEBUG_printf("assign byte code: code=%p len=" UINT_FMT " flags=%x\n", code, len, (uint)scope_flags);
+#endif
+#if MICROPY_DEBUG_PRINTERS
+    if (mp_verbose_flag >= 2) {
+        mp_bytecode_print(rc, code, len, const_table);
+    }
+#endif
+}
+
+#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_THUMB
+// Attach native machine code (native/viper/asm) to a raw-code object.
+// fun_len is only used by the debug dump; type_sig is the viper type
+// signature (see struct comment above).
+void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void *fun_data, mp_uint_t fun_len, const mp_uint_t *const_table, mp_uint_t n_pos_args, mp_uint_t scope_flags, mp_uint_t type_sig) {
+    assert(kind == MP_CODE_NATIVE_PY || kind == MP_CODE_NATIVE_VIPER || kind == MP_CODE_NATIVE_ASM);
+    rc->kind = kind;
+    rc->scope_flags = scope_flags;
+    rc->n_pos_args = n_pos_args;
+    rc->data.u_native.fun_data = fun_data;
+    rc->data.u_native.const_table = const_table;
+    rc->data.u_native.type_sig = type_sig;
+
+#ifdef DEBUG_PRINT
+    // hex-dump of the generated machine code, 16 bytes per line
+    DEBUG_printf("assign native: kind=%d fun=%p len=" UINT_FMT " n_pos_args=" UINT_FMT " flags=%x\n", kind, fun_data, fun_len, n_pos_args, (uint)scope_flags);
+    for (mp_uint_t i = 0; i < fun_len; i++) {
+        if (i > 0 && i % 16 == 0) {
+            DEBUG_printf("\n");
+        }
+        DEBUG_printf(" %02x", ((byte*)fun_data)[i]);
+    }
+    DEBUG_printf("\n");
+
+#ifdef WRITE_CODE
+    // NOTE(review): debug-only path; fopen result is not checked -- fwrite
+    // on NULL would crash if "out-code" cannot be created.
+    FILE *fp_write_code = fopen("out-code", "wb");
+    fwrite(fun_data, fun_len, 1, fp_write_code);
+    fclose(fp_write_code);
+#endif
+#else
+    (void)fun_len;
+#endif
+}
+#endif
+
+// Create a callable function object from a raw code object.  def_args /
+// def_kw_args are the default positional (tuple) and keyword (dict)
+// arguments, or MP_OBJ_NULL.  Generator scopes are wrapped so calling
+// them creates a generator instead of executing the body.
+mp_obj_t mp_make_function_from_raw_code(mp_raw_code_t *rc, mp_obj_t def_args, mp_obj_t def_kw_args) {
+    DEBUG_OP_printf("make_function_from_raw_code %p\n", rc);
+    assert(rc != NULL);
+
+    // def_args must be MP_OBJ_NULL or a tuple
+    assert(def_args == MP_OBJ_NULL || MP_OBJ_IS_TYPE(def_args, &mp_type_tuple));
+
+    // def_kw_args must be MP_OBJ_NULL or a dict
+    assert(def_kw_args == MP_OBJ_NULL || MP_OBJ_IS_TYPE(def_kw_args, &mp_type_dict));
+
+    // make the function, depending on the raw code kind
+    mp_obj_t fun;
+    switch (rc->kind) {
+        case MP_CODE_BYTECODE:
+        no_other_choice:
+            fun = mp_obj_new_fun_bc(def_args, def_kw_args, rc->data.u_byte.bytecode, rc->data.u_byte.const_table);
+            break;
+        #if MICROPY_EMIT_NATIVE
+        case MP_CODE_NATIVE_PY:
+            fun = mp_obj_new_fun_native(def_args, def_kw_args, rc->data.u_native.fun_data, rc->data.u_native.const_table);
+            break;
+        case MP_CODE_NATIVE_VIPER:
+            fun = mp_obj_new_fun_viper(rc->n_pos_args, rc->data.u_native.fun_data, rc->data.u_native.type_sig);
+            break;
+        #endif
+        #if MICROPY_EMIT_INLINE_THUMB
+        case MP_CODE_NATIVE_ASM:
+            fun = mp_obj_new_fun_asm(rc->n_pos_args, rc->data.u_native.fun_data, rc->data.u_native.type_sig);
+            break;
+        #endif
+        default:
+            // raw code was never set (this should not happen)
+            assert(0);
+            goto no_other_choice; // to help flow control analysis
+    }
+
+    // check for generator functions and if so wrap in generator object
+    if ((rc->scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0) {
+        fun = mp_obj_new_gen_wrap(fun);
+    }
+
+    return fun;
+}
+
+// Create a closure from a raw code object.  n_closed_over encodes both
+// the count of closed-over cells (low byte) and, in bit 0x100, whether
+// args[0]/args[1] hold the default positional/keyword containers.
+mp_obj_t mp_make_closure_from_raw_code(mp_raw_code_t *rc, mp_uint_t n_closed_over, const mp_obj_t *args) {
+    DEBUG_OP_printf("make_closure_from_raw_code %p " UINT_FMT " %p\n", rc, n_closed_over, args);
+    // make function object
+    mp_obj_t ffun;
+    if (n_closed_over & 0x100) {
+        // default positional and keyword args given
+        ffun = mp_make_function_from_raw_code(rc, args[0], args[1]);
+    } else {
+        // default positional and keyword args not given
+        ffun = mp_make_function_from_raw_code(rc, MP_OBJ_NULL, MP_OBJ_NULL);
+    }
+    // wrap function in closure object
+    // (n_closed_over >> 7) & 2 is 2 exactly when bit 0x100 is set, which
+    // skips the two default-arg entries at the start of args
+    return mp_obj_new_closure(ffun, n_closed_over & 0xff, args + ((n_closed_over >> 7) & 2));
+}
+
+#if MICROPY_PERSISTENT_CODE
+
+#include "py/smallint.h"
+
+// The feature flags byte encodes the compile-time config options that
+// affect the generated bytecode.
+#define MPY_FEATURE_FLAGS ( \
+ ((MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) << 0) \
+ | ((MICROPY_PY_BUILTINS_STR_UNICODE) << 1) \
+ )
+// This is a version of the flags that can be configured at runtime.
+#define MPY_FEATURE_FLAGS_DYNAMIC ( \
+ ((MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) << 0) \
+ | ((MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC) << 1) \
+ )
+
+#if MICROPY_PERSISTENT_CODE_LOAD || (MICROPY_PERSISTENT_CODE_SAVE && !MICROPY_DYNAMIC_COMPILER)
+// The bytecode will depend on the number of bits in a small-int, and
+// this function computes that (could make it a fixed constant, but it
+// would need to be defined in mpconfigport.h).
+STATIC int mp_small_int_bits(void) {
+    mp_int_t i = MP_SMALL_INT_MAX;
+    int n = 1;
+    while (i != 0) {
+        i >>= 1;
+        ++n;
+    }
+    return n;
+}
+#endif
+
+// Decoded form of the fixed-size header at the start of each bytecode
+// block (see extract_prelude below for the on-the-wire layout).
+typedef struct _bytecode_prelude_t {
+    uint n_state;
+    uint n_exc_stack;
+    uint scope_flags;
+    uint n_pos_args;
+    uint n_kwonly_args;
+    uint n_def_pos_args;
+    uint code_info_size;
+} bytecode_prelude_t;
+
+// ip will point to start of opcodes
+// ip2 will point to simple_name, source_file qstrs
+STATIC void extract_prelude(const byte **ip, const byte **ip2, bytecode_prelude_t *prelude) {
+    // two varuints followed by four single bytes
+    prelude->n_state = mp_decode_uint(ip);
+    prelude->n_exc_stack = mp_decode_uint(ip);
+    prelude->scope_flags = *(*ip)++;
+    prelude->n_pos_args = *(*ip)++;
+    prelude->n_kwonly_args = *(*ip)++;
+    prelude->n_def_pos_args = *(*ip)++;
+    *ip2 = *ip;
+    prelude->code_info_size = mp_decode_uint(ip2);
+    *ip += prelude->code_info_size;
+    // skip the trailing byte list terminated by 255 (presumably the
+    // cell-variable initialisation list -- confirm against vm/bc docs)
+    while (*(*ip)++ != 255) {
+    }
+}
+
+#endif // MICROPY_PERSISTENT_CODE
+
+#if MICROPY_PERSISTENT_CODE_LOAD
+
+#include "py/parsenum.h"
+#include "py/bc0.h"
+
+// Read one byte from the reader (may return (mp_uint_t)-1 on EOF,
+// depending on the underlying read_byte implementation).
+STATIC int read_byte(mp_reader_t *reader) {
+    return reader->read_byte(reader->data);
+}
+
+// Read exactly len bytes into buf.
+STATIC void read_bytes(mp_reader_t *reader, byte *buf, size_t len) {
+    while (len-- > 0) {
+        *buf++ = reader->read_byte(reader->data);
+    }
+}
+
+// Decode a variable-length unsigned integer: 7 bits per byte, most
+// significant group first, with bit 0x80 set on every byte except the
+// last (the same encoding mp_print_uint writes).
+STATIC mp_uint_t read_uint(mp_reader_t *reader) {
+    mp_uint_t value = 0;
+    byte b;
+    do {
+        b = reader->read_byte(reader->data);
+        value = (value << 7) | (b & 0x7f);
+    } while ((b & 0x80) != 0);
+    return value;
+}
+
+// Read a length-prefixed string and intern it as a qstr; the temporary
+// buffer is freed because qstr_from_strn keeps its own copy.
+STATIC qstr load_qstr(mp_reader_t *reader) {
+    mp_uint_t len = read_uint(reader);
+    char *str = m_new(char, len);
+    read_bytes(reader, (byte*)str, len);
+    qstr qst = qstr_from_strn(str, len);
+    m_del(char, str, len);
+    return qst;
+}
+
+// Load one constant object.  Type tag byte: 'e' = Ellipsis, 's' = str,
+// 'b' = bytes, 'i' = int, 'f' = float, 'c' = complex; all but 'e' are
+// followed by a length-prefixed textual payload.
+STATIC mp_obj_t load_obj(mp_reader_t *reader) {
+    byte obj_type = read_byte(reader);
+    if (obj_type == 'e') {
+        return MP_OBJ_FROM_PTR(&mp_const_ellipsis_obj);
+    } else {
+        size_t len = read_uint(reader);
+        vstr_t vstr;
+        vstr_init_len(&vstr, len);
+        read_bytes(reader, (byte*)vstr.buf, len);
+        if (obj_type == 's' || obj_type == 'b') {
+            return mp_obj_new_str_from_vstr(obj_type == 's' ? &mp_type_str : &mp_type_bytes, &vstr);
+        } else if (obj_type == 'i') {
+            return mp_parse_num_integer(vstr.buf, vstr.len, 10, NULL);
+        } else {
+            assert(obj_type == 'f' || obj_type == 'c');
+            return mp_parse_num_decimal(vstr.buf, vstr.len, obj_type == 'c', false, NULL);
+        }
+    }
+}
+
+// Walk the opcodes and, for each opcode carrying a qstr argument, read
+// the qstr from the stream and patch its runtime id into the bytecode
+// (little-endian, in the two bytes after the opcode).
+STATIC void load_bytecode_qstrs(mp_reader_t *reader, byte *ip, byte *ip_top) {
+    while (ip < ip_top) {
+        size_t sz;
+        uint f = mp_opcode_format(ip, &sz);
+        if (f == MP_OPCODE_QSTR) {
+            qstr qst = load_qstr(reader);
+            ip[1] = qst;
+            ip[2] = qst >> 8;
+        }
+        ip += sz;
+    }
+}
+
+// Load one raw code object (recursively loading nested ones referenced
+// from its constant table) and return it.  Layout: bytecode blob, the
+// simple_name/source_file qstrs, per-opcode qstrs, then the constant
+// table (argument-name qstrs, objects, nested raw codes).
+STATIC mp_raw_code_t *load_raw_code(mp_reader_t *reader) {
+    // load bytecode
+    mp_uint_t bc_len = read_uint(reader);
+    byte *bytecode = m_new(byte, bc_len);
+    read_bytes(reader, bytecode, bc_len);
+
+    // extract prelude
+    const byte *ip = bytecode;
+    const byte *ip2;
+    bytecode_prelude_t prelude;
+    extract_prelude(&ip, &ip2, &prelude);
+
+    // load qstrs and link global qstr ids into bytecode
+    qstr simple_name = load_qstr(reader);
+    qstr source_file = load_qstr(reader);
+    // patch the two qstr ids into the code-info header, little-endian
+    ((byte*)ip2)[0] = simple_name; ((byte*)ip2)[1] = simple_name >> 8;
+    ((byte*)ip2)[2] = source_file; ((byte*)ip2)[3] = source_file >> 8;
+    load_bytecode_qstrs(reader, (byte*)ip, bytecode + bc_len);
+
+    // load constant table
+    mp_uint_t n_obj = read_uint(reader);
+    mp_uint_t n_raw_code = read_uint(reader);
+    mp_uint_t *const_table = m_new(mp_uint_t, prelude.n_pos_args + prelude.n_kwonly_args + n_obj + n_raw_code);
+    mp_uint_t *ct = const_table;
+    for (mp_uint_t i = 0; i < prelude.n_pos_args + prelude.n_kwonly_args; ++i) {
+        *ct++ = (mp_uint_t)MP_OBJ_NEW_QSTR(load_qstr(reader));
+    }
+    for (mp_uint_t i = 0; i < n_obj; ++i) {
+        *ct++ = (mp_uint_t)load_obj(reader);
+    }
+    for (mp_uint_t i = 0; i < n_raw_code; ++i) {
+        *ct++ = (mp_uint_t)(uintptr_t)load_raw_code(reader);
+    }
+
+    // create raw_code and return it
+    mp_raw_code_t *rc = mp_emit_glue_new_raw_code();
+    mp_emit_glue_assign_bytecode(rc, bytecode, bc_len, const_table,
+        #if MICROPY_PERSISTENT_CODE_SAVE
+        n_obj, n_raw_code,
+        #endif
+        prelude.scope_flags);
+    return rc;
+}
+
+// Load a .mpy stream.  Header: 'M', version byte (0), feature-flags
+// byte, small-int bit-count byte.  The magic+version are checked
+// together via the 2-byte strncmp; a file produced with more small-int
+// bits than this build supports is rejected.
+mp_raw_code_t *mp_raw_code_load(mp_reader_t *reader) {
+    byte header[4];
+    read_bytes(reader, header, sizeof(header));
+    if (strncmp((char*)header, "M\x00", 2) != 0) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+            "invalid .mpy file"));
+    }
+    if (header[2] != MPY_FEATURE_FLAGS || header[3] > mp_small_int_bits()) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+            "incompatible .mpy file"));
+    }
+    return load_raw_code(reader);
+}
+
+// Simple in-memory reader backend: yields bytes from [cur, end) and
+// (mp_uint_t)-1 once exhausted.
+typedef struct _mp_mem_reader_t {
+    const byte *cur;
+    const byte *end;
+} mp_mem_reader_t;
+
+STATIC mp_uint_t mp_mem_reader_next_byte(void *br_in) {
+    mp_mem_reader_t *br = br_in;
+    if (br->cur < br->end) {
+        return *br->cur++;
+    } else {
+        return (mp_uint_t)-1; // EOF sentinel
+    }
+}
+
+// Load a .mpy image already resident in memory.
+mp_raw_code_t *mp_raw_code_load_mem(const byte *buf, size_t len) {
+    mp_mem_reader_t mr = {buf, buf + len};
+    mp_reader_t reader = {&mr, mp_mem_reader_next_byte};
+    return mp_raw_code_load(&reader);
+}
+
+// here we define mp_raw_code_load_file depending on the port
+// TODO abstract this away properly
+
+#if defined(__i386__) || defined(__x86_64__) || (defined(__arm__) && (defined(__unix__)))
+// unix file reader
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+// Buffered unix file reader: 20-byte read-ahead buffer; len == 0 after
+// a short/failed read marks EOF.
+typedef struct _mp_lexer_file_buf_t {
+    int fd;
+    byte buf[20];
+    mp_uint_t len;
+    mp_uint_t pos;
+} mp_lexer_file_buf_t;
+
+STATIC mp_uint_t file_buf_next_byte(void *fb_in) {
+    mp_lexer_file_buf_t *fb = fb_in;
+    if (fb->pos >= fb->len) {
+        if (fb->len == 0) {
+            // previous refill hit EOF/error
+            return (mp_uint_t)-1;
+        } else {
+            int n = read(fb->fd, fb->buf, sizeof(fb->buf));
+            if (n <= 0) {
+                fb->len = 0;
+                return (mp_uint_t)-1;
+            }
+            fb->len = n;
+            fb->pos = 0;
+        }
+    }
+    return fb->buf[fb->pos++];
+}
+
+// Load a .mpy file by path (unix backend).
+// NOTE(review): open() and the initial read() results are not checked;
+// a missing file yields fd == -1 and a huge fb.len from the negative n,
+// so the loader would see garbage rather than a clean error -- consider
+// adding error handling.
+mp_raw_code_t *mp_raw_code_load_file(const char *filename) {
+    mp_lexer_file_buf_t fb;
+    fb.fd = open(filename, O_RDONLY, 0644);
+    int n = read(fb.fd, fb.buf, sizeof(fb.buf));
+    fb.len = n;
+    fb.pos = 0;
+    mp_reader_t reader;
+    reader.data = &fb;
+    reader.read_byte = file_buf_next_byte;
+    mp_raw_code_t *rc = mp_raw_code_load(&reader);
+    close(fb.fd);
+    return rc;
+}
+
+#elif defined(__thumb2__)
+// fatfs file reader (assume thumb2 arch uses fatfs...)
+
+#include "lib/fatfs/ff.h"
+
+// Buffered FatFs file reader; a previous refill shorter than the buffer
+// means EOF was already reached.
+typedef struct _mp_lexer_file_buf_t {
+    FIL fp;
+    byte buf[20];
+    uint16_t len;
+    uint16_t pos;
+} mp_lexer_file_buf_t;
+
+STATIC mp_uint_t file_buf_next_byte(void *fb_in) {
+    mp_lexer_file_buf_t *fb = fb_in;
+    if (fb->pos >= fb->len) {
+        if (fb->len < sizeof(fb->buf)) {
+            // last fill was short, so the file is exhausted
+            return (mp_uint_t)-1;
+        } else {
+            UINT n;
+            f_read(&fb->fp, fb->buf, sizeof(fb->buf), &n);
+            if (n == 0) {
+                return (mp_uint_t)-1;
+            }
+            fb->len = n;
+            fb->pos = 0;
+        }
+    }
+    return fb->buf[fb->pos++];
+}
+
+// Load a .mpy file by path (FatFs backend).
+// NOTE(review): f_open's FRESULT is deliberately ignored here; a failed
+// open leads to a zero-length read and an "invalid .mpy file" error
+// rather than a file-not-found error.
+mp_raw_code_t *mp_raw_code_load_file(const char *filename) {
+    mp_lexer_file_buf_t fb;
+    /*FRESULT res =*/ f_open(&fb.fp, filename, FA_READ);
+    UINT n;
+    f_read(&fb.fp, fb.buf, sizeof(fb.buf), &n);
+    fb.len = n;
+    fb.pos = 0;
+
+    mp_reader_t reader;
+    reader.data = &fb;
+    reader.read_byte = file_buf_next_byte;
+    mp_raw_code_t *rc = mp_raw_code_load(&reader);
+
+    f_close(&fb.fp);
+
+    return rc;
+}
+
+#endif
+
+#endif // MICROPY_PERSISTENT_CODE_LOAD
+
+#if MICROPY_PERSISTENT_CODE_SAVE
+
+#include "py/objstr.h"
+
+// Write len raw bytes to the given printer.
+STATIC void mp_print_bytes(mp_print_t *print, const byte *data, size_t len) {
+    print->print_strn(print->data, (const char*)data, len);
+}
+
+// Maximum bytes needed to encode a machine word at 7 bits per byte.
+#define BYTES_FOR_INT ((BYTES_PER_WORD * 8 + 6) / 7)
+// Write n as a variable-length unsigned int: 7 bits per byte, most
+// significant group first, continuation bit 0x80 on all but the last
+// byte (the inverse of read_uint above).  Encoded back-to-front into a
+// stack buffer, then emitted in one call.
+STATIC void mp_print_uint(mp_print_t *print, mp_uint_t n) {
+    byte buf[BYTES_FOR_INT];
+    byte *end = buf + sizeof(buf);
+    byte *p = end;
+    *--p = n & 0x7f; // final byte: no continuation bit
+    while ((n >>= 7) != 0) {
+        *--p = 0x80 | (n & 0x7f);
+    }
+    print->print_strn(print->data, (char*)p, end - p);
+}
+
+// Save a qstr as a length-prefixed byte string.
+STATIC void save_qstr(mp_print_t *print, qstr qst) {
+    size_t len;
+    const byte *str = qstr_data(qst, &len);
+    mp_print_uint(print, len);
+    mp_print_bytes(print, str, len);
+}
+
+// Save one constant object using the tag scheme load_obj understands:
+// 's'/'b' length-prefixed text, 'e' Ellipsis, 'i'/'f'/'c' numbers as
+// their repr text.
+STATIC void save_obj(mp_print_t *print, mp_obj_t o) {
+    if (MP_OBJ_IS_STR_OR_BYTES(o)) {
+        byte obj_type;
+        if (MP_OBJ_IS_STR(o)) {
+            obj_type = 's';
+        } else {
+            obj_type = 'b';
+        }
+        mp_uint_t len;
+        const char *str = mp_obj_str_get_data(o, &len);
+        mp_print_bytes(print, &obj_type, 1);
+        mp_print_uint(print, len);
+        mp_print_bytes(print, (const byte*)str, len);
+    } else if (MP_OBJ_TO_PTR(o) == &mp_const_ellipsis_obj) {
+        byte obj_type = 'e';
+        mp_print_bytes(print, &obj_type, 1);
+    } else {
+        // we save numbers using a simplistic text representation
+        // TODO could be improved
+        byte obj_type;
+        if (MP_OBJ_IS_TYPE(o, &mp_type_int)) {
+            obj_type = 'i';
+        } else if (mp_obj_is_float(o)) {
+            obj_type = 'f';
+        } else {
+            assert(MP_OBJ_IS_TYPE(o, &mp_type_complex));
+            obj_type = 'c';
+        }
+        // render the number's repr into a vstr, then emit tag+len+text
+        vstr_t vstr;
+        mp_print_t pr;
+        vstr_init_print(&vstr, 10, &pr);
+        mp_obj_print_helper(&pr, o, PRINT_REPR);
+        mp_print_bytes(print, &obj_type, 1);
+        mp_print_uint(print, vstr.len);
+        mp_print_bytes(print, (const byte*)vstr.buf, vstr.len);
+        vstr_clear(&vstr);
+    }
+}
+
+// Walk the opcodes and save every qstr argument (read little-endian
+// from the two bytes after the opcode) -- the mirror of
+// load_bytecode_qstrs.
+STATIC void save_bytecode_qstrs(mp_print_t *print, const byte *ip, const byte *ip_top) {
+    while (ip < ip_top) {
+        size_t sz;
+        uint f = mp_opcode_format(ip, &sz);
+        if (f == MP_OPCODE_QSTR) {
+            qstr qst = ip[1] | (ip[2] << 8);
+            save_qstr(print, qst);
+        }
+        ip += sz;
+    }
+}
+
+// Save one raw code object (recursing into nested raw codes in its
+// constant table) in the exact order load_raw_code reads it back.
+// Only bytecode can be persisted; native code is rejected.
+STATIC void save_raw_code(mp_print_t *print, mp_raw_code_t *rc) {
+    if (rc->kind != MP_CODE_BYTECODE) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+            "can only save bytecode"));
+    }
+
+    // save bytecode
+    mp_print_uint(print, rc->data.u_byte.bc_len);
+    mp_print_bytes(print, rc->data.u_byte.bytecode, rc->data.u_byte.bc_len);
+
+    // extract prelude
+    const byte *ip = rc->data.u_byte.bytecode;
+    const byte *ip2;
+    bytecode_prelude_t prelude;
+    extract_prelude(&ip, &ip2, &prelude);
+
+    // save qstrs
+    save_qstr(print, ip2[0] | (ip2[1] << 8)); // simple_name
+    save_qstr(print, ip2[2] | (ip2[3] << 8)); // source_file
+    save_bytecode_qstrs(print, ip, rc->data.u_byte.bytecode + rc->data.u_byte.bc_len);
+
+    // save constant table
+    mp_print_uint(print, rc->data.u_byte.n_obj);
+    mp_print_uint(print, rc->data.u_byte.n_raw_code);
+    const mp_uint_t *const_table = rc->data.u_byte.const_table;
+    // first the argument-name qstrs, then objects, then nested raw codes
+    for (uint i = 0; i < prelude.n_pos_args + prelude.n_kwonly_args; ++i) {
+        mp_obj_t o = (mp_obj_t)*const_table++;
+        save_qstr(print, MP_OBJ_QSTR_VALUE(o));
+    }
+    for (uint i = 0; i < rc->data.u_byte.n_obj; ++i) {
+        save_obj(print, (mp_obj_t)*const_table++);
+    }
+    for (uint i = 0; i < rc->data.u_byte.n_raw_code; ++i) {
+        save_raw_code(print, (mp_raw_code_t*)(uintptr_t)*const_table++);
+    }
+}
+
+// Write a complete .mpy stream: 4-byte header then the (recursive)
+// raw-code payload.  Must mirror the checks in mp_raw_code_load.
+void mp_raw_code_save(mp_raw_code_t *rc, mp_print_t *print) {
+    // header contains:
+    //  byte  'M'
+    //  byte  version
+    //  byte  feature flags
+    //  byte  number of bits in a small int
+    byte header[4] = {'M', 0, MPY_FEATURE_FLAGS_DYNAMIC,
+        #if MICROPY_DYNAMIC_COMPILER
+        mp_dynamic_compiler.small_int_bits,
+        #else
+        mp_small_int_bits(),
+        #endif
+    };
+    mp_print_bytes(print, header, sizeof(header));
+
+    save_raw_code(print, rc);
+}
+
+// here we define mp_raw_code_save_file depending on the port
+// TODO abstract this away properly
+
+#if defined(__i386__) || defined(__x86_64__) || (defined(__arm__) && (defined(__unix__)))
+
+#include <unistd.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+// mp_print_t backend that writes to a unix file descriptor smuggled
+// through the env pointer; the write() result is deliberately ignored.
+STATIC void fd_print_strn(void *env, const char *str, size_t len) {
+    int fd = (intptr_t)env;
+    ssize_t ret = write(fd, str, len);
+    (void)ret;
+}
+
+// Save a raw code object to a .mpy file by path.
+// NOTE(review): the open() result is not checked; on failure fd == -1
+// and every write silently fails.
+void mp_raw_code_save_file(mp_raw_code_t *rc, const char *filename) {
+    int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+    mp_print_t fd_print = {(void*)(intptr_t)fd, fd_print_strn};
+    mp_raw_code_save(rc, &fd_print);
+    close(fd);
+}
+
+#else
+#error mp_raw_code_save_file not implemented for this platform
+#endif
+
+#endif // MICROPY_PERSISTENT_CODE_SAVE
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/emitglue.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,74 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_EMITGLUE_H__
+#define __MICROPY_INCLUDED_PY_EMITGLUE_H__
+
+#include "py/obj.h"
+
+// These variables and functions glue the code emitters to the runtime.
+
+// The kind of code held by an mp_raw_code_t.
+typedef enum {
+ MP_CODE_UNUSED,
+ MP_CODE_RESERVED,
+ MP_CODE_BYTECODE,
+ MP_CODE_NATIVE_PY,
+ MP_CODE_NATIVE_VIPER,
+ MP_CODE_NATIVE_ASM,
+} mp_raw_code_kind_t;
+
+// Opaque container for a compiled code object; definition is private.
+typedef struct _mp_raw_code_t mp_raw_code_t;
+
+// Allocate a new, empty raw code object.
+mp_raw_code_t *mp_emit_glue_new_raw_code(void);
+
+// Fill in rc with compiled bytecode and its constant table.
+void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, const byte *code, mp_uint_t len,
+ const mp_uint_t *const_table,
+ #if MICROPY_PERSISTENT_CODE_SAVE
+ uint16_t n_obj, uint16_t n_raw_code,
+ #endif
+ mp_uint_t scope_flags);
+// Fill in rc with natively-compiled function data (machine code).
+void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void *fun_data, mp_uint_t fun_len, const mp_uint_t *const_table, mp_uint_t n_pos_args, mp_uint_t scope_flags, mp_uint_t type_sig);
+
+// Wrap a raw code object as a callable function / closure object.
+mp_obj_t mp_make_function_from_raw_code(mp_raw_code_t *rc, mp_obj_t def_args, mp_obj_t def_kw_args);
+mp_obj_t mp_make_closure_from_raw_code(mp_raw_code_t *rc, mp_uint_t n_closed_over, const mp_obj_t *args);
+
+#if MICROPY_PERSISTENT_CODE_LOAD
+// Byte-stream reader abstraction used when loading persistent (.mpy) code.
+typedef struct _mp_reader_t {
+ void *data; // opaque stream state passed to the callbacks below
+ mp_uint_t (*read_byte)(void *data);
+ void (*close)(void *data);
+} mp_reader_t;
+
+mp_raw_code_t *mp_raw_code_load(mp_reader_t *reader);
+mp_raw_code_t *mp_raw_code_load_mem(const byte *buf, size_t len);
+mp_raw_code_t *mp_raw_code_load_file(const char *filename);
+#endif
+
+#if MICROPY_PERSISTENT_CODE_SAVE
+// Serialise rc (and its children) in .mpy format.
+void mp_raw_code_save(mp_raw_code_t *rc, mp_print_t *print);
+void mp_raw_code_save_file(mp_raw_code_t *rc, const char *filename);
+#endif
+
+#endif // __MICROPY_INCLUDED_PY_EMITGLUE_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/emitinlinethumb.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,830 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <assert.h>
+
+#include "py/emit.h"
+#include "py/asmthumb.h"
+
+#if MICROPY_EMIT_INLINE_THUMB
+
+// Parse-node (grammar rule) kinds; generated from py/grammar.h so the
+// values stay in sync with the parser.
+typedef enum {
+#define DEF_RULE(rule, comp, kind, ...) PN_##rule,
+#include "py/grammar.h"
+#undef DEF_RULE
+ PN_maximum_number_of,
+} pn_kind_t;
+
+// State of the inline Thumb assembler emitter.
+struct _emit_inline_asm_t {
+ uint16_t pass; // current compiler pass (a pass_kind_t value)
+ scope_t *scope;
+ mp_obj_t *error_slot; // where to deposit a SyntaxError on failure
+ mp_uint_t max_num_labels;
+ qstr *label_lookup; // label number -> label qstr, filled on first pass
+ asm_thumb_t *as; // underlying Thumb machine-code assembler
+};
+
+// Record a SyntaxError with a static message in the emitter's error slot.
+STATIC void emit_inline_thumb_error_msg(emit_inline_asm_t *emit, const char *msg) {
+ *emit->error_slot = mp_obj_new_exception_msg(&mp_type_SyntaxError, msg);
+}
+
+// Record an already-constructed exception object in the error slot.
+STATIC void emit_inline_thumb_error_exc(emit_inline_asm_t *emit, mp_obj_t exc) {
+ *emit->error_slot = exc;
+}
+
+// Allocate an inline Thumb emitter able to handle up to max_num_labels labels.
+emit_inline_asm_t *emit_inline_thumb_new(mp_uint_t max_num_labels) {
+ emit_inline_asm_t *emit = m_new_obj(emit_inline_asm_t);
+ emit->max_num_labels = max_num_labels;
+ emit->label_lookup = m_new(qstr, max_num_labels);
+ emit->as = asm_thumb_new(max_num_labels);
+ return emit;
+}
+
+// Free all resources of an emitter created by emit_inline_thumb_new.
+void emit_inline_thumb_free(emit_inline_asm_t *emit) {
+ m_del(qstr, emit->label_lookup, emit->max_num_labels);
+ asm_thumb_free(emit->as, false);
+ m_del_obj(emit_inline_asm_t, emit);
+}
+
+// Begin a compiler pass over the inline-asm function body.
+STATIC void emit_inline_thumb_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, scope_t *scope, mp_obj_t *error_slot) {
+ emit->pass = pass;
+ emit->scope = scope;
+ emit->error_slot = error_slot;
+ if (emit->pass == MP_PASS_CODE_SIZE) {
+ // first pass that records labels: clear the whole label table
+ memset(emit->label_lookup, 0, emit->max_num_labels * sizeof(qstr));
+ }
+ asm_thumb_start_pass(emit->as, pass == MP_PASS_EMIT ? ASM_THUMB_PASS_EMIT : ASM_THUMB_PASS_COMPUTE);
+ asm_thumb_entry(emit->as, 0);
+}
+
+// Finish a pass; on the final (emit) pass hand the generated machine code to
+// the runtime as a native-asm raw code object.
+STATIC void emit_inline_thumb_end_pass(emit_inline_asm_t *emit, mp_uint_t type_sig) {
+ asm_thumb_exit(emit->as);
+ asm_thumb_end_pass(emit->as);
+
+ if (emit->pass == MP_PASS_EMIT) {
+ void *f = asm_thumb_get_code(emit->as);
+ mp_emit_glue_assign_native(emit->scope->raw_code, MP_CODE_NATIVE_ASM, f,
+ asm_thumb_get_code_size(emit->as), NULL, emit->scope->num_pos_args, 0, type_sig);
+ }
+}
+
+// Validate the function's parameter list: at most 4 parameters, which must be
+// the identifiers r0..r3 in sequence (arguments arrive in those registers).
+// Returns the parameter count, or 0 after recording a SyntaxError.
+STATIC mp_uint_t emit_inline_thumb_count_params(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params) {
+ if (n_params > 4) {
+ emit_inline_thumb_error_msg(emit, "can only have up to 4 parameters to Thumb assembly");
+ return 0;
+ }
+ for (mp_uint_t i = 0; i < n_params; i++) {
+ if (!MP_PARSE_NODE_IS_ID(pn_params[i])) {
+ emit_inline_thumb_error_msg(emit, "parameters must be registers in sequence r0 to r3");
+ return 0;
+ }
+ const char *p = qstr_str(MP_PARSE_NODE_LEAF_ARG(pn_params[i]));
+ // parameter i must be exactly the two-character name "r<i>"
+ if (!(strlen(p) == 2 && p[0] == 'r' && p[1] == '0' + i)) {
+ emit_inline_thumb_error_msg(emit, "parameters must be registers in sequence r0 to r3");
+ return 0;
+ }
+ }
+ return n_params;
+}
+
+// Assign label_id to slot label_num and bind it to the current code position.
+// Returns false (duplicate label) if the name was already used; duplicates
+// are only checked on the first counting pass.
+STATIC bool emit_inline_thumb_label(emit_inline_asm_t *emit, mp_uint_t label_num, qstr label_id) {
+ assert(label_num < emit->max_num_labels);
+ if (emit->pass == MP_PASS_CODE_SIZE) {
+ // check for duplicate label on first pass
+ for (uint i = 0; i < emit->max_num_labels; i++) {
+ if (emit->label_lookup[i] == label_id) {
+ return false;
+ }
+ }
+ }
+ emit->label_lookup[label_num] = label_id;
+ asm_thumb_label_assign(emit->as, label_num);
+ return true;
+}
+
+// Emit alignment padding (the align() pseudo-instruction).
+STATIC void emit_inline_thumb_align(emit_inline_asm_t *emit, mp_uint_t align) {
+ asm_thumb_align(emit->as, align);
+}
+
+// Emit a raw data value of the given byte size (the data() pseudo-instruction).
+STATIC void emit_inline_thumb_data(emit_inline_asm_t *emit, mp_uint_t bytesize, mp_uint_t val) {
+ asm_thumb_data(emit->as, bytesize, val);
+}
+
+// Core register names; two-character names are NUL-padded to fill the 3-byte
+// field because get_arg_reg compares all three bytes.
+typedef struct _reg_name_t { byte reg; byte name[3]; } reg_name_t;
+STATIC const reg_name_t reg_name_table[] = {
+ {0, "r0\0"},
+ {1, "r1\0"},
+ {2, "r2\0"},
+ {3, "r3\0"},
+ {4, "r4\0"},
+ {5, "r5\0"},
+ {6, "r6\0"},
+ {7, "r7\0"},
+ {8, "r8\0"},
+ {9, "r9\0"},
+ {10, "r10"},
+ {11, "r11"},
+ {12, "r12"},
+ {13, "r13"},
+ {14, "r14"},
+ {15, "r15"},
+ // conventional aliases for the high registers
+ {10, "sl\0"},
+ {11, "fp\0"},
+ {13, "sp\0"},
+ {14, "lr\0"},
+ {15, "pc\0"},
+};
+
+// Special registers accepted by the mrs instruction; the reg field is the
+// encoding placed in the instruction's low byte — presumably the ARMv7-M
+// SYSm value; TODO confirm against the architecture reference manual.
+#define MAX_SPECIAL_REGISTER_NAME_LENGTH 7
+typedef struct _special_reg_name_t { byte reg; char name[MAX_SPECIAL_REGISTER_NAME_LENGTH + 1]; } special_reg_name_t;
+STATIC const special_reg_name_t special_reg_name_table[] = {
+ {5, "IPSR"},
+ {17, "BASEPRI"},
+};
+
+// Return the identifier text of a parse node, or the empty string if the
+// node is not an identifier, so callers can attempt to parse the string
+// without a special check that it was in fact a string.
+STATIC const char *get_arg_str(mp_parse_node_t pn) {
+ if (MP_PARSE_NODE_IS_ID(pn)) {
+ qstr qst = MP_PARSE_NODE_LEAF_ARG(pn);
+ return qstr_str(qst);
+ } else {
+ return "";
+ }
+}
+
+// Parse pn as a core register name (r0-r15, or alias sl/fp/sp/lr/pc) and
+// return the register number.  Arguments that are not a register, or that
+// name a register above max_reg, record a SyntaxError and return 0.
+STATIC mp_uint_t get_arg_reg(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, mp_uint_t max_reg) {
+ const char *reg_str = get_arg_str(pn);
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(reg_name_table); i++) {
+ // fix: "&reg_name_table" had been mangled to the HTML entity form
+ // "®_name_table" in this capture, which does not compile
+ const reg_name_t *r = &reg_name_table[i];
+ // names are at most 3 chars; require the argument to end by the 4th char
+ if (reg_str[0] == r->name[0]
+ && reg_str[1] == r->name[1]
+ && reg_str[2] == r->name[2]
+ && (reg_str[2] == '\0' || reg_str[3] == '\0')) {
+ if (r->reg > max_reg) {
+ emit_inline_thumb_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ "'%s' expects at most r%d", op, max_reg));
+ return 0;
+ } else {
+ return r->reg;
+ }
+ }
+ }
+ emit_inline_thumb_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ "'%s' expects a register", op));
+ return 0;
+}
+
+// Parse pn as a special register name (see special_reg_name_table) and return
+// its encoding; records a SyntaxError and returns 0 if not recognised.
+STATIC mp_uint_t get_arg_special_reg(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+ const char *reg_str = get_arg_str(pn);
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(special_reg_name_table); i++) {
+ const special_reg_name_t *r = &special_reg_name_table[i];
+ if (strcmp(r->name, reg_str) == 0) {
+ return r->reg;
+ }
+ }
+ emit_inline_thumb_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ "'%s' expects a special register", op));
+ return 0;
+}
+
+#if MICROPY_EMIT_INLINE_THUMB_FLOAT
+// Parse pn as a single-precision VFP register name s0..s31 and return its
+// number; on any error record a SyntaxError and return 0.
+STATIC mp_uint_t get_arg_vfpreg(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+ const char *reg_str = get_arg_str(pn);
+ if (reg_str[0] == 's' && reg_str[1] != '\0') {
+ // parse the decimal register number following the 's'
+ mp_uint_t regno = 0;
+ for (++reg_str; *reg_str; ++reg_str) {
+ mp_uint_t v = *reg_str;
+ if (!('0' <= v && v <= '9')) {
+ goto malformed;
+ }
+ regno = 10 * regno + v - '0';
+ }
+ if (regno > 31) {
+ // NOTE(review): message says "r%d" although these are s-registers
+ emit_inline_thumb_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ "'%s' expects at most r%d", op, 31));
+ return 0;
+ } else {
+ return regno;
+ }
+ }
+malformed:
+ emit_inline_thumb_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ "'%s' expects an FPU register", op));
+ return 0;
+}
+#endif
+
+// Parse pn as a register list and return it as a bitmask (bit n set means
+// register rn is in the list).  Records a SyntaxError and returns 0 on
+// malformed input.
+STATIC mp_uint_t get_arg_reglist(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+ // a register list looks like {r0, r1, r2} and is parsed as a Python set
+
+ if (!MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_atom_brace)) {
+ goto bad_arg;
+ }
+
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+ assert(MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 1); // should always be
+ pn = pns->nodes[0];
+
+ mp_uint_t reglist = 0;
+
+ if (MP_PARSE_NODE_IS_ID(pn)) {
+ // set with one element
+ reglist |= 1 << get_arg_reg(emit, op, pn, 15);
+ } else if (MP_PARSE_NODE_IS_STRUCT(pn)) {
+ pns = (mp_parse_node_struct_t*)pn;
+ if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_dictorsetmaker) {
+ assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should succeed
+ mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t*)pns->nodes[1];
+ if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_dictorsetmaker_list) {
+ // set with multiple elements
+
+ // get first element of set (we rely on get_arg_reg to catch syntax errors)
+ reglist |= 1 << get_arg_reg(emit, op, pns->nodes[0], 15);
+
+ // get tail elements (2nd, 3rd, ...)
+ mp_parse_node_t *nodes;
+ int n = mp_parse_node_extract_list(&pns1->nodes[0], PN_dictorsetmaker_list2, &nodes);
+
+ // process rest of elements
+ for (int i = 0; i < n; i++) {
+ reglist |= 1 << get_arg_reg(emit, op, nodes[i], 15);
+ }
+ } else {
+ goto bad_arg;
+ }
+ } else {
+ goto bad_arg;
+ }
+ } else {
+ goto bad_arg;
+ }
+
+ return reglist;
+
+bad_arg:
+ emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' expects {r0, r1, ...}", op));
+ return 0;
+}
+
+// Parse pn as an integer literal and check that it fits in fit_mask (no bits
+// set outside the mask).  Records a SyntaxError and returns 0 otherwise.
+STATIC uint32_t get_arg_i(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, uint32_t fit_mask) {
+ mp_obj_t o;
+ if (!mp_parse_node_get_int_maybe(pn, &o)) {
+ emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' expects an integer", op));
+ return 0;
+ }
+ uint32_t i = mp_obj_get_int_truncated(o);
+ if ((i & (~fit_mask)) != 0) {
+ emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' integer 0x%x does not fit in mask 0x%x", op, i, fit_mask));
+ return 0;
+ }
+ return i;
+}
+
+// Parse pn as a two-element address expression [base, offset], storing the
+// two sub-nodes in *pn_base and *pn_offset.  Returns false (after recording
+// a SyntaxError) on malformed input.
+STATIC bool get_arg_addr(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, mp_parse_node_t *pn_base, mp_parse_node_t *pn_offset) {
+ if (!MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_atom_bracket)) {
+ goto bad_arg;
+ }
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+ if (!MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp)) {
+ goto bad_arg;
+ }
+ pns = (mp_parse_node_struct_t*)pns->nodes[0];
+ if (MP_PARSE_NODE_STRUCT_NUM_NODES(pns) != 2) {
+ goto bad_arg;
+ }
+
+ *pn_base = pns->nodes[0];
+ *pn_offset = pns->nodes[1];
+ return true;
+
+bad_arg:
+ emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' expects an address of the form [a, b]", op));
+ return false;
+}
+
+// Look up an identifier argument in the label table and return its label
+// number.  A label that is not (yet) defined is only an error on the final
+// pass, since earlier passes may see forward references before assignment.
+STATIC int get_arg_label(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+ if (!MP_PARSE_NODE_IS_ID(pn)) {
+ emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' expects a label", op));
+ return 0;
+ }
+ qstr label_qstr = MP_PARSE_NODE_LEAF_ARG(pn);
+ for (uint i = 0; i < emit->max_num_labels; i++) {
+ if (emit->label_lookup[i] == label_qstr) {
+ return i;
+ }
+ }
+ // only need to have the labels on the last pass
+ if (emit->pass == MP_PASS_EMIT) {
+ emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "label '%q' not defined", label_qstr));
+ }
+ return 0;
+}
+
+// Two-letter condition-code suffixes used by conditional branches and IT.
+typedef struct _cc_name_t { byte cc; byte name[2]; } cc_name_t;
+STATIC const cc_name_t cc_name_table[] = {
+ { ASM_THUMB_CC_EQ, "eq" },
+ { ASM_THUMB_CC_NE, "ne" },
+ { ASM_THUMB_CC_CS, "cs" },
+ { ASM_THUMB_CC_CC, "cc" },
+ { ASM_THUMB_CC_MI, "mi" },
+ { ASM_THUMB_CC_PL, "pl" },
+ { ASM_THUMB_CC_VS, "vs" },
+ { ASM_THUMB_CC_VC, "vc" },
+ { ASM_THUMB_CC_HI, "hi" },
+ { ASM_THUMB_CC_LS, "ls" },
+ { ASM_THUMB_CC_GE, "ge" },
+ { ASM_THUMB_CC_LT, "lt" },
+ { ASM_THUMB_CC_GT, "gt" },
+ { ASM_THUMB_CC_LE, "le" },
+};
+
+// Thumb format-4 (ALU register-register) opcodes, keyed by mnemonic.
+typedef struct _format_4_op_t { byte op; char name[3]; } format_4_op_t;
+#define X(x) (((x) >> 4) & 0xff) // only need 1 byte to distinguish these ops
+STATIC const format_4_op_t format_4_op_table[] = {
+ { X(ASM_THUMB_FORMAT_4_EOR), "eor" },
+ { X(ASM_THUMB_FORMAT_4_LSL), "lsl" },
+ { X(ASM_THUMB_FORMAT_4_LSR), "lsr" },
+ { X(ASM_THUMB_FORMAT_4_ASR), "asr" },
+ { X(ASM_THUMB_FORMAT_4_ADC), "adc" },
+ { X(ASM_THUMB_FORMAT_4_SBC), "sbc" },
+ { X(ASM_THUMB_FORMAT_4_ROR), "ror" },
+ { X(ASM_THUMB_FORMAT_4_TST), "tst" },
+ { X(ASM_THUMB_FORMAT_4_NEG), "neg" },
+ { X(ASM_THUMB_FORMAT_4_CMP), "cmp" },
+ { X(ASM_THUMB_FORMAT_4_CMN), "cmn" },
+ { X(ASM_THUMB_FORMAT_4_ORR), "orr" },
+ { X(ASM_THUMB_FORMAT_4_MUL), "mul" },
+ { X(ASM_THUMB_FORMAT_4_BIC), "bic" },
+ { X(ASM_THUMB_FORMAT_4_MVN), "mvn" },
+};
+#undef X
+
+// Thumb format-9/10 (load/store with immediate offset) opcodes.
+// name is actually a qstr, which should fit in 16 bits
+typedef struct _format_9_10_op_t { uint16_t op; uint16_t name; } format_9_10_op_t;
+#define X(x) (x)
+STATIC const format_9_10_op_t format_9_10_op_table[] = {
+ { X(ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER), MP_QSTR_ldr },
+ { X(ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER), MP_QSTR_ldrb },
+ { X(ASM_THUMB_FORMAT_10_LDRH), MP_QSTR_ldrh },
+ { X(ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_WORD_TRANSFER), MP_QSTR_str },
+ { X(ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER), MP_QSTR_strb },
+ { X(ASM_THUMB_FORMAT_10_STRH), MP_QSTR_strh },
+};
+#undef X
+
+#if MICROPY_EMIT_INLINE_THUMB_FLOAT
+// Three-operand VFP arithmetic mnemonics (used as v<name> in source).
+// actual opcodes are: 0xee00 | op.hi_nibble, 0x0a00 | op.lo_nibble
+typedef struct _format_vfp_op_t { byte op; char name[3]; } format_vfp_op_t;
+STATIC const format_vfp_op_t format_vfp_op_table[] = {
+ { 0x30, "add" },
+ { 0x34, "sub" },
+ { 0x20, "mul" },
+ { 0x80, "div" },
+};
+#endif
+
+// shorthand alias for whether we allow ARMv7-M instructions
+#define ARMV7M MICROPY_EMIT_INLINE_THUMB_ARMV7M
+
+// Assemble one inline-asm instruction: decode the op name (a qstr) and its
+// n_args parse-node arguments, then emit the corresponding Thumb machine
+// code via the asm_thumb_* backend.  Unknown ops and out-of-range branches
+// record a SyntaxError in the emitter's error slot.
+STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args) {
+ // TODO perhaps make two tables:
+ // one_args =
+ // "b", LAB, asm_thumb_b_n,
+ // "bgt", LAB, asm_thumb_bgt_n,
+ // two_args =
+ // "movs", RLO, I8, asm_thumb_movs_reg_i8
+ // "movw", REG, REG, asm_thumb_movw_reg_i16
+ // three_args =
+ // "subs", RLO, RLO, I3, asm_thumb_subs_reg_reg_i3
+
+ size_t op_len;
+ const char *op_str = (const char*)qstr_data(op, &op_len);
+
+ #if MICROPY_EMIT_INLINE_THUMB_FLOAT
+ if (op_str[0] == 'v') {
+ // floating point operations
+ if (n_args == 2) {
+ mp_uint_t op_code = 0x0ac0, op_code_hi;
+ if (op == MP_QSTR_vcmp) {
+ op_code_hi = 0xeeb4;
+ op_vfp_twoargs:;
+ mp_uint_t vd = get_arg_vfpreg(emit, op_str, pn_args[0]);
+ mp_uint_t vm = get_arg_vfpreg(emit, op_str, pn_args[1]);
+ asm_thumb_op32(emit->as,
+ op_code_hi | ((vd & 1) << 6),
+ op_code | ((vd & 0x1e) << 11) | ((vm & 1) << 5) | (vm & 0x1e) >> 1);
+ } else if (op == MP_QSTR_vsqrt) {
+ op_code_hi = 0xeeb1;
+ goto op_vfp_twoargs;
+ } else if (op == MP_QSTR_vneg) {
+ op_code_hi = 0xeeb1;
+ op_code = 0x0a40;
+ goto op_vfp_twoargs;
+ } else if (op == MP_QSTR_vcvt_f32_s32) {
+ op_code_hi = 0xeeb8; // int to float
+ goto op_vfp_twoargs;
+ } else if (op == MP_QSTR_vcvt_s32_f32) {
+ op_code_hi = 0xeebd; // float to int
+ goto op_vfp_twoargs;
+ } else if (op == MP_QSTR_vmrs) {
+ mp_uint_t reg_dest;
+ const char *reg_str0 = get_arg_str(pn_args[0]);
+ if (strcmp(reg_str0, "APSR_nzcv") == 0) {
+ reg_dest = 15;
+ } else {
+ reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+ }
+ const char *reg_str1 = get_arg_str(pn_args[1]);
+ if (strcmp(reg_str1, "FPSCR") == 0) {
+ // FP status to ARM reg
+ asm_thumb_op32(emit->as, 0xeef1, 0x0a10 | (reg_dest << 12));
+ } else {
+ goto unknown_op;
+ }
+ } else if (op == MP_QSTR_vmov) {
+ op_code_hi = 0xee00;
+ mp_uint_t r_arm, vm;
+ const char *reg_str = get_arg_str(pn_args[0]);
+ if (reg_str[0] == 'r') {
+ r_arm = get_arg_reg(emit, op_str, pn_args[0], 15);
+ vm = get_arg_vfpreg(emit, op_str, pn_args[1]);
+ op_code_hi |= 0x10;
+ } else {
+ vm = get_arg_vfpreg(emit, op_str, pn_args[0]);
+ r_arm = get_arg_reg(emit, op_str, pn_args[1], 15);
+ }
+ asm_thumb_op32(emit->as,
+ op_code_hi | ((vm & 0x1e) >> 1),
+ 0x0a10 | (r_arm << 12) | ((vm & 1) << 7));
+ } else if (op == MP_QSTR_vldr) {
+ op_code_hi = 0xed90;
+ op_vldr_vstr:;
+ mp_uint_t vd = get_arg_vfpreg(emit, op_str, pn_args[0]);
+ mp_parse_node_t pn_base, pn_offset;
+ if (get_arg_addr(emit, op_str, pn_args[1], &pn_base, &pn_offset)) {
+ mp_uint_t rlo_base = get_arg_reg(emit, op_str, pn_base, 7);
+ mp_uint_t i8;
+ i8 = get_arg_i(emit, op_str, pn_offset, 0x3fc) >> 2;
+ asm_thumb_op32(emit->as,
+ op_code_hi | rlo_base | ((vd & 1) << 6),
+ 0x0a00 | ((vd & 0x1e) << 11) | i8);
+ }
+ } else if (op == MP_QSTR_vstr) {
+ op_code_hi = 0xed80;
+ goto op_vldr_vstr;
+ } else {
+ goto unknown_op;
+ }
+ } else if (n_args == 3) {
+ // search table for arith ops
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(format_vfp_op_table); i++) {
+ if (strncmp(op_str + 1, format_vfp_op_table[i].name, 3) == 0 && op_str[4] == '\0') {
+ mp_uint_t op_code_hi = 0xee00 | (format_vfp_op_table[i].op & 0xf0);
+ mp_uint_t op_code = 0x0a00 | ((format_vfp_op_table[i].op & 0x0f) << 4);
+ mp_uint_t vd = get_arg_vfpreg(emit, op_str, pn_args[0]);
+ mp_uint_t vn = get_arg_vfpreg(emit, op_str, pn_args[1]);
+ mp_uint_t vm = get_arg_vfpreg(emit, op_str, pn_args[2]);
+ asm_thumb_op32(emit->as,
+ op_code_hi | ((vd & 1) << 6) | (vn >> 1),
+ op_code | (vm >> 1) | ((vm & 1) << 5) | ((vd & 0x1e) << 11) | ((vn & 1) << 7));
+ return;
+ }
+ }
+ goto unknown_op;
+ } else {
+ goto unknown_op;
+ }
+ } else
+ #endif
+ // zero-argument instructions
+ if (n_args == 0) {
+ if (op == MP_QSTR_nop) {
+ asm_thumb_op16(emit->as, ASM_THUMB_OP_NOP);
+ } else if (op == MP_QSTR_wfi) {
+ asm_thumb_op16(emit->as, ASM_THUMB_OP_WFI);
+ } else {
+ goto unknown_op;
+ }
+
+ } else if (n_args == 1) {
+ // one-argument instructions (branches, it blocks, push/pop, ...)
+ if (op == MP_QSTR_b) {
+ int label_num = get_arg_label(emit, op_str, pn_args[0]);
+ if (!asm_thumb_b_n_label(emit->as, label_num)) {
+ goto branch_not_in_range;
+ }
+ } else if (op == MP_QSTR_bl) {
+ int label_num = get_arg_label(emit, op_str, pn_args[0]);
+ if (!asm_thumb_bl_label(emit->as, label_num)) {
+ goto branch_not_in_range;
+ }
+ } else if (op == MP_QSTR_bx) {
+ mp_uint_t r = get_arg_reg(emit, op_str, pn_args[0], 15);
+ asm_thumb_op16(emit->as, 0x4700 | (r << 3));
+ } else if (op_str[0] == 'b' && (op_len == 3
+ || (op_len == 5 && op_str[3] == '_'
+ && (op_str[4] == 'n' || (ARMV7M && op_str[4] == 'w'))))) {
+ mp_uint_t cc = -1;
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(cc_name_table); i++) {
+ if (op_str[1] == cc_name_table[i].name[0] && op_str[2] == cc_name_table[i].name[1]) {
+ cc = cc_name_table[i].cc;
+ }
+ }
+ if (cc == (mp_uint_t)-1) {
+ goto unknown_op;
+ }
+ int label_num = get_arg_label(emit, op_str, pn_args[0]);
+ if (!asm_thumb_bcc_nw_label(emit->as, cc, label_num, op_len == 5 && op_str[4] == 'w')) {
+ goto branch_not_in_range;
+ }
+ } else if (ARMV7M && op_str[0] == 'i' && op_str[1] == 't') {
+ const char *arg_str = get_arg_str(pn_args[0]);
+ mp_uint_t cc = -1;
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(cc_name_table); i++) {
+ if (arg_str[0] == cc_name_table[i].name[0]
+ && arg_str[1] == cc_name_table[i].name[1]
+ && arg_str[2] == '\0') {
+ cc = cc_name_table[i].cc;
+ break;
+ }
+ }
+ if (cc == (mp_uint_t)-1) {
+ goto unknown_op;
+ }
+ const char *os = op_str + 2;
+ while (*os != '\0') {
+ os++;
+ }
+ if (os > op_str + 5) {
+ goto unknown_op;
+ }
+ mp_uint_t it_mask = 8;
+ while (--os >= op_str + 2) {
+ it_mask >>= 1;
+ if (*os == 't') {
+ it_mask |= (cc & 1) << 3;
+ } else if (*os == 'e') {
+ it_mask |= ((~cc) & 1) << 3;
+ } else {
+ goto unknown_op;
+ }
+ }
+ asm_thumb_it_cc(emit->as, cc, it_mask);
+ } else if (op == MP_QSTR_cpsid) {
+ // TODO check pn_args[0] == i
+ asm_thumb_op16(emit->as, ASM_THUMB_OP_CPSID_I);
+ } else if (op == MP_QSTR_cpsie) {
+ // TODO check pn_args[0] == i
+ asm_thumb_op16(emit->as, ASM_THUMB_OP_CPSIE_I);
+ } else if (op == MP_QSTR_push) {
+ mp_uint_t reglist = get_arg_reglist(emit, op_str, pn_args[0]);
+ if ((reglist & 0xff00) == 0) {
+ asm_thumb_op16(emit->as, 0xb400 | reglist);
+ } else {
+ if (!ARMV7M) {
+ goto unknown_op;
+ }
+ asm_thumb_op32(emit->as, 0xe92d, reglist);
+ }
+ } else if (op == MP_QSTR_pop) {
+ mp_uint_t reglist = get_arg_reglist(emit, op_str, pn_args[0]);
+ if ((reglist & 0xff00) == 0) {
+ asm_thumb_op16(emit->as, 0xbc00 | reglist);
+ } else {
+ if (!ARMV7M) {
+ goto unknown_op;
+ }
+ asm_thumb_op32(emit->as, 0xe8bd, reglist);
+ }
+ } else {
+ goto unknown_op;
+ }
+
+ } else if (n_args == 2) {
+ if (MP_PARSE_NODE_IS_ID(pn_args[1])) {
+ // second arg is a register (or should be)
+ mp_uint_t op_code, op_code_hi;
+ if (op == MP_QSTR_mov) {
+ mp_uint_t reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+ mp_uint_t reg_src = get_arg_reg(emit, op_str, pn_args[1], 15);
+ asm_thumb_mov_reg_reg(emit->as, reg_dest, reg_src);
+ } else if (ARMV7M && op == MP_QSTR_clz) {
+ op_code_hi = 0xfab0;
+ op_code = 0xf080;
+ mp_uint_t rd, rm;
+ op_clz_rbit:
+ rd = get_arg_reg(emit, op_str, pn_args[0], 15);
+ rm = get_arg_reg(emit, op_str, pn_args[1], 15);
+ asm_thumb_op32(emit->as, op_code_hi | rm, op_code | (rd << 8) | rm);
+ } else if (ARMV7M && op == MP_QSTR_rbit) {
+ op_code_hi = 0xfa90;
+ op_code = 0xf0a0;
+ goto op_clz_rbit;
+ } else if (ARMV7M && op == MP_QSTR_mrs){
+ mp_uint_t reg_dest = get_arg_reg(emit, op_str, pn_args[0], 12);
+ mp_uint_t reg_src = get_arg_special_reg(emit, op_str, pn_args[1]);
+ asm_thumb_op32(emit->as, 0xf3ef, 0x8000 | (reg_dest << 8) | reg_src);
+ } else {
+ if (op == MP_QSTR_and_) {
+ op_code = ASM_THUMB_FORMAT_4_AND;
+ mp_uint_t reg_dest, reg_src;
+ op_format_4:
+ reg_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+ reg_src = get_arg_reg(emit, op_str, pn_args[1], 7);
+ asm_thumb_format_4(emit->as, op_code, reg_dest, reg_src);
+ return;
+ }
+ // search table for ALU ops
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(format_4_op_table); i++) {
+ if (strncmp(op_str, format_4_op_table[i].name, 3) == 0 && op_str[3] == '\0') {
+ op_code = 0x4000 | (format_4_op_table[i].op << 4);
+ goto op_format_4;
+ }
+ }
+ goto unknown_op;
+ }
+ } else {
+ // second arg is not a register
+ mp_uint_t op_code;
+ if (op == MP_QSTR_mov) {
+ op_code = ASM_THUMB_FORMAT_3_MOV;
+ mp_uint_t rlo_dest, i8_src;
+ op_format_3:
+ rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+ i8_src = get_arg_i(emit, op_str, pn_args[1], 0xff);
+ asm_thumb_format_3(emit->as, op_code, rlo_dest, i8_src);
+ } else if (op == MP_QSTR_cmp) {
+ op_code = ASM_THUMB_FORMAT_3_CMP;
+ goto op_format_3;
+ } else if (op == MP_QSTR_add) {
+ op_code = ASM_THUMB_FORMAT_3_ADD;
+ goto op_format_3;
+ } else if (op == MP_QSTR_sub) {
+ op_code = ASM_THUMB_FORMAT_3_SUB;
+ goto op_format_3;
+ } else if (ARMV7M && op == MP_QSTR_movw) {
+ op_code = ASM_THUMB_OP_MOVW;
+ mp_uint_t reg_dest;
+ op_movw_movt:
+ reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+ int i_src = get_arg_i(emit, op_str, pn_args[1], 0xffff);
+ asm_thumb_mov_reg_i16(emit->as, op_code, reg_dest, i_src);
+ } else if (ARMV7M && op == MP_QSTR_movt) {
+ op_code = ASM_THUMB_OP_MOVT;
+ goto op_movw_movt;
+ } else if (ARMV7M && op == MP_QSTR_movwt) {
+ // this is a convenience instruction
+ mp_uint_t reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+ uint32_t i_src = get_arg_i(emit, op_str, pn_args[1], 0xffffffff);
+ asm_thumb_mov_reg_i16(emit->as, ASM_THUMB_OP_MOVW, reg_dest, i_src & 0xffff);
+ asm_thumb_mov_reg_i16(emit->as, ASM_THUMB_OP_MOVT, reg_dest, (i_src >> 16) & 0xffff);
+ } else if (ARMV7M && op == MP_QSTR_ldrex) {
+ mp_uint_t r_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+ mp_parse_node_t pn_base, pn_offset;
+ if (get_arg_addr(emit, op_str, pn_args[1], &pn_base, &pn_offset)) {
+ mp_uint_t r_base = get_arg_reg(emit, op_str, pn_base, 15);
+ mp_uint_t i8 = get_arg_i(emit, op_str, pn_offset, 0xff) >> 2;
+ asm_thumb_op32(emit->as, 0xe850 | r_base, 0x0f00 | (r_dest << 12) | i8);
+ }
+ } else {
+ // search table for ldr/str instructions
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(format_9_10_op_table); i++) {
+ if (op == format_9_10_op_table[i].name) {
+ op_code = format_9_10_op_table[i].op;
+ mp_parse_node_t pn_base, pn_offset;
+ mp_uint_t rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+ if (get_arg_addr(emit, op_str, pn_args[1], &pn_base, &pn_offset)) {
+ mp_uint_t rlo_base = get_arg_reg(emit, op_str, pn_base, 7);
+ mp_uint_t i5;
+ if (op_code & ASM_THUMB_FORMAT_9_BYTE_TRANSFER) {
+ i5 = get_arg_i(emit, op_str, pn_offset, 0x1f);
+ } else if (op_code & ASM_THUMB_FORMAT_10_STRH) { // also catches LDRH
+ i5 = get_arg_i(emit, op_str, pn_offset, 0x3e) >> 1;
+ } else {
+ i5 = get_arg_i(emit, op_str, pn_offset, 0x7c) >> 2;
+ }
+ asm_thumb_format_9_10(emit->as, op_code, rlo_dest, rlo_base, i5);
+ return;
+ }
+ break;
+ }
+ }
+ goto unknown_op;
+ }
+ }
+
+ } else if (n_args == 3) {
+ mp_uint_t op_code;
+ if (op == MP_QSTR_lsl) {
+ op_code = ASM_THUMB_FORMAT_1_LSL;
+ mp_uint_t rlo_dest, rlo_src, i5;
+ op_format_1:
+ rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+ rlo_src = get_arg_reg(emit, op_str, pn_args[1], 7);
+ i5 = get_arg_i(emit, op_str, pn_args[2], 0x1f);
+ asm_thumb_format_1(emit->as, op_code, rlo_dest, rlo_src, i5);
+ } else if (op == MP_QSTR_lsr) {
+ op_code = ASM_THUMB_FORMAT_1_LSR;
+ goto op_format_1;
+ } else if (op == MP_QSTR_asr) {
+ op_code = ASM_THUMB_FORMAT_1_ASR;
+ goto op_format_1;
+ } else if (op == MP_QSTR_add) {
+ op_code = ASM_THUMB_FORMAT_2_ADD;
+ mp_uint_t rlo_dest, rlo_src;
+ op_format_2:
+ rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+ rlo_src = get_arg_reg(emit, op_str, pn_args[1], 7);
+ int src_b;
+ if (MP_PARSE_NODE_IS_ID(pn_args[2])) {
+ op_code |= ASM_THUMB_FORMAT_2_REG_OPERAND;
+ src_b = get_arg_reg(emit, op_str, pn_args[2], 7);
+ } else {
+ op_code |= ASM_THUMB_FORMAT_2_IMM_OPERAND;
+ src_b = get_arg_i(emit, op_str, pn_args[2], 0x7);
+ }
+ asm_thumb_format_2(emit->as, op_code, rlo_dest, rlo_src, src_b);
+ } else if (ARMV7M && op == MP_QSTR_sdiv) {
+ op_code = 0xfb90; // sdiv high part
+ mp_uint_t rd, rn, rm;
+ op_sdiv_udiv:
+ rd = get_arg_reg(emit, op_str, pn_args[0], 15);
+ rn = get_arg_reg(emit, op_str, pn_args[1], 15);
+ rm = get_arg_reg(emit, op_str, pn_args[2], 15);
+ asm_thumb_op32(emit->as, op_code | rn, 0xf0f0 | (rd << 8) | rm);
+ } else if (ARMV7M && op == MP_QSTR_udiv) {
+ op_code = 0xfbb0; // udiv high part
+ goto op_sdiv_udiv;
+ } else if (op == MP_QSTR_sub) {
+ op_code = ASM_THUMB_FORMAT_2_SUB;
+ goto op_format_2;
+ } else if (ARMV7M && op == MP_QSTR_strex) {
+ mp_uint_t r_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+ mp_uint_t r_src = get_arg_reg(emit, op_str, pn_args[1], 15);
+ mp_parse_node_t pn_base, pn_offset;
+ if (get_arg_addr(emit, op_str, pn_args[2], &pn_base, &pn_offset)) {
+ mp_uint_t r_base = get_arg_reg(emit, op_str, pn_base, 15);
+ mp_uint_t i8 = get_arg_i(emit, op_str, pn_offset, 0xff) >> 2;
+ asm_thumb_op32(emit->as, 0xe840 | r_base, (r_src << 12) | (r_dest << 8) | i8);
+ }
+ } else {
+ goto unknown_op;
+ }
+
+ } else {
+ goto unknown_op;
+ }
+
+ return;
+
+unknown_op:
+ emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "unsupported Thumb instruction '%s' with %d arguments", op_str, n_args));
+ return;
+
+branch_not_in_range:
+ emit_inline_thumb_error_msg(emit, "branch not in range");
+ return;
+}
+
+// Method table binding this emitter into the generic inline-asm interface.
+const emit_inline_asm_method_table_t emit_inline_thumb_method_table = {
+ emit_inline_thumb_start_pass,
+ emit_inline_thumb_end_pass,
+ emit_inline_thumb_count_params,
+ emit_inline_thumb_label,
+ emit_inline_thumb_align,
+ emit_inline_thumb_data,
+ emit_inline_thumb_op,
+};
+
+#endif // MICROPY_EMIT_INLINE_THUMB
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/emitnative.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,2703 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+// Essentially normal Python has 1 type: Python objects
+// Viper has more than 1 type, and is a more complicated language (a superset of Python).
+// If you declare everything in Viper as a Python object (ie omit type decls) then
+// it should in principle be exactly the same as Python native.
+// Having types means having more opcodes, like binary_op_nat_nat, binary_op_nat_obj etc.
+// In practice we won't have a VM but rather do this in asm which is actually very minimal.
+
+// Because it breaks strict Python equivalence it should be a completely separate
+// decorator. It breaks equivalence because overflow on integers wraps around.
+// It shouldn't break equivalence if you don't use the new types, but since the
+// type decls might be used in normal Python for other reasons, it's probably safest,
+// cleanest and clearest to make it a separate decorator.
+
+// Actually, it does break equivalence because integers default to native integers,
+// not Python objects.
+
+// for x in l[0:8]: can be compiled into a native loop if l has pointer type
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/emit.h"
+#include "py/bc.h"
+
+#if 0 // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#endif
+
+// wrapper around everything in this file
+#if (MICROPY_EMIT_X64 && N_X64) \
+ || (MICROPY_EMIT_X86 && N_X86) \
+ || (MICROPY_EMIT_THUMB && N_THUMB) \
+ || (MICROPY_EMIT_ARM && N_ARM)
+
+#if N_X64
+
+// x64 specific stuff
+
+#include "py/asmx64.h"
+
+#define EXPORT_FUN(name) emit_native_x64_##name
+
+#define ASM_WORD_SIZE (8)
+
+#define REG_RET ASM_X64_REG_RAX
+#define REG_ARG_1 ASM_X64_REG_RDI
+#define REG_ARG_2 ASM_X64_REG_RSI
+#define REG_ARG_3 ASM_X64_REG_RDX
+#define REG_ARG_4 ASM_X64_REG_RCX
+#define REG_ARG_5 ASM_X64_REG_R08
+
+// caller-save
+#define REG_TEMP0 ASM_X64_REG_RAX
+#define REG_TEMP1 ASM_X64_REG_RDI
+#define REG_TEMP2 ASM_X64_REG_RSI
+
+// callee-save
+#define REG_LOCAL_1 ASM_X64_REG_RBX
+#define REG_LOCAL_2 ASM_X64_REG_R12
+#define REG_LOCAL_3 ASM_X64_REG_R13
+#define REG_LOCAL_NUM (3)
+
+#define ASM_PASS_COMPUTE ASM_X64_PASS_COMPUTE
+#define ASM_PASS_EMIT ASM_X64_PASS_EMIT
+
+#define ASM_T asm_x64_t
+#define ASM_NEW asm_x64_new
+#define ASM_FREE asm_x64_free
+#define ASM_GET_CODE asm_x64_get_code
+#define ASM_GET_CODE_POS asm_x64_get_code_pos
+#define ASM_GET_CODE_SIZE asm_x64_get_code_size
+#define ASM_START_PASS asm_x64_start_pass
+#define ASM_END_PASS asm_x64_end_pass
+#define ASM_ENTRY asm_x64_entry
+#define ASM_EXIT asm_x64_exit
+
+#define ASM_ALIGN asm_x64_align
+#define ASM_DATA asm_x64_data
+
+#define ASM_LABEL_ASSIGN asm_x64_label_assign
+#define ASM_JUMP asm_x64_jmp_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \
+ do { \
+ asm_x64_test_r8_with_r8(as, reg, reg); \
+ asm_x64_jcc_label(as, ASM_X64_CC_JZ, label); \
+ } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \
+ do { \
+ asm_x64_test_r8_with_r8(as, reg, reg); \
+ asm_x64_jcc_label(as, ASM_X64_CC_JNZ, label); \
+ } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+ do { \
+ asm_x64_cmp_r64_with_r64(as, reg1, reg2); \
+ asm_x64_jcc_label(as, ASM_X64_CC_JE, label); \
+ } while (0)
+#define ASM_CALL_IND(as, ptr, idx) asm_x64_call_ind(as, ptr, ASM_X64_REG_RAX)
+
+#define ASM_MOV_REG_TO_LOCAL asm_x64_mov_r64_to_local
+#define ASM_MOV_IMM_TO_REG asm_x64_mov_i64_to_r64_optimised
+#define ASM_MOV_ALIGNED_IMM_TO_REG asm_x64_mov_i64_to_r64_aligned
+#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \
+ do { \
+ asm_x64_mov_i64_to_r64_optimised(as, (imm), (reg_temp)); \
+ asm_x64_mov_r64_to_local(as, (reg_temp), (local_num)); \
+ } while (false)
+#define ASM_MOV_LOCAL_TO_REG asm_x64_mov_local_to_r64
+#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x64_mov_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_MOV_LOCAL_ADDR_TO_REG asm_x64_mov_local_addr_to_r64
+
+#define ASM_LSL_REG(as, reg) asm_x64_shl_r64_cl((as), (reg))
+#define ASM_ASR_REG(as, reg) asm_x64_sar_r64_cl((as), (reg))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x64_or_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x64_xor_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x64_and_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x64_add_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x64_sub_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x64_mul_r64_r64((as), (reg_dest), (reg_src))
+
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem64_to_r64((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x64_mov_mem64_to_r64((as), (reg_base), 8 * (word_offset), (reg_dest))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem8_to_r64zx((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem16_to_r64zx((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem32_to_r64zx((as), (reg_base), 0, (reg_dest))
+
+#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 0)
+#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 8 * (word_offset))
+#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x64_mov_r8_to_mem8((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x64_mov_r16_to_mem16((as), (reg_src), (reg_base), 0)
+#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_x64_mov_r32_to_mem32((as), (reg_src), (reg_base), 0)
+
+#elif N_X86
+
+// x86 specific stuff
+
+#include "py/asmx86.h"
+
+// Number of arguments taken by each runtime helper (mp_fun_table entry).
+// Needed only on x86, where arguments are passed on the C stack and the
+// caller must know how many to pop after an indirect call (see
+// ASM_CALL_IND below, which passes mp_f_n_args[idx] to asm_x86_call_ind).
+STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
+    [MP_F_CONVERT_OBJ_TO_NATIVE] = 2,
+    [MP_F_CONVERT_NATIVE_TO_OBJ] = 2,
+    [MP_F_LOAD_NAME] = 1,
+    [MP_F_LOAD_GLOBAL] = 1,
+    [MP_F_LOAD_BUILD_CLASS] = 0,
+    [MP_F_LOAD_ATTR] = 2,
+    [MP_F_LOAD_METHOD] = 3,
+    [MP_F_STORE_NAME] = 2,
+    [MP_F_STORE_GLOBAL] = 2,
+    [MP_F_STORE_ATTR] = 3,
+    [MP_F_OBJ_SUBSCR] = 3,
+    [MP_F_OBJ_IS_TRUE] = 1,
+    [MP_F_UNARY_OP] = 2,
+    [MP_F_BINARY_OP] = 3,
+    [MP_F_BUILD_TUPLE] = 2,
+    [MP_F_BUILD_LIST] = 2,
+    [MP_F_LIST_APPEND] = 2,
+    [MP_F_BUILD_MAP] = 1,
+    [MP_F_STORE_MAP] = 3,
+#if MICROPY_PY_BUILTINS_SET
+    [MP_F_BUILD_SET] = 2,
+    [MP_F_STORE_SET] = 2,
+#endif
+    [MP_F_MAKE_FUNCTION_FROM_RAW_CODE] = 3,
+    [MP_F_NATIVE_CALL_FUNCTION_N_KW] = 3,
+    [MP_F_CALL_METHOD_N_KW] = 3,
+    [MP_F_CALL_METHOD_N_KW_VAR] = 3,
+    [MP_F_GETITER] = 1,
+    [MP_F_ITERNEXT] = 1,
+    [MP_F_NLR_PUSH] = 1,
+    [MP_F_NLR_POP] = 0,
+    [MP_F_NATIVE_RAISE] = 1,
+    [MP_F_IMPORT_NAME] = 3,
+    [MP_F_IMPORT_FROM] = 2,
+    [MP_F_IMPORT_ALL] = 1,
+#if MICROPY_PY_BUILTINS_SLICE
+    [MP_F_NEW_SLICE] = 3,
+#endif
+    [MP_F_UNPACK_SEQUENCE] = 3,
+    [MP_F_UNPACK_EX] = 3,
+    [MP_F_DELETE_NAME] = 1,
+    [MP_F_DELETE_GLOBAL] = 1,
+    [MP_F_NEW_CELL] = 1,
+    [MP_F_MAKE_CLOSURE_FROM_RAW_CODE] = 3,
+    [MP_F_SETUP_CODE_STATE] = 5,
+};
+
+#define EXPORT_FUN(name) emit_native_x86_##name
+
+#define ASM_WORD_SIZE (4)
+
+#define REG_RET ASM_X86_REG_EAX
+#define REG_ARG_1 ASM_X86_REG_ARG_1
+#define REG_ARG_2 ASM_X86_REG_ARG_2
+#define REG_ARG_3 ASM_X86_REG_ARG_3
+#define REG_ARG_4 ASM_X86_REG_ARG_4
+#define REG_ARG_5 ASM_X86_REG_ARG_5
+
+// caller-save, so can be used as temporaries
+#define REG_TEMP0 ASM_X86_REG_EAX
+#define REG_TEMP1 ASM_X86_REG_ECX
+#define REG_TEMP2 ASM_X86_REG_EDX
+
+// callee-save, so can be used as locals
+#define REG_LOCAL_1 ASM_X86_REG_EBX
+#define REG_LOCAL_2 ASM_X86_REG_ESI
+#define REG_LOCAL_3 ASM_X86_REG_EDI
+#define REG_LOCAL_NUM (3)
+
+#define ASM_PASS_COMPUTE ASM_X86_PASS_COMPUTE
+#define ASM_PASS_EMIT ASM_X86_PASS_EMIT
+
+#define ASM_T asm_x86_t
+#define ASM_NEW asm_x86_new
+#define ASM_FREE asm_x86_free
+#define ASM_GET_CODE asm_x86_get_code
+#define ASM_GET_CODE_POS asm_x86_get_code_pos
+#define ASM_GET_CODE_SIZE asm_x86_get_code_size
+#define ASM_START_PASS asm_x86_start_pass
+#define ASM_END_PASS asm_x86_end_pass
+#define ASM_ENTRY asm_x86_entry
+#define ASM_EXIT asm_x86_exit
+
+#define ASM_ALIGN asm_x86_align
+#define ASM_DATA asm_x86_data
+
+#define ASM_LABEL_ASSIGN asm_x86_label_assign
+#define ASM_JUMP asm_x86_jmp_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \
+ do { \
+ asm_x86_test_r8_with_r8(as, reg, reg); \
+ asm_x86_jcc_label(as, ASM_X86_CC_JZ, label); \
+ } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \
+ do { \
+ asm_x86_test_r8_with_r8(as, reg, reg); \
+ asm_x86_jcc_label(as, ASM_X86_CC_JNZ, label); \
+ } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+ do { \
+ asm_x86_cmp_r32_with_r32(as, reg1, reg2); \
+ asm_x86_jcc_label(as, ASM_X86_CC_JE, label); \
+ } while (0)
+#define ASM_CALL_IND(as, ptr, idx) asm_x86_call_ind(as, ptr, mp_f_n_args[idx], ASM_X86_REG_EAX)
+
+#define ASM_MOV_REG_TO_LOCAL asm_x86_mov_r32_to_local
+#define ASM_MOV_IMM_TO_REG asm_x86_mov_i32_to_r32
+#define ASM_MOV_ALIGNED_IMM_TO_REG asm_x86_mov_i32_to_r32_aligned
+#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \
+ do { \
+ asm_x86_mov_i32_to_r32(as, (imm), (reg_temp)); \
+ asm_x86_mov_r32_to_local(as, (reg_temp), (local_num)); \
+ } while (false)
+#define ASM_MOV_LOCAL_TO_REG asm_x86_mov_local_to_r32
+#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x86_mov_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_MOV_LOCAL_ADDR_TO_REG asm_x86_mov_local_addr_to_r32
+
+#define ASM_LSL_REG(as, reg) asm_x86_shl_r32_cl((as), (reg))
+#define ASM_ASR_REG(as, reg) asm_x86_sar_r32_cl((as), (reg))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x86_or_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x86_xor_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x86_and_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x86_add_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x86_sub_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x86_mul_r32_r32((as), (reg_dest), (reg_src))
+
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x86_mov_mem32_to_r32((as), (reg_base), 4 * (word_offset), (reg_dest))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem8_to_r32zx((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem16_to_r32zx((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest))
+
+#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 0)
+#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 4 * (word_offset))
+#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x86_mov_r8_to_mem8((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x86_mov_r16_to_mem16((as), (reg_src), (reg_base), 0)
+#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 0)
+
+#elif N_THUMB
+
+// thumb specific stuff
+
+#include "py/asmthumb.h"
+
+#define EXPORT_FUN(name) emit_native_thumb_##name
+
+#define ASM_WORD_SIZE (4)
+
+#define REG_RET ASM_THUMB_REG_R0
+#define REG_ARG_1 ASM_THUMB_REG_R0
+#define REG_ARG_2 ASM_THUMB_REG_R1
+#define REG_ARG_3 ASM_THUMB_REG_R2
+#define REG_ARG_4 ASM_THUMB_REG_R3
+// rest of args go on stack
+
+#define REG_TEMP0 ASM_THUMB_REG_R0
+#define REG_TEMP1 ASM_THUMB_REG_R1
+#define REG_TEMP2 ASM_THUMB_REG_R2
+
+#define REG_LOCAL_1 ASM_THUMB_REG_R4
+#define REG_LOCAL_2 ASM_THUMB_REG_R5
+#define REG_LOCAL_3 ASM_THUMB_REG_R6
+#define REG_LOCAL_NUM (3)
+
+#define ASM_PASS_COMPUTE ASM_THUMB_PASS_COMPUTE
+#define ASM_PASS_EMIT ASM_THUMB_PASS_EMIT
+
+#define ASM_T asm_thumb_t
+#define ASM_NEW asm_thumb_new
+#define ASM_FREE asm_thumb_free
+#define ASM_GET_CODE asm_thumb_get_code
+#define ASM_GET_CODE_POS asm_thumb_get_code_pos
+#define ASM_GET_CODE_SIZE asm_thumb_get_code_size
+#define ASM_START_PASS asm_thumb_start_pass
+#define ASM_END_PASS asm_thumb_end_pass
+#define ASM_ENTRY asm_thumb_entry
+#define ASM_EXIT asm_thumb_exit
+
+#define ASM_ALIGN asm_thumb_align
+#define ASM_DATA asm_thumb_data
+
+#define ASM_LABEL_ASSIGN asm_thumb_label_assign
+#define ASM_JUMP asm_thumb_b_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \
+ do { \
+ asm_thumb_cmp_rlo_i8(as, reg, 0); \
+ asm_thumb_bcc_label(as, ASM_THUMB_CC_EQ, label); \
+ } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \
+ do { \
+ asm_thumb_cmp_rlo_i8(as, reg, 0); \
+ asm_thumb_bcc_label(as, ASM_THUMB_CC_NE, label); \
+ } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+ do { \
+ asm_thumb_cmp_rlo_rlo(as, reg1, reg2); \
+ asm_thumb_bcc_label(as, ASM_THUMB_CC_EQ, label); \
+ } while (0)
+#define ASM_CALL_IND(as, ptr, idx) asm_thumb_bl_ind(as, ptr, idx, ASM_THUMB_REG_R3)
+
+#define ASM_MOV_REG_TO_LOCAL(as, reg, local_num) asm_thumb_mov_local_reg(as, (local_num), (reg))
+#define ASM_MOV_IMM_TO_REG(as, imm, reg) asm_thumb_mov_reg_i32_optimised(as, (reg), (imm))
+#define ASM_MOV_ALIGNED_IMM_TO_REG(as, imm, reg) asm_thumb_mov_reg_i32_aligned(as, (reg), (imm))
+#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \
+ do { \
+ asm_thumb_mov_reg_i32_optimised(as, (reg_temp), (imm)); \
+ asm_thumb_mov_local_reg(as, (local_num), (reg_temp)); \
+ } while (false)
+#define ASM_MOV_LOCAL_TO_REG(as, local_num, reg) asm_thumb_mov_reg_local(as, (reg), (local_num))
+#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_thumb_mov_reg_reg((as), (reg_dest), (reg_src))
+#define ASM_MOV_LOCAL_ADDR_TO_REG(as, local_num, reg) asm_thumb_mov_reg_local_addr(as, (reg), (local_num))
+
+#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_LSL, (reg_dest), (reg_shift))
+#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ASR, (reg_dest), (reg_shift))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ORR, (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_EOR, (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_AND, (reg_dest), (reg_src))
+#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_thumb_add_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_thumb_sub_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_MUL, (reg_dest), (reg_src))
+
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), (word_offset))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrb_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrh_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+
+#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
+#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), (word_offset))
+#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_thumb_strb_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_thumb_strh_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
+#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
+
+#elif N_ARM
+
+// ARM specific stuff
+
+#include "py/asmarm.h"
+
+#define ASM_WORD_SIZE (4)
+
+#define EXPORT_FUN(name) emit_native_arm_##name
+
+#define REG_RET ASM_ARM_REG_R0
+#define REG_ARG_1 ASM_ARM_REG_R0
+#define REG_ARG_2 ASM_ARM_REG_R1
+#define REG_ARG_3 ASM_ARM_REG_R2
+#define REG_ARG_4 ASM_ARM_REG_R3
+
+#define REG_TEMP0 ASM_ARM_REG_R0
+#define REG_TEMP1 ASM_ARM_REG_R1
+#define REG_TEMP2 ASM_ARM_REG_R2
+
+#define REG_LOCAL_1 ASM_ARM_REG_R4
+#define REG_LOCAL_2 ASM_ARM_REG_R5
+#define REG_LOCAL_3 ASM_ARM_REG_R6
+#define REG_LOCAL_NUM (3)
+
+#define ASM_PASS_COMPUTE ASM_ARM_PASS_COMPUTE
+#define ASM_PASS_EMIT ASM_ARM_PASS_EMIT
+
+#define ASM_T asm_arm_t
+#define ASM_NEW asm_arm_new
+#define ASM_FREE asm_arm_free
+#define ASM_GET_CODE asm_arm_get_code
+#define ASM_GET_CODE_POS asm_arm_get_code_pos
+#define ASM_GET_CODE_SIZE asm_arm_get_code_size
+#define ASM_START_PASS asm_arm_start_pass
+#define ASM_END_PASS asm_arm_end_pass
+#define ASM_ENTRY asm_arm_entry
+#define ASM_EXIT asm_arm_exit
+
+#define ASM_ALIGN asm_arm_align
+#define ASM_DATA asm_arm_data
+
+#define ASM_LABEL_ASSIGN asm_arm_label_assign
+#define ASM_JUMP asm_arm_b_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \
+ do { \
+ asm_arm_cmp_reg_i8(as, reg, 0); \
+ asm_arm_bcc_label(as, ASM_ARM_CC_EQ, label); \
+ } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \
+ do { \
+ asm_arm_cmp_reg_i8(as, reg, 0); \
+ asm_arm_bcc_label(as, ASM_ARM_CC_NE, label); \
+ } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+ do { \
+ asm_arm_cmp_reg_reg(as, reg1, reg2); \
+ asm_arm_bcc_label(as, ASM_ARM_CC_EQ, label); \
+ } while (0)
+#define ASM_CALL_IND(as, ptr, idx) asm_arm_bl_ind(as, ptr, idx, ASM_ARM_REG_R3)
+
+#define ASM_MOV_REG_TO_LOCAL(as, reg, local_num) asm_arm_mov_local_reg(as, (local_num), (reg))
+#define ASM_MOV_IMM_TO_REG(as, imm, reg) asm_arm_mov_reg_i32(as, (reg), (imm))
+#define ASM_MOV_ALIGNED_IMM_TO_REG(as, imm, reg) asm_arm_mov_reg_i32(as, (reg), (imm))
+#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \
+ do { \
+ asm_arm_mov_reg_i32(as, (reg_temp), (imm)); \
+ asm_arm_mov_local_reg(as, (local_num), (reg_temp)); \
+ } while (false)
+#define ASM_MOV_LOCAL_TO_REG(as, local_num, reg) asm_arm_mov_reg_local(as, (reg), (local_num))
+#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_arm_mov_reg_reg((as), (reg_dest), (reg_src))
+#define ASM_MOV_LOCAL_ADDR_TO_REG(as, local_num, reg) asm_arm_mov_reg_local_addr(as, (reg), (local_num))
+
+#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_arm_lsl_reg_reg((as), (reg_dest), (reg_shift))
+#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_arm_asr_reg_reg((as), (reg_dest), (reg_shift))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_arm_orr_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_arm_eor_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_arm_and_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_arm_add_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_arm_sub_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_arm_mul_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 4 * (word_offset))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_arm_ldrb_reg_reg((as), (reg_dest), (reg_base))
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_arm_ldrh_reg_reg((as), (reg_dest), (reg_base))
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 0)
+
+#define ASM_STORE_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg((as), (reg_value), (reg_base), 0)
+#define ASM_STORE_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_arm_str_reg_reg((as), (reg_dest), (reg_base), 4 * (word_offset))
+#define ASM_STORE8_REG_REG(as, reg_value, reg_base) asm_arm_strb_reg_reg((as), (reg_value), (reg_base))
+#define ASM_STORE16_REG_REG(as, reg_value, reg_base) asm_arm_strh_reg_reg((as), (reg_value), (reg_base))
+#define ASM_STORE32_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg((as), (reg_value), (reg_base), 0)
+
+#else
+
+#error unknown native emitter
+
+#endif
+
+// Record a ViperTypeError in the emitter's error slot; code generation for
+// the current function may continue, and the stored exception is picked up
+// by the caller of the compiler pass.
+#define EMIT_NATIVE_VIPER_TYPE_ERROR(emit, ...) do { \
+        *emit->error_slot = mp_obj_new_exception_msg_varg(&mp_type_ViperTypeError, __VA_ARGS__); \
+    } while (0)
+
+// Where a value on the emitter's model of the Python stack currently lives.
+typedef enum {
+    STACK_VALUE, // value is stored in its C-stack slot
+    STACK_REG,   // value is held in a machine register (stack_info_t.data.u_reg)
+    STACK_IMM,   // value is a known constant, not yet materialised (data.u_imm)
+} stack_info_kind_t;
+
+// these enums must be distinct and the bottom 4 bits
+// must correspond to the correct MP_NATIVE_TYPE_xxx value
+typedef enum {
+    VTYPE_PYOBJ = 0x00 | MP_NATIVE_TYPE_OBJ,
+    VTYPE_BOOL = 0x00 | MP_NATIVE_TYPE_BOOL,
+    VTYPE_INT = 0x00 | MP_NATIVE_TYPE_INT,
+    VTYPE_UINT = 0x00 | MP_NATIVE_TYPE_UINT,
+    VTYPE_PTR = 0x00 | MP_NATIVE_TYPE_PTR,
+    VTYPE_PTR8 = 0x00 | MP_NATIVE_TYPE_PTR8,
+    VTYPE_PTR16 = 0x00 | MP_NATIVE_TYPE_PTR16,
+    VTYPE_PTR32 = 0x00 | MP_NATIVE_TYPE_PTR32,
+
+    // pointer whose value is None (reported as 'None' by vtype_to_qstr)
+    VTYPE_PTR_NONE = 0x50 | MP_NATIVE_TYPE_PTR,
+
+    // local variable that has not been assigned a value yet
+    VTYPE_UNBOUND = 0x60 | MP_NATIVE_TYPE_OBJ,
+    // NOTE(review): presumably a viper cast function (int/ptr8/...) loaded
+    // as a value -- confirm against the load/call handlers later in the file
+    VTYPE_BUILTIN_CAST = 0x70 | MP_NATIVE_TYPE_OBJ,
+} vtype_kind_t;
+
+// Map a viper type to the qstr of its user-visible name, for use in
+// diagnostic messages.  VTYPE_PTR_NONE and any unrecognised kind report
+// as 'None'.
+STATIC qstr vtype_to_qstr(vtype_kind_t vtype) {
+    if (vtype == VTYPE_PYOBJ) {
+        return MP_QSTR_object;
+    }
+    if (vtype == VTYPE_BOOL) {
+        return MP_QSTR_bool;
+    }
+    if (vtype == VTYPE_INT) {
+        return MP_QSTR_int;
+    }
+    if (vtype == VTYPE_UINT) {
+        return MP_QSTR_uint;
+    }
+    if (vtype == VTYPE_PTR) {
+        return MP_QSTR_ptr;
+    }
+    if (vtype == VTYPE_PTR8) {
+        return MP_QSTR_ptr8;
+    }
+    if (vtype == VTYPE_PTR16) {
+        return MP_QSTR_ptr16;
+    }
+    if (vtype == VTYPE_PTR32) {
+        return MP_QSTR_ptr32;
+    }
+    // VTYPE_PTR_NONE and anything else
+    return MP_QSTR_None;
+}
+
+// Tracking information for one slot of the modelled Python value stack.
+typedef struct _stack_info_t {
+    vtype_kind_t vtype;     // viper type of the value in this slot
+    stack_info_kind_t kind; // where the value currently lives (see stack_info_kind_t)
+    union {
+        int u_reg;      // valid when kind == STACK_REG
+        mp_int_t u_imm; // valid when kind == STACK_IMM
+    } data;
+} stack_info_t;
+
+// State of one native emitter instance.
+struct _emit_t {
+    mp_obj_t *error_slot;   // where pending ViperTypeError objects are stored
+    int pass;               // current compiler pass (MP_PASS_*)
+
+    bool do_viper_types;    // true when viper typing is enabled for this function
+
+    vtype_kind_t return_vtype;  // declared return type (defaults to VTYPE_PYOBJ)
+
+    mp_uint_t local_vtype_alloc;    // number of entries allocated in local_vtype
+    vtype_kind_t *local_vtype;      // viper type of each local variable
+
+    mp_uint_t stack_info_alloc;     // number of entries allocated in stack_info
+    stack_info_t *stack_info;       // model of the Python value stack
+    vtype_kind_t saved_stack_vtype;
+
+    int prelude_offset;     // code offset of the bytecode-style prelude (set in end_pass)
+    int const_table_offset; // code offset of the constant table (set in end_pass)
+    int n_state;            // size of the state array: locals + Python stack
+    int stack_start;        // index of the first Python-stack slot in the C frame
+    int stack_size;         // current depth of the modelled Python stack
+
+    bool last_emit_was_return_value; // used to skip the implicit ASM_EXIT in end_pass
+
+    scope_t *scope;         // scope (function) currently being compiled
+
+    ASM_T *as;              // architecture-specific assembler backend
+};
+
+// Create a new native emitter.  Compilation errors are reported by storing
+// an exception object through error_slot rather than by raising directly.
+emit_t *EXPORT_FUN(new)(mp_obj_t *error_slot, mp_uint_t max_num_labels) {
+    emit_t *emit = m_new0(emit_t, 1);
+    emit->error_slot = error_slot;
+    emit->as = ASM_NEW(max_num_labels);
+    return emit;
+}
+
+// Free an emitter and the memory it allocated (assembler state, type arrays).
+void EXPORT_FUN(free)(emit_t *emit) {
+    // NOTE(review): the second argument to ASM_FREE presumably controls
+    // whether the generated machine code is also freed -- confirm against
+    // the asm_*_free implementations
+    ASM_FREE(emit->as, false);
+    m_del(vtype_kind_t, emit->local_vtype, emit->local_vtype_alloc);
+    m_del(stack_info_t, emit->stack_info, emit->stack_info_alloc);
+    m_del_obj(emit_t, emit);
+}
+
+// Handle a native-type directive: either enable/disable viper typing, or
+// record the declared viper type for the return value or a local variable.
+// Unknown type names store a ViperTypeError via the emitter's error slot.
+STATIC void emit_native_set_native_type(emit_t *emit, mp_uint_t op, mp_uint_t arg1, qstr arg2) {
+    if (op == MP_EMIT_NATIVE_TYPE_ENABLE) {
+        // arg1 is a boolean: turn viper typing on or off
+        emit->do_viper_types = arg1;
+        return;
+    }
+
+    // otherwise arg2 names a viper type; translate it to a vtype
+    vtype_kind_t type;
+    switch (arg2) {
+        case MP_QSTR_object: type = VTYPE_PYOBJ; break;
+        case MP_QSTR_bool: type = VTYPE_BOOL; break;
+        case MP_QSTR_int: type = VTYPE_INT; break;
+        case MP_QSTR_uint: type = VTYPE_UINT; break;
+        case MP_QSTR_ptr: type = VTYPE_PTR; break;
+        case MP_QSTR_ptr8: type = VTYPE_PTR8; break;
+        case MP_QSTR_ptr16: type = VTYPE_PTR16; break;
+        case MP_QSTR_ptr32: type = VTYPE_PTR32; break;
+        default: EMIT_NATIVE_VIPER_TYPE_ERROR(emit, "unknown type '%q'", arg2); return;
+    }
+
+    if (op == MP_EMIT_NATIVE_TYPE_RETURN) {
+        emit->return_vtype = type;
+    } else {
+        // otherwise arg1 is the local variable number to type
+        assert(arg1 < emit->local_vtype_alloc);
+        emit->local_vtype[arg1] = type;
+    }
+}
+
+STATIC void emit_pre_pop_reg(emit_t *emit, vtype_kind_t *vtype, int reg_dest);
+STATIC void emit_post_push_reg(emit_t *emit, vtype_kind_t vtype, int reg);
+STATIC void emit_native_load_fast(emit_t *emit, qstr qst, mp_uint_t local_num);
+STATIC void emit_native_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num);
+
+// Number of machine words occupied by the mp_code_state header; the state
+// array (locals + Python stack) starts at this offset in the C stack frame.
+#define STATE_START (sizeof(mp_code_state) / sizeof(mp_uint_t))
+
+// Begin one compiler pass over a scope (function).  Resets per-pass state,
+// (re)allocates the type-tracking arrays, then emits the function entry
+// code: either a lightweight viper entry that keeps the first locals in
+// registers, or a full bytecode-compatible entry that builds an
+// mp_code_state on the C stack and calls mp_setup_code_state.
+STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) {
+    DEBUG_printf("start_pass(pass=%u, scope=%p)\n", pass, scope);
+
+    emit->pass = pass;
+    emit->stack_start = 0;
+    emit->stack_size = 0;
+    emit->last_emit_was_return_value = false;
+    emit->scope = scope;
+
+    // allocate memory for keeping track of the types of locals
+    if (emit->local_vtype_alloc < scope->num_locals) {
+        emit->local_vtype = m_renew(vtype_kind_t, emit->local_vtype, emit->local_vtype_alloc, scope->num_locals);
+        emit->local_vtype_alloc = scope->num_locals;
+    }
+
+    // allocate memory for keeping track of the objects on the stack
+    // XXX don't know stack size on entry, and it should be maximum over all scopes
+    // XXX this is such a big hack and really needs to be fixed
+    if (emit->stack_info == NULL) {
+        emit->stack_info_alloc = scope->stack_size + 200;
+        emit->stack_info = m_new(stack_info_t, emit->stack_info_alloc);
+    }
+
+    // set default type for return
+    emit->return_vtype = VTYPE_PYOBJ;
+
+    // set default type for arguments
+    mp_uint_t num_args = emit->scope->num_pos_args + emit->scope->num_kwonly_args;
+    if (scope->scope_flags & MP_SCOPE_FLAG_VARARGS) {
+        num_args += 1;
+    }
+    if (scope->scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) {
+        num_args += 1;
+    }
+    for (mp_uint_t i = 0; i < num_args; i++) {
+        emit->local_vtype[i] = VTYPE_PYOBJ;
+    }
+
+    // local variables begin unbound, and have unknown type
+    for (mp_uint_t i = num_args; i < emit->local_vtype_alloc; i++) {
+        emit->local_vtype[i] = VTYPE_UNBOUND;
+    }
+
+    // values on stack begin unbound
+    for (mp_uint_t i = 0; i < emit->stack_info_alloc; i++) {
+        emit->stack_info[i].kind = STACK_VALUE;
+        emit->stack_info[i].vtype = VTYPE_UNBOUND;
+    }
+
+    ASM_START_PASS(emit->as, pass == MP_PASS_EMIT ? ASM_PASS_EMIT : ASM_PASS_COMPUTE);
+
+    // generate code for entry to function
+
+    if (emit->do_viper_types) {
+
+        // right now we have a restriction of maximum of 4 arguments
+        if (scope->num_pos_args >= 5) {
+            EMIT_NATIVE_VIPER_TYPE_ERROR(emit, "Viper functions don't currently support more than 4 arguments");
+            return;
+        }
+
+        // entry to function
+        int num_locals = 0;
+        if (pass > MP_PASS_SCOPE) {
+            // locals beyond the REG_LOCAL_NUM register-cached ones, plus the
+            // Python stack, get C-stack slots; the stack starts after the locals
+            num_locals = scope->num_locals - REG_LOCAL_NUM;
+            if (num_locals < 0) {
+                num_locals = 0;
+            }
+            emit->stack_start = num_locals;
+            num_locals += scope->stack_size;
+        }
+        ASM_ENTRY(emit->as, num_locals);
+
+        // TODO don't load r7 if we don't need it
+        #if N_THUMB
+        asm_thumb_mov_reg_i32(emit->as, ASM_THUMB_REG_R7, (mp_uint_t)mp_fun_table);
+        #elif N_ARM
+        asm_arm_mov_reg_i32(emit->as, ASM_ARM_REG_R7, (mp_uint_t)mp_fun_table);
+        #endif
+
+        #if N_X86
+        // x86 passes arguments on the C stack: fetch the first three into
+        // the register-cached locals, spill the rest to C-stack slots
+        for (int i = 0; i < scope->num_pos_args; i++) {
+            if (i == 0) {
+                asm_x86_mov_arg_to_r32(emit->as, i, REG_LOCAL_1);
+            } else if (i == 1) {
+                asm_x86_mov_arg_to_r32(emit->as, i, REG_LOCAL_2);
+            } else if (i == 2) {
+                asm_x86_mov_arg_to_r32(emit->as, i, REG_LOCAL_3);
+            } else {
+                asm_x86_mov_arg_to_r32(emit->as, i, REG_TEMP0);
+                asm_x86_mov_r32_to_local(emit->as, REG_TEMP0, i - REG_LOCAL_NUM);
+            }
+        }
+        #else
+        // other targets pass arguments in registers: move them into the
+        // register-cached locals (4th arg goes to a C-stack slot)
+        for (int i = 0; i < scope->num_pos_args; i++) {
+            if (i == 0) {
+                ASM_MOV_REG_REG(emit->as, REG_LOCAL_1, REG_ARG_1);
+            } else if (i == 1) {
+                ASM_MOV_REG_REG(emit->as, REG_LOCAL_2, REG_ARG_2);
+            } else if (i == 2) {
+                ASM_MOV_REG_REG(emit->as, REG_LOCAL_3, REG_ARG_3);
+            } else if (i == 3) {
+                ASM_MOV_REG_TO_LOCAL(emit->as, REG_ARG_4, i - REG_LOCAL_NUM);
+            } else {
+                // TODO not implemented
+                assert(0);
+            }
+        }
+        #endif
+
+    } else {
+        // work out size of state (locals plus stack)
+        emit->n_state = scope->num_locals + scope->stack_size;
+
+        // allocate space on C-stack for code_state structure, which includes state
+        ASM_ENTRY(emit->as, STATE_START + emit->n_state);
+
+        // TODO don't load r7 if we don't need it
+        #if N_THUMB
+        asm_thumb_mov_reg_i32(emit->as, ASM_THUMB_REG_R7, (mp_uint_t)mp_fun_table);
+        #elif N_ARM
+        asm_arm_mov_reg_i32(emit->as, ASM_ARM_REG_R7, (mp_uint_t)mp_fun_table);
+        #endif
+
+        // prepare incoming arguments for call to mp_setup_code_state
+        #if N_X86
+        asm_x86_mov_arg_to_r32(emit->as, 0, REG_ARG_2);
+        asm_x86_mov_arg_to_r32(emit->as, 1, REG_ARG_3);
+        asm_x86_mov_arg_to_r32(emit->as, 2, REG_ARG_4);
+        asm_x86_mov_arg_to_r32(emit->as, 3, REG_ARG_5);
+        #else
+        // shift args up one position (arg1 slot is needed for &code_state);
+        // order matters: shift from last to first so nothing is clobbered
+        #if N_THUMB
+        ASM_MOV_REG_REG(emit->as, ASM_THUMB_REG_R4, REG_ARG_4);
+        #elif N_ARM
+        ASM_MOV_REG_REG(emit->as, ASM_ARM_REG_R4, REG_ARG_4);
+        #else
+        ASM_MOV_REG_REG(emit->as, REG_ARG_5, REG_ARG_4);
+        #endif
+        ASM_MOV_REG_REG(emit->as, REG_ARG_4, REG_ARG_3);
+        ASM_MOV_REG_REG(emit->as, REG_ARG_3, REG_ARG_2);
+        ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_ARG_1);
+        #endif
+
+        // set code_state.ip (offset from start of this function to prelude info)
+        // XXX this encoding may change size
+        ASM_MOV_IMM_TO_LOCAL_USING(emit->as, emit->prelude_offset, offsetof(mp_code_state, ip) / sizeof(mp_uint_t), REG_ARG_1);
+
+        // set code_state.n_state
+        ASM_MOV_IMM_TO_LOCAL_USING(emit->as, emit->n_state, offsetof(mp_code_state, n_state) / sizeof(mp_uint_t), REG_ARG_1);
+
+        // put address of code_state into first arg
+        ASM_MOV_LOCAL_ADDR_TO_REG(emit->as, 0, REG_ARG_1);
+
+        // call mp_setup_code_state to prepare code_state structure
+        #if N_THUMB
+        asm_thumb_op16(emit->as, 0xb400 | (1 << ASM_THUMB_REG_R4)); // push 5th arg
+        asm_thumb_bl_ind(emit->as, mp_fun_table[MP_F_SETUP_CODE_STATE], MP_F_SETUP_CODE_STATE, ASM_THUMB_REG_R4);
+        asm_thumb_op16(emit->as, 0xbc00 | (1 << REG_RET)); // pop dummy (was 5th arg)
+        #elif N_ARM
+        asm_arm_push(emit->as, 1 << ASM_ARM_REG_R4); // push 5th arg
+        asm_arm_bl_ind(emit->as, mp_fun_table[MP_F_SETUP_CODE_STATE], MP_F_SETUP_CODE_STATE, ASM_ARM_REG_R4);
+        asm_arm_pop(emit->as, 1 << REG_RET); // pop dummy (was 5th arg)
+        #else
+        ASM_CALL_IND(emit->as, mp_fun_table[MP_F_SETUP_CODE_STATE], MP_F_SETUP_CODE_STATE);
+        #endif
+
+        // cache some locals in registers
+        if (scope->num_locals > 0) {
+            ASM_MOV_LOCAL_TO_REG(emit->as, STATE_START + emit->n_state - 1 - 0, REG_LOCAL_1);
+            if (scope->num_locals > 1) {
+                ASM_MOV_LOCAL_TO_REG(emit->as, STATE_START + emit->n_state - 1 - 1, REG_LOCAL_2);
+                if (scope->num_locals > 2) {
+                    ASM_MOV_LOCAL_TO_REG(emit->as, STATE_START + emit->n_state - 1 - 2, REG_LOCAL_3);
+                }
+            }
+        }
+
+        // set the type of closed over variables
+        for (mp_uint_t i = 0; i < scope->id_info_len; i++) {
+            id_info_t *id = &scope->id_info[i];
+            if (id->kind == ID_INFO_KIND_CELL) {
+                emit->local_vtype[id->local_num] = VTYPE_PYOBJ;
+            }
+        }
+    }
+
+}
+
+// Called at the end of each compiler pass: emits the function epilogue,
+// appends a bytecode-style prelude and the constant table (non-viper code
+// only), and on the final pass hands the finished machine code to the
+// emit glue for assignment to the scope's raw_code.
+STATIC void emit_native_end_pass(emit_t *emit) {
+    // if the last opcode was not an explicit return, emit the exit sequence
+    if (!emit->last_emit_was_return_value) {
+        ASM_EXIT(emit->as);
+    }
+
+    if (!emit->do_viper_types) {
+        // write the function prelude, mirroring the bytecode prelude layout
+        emit->prelude_offset = ASM_GET_CODE_POS(emit->as);
+        ASM_DATA(emit->as, 1, emit->scope->scope_flags);
+        ASM_DATA(emit->as, 1, emit->scope->num_pos_args);
+        ASM_DATA(emit->as, 1, emit->scope->num_kwonly_args);
+        ASM_DATA(emit->as, 1, emit->scope->num_def_pos_args);
+
+        // write code info
+        #if MICROPY_PERSISTENT_CODE
+        // 5-byte code info: length byte then simple_name and source_file qstrs (16-bit each)
+        ASM_DATA(emit->as, 1, 5);
+        ASM_DATA(emit->as, 1, emit->scope->simple_name);
+        ASM_DATA(emit->as, 1, emit->scope->simple_name >> 8);
+        ASM_DATA(emit->as, 1, emit->scope->source_file);
+        ASM_DATA(emit->as, 1, emit->scope->source_file >> 8);
+        #else
+        ASM_DATA(emit->as, 1, 1);
+        #endif
+
+        // bytecode prelude: initialise closed over variables
+        for (int i = 0; i < emit->scope->id_info_len; i++) {
+            id_info_t *id = &emit->scope->id_info[i];
+            if (id->kind == ID_INFO_KIND_CELL) {
+                assert(id->local_num < 255);
+                ASM_DATA(emit->as, 1, id->local_num); // write the local which should be converted to a cell
+            }
+        }
+        ASM_DATA(emit->as, 1, 255); // end of list sentinel
+
+        // constant table must be word-aligned since it holds word-size entries
+        ASM_ALIGN(emit->as, ASM_WORD_SIZE);
+        emit->const_table_offset = ASM_GET_CODE_POS(emit->as);
+
+        // write argument names as qstr objects
+        // see comment in corresponding part of emitbc.c about the logic here
+        for (int i = 0; i < emit->scope->num_pos_args + emit->scope->num_kwonly_args; i++) {
+            qstr qst = MP_QSTR__star_;
+            for (int j = 0; j < emit->scope->id_info_len; ++j) {
+                id_info_t *id = &emit->scope->id_info[j];
+                if ((id->flags & ID_FLAG_IS_PARAM) && id->local_num == i) {
+                    qst = id->qst;
+                    break;
+                }
+            }
+            ASM_DATA(emit->as, ASM_WORD_SIZE, (mp_uint_t)MP_OBJ_NEW_QSTR(qst));
+        }
+
+    }
+
+    ASM_END_PASS(emit->as);
+
+    // check stack is back to zero size
+    if (emit->stack_size != 0) {
+        mp_printf(&mp_plat_print, "ERROR: stack size not back to zero; got %d\n", emit->stack_size);
+    }
+
+    if (emit->pass == MP_PASS_EMIT) {
+        void *f = ASM_GET_CODE(emit->as);
+        mp_uint_t f_len = ASM_GET_CODE_SIZE(emit->as);
+
+        // compute type signature
+        // note that the lower 4 bits of a vtype are the correct MP_NATIVE_TYPE_xxx
+        mp_uint_t type_sig = emit->return_vtype & 0xf;
+        for (mp_uint_t i = 0; i < emit->scope->num_pos_args; i++) {
+            type_sig |= (emit->local_vtype[i] & 0xf) << (i * 4 + 4);
+        }
+
+        mp_emit_glue_assign_native(emit->scope->raw_code,
+            emit->do_viper_types ? MP_CODE_NATIVE_VIPER : MP_CODE_NATIVE_PY,
+            f, f_len, (mp_uint_t*)((byte*)f + emit->const_table_offset),
+            emit->scope->num_pos_args, emit->scope->scope_flags, type_sig);
+    }
+}
+
+// Accessor: true if the most recently emitted opcode was a return-of-value.
+STATIC bool emit_native_last_emit_was_return_value(emit_t *emit) {
+    return emit->last_emit_was_return_value;
+}
+
+// Apply a signed delta to the emitter's model of the Python value stack,
+// and record the high-water mark in scope->stack_size.
+STATIC void adjust_stack(emit_t *emit, mp_int_t stack_size_delta) {
+    assert((mp_int_t)emit->stack_size + stack_size_delta >= 0);
+    emit->stack_size += stack_size_delta;
+    // scope is only valid for tracking the maximum after the scope pass
+    if (emit->pass > MP_PASS_SCOPE && emit->stack_size > emit->scope->stack_size) {
+        emit->scope->stack_size = emit->stack_size;
+    }
+#ifdef DEBUG_PRINT
+    DEBUG_printf(" adjust_stack; stack_size=%d+%d; stack now:", emit->stack_size - stack_size_delta, stack_size_delta);
+    for (int i = 0; i < emit->stack_size; i++) {
+        stack_info_t *si = &emit->stack_info[i];
+        DEBUG_printf(" (v=%d k=%d %d)", si->vtype, si->kind, si->data.u_reg);
+    }
+    DEBUG_printf("\n");
+#endif
+}
+
+// Adjust the modelled stack size by delta, initialising the kind/vtype of
+// any newly-pushed entries (used at jump targets where stack depth changes).
+STATIC void emit_native_adjust_stack_size(emit_t *emit, mp_int_t delta) {
+    DEBUG_printf("adjust_stack_size(" INT_FMT ")\n", delta);
+    // If we are adjusting the stack in a positive direction (pushing) then we
+    // need to fill in values for the stack kind and vtype of the newly-pushed
+    // entries. These should be set to "value" (ie not reg or imm) because we
+    // should only need to adjust the stack due to a jump to this part in the
+    // code (and hence we have settled the stack before the jump).
+    for (mp_int_t i = 0; i < delta; i++) {
+        stack_info_t *si = &emit->stack_info[emit->stack_size + i];
+        si->kind = STACK_VALUE;
+        // TODO we don't know the vtype to use here. At the moment this is a
+        // hack to get the case of multi comparison working.
+        if (delta == 1) {
+            si->vtype = emit->saved_stack_vtype;
+        } else {
+            si->vtype = VTYPE_PYOBJ;
+        }
+    }
+    adjust_stack(emit, delta);
+}
+
+// Source-line tracking is a no-op for the native emitter (no line-number
+// table is generated here).
+STATIC void emit_native_set_source_line(emit_t *emit, mp_uint_t source_line) {
+    (void)emit;
+    (void)source_line;
+}
+
+/*
+STATIC void emit_pre_raw(emit_t *emit, int stack_size_delta) {
+    adjust_stack(emit, stack_size_delta);
+    emit->last_emit_was_return_value = false;
+}
+*/
+
+// this must be called at start of emit functions
+STATIC void emit_native_pre(emit_t *emit) {
+    emit->last_emit_was_return_value = false;
+    // settle the stack
+    /*
+    if (regs_needed != 0) {
+        for (int i = 0; i < emit->stack_size; i++) {
+            switch (emit->stack_info[i].kind) {
+                case STACK_VALUE:
+                    break;
+
+                case STACK_REG:
+                    // TODO only push reg if in regs_needed
+                    emit->stack_info[i].kind = STACK_VALUE;
+                    ASM_MOV_REG_TO_LOCAL(emit->as, emit->stack_info[i].data.u_reg, emit->stack_start + i);
+                    break;
+
+                case STACK_IMM:
+                    // don't think we ever need to push imms for settling
+                    //ASM_MOV_IMM_TO_LOCAL(emit->last_imm, emit->stack_start + i);
+                    break;
+            }
+        }
+    }
+    */
+}
+
+// Return a pointer to the stack_info entry at the given depth, no pop.
+// depth==0 is top, depth==1 is before top, etc
+STATIC stack_info_t *peek_stack(emit_t *emit, mp_uint_t depth) {
+    return &emit->stack_info[emit->stack_size - 1 - depth];
+}
+
+// Return the vtype of the stack entry at the given depth, no pop.
+// depth==0 is top, depth==1 is before top, etc
+STATIC vtype_kind_t peek_vtype(emit_t *emit, mp_uint_t depth) {
+    return peek_stack(emit, depth)->vtype;
+}
+
+// Free up the register reg_needed: any stack entry cached in that register
+// (other than the one at skip_stack_pos) is spilled to its local slot.
+// pos=1 is TOS, pos=2 is next, etc
+// use pos=0 for no skipping
+STATIC void need_reg_single(emit_t *emit, int reg_needed, int skip_stack_pos) {
+    skip_stack_pos = emit->stack_size - skip_stack_pos;
+    for (int i = 0; i < emit->stack_size; i++) {
+        if (i != skip_stack_pos) {
+            stack_info_t *si = &emit->stack_info[i];
+            if (si->kind == STACK_REG && si->data.u_reg == reg_needed) {
+                si->kind = STACK_VALUE;
+                ASM_MOV_REG_TO_LOCAL(emit->as, si->data.u_reg, emit->stack_start + i);
+            }
+        }
+    }
+}
+
+// Free up all registers: every register-cached stack entry is spilled to
+// its local slot (needed before any call that may clobber registers).
+STATIC void need_reg_all(emit_t *emit) {
+    for (int i = 0; i < emit->stack_size; i++) {
+        stack_info_t *si = &emit->stack_info[i];
+        if (si->kind == STACK_REG) {
+            si->kind = STACK_VALUE;
+            ASM_MOV_REG_TO_LOCAL(emit->as, si->data.u_reg, emit->stack_start + i);
+        }
+    }
+}
+
+// Materialise the whole stack into memory: spill register-cached entries,
+// then store immediates, so that every entry lives in its local slot.
+// Required at control-flow merge points (labels / jumps).
+STATIC void need_stack_settled(emit_t *emit) {
+    DEBUG_printf("  need_stack_settled; stack_size=%d\n", emit->stack_size);
+    for (int i = 0; i < emit->stack_size; i++) {
+        stack_info_t *si = &emit->stack_info[i];
+        if (si->kind == STACK_REG) {
+            DEBUG_printf(" reg(%u) to local(%u)\n", si->data.u_reg, emit->stack_start + i);
+            si->kind = STACK_VALUE;
+            ASM_MOV_REG_TO_LOCAL(emit->as, si->data.u_reg, emit->stack_start + i);
+        }
+    }
+    for (int i = 0; i < emit->stack_size; i++) {
+        stack_info_t *si = &emit->stack_info[i];
+        if (si->kind == STACK_IMM) {
+            DEBUG_printf(" imm(" INT_FMT ") to local(%u)\n", si->data.u_imm, emit->stack_start + i);
+            si->kind = STACK_VALUE;
+            ASM_MOV_IMM_TO_LOCAL_USING(emit->as, si->data.u_imm, emit->stack_start + i, REG_TEMP0);
+        }
+    }
+}
+
+// Load the stack entry at the given position into reg_dest (without
+// popping it), returning its vtype through *vtype.
+// pos=1 is TOS, pos=2 is next, etc
+STATIC void emit_access_stack(emit_t *emit, int pos, vtype_kind_t *vtype, int reg_dest) {
+    need_reg_single(emit, reg_dest, pos);
+    stack_info_t *si = &emit->stack_info[emit->stack_size - pos];
+    *vtype = si->vtype;
+    switch (si->kind) {
+        case STACK_VALUE:
+            // entry lives in its local slot; load it
+            ASM_MOV_LOCAL_TO_REG(emit->as, emit->stack_start + emit->stack_size - pos, reg_dest);
+            break;
+
+        case STACK_REG:
+            // entry is already cached in a register; move only if different
+            if (si->data.u_reg != reg_dest) {
+                ASM_MOV_REG_REG(emit->as, reg_dest, si->data.u_reg);
+            }
+            break;
+
+        case STACK_IMM:
+            ASM_MOV_IMM_TO_REG(emit->as, si->data.u_imm, reg_dest);
+            break;
+    }
+}
+
+// does an efficient X=pop(); discard(); push(X)
+// needs a (non-temp) register in case the popped element was stored in the stack
+STATIC void emit_fold_stack_top(emit_t *emit, int reg_dest) {
+    stack_info_t *si = &emit->stack_info[emit->stack_size - 2];
+    si[0] = si[1]; // overwrite the second-from-top entry with the top entry
+    if (si->kind == STACK_VALUE) {
+        // if folded element was on the stack we need to put it in a register
+        ASM_MOV_LOCAL_TO_REG(emit->as, emit->stack_start + emit->stack_size - 1, reg_dest);
+        si->kind = STACK_REG;
+        si->data.u_reg = reg_dest;
+    }
+    adjust_stack(emit, -1);
+}
+
+// If stacked value is in a register and the register is not r1 or r2, then
+// *reg_dest is set to that register. Otherwise the value is put in *reg_dest.
+STATIC void emit_pre_pop_reg_flexible(emit_t *emit, vtype_kind_t *vtype, int *reg_dest, int not_r1, int not_r2) {
+    emit->last_emit_was_return_value = false;
+    stack_info_t *si = peek_stack(emit, 0);
+    if (si->kind == STACK_REG && si->data.u_reg != not_r1 && si->data.u_reg != not_r2) {
+        // TOS is already cached in a usable register; no move needed
+        *vtype = si->vtype;
+        *reg_dest = si->data.u_reg;
+        need_reg_single(emit, *reg_dest, 1);
+    } else {
+        emit_access_stack(emit, 1, vtype, *reg_dest);
+    }
+    adjust_stack(emit, -1);
+}
+
+// Pop the top-of-stack entry, discarding its value.
+STATIC void emit_pre_pop_discard(emit_t *emit) {
+    emit->last_emit_was_return_value = false;
+    adjust_stack(emit, -1);
+}
+
+// Pop the top-of-stack entry into reg_dest, returning its vtype.
+STATIC void emit_pre_pop_reg(emit_t *emit, vtype_kind_t *vtype, int reg_dest) {
+    emit->last_emit_was_return_value = false;
+    emit_access_stack(emit, 1, vtype, reg_dest);
+    adjust_stack(emit, -1);
+}
+
+// Pop two entries: TOS into rega, next into regb.
+STATIC void emit_pre_pop_reg_reg(emit_t *emit, vtype_kind_t *vtypea, int rega, vtype_kind_t *vtypeb, int regb) {
+    emit_pre_pop_reg(emit, vtypea, rega);
+    emit_pre_pop_reg(emit, vtypeb, regb);
+}
+
+// Pop three entries: TOS into rega, next into regb, next into regc.
+STATIC void emit_pre_pop_reg_reg_reg(emit_t *emit, vtype_kind_t *vtypea, int rega, vtype_kind_t *vtypeb, int regb, vtype_kind_t *vtypec, int regc) {
+    emit_pre_pop_reg(emit, vtypea, rega);
+    emit_pre_pop_reg(emit, vtypeb, regb);
+    emit_pre_pop_reg(emit, vtypec, regc);
+}
+
+// Post-opcode hook; currently nothing to do.
+STATIC void emit_post(emit_t *emit) {
+    (void)emit;
+}
+
+// Change the recorded vtype of the top-of-stack entry in place.
+STATIC void emit_post_top_set_vtype(emit_t *emit, vtype_kind_t new_vtype) {
+    stack_info_t *si = &emit->stack_info[emit->stack_size - 1];
+    si->vtype = new_vtype;
+}
+
+// Push a value that currently lives in a register (no code emitted;
+// only the stack model is updated).
+STATIC void emit_post_push_reg(emit_t *emit, vtype_kind_t vtype, int reg) {
+    stack_info_t *si = &emit->stack_info[emit->stack_size];
+    si->vtype = vtype;
+    si->kind = STACK_REG;
+    si->data.u_reg = reg;
+    adjust_stack(emit, 1);
+}
+
+// Push an immediate value (no code emitted; only the stack model is updated).
+STATIC void emit_post_push_imm(emit_t *emit, vtype_kind_t vtype, mp_int_t imm) {
+    stack_info_t *si = &emit->stack_info[emit->stack_size];
+    si->vtype = vtype;
+    si->kind = STACK_IMM;
+    si->data.u_imm = imm;
+    adjust_stack(emit, 1);
+}
+
+// Push two register values: rega first (deeper), regb second (TOS).
+STATIC void emit_post_push_reg_reg(emit_t *emit, vtype_kind_t vtypea, int rega, vtype_kind_t vtypeb, int regb) {
+    emit_post_push_reg(emit, vtypea, rega);
+    emit_post_push_reg(emit, vtypeb, regb);
+}
+
+// Push three register values; regc ends up on TOS.
+STATIC void emit_post_push_reg_reg_reg(emit_t *emit, vtype_kind_t vtypea, int rega, vtype_kind_t vtypeb, int regb, vtype_kind_t vtypec, int regc) {
+    emit_post_push_reg(emit, vtypea, rega);
+    emit_post_push_reg(emit, vtypeb, regb);
+    emit_post_push_reg(emit, vtypec, regc);
+}
+
+// Push four register values; regd ends up on TOS.
+STATIC void emit_post_push_reg_reg_reg_reg(emit_t *emit, vtype_kind_t vtypea, int rega, vtype_kind_t vtypeb, int regb, vtype_kind_t vtypec, int regc, vtype_kind_t vtyped, int regd) {
+    emit_post_push_reg(emit, vtypea, rega);
+    emit_post_push_reg(emit, vtypeb, regb);
+    emit_post_push_reg(emit, vtypec, regc);
+    emit_post_push_reg(emit, vtyped, regd);
+}
+
+// Emit an indirect call to a runtime function from mp_fun_table.
+// All register-cached stack entries are spilled first since the call
+// clobbers registers.
+STATIC void emit_call(emit_t *emit, mp_fun_kind_t fun_kind) {
+    need_reg_all(emit);
+    ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind);
+}
+
+// As emit_call, but first load an immediate into arg_reg.
+STATIC void emit_call_with_imm_arg(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val, int arg_reg) {
+    need_reg_all(emit);
+    ASM_MOV_IMM_TO_REG(emit->as, arg_val, arg_reg);
+    ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind);
+}
+
+// the first arg is stored in the code aligned on a mp_uint_t boundary
+STATIC void emit_call_with_imm_arg_aligned(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val, int arg_reg) {
+    need_reg_all(emit);
+    ASM_MOV_ALIGNED_IMM_TO_REG(emit->as, arg_val, arg_reg);
+    ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind);
+}
+
+// As emit_call, but first load two immediates into their argument registers.
+STATIC void emit_call_with_2_imm_args(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val1, int arg_reg1, mp_int_t arg_val2, int arg_reg2) {
+    need_reg_all(emit);
+    ASM_MOV_IMM_TO_REG(emit->as, arg_val1, arg_reg1);
+    ASM_MOV_IMM_TO_REG(emit->as, arg_val2, arg_reg2);
+    ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind);
+}
+
+// the first arg is stored in the code aligned on a mp_uint_t boundary
+STATIC void emit_call_with_3_imm_args_and_first_aligned(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val1, int arg_reg1, mp_int_t arg_val2, int arg_reg2, mp_int_t arg_val3, int arg_reg3) {
+    need_reg_all(emit);
+    ASM_MOV_ALIGNED_IMM_TO_REG(emit->as, arg_val1, arg_reg1);
+    ASM_MOV_IMM_TO_REG(emit->as, arg_val2, arg_reg2);
+    ASM_MOV_IMM_TO_REG(emit->as, arg_val3, arg_reg3);
+    ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind);
+}
+
+// vtype of all n_pop objects is VTYPE_PYOBJ
+// Will convert any items that are not VTYPE_PYOBJ to this type and put them back on the stack.
+// If any conversions of non-immediate values are needed, then it uses REG_ARG_1, REG_ARG_2 and REG_RET.
+// Otherwise, it does not use any temporary registers (but may use reg_dest before loading it with stack pointer).
+STATIC void emit_get_stack_pointer_to_reg_for_pop(emit_t *emit, mp_uint_t reg_dest, mp_uint_t n_pop) {
+    need_reg_all(emit);
+
+    // First, store any immediate values to their respective place on the stack.
+    for (mp_uint_t i = 0; i < n_pop; i++) {
+        stack_info_t *si = &emit->stack_info[emit->stack_size - 1 - i];
+        // must push any imm's to stack
+        // must convert them to VTYPE_PYOBJ for viper code
+        if (si->kind == STACK_IMM) {
+            si->kind = STACK_VALUE;
+            switch (si->vtype) {
+                case VTYPE_PYOBJ:
+                    ASM_MOV_IMM_TO_LOCAL_USING(emit->as, si->data.u_imm, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
+                    break;
+                case VTYPE_BOOL:
+                    // convert native bool to the True/False object
+                    if (si->data.u_imm == 0) {
+                        ASM_MOV_IMM_TO_LOCAL_USING(emit->as, (mp_uint_t)mp_const_false, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
+                    } else {
+                        ASM_MOV_IMM_TO_LOCAL_USING(emit->as, (mp_uint_t)mp_const_true, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
+                    }
+                    si->vtype = VTYPE_PYOBJ;
+                    break;
+                case VTYPE_INT:
+                case VTYPE_UINT:
+                    // convert native int to a small-int object: (imm << 1) | 1
+                    ASM_MOV_IMM_TO_LOCAL_USING(emit->as, (si->data.u_imm << 1) | 1, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
+                    si->vtype = VTYPE_PYOBJ;
+                    break;
+                default:
+                    // not handled
+                    assert(0);
+            }
+        }
+
+        // verify that this value is on the stack
+        assert(si->kind == STACK_VALUE);
+    }
+
+    // Second, convert any non-VTYPE_PYOBJ to that type.
+    for (mp_uint_t i = 0; i < n_pop; i++) {
+        stack_info_t *si = &emit->stack_info[emit->stack_size - 1 - i];
+        if (si->vtype != VTYPE_PYOBJ) {
+            mp_uint_t local_num = emit->stack_start + emit->stack_size - 1 - i;
+            ASM_MOV_LOCAL_TO_REG(emit->as, local_num, REG_ARG_1);
+            emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, si->vtype, REG_ARG_2); // arg2 = type
+            ASM_MOV_REG_TO_LOCAL(emit->as, REG_RET, local_num);
+            si->vtype = VTYPE_PYOBJ;
+            DEBUG_printf("  convert_native_to_obj(local_num=" UINT_FMT ")\n", local_num);
+        }
+    }
+
+    // Adjust the stack for a pop of n_pop items, and load the stack pointer into reg_dest.
+    adjust_stack(emit, -n_pop);
+    ASM_MOV_LOCAL_ADDR_TO_REG(emit->as, emit->stack_start + emit->stack_size, reg_dest);
+}
+
+// vtype of all n_push objects is VTYPE_PYOBJ
+// Reserve n_push slots on the stack (marked as in-memory PYOBJ values) and
+// load the address of the first reserved slot into reg_dest, so a runtime
+// helper can write results directly into the stack.
+STATIC void emit_get_stack_pointer_to_reg_for_push(emit_t *emit, mp_uint_t reg_dest, mp_uint_t n_push) {
+    need_reg_all(emit);
+    for (mp_uint_t i = 0; i < n_push; i++) {
+        emit->stack_info[emit->stack_size + i].kind = STACK_VALUE;
+        emit->stack_info[emit->stack_size + i].vtype = VTYPE_PYOBJ;
+    }
+    ASM_MOV_LOCAL_ADDR_TO_REG(emit->as, emit->stack_start + emit->stack_size, reg_dest);
+    adjust_stack(emit, n_push);
+}
+
+// Bind label l at the current code position, first settling the stack
+// because control can reach this point from elsewhere.
+STATIC void emit_native_label_assign(emit_t *emit, mp_uint_t l) {
+    DEBUG_printf("label_assign(" UINT_FMT ")\n", l);
+    emit_native_pre(emit);
+    // need to commit stack because we can jump here from elsewhere
+    need_stack_settled(emit);
+    ASM_LABEL_ASSIGN(emit->as, l);
+    emit_post(emit);
+}
+
+// import: pops fromlist and level, calls mp_import_name, pushes the module.
+STATIC void emit_native_import_name(emit_t *emit, qstr qst) {
+    DEBUG_printf("import_name %s\n", qstr_str(qst));
+    vtype_kind_t vtype_fromlist;
+    vtype_kind_t vtype_level;
+    emit_pre_pop_reg_reg(emit, &vtype_fromlist, REG_ARG_2, &vtype_level, REG_ARG_3); // arg2 = fromlist, arg3 = level
+    assert(vtype_fromlist == VTYPE_PYOBJ);
+    assert(vtype_level == VTYPE_PYOBJ);
+    emit_call_with_imm_arg(emit, MP_F_IMPORT_NAME, qst, REG_ARG_1); // arg1 = import name
+    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
+// from-import: peeks the module on TOS (left in place), pushes the attribute.
+STATIC void emit_native_import_from(emit_t *emit, qstr qst) {
+    DEBUG_printf("import_from %s\n", qstr_str(qst));
+    emit_native_pre(emit);
+    vtype_kind_t vtype_module;
+    emit_access_stack(emit, 1, &vtype_module, REG_ARG_1); // arg1 = module
+    assert(vtype_module == VTYPE_PYOBJ);
+    emit_call_with_imm_arg(emit, MP_F_IMPORT_FROM, qst, REG_ARG_2); // arg2 = import name
+    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
+// import *: pops the module and calls mp_import_all; pushes nothing.
+STATIC void emit_native_import_star(emit_t *emit) {
+    DEBUG_printf("import_star\n");
+    vtype_kind_t vtype_module;
+    emit_pre_pop_reg(emit, &vtype_module, REG_ARG_1); // arg1 = module
+    assert(vtype_module == VTYPE_PYOBJ);
+    emit_call(emit, MP_F_IMPORT_ALL);
+    emit_post(emit);
+}
+
+// Push the constant for a keyword token (None/False/True/...).
+// In viper mode None/False/True become native values; otherwise all are
+// pushed as Python object pointers.
+STATIC void emit_native_load_const_tok(emit_t *emit, mp_token_kind_t tok) {
+    DEBUG_printf("load_const_tok(tok=%u)\n", tok);
+    emit_native_pre(emit);
+    vtype_kind_t vtype;
+    mp_uint_t val;
+    if (emit->do_viper_types) {
+        switch (tok) {
+            case MP_TOKEN_KW_NONE: vtype = VTYPE_PTR_NONE; val = 0; break;
+            case MP_TOKEN_KW_FALSE: vtype = VTYPE_BOOL; val = 0; break;
+            case MP_TOKEN_KW_TRUE: vtype = VTYPE_BOOL; val = 1; break;
+            no_other_choice1:
+            case MP_TOKEN_ELLIPSIS: vtype = VTYPE_PYOBJ; val = (mp_uint_t)&mp_const_ellipsis_obj; break;
+            default: assert(0); goto no_other_choice1; // to help flow control analysis
+        }
+    } else {
+        vtype = VTYPE_PYOBJ;
+        switch (tok) {
+            case MP_TOKEN_KW_NONE: val = (mp_uint_t)mp_const_none; break;
+            case MP_TOKEN_KW_FALSE: val = (mp_uint_t)mp_const_false; break;
+            case MP_TOKEN_KW_TRUE: val = (mp_uint_t)mp_const_true; break;
+            no_other_choice2:
+            case MP_TOKEN_ELLIPSIS: val = (mp_uint_t)&mp_const_ellipsis_obj; break;
+            default: assert(0); goto no_other_choice2; // to help flow control analysis
+        }
+    }
+    emit_post_push_imm(emit, vtype, val);
+}
+
+// Push a small-int constant: a native int in viper mode, otherwise a
+// small-int Python object.
+STATIC void emit_native_load_const_small_int(emit_t *emit, mp_int_t arg) {
+    DEBUG_printf("load_const_small_int(int=" INT_FMT ")\n", arg);
+    emit_native_pre(emit);
+    if (emit->do_viper_types) {
+        emit_post_push_imm(emit, VTYPE_INT, arg);
+    } else {
+        emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)MP_OBJ_NEW_SMALL_INT(arg));
+    }
+}
+
+// Push a string constant as an interned-string (qstr) object.
+STATIC void emit_native_load_const_str(emit_t *emit, qstr qst) {
+    emit_native_pre(emit);
+    // TODO: Eventually we want to be able to work with raw pointers in viper to
+    // do native array access. For now we just load them as any other object.
+    /*
+    if (emit->do_viper_types) {
+        // not implemented properly
+        // load a pointer to the asciiz string?
+        assert(0);
+        emit_post_push_imm(emit, VTYPE_PTR, (mp_uint_t)qstr_str(qst));
+    } else
+    */
+    {
+        emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)MP_OBJ_NEW_QSTR(qst));
+    }
+}
+
+// Push an arbitrary constant object; its address is emitted as an aligned
+// immediate so the GC/loader can find it in the code.
+STATIC void emit_native_load_const_obj(emit_t *emit, mp_obj_t obj) {
+    emit_native_pre(emit);
+    need_reg_single(emit, REG_RET, 0);
+    ASM_MOV_ALIGNED_IMM_TO_REG(emit->as, (mp_uint_t)obj, REG_RET);
+    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
+// Push the MP_OBJ_NULL sentinel (value 0).
+STATIC void emit_native_load_null(emit_t *emit) {
+    emit_native_pre(emit);
+    emit_post_push_imm(emit, VTYPE_PYOBJ, 0);
+}
+
+// Push the value of a fast (function-local) variable. Locals 0-2 live
+// permanently in registers; the rest are loaded from their stack slot.
+STATIC void emit_native_load_fast(emit_t *emit, qstr qst, mp_uint_t local_num) {
+    DEBUG_printf("load_fast(%s, " UINT_FMT ")\n", qstr_str(qst), local_num);
+    vtype_kind_t vtype = emit->local_vtype[local_num];
+    if (vtype == VTYPE_UNBOUND) {
+        EMIT_NATIVE_VIPER_TYPE_ERROR(emit, "local '%q' used before type known", qst);
+    }
+    emit_native_pre(emit);
+    if (local_num == 0) {
+        emit_post_push_reg(emit, vtype, REG_LOCAL_1);
+    } else if (local_num == 1) {
+        emit_post_push_reg(emit, vtype, REG_LOCAL_2);
+    } else if (local_num == 2) {
+        emit_post_push_reg(emit, vtype, REG_LOCAL_3);
+    } else {
+        need_reg_single(emit, REG_TEMP0, 0);
+        // slot layout differs: viper locals are indexed directly, Python-mode
+        // locals live inside the code_state state array
+        if (emit->do_viper_types) {
+            ASM_MOV_LOCAL_TO_REG(emit->as, local_num - REG_LOCAL_NUM, REG_TEMP0);
+        } else {
+            ASM_MOV_LOCAL_TO_REG(emit->as, STATE_START + emit->n_state - 1 - local_num, REG_TEMP0);
+        }
+        emit_post_push_reg(emit, vtype, REG_TEMP0);
+    }
+}
+
+// Push the value held in a cell variable: load the cell object via
+// load_fast, then dereference it (value at word offset 1 of the cell).
+STATIC void emit_native_load_deref(emit_t *emit, qstr qst, mp_uint_t local_num) {
+    DEBUG_printf("load_deref(%s, " UINT_FMT ")\n", qstr_str(qst), local_num);
+    need_reg_single(emit, REG_RET, 0);
+    emit_native_load_fast(emit, qst, local_num);
+    vtype_kind_t vtype;
+    int reg_base = REG_RET;
+    emit_pre_pop_reg_flexible(emit, &vtype, &reg_base, -1, -1);
+    ASM_LOAD_REG_REG_OFFSET(emit->as, REG_RET, reg_base, 1);
+    // closed over vars are always Python objects
+    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
+// Push the value bound to a name via the runtime's name lookup.
+STATIC void emit_native_load_name(emit_t *emit, qstr qst) {
+    DEBUG_printf("load_name(%s)\n", qstr_str(qst));
+    emit_native_pre(emit);
+    emit_call_with_imm_arg(emit, MP_F_LOAD_NAME, qst, REG_ARG_1);
+    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
+// Push a global; in viper mode the builtin cast names (int/uint/ptr/...)
+// are intercepted and pushed as compile-time cast markers instead.
+STATIC void emit_native_load_global(emit_t *emit, qstr qst) {
+    DEBUG_printf("load_global(%s)\n", qstr_str(qst));
+    emit_native_pre(emit);
+    // check for builtin casting operators
+    if (emit->do_viper_types && qst == MP_QSTR_int) {
+        emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, VTYPE_INT);
+    } else if (emit->do_viper_types && qst == MP_QSTR_uint) {
+        emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, VTYPE_UINT);
+    } else if (emit->do_viper_types && qst == MP_QSTR_ptr) {
+        emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, VTYPE_PTR);
+    } else if (emit->do_viper_types && qst == MP_QSTR_ptr8) {
+        emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, VTYPE_PTR8);
+    } else if (emit->do_viper_types && qst == MP_QSTR_ptr16) {
+        emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, VTYPE_PTR16);
+    } else if (emit->do_viper_types && qst == MP_QSTR_ptr32) {
+        emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, VTYPE_PTR32);
+    } else {
+        emit_call_with_imm_arg(emit, MP_F_LOAD_GLOBAL, qst, REG_ARG_1);
+        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+    }
+}
+
+// Pop the base object and push base.<attr> via mp_load_attr.
+STATIC void emit_native_load_attr(emit_t *emit, qstr qst) {
+    // depends on type of subject:
+    //  - integer, function, pointer to integers: error
+    //  - pointer to structure: get member, quite easy
+    //  - Python object: call mp_load_attr, and needs to be typed to convert result
+    vtype_kind_t vtype_base;
+    emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base
+    assert(vtype_base == VTYPE_PYOBJ);
+    emit_call_with_imm_arg(emit, MP_F_LOAD_ATTR, qst, REG_ARG_2); // arg2 = attribute name
+    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
+// Pop the base object and push the 2-slot (method, self) pair that
+// mp_load_method writes directly into the reserved stack slots.
+STATIC void emit_native_load_method(emit_t *emit, qstr qst) {
+    vtype_kind_t vtype_base;
+    emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base
+    assert(vtype_base == VTYPE_PYOBJ);
+    emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, 2); // arg3 = dest ptr
+    emit_call_with_imm_arg(emit, MP_F_LOAD_METHOD, qst, REG_ARG_2); // arg2 = method name
+}
+
+// Push the __build_class__ builtin.
+STATIC void emit_native_load_build_class(emit_t *emit) {
+    emit_native_pre(emit);
+    emit_call(emit, MP_F_LOAD_BUILD_CLASS);
+    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
+// Compile base[index]: for Python objects this calls mp_obj_subscr; for
+// viper pointer types it emits a direct memory load, specialising the case
+// where the index is a compile-time immediate.
+STATIC void emit_native_load_subscr(emit_t *emit) {
+    DEBUG_printf("load_subscr\n");
+    // need to compile: base[index]
+
+    // pop: index, base
+    // optimise case where index is an immediate
+    vtype_kind_t vtype_base = peek_vtype(emit, 1);
+
+    if (vtype_base == VTYPE_PYOBJ) {
+        // standard Python subscr
+        // TODO factor this implicit cast code with other uses of it
+        vtype_kind_t vtype_index = peek_vtype(emit, 0);
+        if (vtype_index == VTYPE_PYOBJ) {
+            emit_pre_pop_reg(emit, &vtype_index, REG_ARG_2);
+        } else {
+            // native index must first be boxed into a Python object
+            emit_pre_pop_reg(emit, &vtype_index, REG_ARG_1);
+            emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, vtype_index, REG_ARG_2); // arg2 = type
+            ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_RET);
+        }
+        emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1);
+        // MP_OBJ_SENTINEL as the value selects "load" in mp_obj_subscr
+        emit_call_with_imm_arg(emit, MP_F_OBJ_SUBSCR, (mp_uint_t)MP_OBJ_SENTINEL, REG_ARG_3);
+        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+    } else {
+        // viper load
+        // TODO The different machine architectures have very different
+        // capabilities and requirements for loads, so probably best to
+        // write a completely separate load-optimiser for each one.
+        stack_info_t *top = peek_stack(emit, 0);
+        if (top->vtype == VTYPE_INT && top->kind == STACK_IMM) {
+            // index is an immediate
+            mp_int_t index_value = top->data.u_imm;
+            emit_pre_pop_discard(emit); // discard index
+            int reg_base = REG_ARG_1;
+            int reg_index = REG_ARG_2;
+            emit_pre_pop_reg_flexible(emit, &vtype_base, &reg_base, reg_index, reg_index);
+            switch (vtype_base) {
+                case VTYPE_PTR8: {
+                    // pointer to 8-bit memory
+                    // TODO optimise to use thumb ldrb r1, [r2, r3]
+                    if (index_value != 0) {
+                        // index is non-zero
+                        #if N_THUMB
+                        if (index_value > 0 && index_value < 32) {
+                            asm_thumb_ldrb_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
+                            break;
+                        }
+                        #endif
+                        ASM_MOV_IMM_TO_REG(emit->as, index_value, reg_index);
+                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add index to base
+                        reg_base = reg_index;
+                    }
+                    ASM_LOAD8_REG_REG(emit->as, REG_RET, reg_base); // load from (base+index)
+                    break;
+                }
+                case VTYPE_PTR16: {
+                    // pointer to 16-bit memory
+                    if (index_value != 0) {
+                        // index is a non-zero immediate
+                        #if N_THUMB
+                        if (index_value > 0 && index_value < 32) {
+                            asm_thumb_ldrh_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
+                            break;
+                        }
+                        #endif
+                        ASM_MOV_IMM_TO_REG(emit->as, index_value << 1, reg_index);
+                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 2*index to base
+                        reg_base = reg_index;
+                    }
+                    ASM_LOAD16_REG_REG(emit->as, REG_RET, reg_base); // load from (base+2*index)
+                    break;
+                }
+                case VTYPE_PTR32: {
+                    // pointer to 32-bit memory
+                    if (index_value != 0) {
+                        // index is a non-zero immediate
+                        #if N_THUMB
+                        if (index_value > 0 && index_value < 32) {
+                            asm_thumb_ldr_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
+                            break;
+                        }
+                        #endif
+                        ASM_MOV_IMM_TO_REG(emit->as, index_value << 2, reg_index);
+                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 4*index to base
+                        reg_base = reg_index;
+                    }
+                    ASM_LOAD32_REG_REG(emit->as, REG_RET, reg_base); // load from (base+4*index)
+                    break;
+                }
+                default:
+                    EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+                        "can't load from '%q'", vtype_to_qstr(vtype_base));
+            }
+        } else {
+            // index is not an immediate
+            vtype_kind_t vtype_index;
+            int reg_index = REG_ARG_2;
+            emit_pre_pop_reg_flexible(emit, &vtype_index, &reg_index, REG_ARG_1, REG_ARG_1);
+            emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1);
+            if (vtype_index != VTYPE_INT && vtype_index != VTYPE_UINT) {
+                EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+                    "can't load with '%q' index", vtype_to_qstr(vtype_index));
+            }
+            switch (vtype_base) {
+                case VTYPE_PTR8: {
+                    // pointer to 8-bit memory
+                    // TODO optimise to use thumb ldrb r1, [r2, r3]
+                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+                    ASM_LOAD8_REG_REG(emit->as, REG_RET, REG_ARG_1); // load from (base+index)
+                    break;
+                }
+                case VTYPE_PTR16: {
+                    // pointer to 16-bit memory
+                    // index added twice to scale by the 2-byte element size
+                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+                    ASM_LOAD16_REG_REG(emit->as, REG_RET, REG_ARG_1); // load from (base+2*index)
+                    break;
+                }
+                case VTYPE_PTR32: {
+                    // pointer to word-size memory
+                    // index added four times to scale by the 4-byte element size
+                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+                    ASM_LOAD32_REG_REG(emit->as, REG_RET, REG_ARG_1); // load from (base+4*index)
+                    break;
+                }
+                default:
+                    EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+                        "can't load from '%q'", vtype_to_qstr(vtype_base));
+            }
+        }
+        emit_post_push_reg(emit, VTYPE_INT, REG_RET);
+    }
+}
+
+// Pop TOS into a fast (function-local) variable; locals 0-2 live in
+// registers, the rest in stack slots. Also performs viper type inference:
+// the first store fixes the local's type, later stores must match it.
+STATIC void emit_native_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num) {
+    vtype_kind_t vtype;
+    if (local_num == 0) {
+        emit_pre_pop_reg(emit, &vtype, REG_LOCAL_1);
+    } else if (local_num == 1) {
+        emit_pre_pop_reg(emit, &vtype, REG_LOCAL_2);
+    } else if (local_num == 2) {
+        emit_pre_pop_reg(emit, &vtype, REG_LOCAL_3);
+    } else {
+        emit_pre_pop_reg(emit, &vtype, REG_TEMP0);
+        // slot layout differs between viper and Python-mode code (see load_fast)
+        if (emit->do_viper_types) {
+            ASM_MOV_REG_TO_LOCAL(emit->as, REG_TEMP0, local_num - REG_LOCAL_NUM);
+        } else {
+            ASM_MOV_REG_TO_LOCAL(emit->as, REG_TEMP0, STATE_START + emit->n_state - 1 - local_num);
+        }
+    }
+    emit_post(emit);
+
+    // check types
+    if (emit->local_vtype[local_num] == VTYPE_UNBOUND) {
+        // first time this local is assigned, so give it a type of the object stored in it
+        emit->local_vtype[local_num] = vtype;
+    } else if (emit->local_vtype[local_num] != vtype) {
+        // type of local is not the same as object stored in it
+        EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+            "local '%q' has type '%q' but source is '%q'",
+            qst, vtype_to_qstr(emit->local_vtype[local_num]), vtype_to_qstr(vtype));
+    }
+}
+
+// Store TOS into a cell variable: load the cell object via load_fast,
+// then write the value at word offset 1 of the cell.
+STATIC void emit_native_store_deref(emit_t *emit, qstr qst, mp_uint_t local_num) {
+    DEBUG_printf("store_deref(%s, " UINT_FMT ")\n", qstr_str(qst), local_num);
+    need_reg_single(emit, REG_TEMP0, 0);
+    need_reg_single(emit, REG_TEMP1, 0);
+    emit_native_load_fast(emit, qst, local_num);
+    vtype_kind_t vtype;
+    int reg_base = REG_TEMP0;
+    emit_pre_pop_reg_flexible(emit, &vtype, &reg_base, -1, -1);
+    int reg_src = REG_TEMP1;
+    emit_pre_pop_reg_flexible(emit, &vtype, &reg_src, reg_base, reg_base);
+    ASM_STORE_REG_REG_OFFSET(emit->as, reg_src, reg_base, 1);
+    emit_post(emit);
+}
+
+// Pop TOS and bind it to a name via mp_store_name.
+STATIC void emit_native_store_name(emit_t *emit, qstr qst) {
+    // mp_store_name, but needs conversion of object (maybe have mp_viper_store_name(obj, type))
+    vtype_kind_t vtype;
+    emit_pre_pop_reg(emit, &vtype, REG_ARG_2);
+    assert(vtype == VTYPE_PYOBJ);
+    emit_call_with_imm_arg(emit, MP_F_STORE_NAME, qst, REG_ARG_1); // arg1 = name
+    emit_post(emit);
+}
+
+// Pop TOS and store into a global, boxing a native value first if needed.
+STATIC void emit_native_store_global(emit_t *emit, qstr qst) {
+    vtype_kind_t vtype = peek_vtype(emit, 0);
+    if (vtype == VTYPE_PYOBJ) {
+        emit_pre_pop_reg(emit, &vtype, REG_ARG_2);
+    } else {
+        // native value must first be converted to a Python object
+        emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
+        emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, vtype, REG_ARG_2); // arg2 = type
+        ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_RET);
+    }
+    emit_call_with_imm_arg(emit, MP_F_STORE_GLOBAL, qst, REG_ARG_1); // arg1 = name
+    emit_post(emit);
+}
+
+// Pop base and value, then set base.<attr> = value via mp_store_attr.
+STATIC void emit_native_store_attr(emit_t *emit, qstr qst) {
+    vtype_kind_t vtype_base, vtype_val;
+    emit_pre_pop_reg_reg(emit, &vtype_base, REG_ARG_1, &vtype_val, REG_ARG_3); // arg1 = base, arg3 = value
+    assert(vtype_base == VTYPE_PYOBJ);
+    assert(vtype_val == VTYPE_PYOBJ);
+    emit_call_with_imm_arg(emit, MP_F_STORE_ATTR, qst, REG_ARG_2); // arg2 = attribute name
+    emit_post(emit);
+}
+
+STATIC void emit_native_store_subscr(emit_t *emit) {
+ DEBUG_printf("store_subscr\n");
+ // need to compile: base[index] = value
+
+ // pop: index, base, value
+ // optimise case where index is an immediate
+ vtype_kind_t vtype_base = peek_vtype(emit, 1);
+
+ if (vtype_base == VTYPE_PYOBJ) {
+ // standard Python subscr
+ vtype_kind_t vtype_index = peek_vtype(emit, 0);
+ vtype_kind_t vtype_value = peek_vtype(emit, 2);
+ if (vtype_index != VTYPE_PYOBJ || vtype_value != VTYPE_PYOBJ) {
+ // need to implicitly convert non-objects to objects
+ // TODO do this properly
+ emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_1, 3);
+ adjust_stack(emit, 3);
+ }
+ emit_pre_pop_reg_reg_reg(emit, &vtype_index, REG_ARG_2, &vtype_base, REG_ARG_1, &vtype_value, REG_ARG_3);
+ emit_call(emit, MP_F_OBJ_SUBSCR);
+ } else {
+ // viper store
+ // TODO The different machine architectures have very different
+ // capabilities and requirements for stores, so probably best to
+ // write a completely separate store-optimiser for each one.
+ stack_info_t *top = peek_stack(emit, 0);
+ if (top->vtype == VTYPE_INT && top->kind == STACK_IMM) {
+ // index is an immediate
+ mp_int_t index_value = top->data.u_imm;
+ emit_pre_pop_discard(emit); // discard index
+ vtype_kind_t vtype_value;
+ int reg_base = REG_ARG_1;
+ int reg_index = REG_ARG_2;
+ int reg_value = REG_ARG_3;
+ emit_pre_pop_reg_flexible(emit, &vtype_base, ®_base, reg_index, reg_value);
+ #if N_X86
+ // special case: x86 needs byte stores to be from lower 4 regs (REG_ARG_3 is EDX)
+ emit_pre_pop_reg(emit, &vtype_value, reg_value);
+ #else
+ emit_pre_pop_reg_flexible(emit, &vtype_value, ®_value, reg_base, reg_index);
+ #endif
+ if (vtype_value != VTYPE_BOOL && vtype_value != VTYPE_INT && vtype_value != VTYPE_UINT) {
+ EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+ "can't store '%q'", vtype_to_qstr(vtype_value));
+ }
+ switch (vtype_base) {
+ case VTYPE_PTR8: {
+ // pointer to 8-bit memory
+ // TODO optimise to use thumb strb r1, [r2, r3]
+ if (index_value != 0) {
+ // index is non-zero
+ #if N_THUMB
+ if (index_value > 0 && index_value < 32) {
+ asm_thumb_strb_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
+ break;
+ }
+ #endif
+ ASM_MOV_IMM_TO_REG(emit->as, index_value, reg_index);
+ #if N_ARM
+ asm_arm_strb_reg_reg_reg(emit->as, reg_value, reg_base, reg_index);
+ return;
+ #endif
+ ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add index to base
+ reg_base = reg_index;
+ }
+ ASM_STORE8_REG_REG(emit->as, reg_value, reg_base); // store value to (base+index)
+ break;
+ }
+ case VTYPE_PTR16: {
+ // pointer to 16-bit memory
+ if (index_value != 0) {
+ // index is a non-zero immediate
+ #if N_THUMB
+ if (index_value > 0 && index_value < 32) {
+ asm_thumb_strh_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
+ break;
+ }
+ #endif
+ ASM_MOV_IMM_TO_REG(emit->as, index_value << 1, reg_index);
+ #if N_ARM
+ asm_arm_strh_reg_reg_reg(emit->as, reg_value, reg_base, reg_index);
+ return;
+ #endif
+ ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 2*index to base
+ reg_base = reg_index;
+ }
+ ASM_STORE16_REG_REG(emit->as, reg_value, reg_base); // store value to (base+2*index)
+ break;
+ }
+ case VTYPE_PTR32: {
+ // pointer to 32-bit memory
+ if (index_value != 0) {
+ // index is a non-zero immediate
+ #if N_THUMB
+ if (index_value > 0 && index_value < 32) {
+ asm_thumb_str_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
+ break;
+ }
+ #endif
+ ASM_MOV_IMM_TO_REG(emit->as, index_value << 2, reg_index);
+ #if N_ARM
+ asm_arm_str_reg_reg_reg(emit->as, reg_value, reg_base, reg_index);
+ return;
+ #endif
+ ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 4*index to base
+ reg_base = reg_index;
+ }
+ ASM_STORE32_REG_REG(emit->as, reg_value, reg_base); // store value to (base+4*index)
+ break;
+ }
+ default:
+ EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+ "can't store to '%q'", vtype_to_qstr(vtype_base));
+ }
+ } else {
+ // index is not an immediate
+ vtype_kind_t vtype_index, vtype_value;
+ int reg_index = REG_ARG_2;
+ int reg_value = REG_ARG_3;
+ emit_pre_pop_reg_flexible(emit, &vtype_index, ®_index, REG_ARG_1, reg_value);
+ emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1);
+ if (vtype_index != VTYPE_INT && vtype_index != VTYPE_UINT) {
+ EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+ "can't store with '%q' index", vtype_to_qstr(vtype_index));
+ }
+ #if N_X86
+ // special case: x86 needs byte stores to be from lower 4 regs (REG_ARG_3 is EDX)
+ emit_pre_pop_reg(emit, &vtype_value, reg_value);
+ #else
+ emit_pre_pop_reg_flexible(emit, &vtype_value, ®_value, REG_ARG_1, reg_index);
+ #endif
+ if (vtype_value != VTYPE_BOOL && vtype_value != VTYPE_INT && vtype_value != VTYPE_UINT) {
+ EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+ "can't store '%q'", vtype_to_qstr(vtype_value));
+ }
+ switch (vtype_base) {
+ case VTYPE_PTR8: {
+ // pointer to 8-bit memory
+ // TODO optimise to use thumb strb r1, [r2, r3]
+ #if N_ARM
+ asm_arm_strb_reg_reg_reg(emit->as, reg_value, REG_ARG_1, reg_index);
+ break;
+ #endif
+ ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+ ASM_STORE8_REG_REG(emit->as, reg_value, REG_ARG_1); // store value to (base+index)
+ break;
+ }
+ case VTYPE_PTR16: {
+ // pointer to 16-bit memory
+ #if N_ARM
+ asm_arm_strh_reg_reg_reg(emit->as, reg_value, REG_ARG_1, reg_index);
+ break;
+ #endif
+ ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+ ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+ ASM_STORE16_REG_REG(emit->as, reg_value, REG_ARG_1); // store value to (base+2*index)
+ break;
+ }
+ case VTYPE_PTR32: {
+ // pointer to 32-bit memory
+ #if N_ARM
+ asm_arm_str_reg_reg_reg(emit->as, reg_value, REG_ARG_1, reg_index);
+ break;
+ #endif
+ ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+ ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+ ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+ ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
+ ASM_STORE32_REG_REG(emit->as, reg_value, REG_ARG_1); // store value to (base+4*index)
+ break;
+ }
+ default:
+ EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+ "can't store to '%q'", vtype_to_qstr(vtype_base));
+ }
+ }
+
+ }
+}
+
+STATIC void emit_native_delete_fast(emit_t *emit, qstr qst, mp_uint_t local_num) {
+ // TODO: This is not compliant implementation. We could use MP_OBJ_SENTINEL
+ // to mark deleted vars but then every var would need to be checked on
+ // each access. Very inefficient, so just set value to None to enable GC.
+ emit_native_load_const_tok(emit, MP_TOKEN_KW_NONE);
+ emit_native_store_fast(emit, qst, local_num);
+}
+
+STATIC void emit_native_delete_deref(emit_t *emit, qstr qst, mp_uint_t local_num) {
+ // TODO implement me!
+ (void)emit;
+ (void)qst;
+ (void)local_num;
+}
+
+STATIC void emit_native_delete_name(emit_t *emit, qstr qst) {
+ emit_native_pre(emit);
+ emit_call_with_imm_arg(emit, MP_F_DELETE_NAME, qst, REG_ARG_1);
+ emit_post(emit);
+}
+
+STATIC void emit_native_delete_global(emit_t *emit, qstr qst) {
+ emit_native_pre(emit);
+ emit_call_with_imm_arg(emit, MP_F_DELETE_GLOBAL, qst, REG_ARG_1);
+ emit_post(emit);
+}
+
+STATIC void emit_native_delete_attr(emit_t *emit, qstr qst) {
+ vtype_kind_t vtype_base;
+ emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base
+ assert(vtype_base == VTYPE_PYOBJ);
+ emit_call_with_2_imm_args(emit, MP_F_STORE_ATTR, qst, REG_ARG_2, (mp_uint_t)MP_OBJ_NULL, REG_ARG_3); // arg2 = attribute name, arg3 = value (null for delete)
+ emit_post(emit);
+}
+
+STATIC void emit_native_delete_subscr(emit_t *emit) {
+ vtype_kind_t vtype_index, vtype_base;
+ emit_pre_pop_reg_reg(emit, &vtype_index, REG_ARG_2, &vtype_base, REG_ARG_1); // index, base
+ assert(vtype_index == VTYPE_PYOBJ);
+ assert(vtype_base == VTYPE_PYOBJ);
+ emit_call_with_imm_arg(emit, MP_F_OBJ_SUBSCR, (mp_uint_t)MP_OBJ_NULL, REG_ARG_3);
+}
+
+STATIC void emit_native_dup_top(emit_t *emit) {
+ DEBUG_printf("dup_top\n");
+ vtype_kind_t vtype;
+ int reg = REG_TEMP0;
+ emit_pre_pop_reg_flexible(emit, &vtype, ®, -1, -1);
+ emit_post_push_reg_reg(emit, vtype, reg, vtype, reg);
+}
+
+STATIC void emit_native_dup_top_two(emit_t *emit) {
+ vtype_kind_t vtype0, vtype1;
+ emit_pre_pop_reg_reg(emit, &vtype0, REG_TEMP0, &vtype1, REG_TEMP1);
+ emit_post_push_reg_reg_reg_reg(emit, vtype1, REG_TEMP1, vtype0, REG_TEMP0, vtype1, REG_TEMP1, vtype0, REG_TEMP0);
+}
+
+STATIC void emit_native_pop_top(emit_t *emit) {
+ DEBUG_printf("pop_top\n");
+ emit_pre_pop_discard(emit);
+ emit_post(emit);
+}
+
+STATIC void emit_native_rot_two(emit_t *emit) {
+ DEBUG_printf("rot_two\n");
+ vtype_kind_t vtype0, vtype1;
+ emit_pre_pop_reg_reg(emit, &vtype0, REG_TEMP0, &vtype1, REG_TEMP1);
+ emit_post_push_reg_reg(emit, vtype0, REG_TEMP0, vtype1, REG_TEMP1);
+}
+
+STATIC void emit_native_rot_three(emit_t *emit) {
+ DEBUG_printf("rot_three\n");
+ vtype_kind_t vtype0, vtype1, vtype2;
+ emit_pre_pop_reg_reg_reg(emit, &vtype0, REG_TEMP0, &vtype1, REG_TEMP1, &vtype2, REG_TEMP2);
+ emit_post_push_reg_reg_reg(emit, vtype0, REG_TEMP0, vtype2, REG_TEMP2, vtype1, REG_TEMP1);
+}
+
+STATIC void emit_native_jump(emit_t *emit, mp_uint_t label) {
+ DEBUG_printf("jump(label=" UINT_FMT ")\n", label);
+ emit_native_pre(emit);
+ // need to commit stack because we are jumping elsewhere
+ need_stack_settled(emit);
+ ASM_JUMP(emit->as, label);
+ emit_post(emit);
+}
+
// Common code for conditional jumps: evaluate the truthiness of the
// top-of-stack value into REG_RET.  If pop is false the value is left on
// the stack (the caller will conditionally discard it).
STATIC void emit_native_jump_helper(emit_t *emit, bool pop) {
    vtype_kind_t vtype = peek_vtype(emit, 0);
    if (vtype == VTYPE_PYOBJ) {
        // Python object: ask the runtime whether it is truthy
        emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
        if (!pop) {
            // undo the pop so the value stays on the stack
            adjust_stack(emit, 1);
        }
        emit_call(emit, MP_F_OBJ_IS_TRUE);
    } else {
        // viper native value: used directly as its truth value, but only
        // bool/int/uint may be implicitly converted to bool
        emit_pre_pop_reg(emit, &vtype, REG_RET);
        if (!pop) {
            adjust_stack(emit, 1);
        }
        if (!(vtype == VTYPE_BOOL || vtype == VTYPE_INT || vtype == VTYPE_UINT)) {
            EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                "can't implicitly convert '%q' to 'bool'", vtype_to_qstr(vtype));
        }
    }
    // For non-pop need to save the vtype so that emit_native_adjust_stack_size
    // can use it. This is a bit of a hack.
    if (!pop) {
        emit->saved_stack_vtype = vtype;
    }
    // need to commit stack because we may jump elsewhere
    need_stack_settled(emit);
}
+
+STATIC void emit_native_pop_jump_if(emit_t *emit, bool cond, mp_uint_t label) {
+ DEBUG_printf("pop_jump_if(cond=%u, label=" UINT_FMT ")\n", cond, label);
+ emit_native_jump_helper(emit, true);
+ if (cond) {
+ ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, label);
+ } else {
+ ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, label);
+ }
+ emit_post(emit);
+}
+
+STATIC void emit_native_jump_if_or_pop(emit_t *emit, bool cond, mp_uint_t label) {
+ DEBUG_printf("jump_if_or_pop(cond=%u, label=" UINT_FMT ")\n", cond, label);
+ emit_native_jump_helper(emit, false);
+ if (cond) {
+ ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, label);
+ } else {
+ ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, label);
+ }
+ adjust_stack(emit, -1);
+ emit_post(emit);
+}
+
+STATIC void emit_native_break_loop(emit_t *emit, mp_uint_t label, mp_uint_t except_depth) {
+ (void)except_depth;
+ emit_native_jump(emit, label & ~MP_EMIT_BREAK_FROM_FOR); // TODO properly
+}
+
+STATIC void emit_native_continue_loop(emit_t *emit, mp_uint_t label, mp_uint_t except_depth) {
+ (void)except_depth;
+ emit_native_jump(emit, label); // TODO properly
+}
+
// Begin a 'with' statement: look up and stash the context manager's
// __exit__ bound method, call __enter__, and push an nlr frame so that an
// exception in the body transfers control to the cleanup code at 'label'.
STATIC void emit_native_setup_with(emit_t *emit, mp_uint_t label) {
    // the context manager is on the top of the stack
    // stack: (..., ctx_mgr)

    // get __exit__ method
    vtype_kind_t vtype;
    emit_access_stack(emit, 1, &vtype, REG_ARG_1); // arg1 = ctx_mgr
    assert(vtype == VTYPE_PYOBJ);
    emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, 2); // arg3 = dest ptr
    emit_call_with_imm_arg(emit, MP_F_LOAD_METHOD, MP_QSTR___exit__, REG_ARG_2);
    // stack: (..., ctx_mgr, __exit__, self)

    // rotate ctx_mgr out so only the bound __exit__ pair remains
    emit_pre_pop_reg(emit, &vtype, REG_ARG_3); // self
    emit_pre_pop_reg(emit, &vtype, REG_ARG_2); // __exit__
    emit_pre_pop_reg(emit, &vtype, REG_ARG_1); // ctx_mgr
    emit_post_push_reg(emit, vtype, REG_ARG_2); // __exit__
    emit_post_push_reg(emit, vtype, REG_ARG_3); // self
    // stack: (..., __exit__, self)
    // REG_ARG_1=ctx_mgr

    // get __enter__ method
    emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, 2); // arg3 = dest ptr
    emit_call_with_imm_arg(emit, MP_F_LOAD_METHOD, MP_QSTR___enter__, REG_ARG_2); // arg2 = method name
    // stack: (..., __exit__, self, __enter__, self)

    // call __enter__ method
    emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 2); // pointer to items, including meth and self
    emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, 0, REG_ARG_1, 0, REG_ARG_2);
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // push return value of __enter__
    // stack: (..., __exit__, self, as_value)

    // need to commit stack because we may jump elsewhere
    need_stack_settled(emit);
    emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_1, sizeof(nlr_buf_t) / sizeof(mp_uint_t)); // arg1 = pointer to nlr buf
    emit_call(emit, MP_F_NLR_PUSH);
    // non-zero return from nlr_push means an exception was raised in the body
    ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, label);

    emit_access_stack(emit, sizeof(nlr_buf_t) / sizeof(mp_uint_t) + 1, &vtype, REG_RET); // access return value of __enter__
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // push return value of __enter__
    // stack: (..., __exit__, self, as_value, nlr_buf, as_value)
}
+
// End of a 'with' statement body.  Emits two paths: the normal path pops
// the nlr frame and calls __exit__(None, None, None); the nlr-catch path
// (entered at 'label') calls __exit__(type(exc), exc, None) and, if it
// returns a true value, swallows the exception by replacing it with None.
STATIC void emit_native_with_cleanup(emit_t *emit, mp_uint_t label) {
    // note: label+1 is available as an auxiliary label

    // ---- normal (no-exception) path ----
    // stack: (..., __exit__, self, as_value, nlr_buf)
    emit_native_pre(emit);
    emit_call(emit, MP_F_NLR_POP);
    adjust_stack(emit, -(mp_int_t)(sizeof(nlr_buf_t) / sizeof(mp_uint_t)) - 1);
    // stack: (..., __exit__, self)

    // call __exit__ with three None arguments
    emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)mp_const_none);
    emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)mp_const_none);
    emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)mp_const_none);
    emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 5);
    emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, 3, REG_ARG_1, 0, REG_ARG_2);

    // jump to after with cleanup nlr_catch block
    adjust_stack(emit, 1); // dummy nlr_buf.prev
    emit_native_load_const_tok(emit, MP_TOKEN_KW_NONE); // nlr_buf.ret_val = no exception
    emit_native_jump(emit, label + 1);

    // ---- nlr_catch (exception) path ----
    emit_native_label_assign(emit, label);

    // adjust stack counter for: __exit__, self, as_value
    adjust_stack(emit, 3);
    // stack: (..., __exit__, self, as_value, nlr_buf.prev, nlr_buf.ret_val)

    vtype_kind_t vtype;
    emit_pre_pop_reg(emit, &vtype, REG_ARG_1); // get the thrown value (exc)
    adjust_stack(emit, -2); // discard nlr_buf.prev and as_value
    // stack: (..., __exit__, self)
    // REG_ARG_1=exc

    // rotate exc below the bound __exit__ pair so it survives the call
    emit_pre_pop_reg(emit, &vtype, REG_ARG_2); // self
    emit_pre_pop_reg(emit, &vtype, REG_ARG_3); // __exit__
    adjust_stack(emit, 1); // dummy nlr_buf.prev
    emit_post_push_reg(emit, vtype, REG_ARG_1); // push exc to save it for later
    emit_post_push_reg(emit, vtype, REG_ARG_3); // __exit__
    emit_post_push_reg(emit, vtype, REG_ARG_2); // self
    // stack: (..., exc, __exit__, self)
    // REG_ARG_1=exc

    ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_2, REG_ARG_1, 0); // get type(exc)
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_ARG_2); // push type(exc)
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_ARG_1); // push exc value
    emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)mp_const_none); // traceback info
    // stack: (..., exc, __exit__, self, type(exc), exc, traceback)

    // call __exit__ method
    emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 5);
    emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, 3, REG_ARG_1, 0, REG_ARG_2);
    // stack: (..., exc)

    // if REG_RET is true then we need to replace top-of-stack with None (swallow exception)
    if (REG_ARG_1 != REG_RET) {
        ASM_MOV_REG_REG(emit->as, REG_ARG_1, REG_RET);
    }
    emit_call(emit, MP_F_OBJ_IS_TRUE);
    ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, label + 1);

    // replace exc with None
    emit_pre_pop_discard(emit);
    emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)mp_const_none);

    // end of with cleanup nlr_catch block
    emit_native_label_assign(emit, label + 1);
}
+
+STATIC void emit_native_setup_except(emit_t *emit, mp_uint_t label) {
+ emit_native_pre(emit);
+ // need to commit stack because we may jump elsewhere
+ need_stack_settled(emit);
+ emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_1, sizeof(nlr_buf_t) / sizeof(mp_uint_t)); // arg1 = pointer to nlr buf
+ emit_call(emit, MP_F_NLR_PUSH);
+ ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, label);
+ emit_post(emit);
+}
+
+STATIC void emit_native_setup_finally(emit_t *emit, mp_uint_t label) {
+ emit_native_setup_except(emit, label);
+}
+
+STATIC void emit_native_end_finally(emit_t *emit) {
+ // logic:
+ // exc = pop_stack
+ // if exc == None: pass
+ // else: raise exc
+ // the check if exc is None is done in the MP_F_NATIVE_RAISE stub
+ vtype_kind_t vtype;
+ emit_pre_pop_reg(emit, &vtype, REG_ARG_1); // get nlr_buf.ret_val
+ emit_pre_pop_discard(emit); // discard nlr_buf.prev
+ emit_call(emit, MP_F_NATIVE_RAISE);
+ emit_post(emit);
+}
+
+STATIC void emit_native_get_iter(emit_t *emit) {
+ // perhaps the difficult one, as we want to rewrite for loops using native code
+ // in cases where we iterate over a Python object, can we use normal runtime calls?
+
+ vtype_kind_t vtype;
+ emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
+ assert(vtype == VTYPE_PYOBJ);
+ emit_call(emit, MP_F_GETITER);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
+STATIC void emit_native_for_iter(emit_t *emit, mp_uint_t label) {
+ emit_native_pre(emit);
+ vtype_kind_t vtype;
+ emit_access_stack(emit, 1, &vtype, REG_ARG_1);
+ assert(vtype == VTYPE_PYOBJ);
+ emit_call(emit, MP_F_ITERNEXT);
+ ASM_MOV_IMM_TO_REG(emit->as, (mp_uint_t)MP_OBJ_STOP_ITERATION, REG_TEMP1);
+ ASM_JUMP_IF_REG_EQ(emit->as, REG_RET, REG_TEMP1, label);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
+STATIC void emit_native_for_iter_end(emit_t *emit) {
+ // adjust stack counter (we get here from for_iter ending, which popped the value for us)
+ emit_native_pre(emit);
+ adjust_stack(emit, -1);
+ emit_post(emit);
+}
+
+STATIC void emit_native_pop_block(emit_t *emit) {
+ emit_native_pre(emit);
+ emit_call(emit, MP_F_NLR_POP);
+ adjust_stack(emit, -(mp_int_t)(sizeof(nlr_buf_t) / sizeof(mp_uint_t)) + 1);
+ emit_post(emit);
+}
+
+STATIC void emit_native_pop_except(emit_t *emit) {
+ (void)emit;
+ /*
+ emit_native_pre(emit);
+ emit_call(emit, MP_F_NLR_POP);
+ adjust_stack(emit, -(mp_int_t)(sizeof(nlr_buf_t) / sizeof(mp_uint_t)));
+ emit_post(emit);
+ */
+}
+
+STATIC void emit_native_unary_op(emit_t *emit, mp_unary_op_t op) {
+ vtype_kind_t vtype;
+ emit_pre_pop_reg(emit, &vtype, REG_ARG_2);
+ if (vtype == VTYPE_PYOBJ) {
+ emit_call_with_imm_arg(emit, MP_F_UNARY_OP, op, REG_ARG_1);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+ } else {
+ adjust_stack(emit, 1);
+ EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+ "unary op %q not implemented", mp_unary_op_method_name[op]);
+ }
+}
+
+STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
+ DEBUG_printf("binary_op(" UINT_FMT ")\n", op);
+ vtype_kind_t vtype_lhs = peek_vtype(emit, 1);
+ vtype_kind_t vtype_rhs = peek_vtype(emit, 0);
+ if (vtype_lhs == VTYPE_INT && vtype_rhs == VTYPE_INT) {
+ #if N_X64 || N_X86
+ // special cases for x86 and shifting
+ if (op == MP_BINARY_OP_LSHIFT
+ || op == MP_BINARY_OP_INPLACE_LSHIFT
+ || op == MP_BINARY_OP_RSHIFT
+ || op == MP_BINARY_OP_INPLACE_RSHIFT) {
+ #if N_X64
+ emit_pre_pop_reg_reg(emit, &vtype_rhs, ASM_X64_REG_RCX, &vtype_lhs, REG_RET);
+ #else
+ emit_pre_pop_reg_reg(emit, &vtype_rhs, ASM_X86_REG_ECX, &vtype_lhs, REG_RET);
+ #endif
+ if (op == MP_BINARY_OP_LSHIFT || op == MP_BINARY_OP_INPLACE_LSHIFT) {
+ ASM_LSL_REG(emit->as, REG_RET);
+ } else {
+ ASM_ASR_REG(emit->as, REG_RET);
+ }
+ emit_post_push_reg(emit, VTYPE_INT, REG_RET);
+ return;
+ }
+ #endif
+ int reg_rhs = REG_ARG_3;
+ emit_pre_pop_reg_flexible(emit, &vtype_rhs, ®_rhs, REG_RET, REG_ARG_2);
+ emit_pre_pop_reg(emit, &vtype_lhs, REG_ARG_2);
+ if (0) {
+ // dummy
+ #if !(N_X64 || N_X86)
+ } else if (op == MP_BINARY_OP_LSHIFT || op == MP_BINARY_OP_INPLACE_LSHIFT) {
+ ASM_LSL_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+ emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
+ } else if (op == MP_BINARY_OP_RSHIFT || op == MP_BINARY_OP_INPLACE_RSHIFT) {
+ ASM_ASR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+ emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
+ #endif
+ } else if (op == MP_BINARY_OP_OR || op == MP_BINARY_OP_INPLACE_OR) {
+ ASM_OR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+ emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
+ } else if (op == MP_BINARY_OP_XOR || op == MP_BINARY_OP_INPLACE_XOR) {
+ ASM_XOR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+ emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
+ } else if (op == MP_BINARY_OP_AND || op == MP_BINARY_OP_INPLACE_AND) {
+ ASM_AND_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+ emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
+ } else if (op == MP_BINARY_OP_ADD || op == MP_BINARY_OP_INPLACE_ADD) {
+ ASM_ADD_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+ emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
+ } else if (op == MP_BINARY_OP_SUBTRACT || op == MP_BINARY_OP_INPLACE_SUBTRACT) {
+ ASM_SUB_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+ emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
+ } else if (op == MP_BINARY_OP_MULTIPLY || op == MP_BINARY_OP_INPLACE_MULTIPLY) {
+ ASM_MUL_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+ emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
+ } else if (MP_BINARY_OP_LESS <= op && op <= MP_BINARY_OP_NOT_EQUAL) {
+ // comparison ops are (in enum order):
+ // MP_BINARY_OP_LESS
+ // MP_BINARY_OP_MORE
+ // MP_BINARY_OP_EQUAL
+ // MP_BINARY_OP_LESS_EQUAL
+ // MP_BINARY_OP_MORE_EQUAL
+ // MP_BINARY_OP_NOT_EQUAL
+ need_reg_single(emit, REG_RET, 0);
+ #if N_X64
+ asm_x64_xor_r64_r64(emit->as, REG_RET, REG_RET);
+ asm_x64_cmp_r64_with_r64(emit->as, reg_rhs, REG_ARG_2);
+ static byte ops[6] = {
+ ASM_X64_CC_JL,
+ ASM_X64_CC_JG,
+ ASM_X64_CC_JE,
+ ASM_X64_CC_JLE,
+ ASM_X64_CC_JGE,
+ ASM_X64_CC_JNE,
+ };
+ asm_x64_setcc_r8(emit->as, ops[op - MP_BINARY_OP_LESS], REG_RET);
+ #elif N_X86
+ asm_x86_xor_r32_r32(emit->as, REG_RET, REG_RET);
+ asm_x86_cmp_r32_with_r32(emit->as, reg_rhs, REG_ARG_2);
+ static byte ops[6] = {
+ ASM_X86_CC_JL,
+ ASM_X86_CC_JG,
+ ASM_X86_CC_JE,
+ ASM_X86_CC_JLE,
+ ASM_X86_CC_JGE,
+ ASM_X86_CC_JNE,
+ };
+ asm_x86_setcc_r8(emit->as, ops[op - MP_BINARY_OP_LESS], REG_RET);
+ #elif N_THUMB
+ asm_thumb_cmp_rlo_rlo(emit->as, REG_ARG_2, reg_rhs);
+ static uint16_t ops[6] = {
+ ASM_THUMB_OP_ITE_GE,
+ ASM_THUMB_OP_ITE_GT,
+ ASM_THUMB_OP_ITE_EQ,
+ ASM_THUMB_OP_ITE_GT,
+ ASM_THUMB_OP_ITE_GE,
+ ASM_THUMB_OP_ITE_EQ,
+ };
+ static byte ret[6] = { 0, 1, 1, 0, 1, 0, };
+ asm_thumb_op16(emit->as, ops[op - MP_BINARY_OP_LESS]);
+ asm_thumb_mov_rlo_i8(emit->as, REG_RET, ret[op - MP_BINARY_OP_LESS]);
+ asm_thumb_mov_rlo_i8(emit->as, REG_RET, ret[op - MP_BINARY_OP_LESS] ^ 1);
+ #elif N_ARM
+ asm_arm_cmp_reg_reg(emit->as, REG_ARG_2, reg_rhs);
+ static uint ccs[6] = {
+ ASM_ARM_CC_LT,
+ ASM_ARM_CC_GT,
+ ASM_ARM_CC_EQ,
+ ASM_ARM_CC_LE,
+ ASM_ARM_CC_GE,
+ ASM_ARM_CC_NE,
+ };
+ asm_arm_setcc_reg(emit->as, REG_RET, ccs[op - MP_BINARY_OP_LESS]);
+ #else
+ #error not implemented
+ #endif
+ emit_post_push_reg(emit, VTYPE_BOOL, REG_RET);
+ } else {
+ // TODO other ops not yet implemented
+ adjust_stack(emit, 1);
+ EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+ "binary op %q not implemented", mp_binary_op_method_name[op]);
+ }
+ } else if (vtype_lhs == VTYPE_PYOBJ && vtype_rhs == VTYPE_PYOBJ) {
+ emit_pre_pop_reg_reg(emit, &vtype_rhs, REG_ARG_3, &vtype_lhs, REG_ARG_2);
+ bool invert = false;
+ if (op == MP_BINARY_OP_NOT_IN) {
+ invert = true;
+ op = MP_BINARY_OP_IN;
+ } else if (op == MP_BINARY_OP_IS_NOT) {
+ invert = true;
+ op = MP_BINARY_OP_IS;
+ }
+ emit_call_with_imm_arg(emit, MP_F_BINARY_OP, op, REG_ARG_1);
+ if (invert) {
+ ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_RET);
+ emit_call_with_imm_arg(emit, MP_F_UNARY_OP, MP_UNARY_OP_NOT, REG_ARG_1);
+ }
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+ } else {
+ adjust_stack(emit, -1);
+ EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+ "can't do binary op between '%q' and '%q'",
+ vtype_to_qstr(vtype_lhs), vtype_to_qstr(vtype_rhs));
+ }
+}
+
+STATIC void emit_native_build_tuple(emit_t *emit, mp_uint_t n_args) {
+ // for viper: call runtime, with types of args
+ // if wrapped in byte_array, or something, allocates memory and fills it
+ emit_native_pre(emit);
+ emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, n_args); // pointer to items
+ emit_call_with_imm_arg(emit, MP_F_BUILD_TUPLE, n_args, REG_ARG_1);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // new tuple
+}
+
+STATIC void emit_native_build_list(emit_t *emit, mp_uint_t n_args) {
+ emit_native_pre(emit);
+ emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, n_args); // pointer to items
+ emit_call_with_imm_arg(emit, MP_F_BUILD_LIST, n_args, REG_ARG_1);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // new list
+}
+
+STATIC void emit_native_list_append(emit_t *emit, mp_uint_t list_index) {
+ // only used in list comprehension
+ vtype_kind_t vtype_list, vtype_item;
+ emit_pre_pop_reg(emit, &vtype_item, REG_ARG_2);
+ emit_access_stack(emit, list_index, &vtype_list, REG_ARG_1);
+ assert(vtype_list == VTYPE_PYOBJ);
+ assert(vtype_item == VTYPE_PYOBJ);
+ emit_call(emit, MP_F_LIST_APPEND);
+ emit_post(emit);
+}
+
+STATIC void emit_native_build_map(emit_t *emit, mp_uint_t n_args) {
+ emit_native_pre(emit);
+ emit_call_with_imm_arg(emit, MP_F_BUILD_MAP, n_args, REG_ARG_1);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // new map
+}
+
+STATIC void emit_native_store_map(emit_t *emit) {
+ vtype_kind_t vtype_key, vtype_value, vtype_map;
+ emit_pre_pop_reg_reg_reg(emit, &vtype_key, REG_ARG_2, &vtype_value, REG_ARG_3, &vtype_map, REG_ARG_1); // key, value, map
+ assert(vtype_key == VTYPE_PYOBJ);
+ assert(vtype_value == VTYPE_PYOBJ);
+ assert(vtype_map == VTYPE_PYOBJ);
+ emit_call(emit, MP_F_STORE_MAP);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // map
+}
+
+STATIC void emit_native_map_add(emit_t *emit, mp_uint_t map_index) {
+ // only used in list comprehension
+ vtype_kind_t vtype_map, vtype_key, vtype_value;
+ emit_pre_pop_reg_reg(emit, &vtype_key, REG_ARG_2, &vtype_value, REG_ARG_3);
+ emit_access_stack(emit, map_index, &vtype_map, REG_ARG_1);
+ assert(vtype_map == VTYPE_PYOBJ);
+ assert(vtype_key == VTYPE_PYOBJ);
+ assert(vtype_value == VTYPE_PYOBJ);
+ emit_call(emit, MP_F_STORE_MAP);
+ emit_post(emit);
+}
+
#if MICROPY_PY_BUILTINS_SET
// Build a set from the top n_args stack items (runtime call).
STATIC void emit_native_build_set(emit_t *emit, mp_uint_t n_args) {
    emit_native_pre(emit);
    emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, n_args); // arg2 = items
    emit_call_with_imm_arg(emit, MP_F_BUILD_SET, n_args, REG_ARG_1); // arg1 = count
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // resulting set
}

// Add the top-of-stack item to the set at stack depth set_index
// (used only for set comprehensions).
STATIC void emit_native_set_add(emit_t *emit, mp_uint_t set_index) {
    vtype_kind_t set_vt, item_vt;
    emit_pre_pop_reg(emit, &item_vt, REG_ARG_2); // arg2 = item
    emit_access_stack(emit, set_index, &set_vt, REG_ARG_1); // arg1 = set
    assert(set_vt == VTYPE_PYOBJ);
    assert(item_vt == VTYPE_PYOBJ);
    emit_call(emit, MP_F_STORE_SET);
    emit_post(emit);
}
#endif
+
#if MICROPY_PY_BUILTINS_SLICE
// Build a slice object from 2 items (start, stop; step defaults to None)
// or 3 items (start, stop, step).
STATIC void emit_native_build_slice(emit_t *emit, mp_uint_t n_args) {
    DEBUG_printf("build_slice %d\n", n_args);
    if (n_args != 2) {
        assert(n_args == 3);
        vtype_kind_t start_vt, stop_vt, step_vt;
        emit_pre_pop_reg_reg_reg(emit, &step_vt, REG_ARG_3, &stop_vt, REG_ARG_2, &start_vt, REG_ARG_1); // arg1 = start, arg2 = stop, arg3 = step
        assert(start_vt == VTYPE_PYOBJ);
        assert(stop_vt == VTYPE_PYOBJ);
        assert(step_vt == VTYPE_PYOBJ);
        emit_call(emit, MP_F_NEW_SLICE);
        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
    } else {
        vtype_kind_t start_vt, stop_vt;
        emit_pre_pop_reg_reg(emit, &stop_vt, REG_ARG_2, &start_vt, REG_ARG_1); // arg1 = start, arg2 = stop
        assert(start_vt == VTYPE_PYOBJ);
        assert(stop_vt == VTYPE_PYOBJ);
        emit_call_with_imm_arg(emit, MP_F_NEW_SLICE, (mp_uint_t)mp_const_none, REG_ARG_3); // arg3 = step (None)
        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
    }
}
#endif
+
+STATIC void emit_native_unpack_sequence(emit_t *emit, mp_uint_t n_args) {
+ DEBUG_printf("unpack_sequence %d\n", n_args);
+ vtype_kind_t vtype_base;
+ emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = seq
+ assert(vtype_base == VTYPE_PYOBJ);
+ emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, n_args); // arg3 = dest ptr
+ emit_call_with_imm_arg(emit, MP_F_UNPACK_SEQUENCE, n_args, REG_ARG_2); // arg2 = n_args
+}
+
+STATIC void emit_native_unpack_ex(emit_t *emit, mp_uint_t n_left, mp_uint_t n_right) {
+ DEBUG_printf("unpack_ex %d %d\n", n_left, n_right);
+ vtype_kind_t vtype_base;
+ emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = seq
+ assert(vtype_base == VTYPE_PYOBJ);
+ emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, n_left + n_right + 1); // arg3 = dest ptr
+ emit_call_with_imm_arg(emit, MP_F_UNPACK_EX, n_left | (n_right << 8), REG_ARG_2); // arg2 = n_left + n_right
+}
+
+STATIC void emit_native_make_function(emit_t *emit, scope_t *scope, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults) {
+ // call runtime, with type info for args, or don't support dict/default params, or only support Python objects for them
+ emit_native_pre(emit);
+ if (n_pos_defaults == 0 && n_kw_defaults == 0) {
+ emit_call_with_3_imm_args_and_first_aligned(emit, MP_F_MAKE_FUNCTION_FROM_RAW_CODE, (mp_uint_t)scope->raw_code, REG_ARG_1, (mp_uint_t)MP_OBJ_NULL, REG_ARG_2, (mp_uint_t)MP_OBJ_NULL, REG_ARG_3);
+ } else {
+ vtype_kind_t vtype_def_tuple, vtype_def_dict;
+ emit_pre_pop_reg_reg(emit, &vtype_def_dict, REG_ARG_3, &vtype_def_tuple, REG_ARG_2);
+ assert(vtype_def_tuple == VTYPE_PYOBJ);
+ assert(vtype_def_dict == VTYPE_PYOBJ);
+ emit_call_with_imm_arg_aligned(emit, MP_F_MAKE_FUNCTION_FROM_RAW_CODE, (mp_uint_t)scope->raw_code, REG_ARG_1);
+ }
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
+STATIC void emit_native_make_closure(emit_t *emit, scope_t *scope, mp_uint_t n_closed_over, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults) {
+ emit_native_pre(emit);
+ if (n_pos_defaults == 0 && n_kw_defaults == 0) {
+ emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_closed_over);
+ ASM_MOV_IMM_TO_REG(emit->as, n_closed_over, REG_ARG_2);
+ } else {
+ emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_closed_over + 2);
+ ASM_MOV_IMM_TO_REG(emit->as, 0x100 | n_closed_over, REG_ARG_2);
+ }
+ ASM_MOV_ALIGNED_IMM_TO_REG(emit->as, (mp_uint_t)scope->raw_code, REG_ARG_1);
+ ASM_CALL_IND(emit->as, mp_fun_table[MP_F_MAKE_CLOSURE_FROM_RAW_CODE], MP_F_MAKE_CLOSURE_FROM_RAW_CODE);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
// Emit a function call.  Three cases: a viper cast operator (e.g. int(x),
// ptr8(x)) which is compiled away into a type conversion; a call with
// *args/**kwargs via the var runtime helper; and a plain call via
// mp_native_call_function_n_kw.
STATIC void emit_native_call_function(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) {
    DEBUG_printf("call_function(n_pos=" UINT_FMT ", n_kw=" UINT_FMT ", star_flags=" UINT_FMT ")\n", n_positional, n_keyword, star_flags);

    // TODO: in viper mode, call special runtime routine with type info for args,
    // and wanted type info for return, to remove need for boxing/unboxing

    emit_native_pre(emit);
    // the callee sits below the arguments on the stack
    vtype_kind_t vtype_fun = peek_vtype(emit, n_positional + 2 * n_keyword);
    if (vtype_fun == VTYPE_BUILTIN_CAST) {
        // casting operator
        assert(n_positional == 1 && n_keyword == 0);
        assert(!star_flags);
        DEBUG_printf("  cast to %d\n", vtype_fun);
        // the target vtype is stored as an immediate under the argument
        vtype_kind_t vtype_cast = peek_stack(emit, 1)->data.u_imm;
        switch (peek_vtype(emit, 0)) {
            case VTYPE_PYOBJ: {
                // boxed object: unbox via the runtime conversion helper
                vtype_kind_t vtype;
                emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
                emit_pre_pop_discard(emit);
                emit_call_with_imm_arg(emit, MP_F_CONVERT_OBJ_TO_NATIVE, vtype_cast, REG_ARG_2); // arg2 = type
                emit_post_push_reg(emit, vtype_cast, REG_RET);
                break;
            }
            case VTYPE_BOOL:
            case VTYPE_INT:
            case VTYPE_UINT:
            case VTYPE_PTR:
            case VTYPE_PTR8:
            case VTYPE_PTR16:
            case VTYPE_PTR32:
            case VTYPE_PTR_NONE:
                // native value: a cast is just a re-labelling of the vtype
                emit_fold_stack_top(emit, REG_ARG_1);
                emit_post_top_set_vtype(emit, vtype_cast);
                break;
            default:
                assert(!"TODO: convert obj to int");
        }
    } else {
        assert(vtype_fun == VTYPE_PYOBJ);
        if (star_flags) {
            // +3: the function itself plus the *args and **kwargs items
            emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_positional + 2 * n_keyword + 3); // pointer to args
            emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW_VAR, 0, REG_ARG_1, n_positional | (n_keyword << 8), REG_ARG_2);
            emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
        } else {
            if (n_positional != 0 || n_keyword != 0) {
                emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_positional + 2 * n_keyword); // pointer to args
            }
            emit_pre_pop_reg(emit, &vtype_fun, REG_ARG_1); // the function
            emit_call_with_imm_arg(emit, MP_F_NATIVE_CALL_FUNCTION_N_KW, n_positional | (n_keyword << 8), REG_ARG_2);
            emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
        }
    }
}
+
// Emit native code for a method call: the stack holds the method, self, and
// then the arguments.  Both paths hand a pointer to the stacked items to a
// generic runtime call helper and push the boxed result.
STATIC void emit_native_call_method(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) {
    if (star_flags) {
        // +4 extra stack slots (one more than the plain-function case, for self)
        emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_positional + 2 * n_keyword + 4); // pointer to args
        // first immediate 1 marks this as a method call
        // (cf. the 0 passed in emit_native_call_function)
        emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW_VAR, 1, REG_ARG_1, n_positional | (n_keyword << 8), REG_ARG_2);
        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
    } else {
        emit_native_pre(emit);
        emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 2 + n_positional + 2 * n_keyword); // pointer to items, including meth and self
        emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, n_positional, REG_ARG_1, n_keyword, REG_ARG_2);
        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
    }
}
+
// Emit code for a 'return' statement: pop the return value into REG_RET
// (checking its type in viper mode) and emit the function epilogue.
STATIC void emit_native_return_value(emit_t *emit) {
    DEBUG_printf("return_value\n");
    if (emit->do_viper_types) {
        if (peek_vtype(emit, 0) == VTYPE_PTR_NONE) {
            // returning None: materialise the representation matching the
            // declared return type (the None object, or native 0)
            emit_pre_pop_discard(emit);
            if (emit->return_vtype == VTYPE_PYOBJ) {
                ASM_MOV_IMM_TO_REG(emit->as, (mp_uint_t)mp_const_none, REG_RET);
            } else {
                ASM_MOV_IMM_TO_REG(emit->as, 0, REG_RET);
            }
        } else {
            vtype_kind_t vtype;
            emit_pre_pop_reg(emit, &vtype, REG_RET);
            if (vtype != emit->return_vtype) {
                // viper functions declare a native return type; a mismatch
                // is reported as a compile-time error
                EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                    "return expected '%q' but got '%q'",
                    vtype_to_qstr(emit->return_vtype), vtype_to_qstr(vtype));
            }
        }
    } else {
        // ordinary native code always returns a boxed object
        vtype_kind_t vtype;
        emit_pre_pop_reg(emit, &vtype, REG_RET);
        assert(vtype == VTYPE_PYOBJ);
    }
    emit->last_emit_was_return_value = true;
    //ASM_BREAK_POINT(emit->as); // to insert a break-point for debugging
    ASM_EXIT(emit->as);
}
+
// Emit code for 'raise'; only the single-argument form (raise exc) is
// supported by this emitter.
STATIC void emit_native_raise_varargs(emit_t *emit, mp_uint_t n_args) {
    assert(n_args == 1);
    vtype_kind_t vtype_exc;
    emit_pre_pop_reg(emit, &vtype_exc, REG_ARG_1); // arg1 = object to raise
    if (vtype_exc != VTYPE_PYOBJ) {
        // in viper mode only boxed objects may be raised
        EMIT_NATIVE_VIPER_TYPE_ERROR(emit, "must raise an object");
    }
    // TODO probably make this 1 call to the runtime (which could even call convert, native_raise(obj, type))
    emit_call(emit, MP_F_NATIVE_RAISE);
}
+
// Generators (yield) are not supported by the native emitter (for now);
// reaching this is an internal error.
STATIC void emit_native_yield_value(emit_t *emit) {
    // not supported (for now)
    (void)emit;
    assert(0);
}
// 'yield from' is not supported by the native emitter (for now);
// reaching this is an internal error.
STATIC void emit_native_yield_from(emit_t *emit) {
    // not supported (for now)
    (void)emit;
    assert(0);
}
+
// Begin an except handler.
// This instruction follows an nlr_pop, so the stack counter is back to zero,
// when really it should be up by a whole nlr_buf_t.  We then want to pop the
// nlr_buf_t here, but save the first 2 elements, so we can get the thrown value.
STATIC void emit_native_start_except_handler(emit_t *emit) {
    adjust_stack(emit, 1);
    vtype_kind_t vtype_nlr;
    emit_pre_pop_reg(emit, &vtype_nlr, REG_ARG_1); // get the thrown value
    emit_pre_pop_discard(emit); // discard the linked-list pointer in the nlr_buf
    // the thrown value is duplicated into all 3 exception-item slots
    emit_post_push_reg_reg_reg(emit, VTYPE_PYOBJ, REG_ARG_1, VTYPE_PYOBJ, REG_ARG_1, VTYPE_PYOBJ, REG_ARG_1); // push the 3 exception items
}
+
// End an except handler.
STATIC void emit_native_end_except_handler(emit_t *emit) {
    // NOTE(review): presumably balances a stack slot left over from the
    // handler entry sequence above -- confirm against the compiler's
    // stack accounting.
    adjust_stack(emit, -1);
}
+
// The emitter method table: the function-pointer interface through which the
// compiler drives this native emitter.  Entry order must match the
// emit_method_table_t declaration (py/emit.h); the conditional entries must
// track the same MICROPY_PY_BUILTINS_* options used there.
const emit_method_table_t EXPORT_FUN(method_table) = {
    emit_native_set_native_type,
    emit_native_start_pass,
    emit_native_end_pass,
    emit_native_last_emit_was_return_value,
    emit_native_adjust_stack_size,
    emit_native_set_source_line,

    // load/store/delete handlers for the variable scopes
    // (fast/local, deref/closure, name, global)
    {
        emit_native_load_fast,
        emit_native_load_deref,
        emit_native_load_name,
        emit_native_load_global,
    },
    {
        emit_native_store_fast,
        emit_native_store_deref,
        emit_native_store_name,
        emit_native_store_global,
    },
    {
        emit_native_delete_fast,
        emit_native_delete_deref,
        emit_native_delete_name,
        emit_native_delete_global,
    },

    emit_native_label_assign,
    emit_native_import_name,
    emit_native_import_from,
    emit_native_import_star,
    emit_native_load_const_tok,
    emit_native_load_const_small_int,
    emit_native_load_const_str,
    emit_native_load_const_obj,
    emit_native_load_null,
    emit_native_load_attr,
    emit_native_load_method,
    emit_native_load_build_class,
    emit_native_load_subscr,
    emit_native_store_attr,
    emit_native_store_subscr,
    emit_native_delete_attr,
    emit_native_delete_subscr,
    emit_native_dup_top,
    emit_native_dup_top_two,
    emit_native_pop_top,
    emit_native_rot_two,
    emit_native_rot_three,
    emit_native_jump,
    emit_native_pop_jump_if,
    emit_native_jump_if_or_pop,
    emit_native_break_loop,
    emit_native_continue_loop,
    emit_native_setup_with,
    emit_native_with_cleanup,
    emit_native_setup_except,
    emit_native_setup_finally,
    emit_native_end_finally,
    emit_native_get_iter,
    emit_native_for_iter,
    emit_native_for_iter_end,
    emit_native_pop_block,
    emit_native_pop_except,
    emit_native_unary_op,
    emit_native_binary_op,
    emit_native_build_tuple,
    emit_native_build_list,
    emit_native_list_append,
    emit_native_build_map,
    emit_native_store_map,
    emit_native_map_add,
    #if MICROPY_PY_BUILTINS_SET
    emit_native_build_set,
    emit_native_set_add,
    #endif
    #if MICROPY_PY_BUILTINS_SLICE
    emit_native_build_slice,
    #endif
    emit_native_unpack_sequence,
    emit_native_unpack_ex,
    emit_native_make_function,
    emit_native_make_closure,
    emit_native_call_function,
    emit_native_call_method,
    emit_native_return_value,
    emit_native_raise_varargs,
    emit_native_yield_value,
    emit_native_yield_from,

    emit_native_start_except_handler,
    emit_native_end_except_handler,
};
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/formatfloat.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,427 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpconfig.h"
+#if MICROPY_FLOAT_IMPL != MICROPY_FLOAT_IMPL_NONE
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include "py/formatfloat.h"
+
+/***********************************************************************
+
+ Routine for converting an arbitrary floating
+ point number into a string.
+
+ The code in this function was inspired by Fred Bayer's pdouble.c.
+ Since pdouble.c was released as Public Domain, I'm releasing this
+ code as public domain as well.
+
+ The original code can be found in https://github.com/dhylands/format-float
+
+ Dave Hylands
+
+***********************************************************************/
+
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+// 1 sign bit, 8 exponent bits, and 23 mantissa bits.
+// exponent values 0 and 255 are reserved, exponent can be 1 to 254.
+// exponent is stored with a bias of 127.
+// The min and max floats are on the order of 1x10^37 and 1x10^-37
+
+#define FPTYPE float
+#define FPCONST(x) x##F
+#define FPROUND_TO_ONE 0.9999995F
+#define FPDECEXP 32
+#define FPMIN_BUF_SIZE 6 // +9e+99
+
#define FLT_SIGN_MASK 0x80000000
#define FLT_EXP_MASK 0x7F800000
#define FLT_MAN_MASK 0x007FFFFF

// View of a float's raw IEEE-754 single-precision bits; going through a
// union avoids the strict-aliasing problems of a pointer cast.
union floatbits {
    float f;
    uint32_t u;
};

// Fetch the raw bit pattern of x.
static inline uint32_t fp_bits(float x) {
    union floatbits fb;
    fb.f = x;
    return fb.u;
}

// Non-zero when the sign bit of x is set.
static inline int fp_signbit(float x) {
    return fp_bits(x) & FLT_SIGN_MASK;
}

// True when the exponent field is all ones, i.e. x is an infinity or a NaN.
static inline int fp_isspecial(float x) {
    return (fp_bits(x) & FLT_EXP_MASK) == FLT_EXP_MASK;
}

// Meaningful only after fp_isspecial(x): a zero mantissa means infinity.
static inline int fp_isinf(float x) {
    return (fp_bits(x) & FLT_MAN_MASK) == 0;
}

// True for positive zero (callers strip the sign beforehand).
static inline int fp_iszero(float x) {
    return fp_bits(x) == 0;
}

// True when 0 <= x < 1: for non-negative floats the bit patterns order the
// same as the values, and 0x3f800000 is the encoding of 1.0f.
static inline int fp_isless1(float x) {
    return fp_bits(x) < 0x3f800000;
}

// Assumes both fp_isspecial() and fp_isinf() were applied before
#define fp_isnan(x) 1
+
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+
+#define FPTYPE double
+#define FPCONST(x) x
+#define FPROUND_TO_ONE 0.999999999995
+#define FPDECEXP 256
+#define FPMIN_BUF_SIZE 7 // +9e+199
+#include <math.h>
+#define fp_signbit(x) signbit(x)
+#define fp_isspecial(x) 1
+#define fp_isnan(x) isnan(x)
+#define fp_isinf(x) isinf(x)
+#define fp_iszero(x) (x == 0)
+#define fp_isless1(x) (x < 1.0)
+
+#endif
+
// Power-of-ten tables used to extract the decimal exponent one binary-search
// step at a time (see the FPDECEXP loops in mp_format_float).  Entries run
// from the largest power down to 10^1; the two tables are walked in step.
static const FPTYPE g_pos_pow[] = {
    #if FPDECEXP > 32
    1e256, 1e128, 1e64,
    #endif
    1e32, 1e16, 1e8, 1e4, 1e2, 1e1
};
static const FPTYPE g_neg_pow[] = {
    #if FPDECEXP > 32
    1e-256, 1e-128, 1e-64,
    #endif
    1e-32, 1e-16, 1e-8, 1e-4, 1e-2, 1e-1
};
+
// Format the floating-point number f into buf, which has total size buf_size
// (including room for the null terminator).
// fmt is the conversion character: 'e'/'E', 'f'/'F' or 'g'/'G'.
// prec is the requested precision; a negative value selects the default of 6.
// sign, if non-zero, is a character (e.g. '+' or ' ') written before
// non-negative numbers.
// Returns the number of characters written, excluding the null terminator.
// The precision is silently reduced where necessary so the output always
// fits in buf.
int mp_format_float(FPTYPE f, char *buf, size_t buf_size, char fmt, int prec, char sign) {

    char *s = buf;

    if (buf_size <= FPMIN_BUF_SIZE) {
        // FPMIN_BUF_SIZE is the minimum size needed to store any FP number.
        // If the buffer does not have enough room for this (plus null terminator)
        // then don't try to format the float.

        if (buf_size >= 2) {
            *s++ = '?';
        }
        if (buf_size >= 1) {
            *s++ = '\0';
        }
        return buf_size >= 2;
    }
    if (fp_signbit(f)) {
        *s++ = '-';
        f = -f; // work with the magnitude from here on
    } else {
        if (sign) {
            *s++ = sign;
        }
    }

    // buf_remaining contains bytes available for digits and exponent.
    // It is buf_size minus room for the sign and null byte.
    int buf_remaining = buf_size - 1 - (s - buf);

    if (fp_isspecial(f)) {
        // uc is 0x20 when fmt is lowercase, 0 when uppercase; XOR-ing it into
        // the uppercase letters below makes inf/nan match the case of fmt
        char uc = fmt & 0x20;
        if (fp_isinf(f)) {
            *s++ = 'I' ^ uc;
            *s++ = 'N' ^ uc;
            *s++ = 'F' ^ uc;
            goto ret;
        } else if (fp_isnan(f)) {
            *s++ = 'N' ^ uc;
            *s++ = 'A' ^ uc;
            *s++ = 'N' ^ uc;
        ret:
            *s = '\0';
            return s - buf;
        }
    }

    if (prec < 0) {
        prec = 6;
    }
    char e_char = 'E' | (fmt & 0x20); // e_char will match case of fmt
    fmt |= 0x20; // Force fmt to be lowercase
    char org_fmt = fmt; // remember the requested format; fmt may change below
    if (fmt == 'g' && prec == 0) {
        prec = 1;
    }
    int e, e1; // decimal exponent, and binary-search step for building it
    int dec = 0; // counts digits remaining before the decimal point
    char e_sign = '\0'; // '\0' means "no exponent part is printed"
    int num_digits = 0;
    const FPTYPE *pos_pow = g_pos_pow;
    const FPTYPE *neg_pow = g_neg_pow;

    if (fp_iszero(f)) {
        e = 0;
        if (fmt == 'f') {
            // Truncate precision to prevent buffer overflow
            if (prec + 2 > buf_remaining) {
                prec = buf_remaining - 2;
            }
            num_digits = prec + 1;
        } else {
            // Truncate precision to prevent buffer overflow
            if (prec + 6 > buf_remaining) {
                prec = buf_remaining - 6;
            }
            if (fmt == 'e') {
                e_sign = '+';
            }
        }
    } else if (fp_isless1(f)) {
        // We need to figure out what an integer digit will be used
        // in case 'f' is used (or we revert other format to it below).
        // As we just tested number to be <1, this is obviously 0,
        // but we can round it up to 1 below.
        char first_dig = '0';
        if (f >= FPROUND_TO_ONE) {
            first_dig = '1';
        }

        // Build negative exponent by scaling f up towards [1, 10)
        for (e = 0, e1 = FPDECEXP; e1; e1 >>= 1, pos_pow++, neg_pow++) {
            if (*neg_pow > f) {
                e += e1;
                f *= *pos_pow;
            }
        }
        char e_sign_char = '-';
        if (fp_isless1(f) && f >= FPROUND_TO_ONE) {
            // close enough to round up to exactly 1.0
            f = FPCONST(1.0);
            if (e == 0) {
                e_sign_char = '+';
            }
        } else if (fp_isless1(f)) {
            // one final scale-up to land in [1, 10)
            e++;
            f *= FPCONST(10.0);
        }

        // If the user specified 'g' format, and e is <= 4, then we'll switch
        // to the fixed format ('f')

        if (fmt == 'f' || (fmt == 'g' && e <= 4)) {
            fmt = 'f';
            dec = -1;
            *s++ = first_dig;

            if (org_fmt == 'g') {
                prec += (e - 1);
            }

            // truncate precision to prevent buffer overflow
            if (prec + 2 > buf_remaining) {
                prec = buf_remaining - 2;
            }

            num_digits = prec;
            if (num_digits) {
                *s++ = '.';
                // emit the leading zeros between the point and the first digit
                while (--e && num_digits) {
                    *s++ = '0';
                    num_digits--;
                }
            }
        } else {
            // For e & g formats, we'll be printing the exponent, so set the
            // sign.
            e_sign = e_sign_char;
            dec = 0;

            if (prec > (buf_remaining - FPMIN_BUF_SIZE)) {
                prec = buf_remaining - FPMIN_BUF_SIZE;
                if (fmt == 'g') {
                    prec++;
                }
            }
        }
    } else {
        // Build positive exponent by scaling f down towards [1, 10)
        for (e = 0, e1 = FPDECEXP; e1; e1 >>= 1, pos_pow++, neg_pow++) {
            if (*pos_pow <= f) {
                e += e1;
                f *= *neg_pow;
            }
        }

        // It can be that f was right on the edge of an entry in pos_pow
        // and still needs to be reduced once more
        if (f >= FPCONST(10.0)) {
            e += 1;
            f *= FPCONST(0.1);
        }

        // If the user specified fixed format (fmt == 'f') and e makes the
        // number too big to fit into the available buffer, then we'll
        // switch to the 'e' format.

        if (fmt == 'f') {
            if (e >= buf_remaining) {
                fmt = 'e';
            } else if ((e + prec + 2) > buf_remaining) {
                prec = buf_remaining - e - 2;
                if (prec < 0) {
                    // This means no decimal point, so we can add one back
                    // for the decimal.
                    prec++;
                }
            }
        }
        if (fmt == 'e' && prec > (buf_remaining - FPMIN_BUF_SIZE)) {
            prec = buf_remaining - FPMIN_BUF_SIZE;
        }
        if (fmt == 'g'){
            // Truncate precision to prevent buffer overflow
            if (prec + (FPMIN_BUF_SIZE - 1) > buf_remaining) {
                prec = buf_remaining - (FPMIN_BUF_SIZE - 1);
            }
        }
        // If the user specified 'g' format, and e is < prec, then we'll switch
        // to the fixed format.

        if (fmt == 'g' && e < prec) {
            fmt = 'f';
            prec -= (e + 1);
        }
        if (fmt == 'f') {
            dec = e;
            num_digits = prec + e + 1;
        } else {
            e_sign = '+';
        }
    }
    if (prec < 0) {
        // This can happen when the prec is trimmed to prevent buffer overflow
        prec = 0;
    }

    // We now have num.f as a floating point number between >= 1 and < 10
    // (or equal to zero), and e contains the absolute value of the power of
    // 10 exponent. and (dec + 1) == the number of digits before the decimal.

    // For e, prec is # digits after the decimal
    // For f, prec is # digits after the decimal
    // For g, prec is the max number of significant digits
    //
    // For e & g there will be a single digit before the decimal
    // for f there will be e digits before the decimal

    if (fmt == 'e') {
        num_digits = prec + 1;
    } else if (fmt == 'g') {
        if (prec == 0) {
            prec = 1;
        }
        num_digits = prec;
    }

    // Print the digits of the mantissa
    for (int i = 0; i < num_digits; ++i, --dec) {
        int32_t d = f; // truncation yields the next decimal digit
        *s++ = '0' + d;
        if (dec == 0 && prec > 0) {
            *s++ = '.';
        }
        f -= (FPTYPE)d;
        f *= FPCONST(10.0);
    }

    // Round
    // If we print non-exponential format (i.e. 'f'), but a digit we're going
    // to round by (e) is too far away, then there's nothing to round.
    if ((org_fmt != 'f' || e <= 1) && f >= FPCONST(5.0)) {
        // propagate the carry leftwards through the digits already written
        char *rs = s;
        rs--;
        while (1) {
            if (*rs == '.') {
                rs--;
                continue;
            }
            if (*rs < '0' || *rs > '9') {
                // + or -
                rs++; // So we sit on the digit to the right of the sign
                break;
            }
            if (*rs < '9') {
                (*rs)++;
                break;
            }
            *rs = '0';
            if (rs == buf) {
                break;
            }
            rs--;
        }
        if (*rs == '0') {
            // We need to insert a 1
            if (rs[1] == '.' && fmt != 'f') {
                // We're going to round 9.99 to 10.00
                // Move the decimal point
                rs[0] = '.';
                rs[1] = '0';
                if (e_sign == '-') {
                    e--;
                } else {
                    e++;
                }
            }
            s++;
            // shift everything right one place to make room for the leading 1
            char *ss = s;
            while (ss > rs) {
                *ss = ss[-1];
                ss--;
            }
            *rs = '1';
        }
        if (fp_isless1(f) && fmt == 'f') {
            // We rounded up to 1.0
            prec--;
        }
    }

    // verify that we did not overrun the input buffer so far
    assert((size_t)(s + 1 - buf) <= buf_size);

    if (org_fmt == 'g' && prec > 0) {
        // Remove trailing zeros and a trailing decimal point
        while (s[-1] == '0') {
            s--;
        }
        if (s[-1] == '.') {
            s--;
        }
    }
    // Append the exponent
    if (e_sign) {
        *s++ = e_char;
        *s++ = e_sign;
        // three-digit exponents only occur in the double build (FPMIN_BUF_SIZE == 7)
        if (FPMIN_BUF_SIZE == 7 && e >= 100) {
            *s++ = '0' + (e / 100);
        }
        *s++ = '0' + ((e / 10) % 10);
        *s++ = '0' + (e % 10);
    }
    *s = '\0';

    // verify that we did not overrun the input buffer
    assert((size_t)(s + 1 - buf) <= buf_size);

    return s - buf;
}
+
+#endif // MICROPY_FLOAT_IMPL != MICROPY_FLOAT_IMPL_NONE
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/formatfloat.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,35 @@ +/* + * This file is part of the Micro Python project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013, 2014 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef __MICROPY_INCLUDED_PY_FORMATFLOAT_H__ +#define __MICROPY_INCLUDED_PY_FORMATFLOAT_H__ + +#include "py/mpconfig.h" + +#if MICROPY_PY_BUILTINS_FLOAT +int mp_format_float(mp_float_t f, char *buf, size_t bufSize, char fmt, int prec, char sign); +#endif + +#endif // __MICROPY_INCLUDED_PY_FORMATFLOAT_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/frozenmod.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,61 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <stdint.h>
+
+#include "py/lexer.h"
+#include "py/frozenmod.h"
+
+#if MICROPY_MODULE_FROZEN
+
+#ifndef MICROPY_MODULE_FROZEN_LEXER
+#define MICROPY_MODULE_FROZEN_LEXER mp_lexer_new_from_str_len
+#else
+mp_lexer_t *MICROPY_MODULE_FROZEN_LEXER(qstr src_name, const char *str, mp_uint_t len, mp_uint_t free_len);
+#endif
+
+extern const char mp_frozen_names[];
+extern const uint32_t mp_frozen_sizes[];
+extern const char mp_frozen_content[];
+
// Look up a frozen module by name (str of length len).  mp_frozen_names is a
// sequence of null-terminated names terminated by an extra null byte; on a
// match, return a lexer over the corresponding slice of mp_frozen_content.
// Returns NULL if no frozen module of that name exists.
mp_lexer_t *mp_find_frozen_module(const char *str, int len) {
    const char *name = mp_frozen_names;

    size_t offset = 0;
    for (int i = 0; *name != 0; i++) {
        int l = strlen(name);
        if (l == len && !memcmp(str, name, l)) {
            // NOTE(review): the source name passed to the lexer is the empty
            // qstr MP_QSTR_ rather than the module name -- confirm this is
            // intended for error reporting.
            mp_lexer_t *lex = MICROPY_MODULE_FROZEN_LEXER(MP_QSTR_, mp_frozen_content + offset, mp_frozen_sizes[i], 0);
            return lex;
        }
        name += l + 1;
        // +1: presumably each module's content is null-terminated in
        // mp_frozen_content -- TODO confirm against the generator script
        offset += mp_frozen_sizes[i] + 1;
    }
    return NULL;
}
+
+#endif // MICROPY_MODULE_FROZEN
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/frozenmod.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,27 @@ +/* + * This file is part of the Micro Python project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2014 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +mp_lexer_t *mp_find_frozen_module(const char *str, int len);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/gc.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,808 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "py/mpstate.h"
+#include "py/gc.h"
+#include "py/obj.h"
+#include "py/runtime.h"
+
+#if MICROPY_ENABLE_GC
+
+#if 0 // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+// make this 1 to dump the heap each time it changes
+#define EXTENSIVE_HEAP_PROFILING (0)
+
+#define WORDS_PER_BLOCK ((MICROPY_BYTES_PER_GC_BLOCK) / BYTES_PER_WORD)
+#define BYTES_PER_BLOCK (MICROPY_BYTES_PER_GC_BLOCK)
+
+// ATB = allocation table byte
+// 0b00 = FREE -- free block
+// 0b01 = HEAD -- head of a chain of blocks
+// 0b10 = TAIL -- in the tail of a chain of blocks
+// 0b11 = MARK -- marked head block
+
+#define AT_FREE (0)
+#define AT_HEAD (1)
+#define AT_TAIL (2)
+#define AT_MARK (3)
+
+#define BLOCKS_PER_ATB (4)
+#define ATB_MASK_0 (0x03)
+#define ATB_MASK_1 (0x0c)
+#define ATB_MASK_2 (0x30)
+#define ATB_MASK_3 (0xc0)
+
+#define ATB_0_IS_FREE(a) (((a) & ATB_MASK_0) == 0)
+#define ATB_1_IS_FREE(a) (((a) & ATB_MASK_1) == 0)
+#define ATB_2_IS_FREE(a) (((a) & ATB_MASK_2) == 0)
+#define ATB_3_IS_FREE(a) (((a) & ATB_MASK_3) == 0)
+
+#define BLOCK_SHIFT(block) (2 * ((block) & (BLOCKS_PER_ATB - 1)))
+#define ATB_GET_KIND(block) ((MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] >> BLOCK_SHIFT(block)) & 3)
+#define ATB_ANY_TO_FREE(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] &= (~(AT_MARK << BLOCK_SHIFT(block))); } while (0)
+#define ATB_FREE_TO_HEAD(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_HEAD << BLOCK_SHIFT(block)); } while (0)
+#define ATB_FREE_TO_TAIL(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_TAIL << BLOCK_SHIFT(block)); } while (0)
+#define ATB_HEAD_TO_MARK(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_MARK << BLOCK_SHIFT(block)); } while (0)
+#define ATB_MARK_TO_HEAD(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] &= (~(AT_TAIL << BLOCK_SHIFT(block))); } while (0)
+
+#define BLOCK_FROM_PTR(ptr) (((byte*)(ptr) - MP_STATE_MEM(gc_pool_start)) / BYTES_PER_BLOCK)
+#define PTR_FROM_BLOCK(block) (((block) * BYTES_PER_BLOCK + (uintptr_t)MP_STATE_MEM(gc_pool_start)))
+#define ATB_FROM_BLOCK(bl) ((bl) / BLOCKS_PER_ATB)
+
+#if MICROPY_ENABLE_FINALISER
+// FTB = finaliser table byte
+// if set, then the corresponding block may have a finaliser
+
+#define BLOCKS_PER_FTB (8)
+
+#define FTB_GET(block) ((MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] >> ((block) & 7)) & 1)
+#define FTB_SET(block) do { MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] |= (1 << ((block) & 7)); } while (0)
+#define FTB_CLEAR(block) do { MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] &= (~(1 << ((block) & 7))); } while (0)
+#endif
+
// TODO waste less memory; currently requires that all entries in alloc_table have a corresponding block in pool
// Initialise the GC heap over the memory region [start, end).  The region is
// partitioned into the allocation table (2 bits per block), the optional
// finaliser table (1 bit per block), and the pool of blocks itself.
void gc_init(void *start, void *end) {
    // align end pointer on block boundary
    end = (void*)((uintptr_t)end & (~(BYTES_PER_BLOCK - 1)));
    DEBUG_printf("Initializing GC heap: %p..%p = " UINT_FMT " bytes\n", start, end, (byte*)end - (byte*)start);

    // calculate parameters for GC (T=total, A=alloc table, F=finaliser table, P=pool; all in bytes):
    // T = A + F + P
    // F = A * BLOCKS_PER_ATB / BLOCKS_PER_FTB
    // P = A * BLOCKS_PER_ATB * BYTES_PER_BLOCK
    // => T = A * (1 + BLOCKS_PER_ATB / BLOCKS_PER_FTB + BLOCKS_PER_ATB * BYTES_PER_BLOCK)
    size_t total_byte_len = (byte*)end - (byte*)start;
#if MICROPY_ENABLE_FINALISER
    MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len * BITS_PER_BYTE / (BITS_PER_BYTE + BITS_PER_BYTE * BLOCKS_PER_ATB / BLOCKS_PER_FTB + BITS_PER_BYTE * BLOCKS_PER_ATB * BYTES_PER_BLOCK);
#else
    // no finaliser table: one ATB byte covers BLOCKS_PER_ATB (= BITS_PER_BYTE/2)
    // blocks of the pool
    MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len / (1 + BITS_PER_BYTE / 2 * BYTES_PER_BLOCK);
#endif

    MP_STATE_MEM(gc_alloc_table_start) = (byte*)start;

#if MICROPY_ENABLE_FINALISER
    // finaliser table sits immediately after the allocation table
    size_t gc_finaliser_table_byte_len = (MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
    MP_STATE_MEM(gc_finaliser_table_start) = MP_STATE_MEM(gc_alloc_table_start) + MP_STATE_MEM(gc_alloc_table_byte_len);
#endif

    // the pool is placed at the (block-aligned) end of the region
    size_t gc_pool_block_len = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
    MP_STATE_MEM(gc_pool_start) = (byte*)end - gc_pool_block_len * BYTES_PER_BLOCK;
    MP_STATE_MEM(gc_pool_end) = end;

#if MICROPY_ENABLE_FINALISER
    assert(MP_STATE_MEM(gc_pool_start) >= MP_STATE_MEM(gc_finaliser_table_start) + gc_finaliser_table_byte_len);
#endif

    // clear ATBs
    memset(MP_STATE_MEM(gc_alloc_table_start), 0, MP_STATE_MEM(gc_alloc_table_byte_len));

#if MICROPY_ENABLE_FINALISER
    // clear FTBs
    memset(MP_STATE_MEM(gc_finaliser_table_start), 0, gc_finaliser_table_byte_len);
#endif

    // set last free ATB index to start of heap
    MP_STATE_MEM(gc_last_free_atb_index) = 0;

    // unlock the GC
    MP_STATE_MEM(gc_lock_depth) = 0;

    // allow auto collection
    MP_STATE_MEM(gc_auto_collect_enabled) = 1;

    DEBUG_printf("GC layout:\n");
    DEBUG_printf("  alloc table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_alloc_table_start), MP_STATE_MEM(gc_alloc_table_byte_len), MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB);
#if MICROPY_ENABLE_FINALISER
    DEBUG_printf("  finaliser table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_finaliser_table_start), gc_finaliser_table_byte_len, gc_finaliser_table_byte_len * BLOCKS_PER_FTB);
#endif
    DEBUG_printf("  pool at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_pool_start), gc_pool_block_len * BYTES_PER_BLOCK, gc_pool_block_len);
}
+
// Prevent the GC from collecting; calls may nest (depth counter).
void gc_lock(void) {
    MP_STATE_MEM(gc_lock_depth)++;
}
+
// Undo one level of gc_lock().
void gc_unlock(void) {
    MP_STATE_MEM(gc_lock_depth)--;
}
+
// True while at least one gc_lock() is outstanding.
bool gc_is_locked(void) {
    return MP_STATE_MEM(gc_lock_depth) != 0;
}
+
// ptr should be of type void*
// True when ptr could be a valid GC heap pointer: block-aligned and within
// the pool bounds.
#define VERIFY_PTR(ptr) ( \
        ((uintptr_t)(ptr) & (BYTES_PER_BLOCK - 1)) == 0 /* must be aligned on a block */ \
        && ptr >= (void*)MP_STATE_MEM(gc_pool_start)    /* must be above start of pool */ \
        && ptr < (void*)MP_STATE_MEM(gc_pool_end)       /* must be below end of pool */ \
    )

// ptr should be of type void*
// If ptr refers to an unmarked head block, mark it and push the block index
// onto the GC mark stack; when the stack is full, set the overflow flag so
// gc_deal_with_stack_overflow() can rescan later.
#define VERIFY_MARK_AND_PUSH(ptr) \
    do { \
        if (VERIFY_PTR(ptr)) { \
            size_t _block = BLOCK_FROM_PTR(ptr); \
            if (ATB_GET_KIND(_block) == AT_HEAD) { \
                /* an unmarked head, mark it, and push it on gc stack */ \
                DEBUG_printf("gc_mark(%p)\n", ptr); \
                ATB_HEAD_TO_MARK(_block); \
                if (MP_STATE_MEM(gc_sp) < &MP_STATE_MEM(gc_stack)[MICROPY_ALLOC_GC_STACK_SIZE]) { \
                    *MP_STATE_MEM(gc_sp)++ = _block; \
                } else { \
                    MP_STATE_MEM(gc_stack_overflow) = 1; \
                } \
            } \
        } \
    } while (0)
+
// Pop blocks off the mark stack and scan their contents for further GC
// pointers, marking and pushing those in turn, until the stack is empty.
STATIC void gc_drain_stack(void) {
    while (MP_STATE_MEM(gc_sp) > MP_STATE_MEM(gc_stack)) {
        // pop the next block off the stack
        size_t block = *--MP_STATE_MEM(gc_sp);

        // work out number of consecutive blocks in the chain starting with this one
        size_t n_blocks = 0;
        do {
            n_blocks += 1;
        } while (ATB_GET_KIND(block + n_blocks) == AT_TAIL);

        // check this block's children: treat every word of the allocation as
        // a potential pointer
        void **ptrs = (void**)PTR_FROM_BLOCK(block);
        for (size_t i = n_blocks * BYTES_PER_BLOCK / sizeof(void*); i > 0; i--, ptrs++) {
            void *ptr = *ptrs;
            VERIFY_MARK_AND_PUSH(ptr);
        }
    }
}
+
// Recover from a mark-stack overflow: rescan the whole heap re-tracing every
// marked block (whose children may not have been traced), repeating until a
// full pass completes without overflowing again.
STATIC void gc_deal_with_stack_overflow(void) {
    while (MP_STATE_MEM(gc_stack_overflow)) {
        MP_STATE_MEM(gc_stack_overflow) = 0;
        MP_STATE_MEM(gc_sp) = MP_STATE_MEM(gc_stack);

        // scan entire memory looking for blocks which have been marked but not their children
        for (size_t block = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
            // trace (again) if mark bit set
            if (ATB_GET_KIND(block) == AT_MARK) {
                *MP_STATE_MEM(gc_sp)++ = block;
                gc_drain_stack();
            }
        }
    }
}
+
// Sweep phase: free every unmarked head block (running its __del__ finaliser
// first when enabled) together with its tail blocks, and demote surviving
// MARK blocks back to HEAD.
STATIC void gc_sweep(void) {
    #if MICROPY_PY_GC_COLLECT_RETVAL
    MP_STATE_MEM(gc_collected) = 0;
    #endif
    // free unmarked heads and their tails
    int free_tail = 0; // set while sweeping the tails of a freed head
    for (size_t block = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
        switch (ATB_GET_KIND(block)) {
            case AT_HEAD:
#if MICROPY_ENABLE_FINALISER
                if (FTB_GET(block)) {
                    mp_obj_base_t *obj = (mp_obj_base_t*)PTR_FROM_BLOCK(block);
                    if (obj->type != NULL) {
                        // if the object has a type then see if it has a __del__ method
                        mp_obj_t dest[2];
                        mp_load_method_maybe(MP_OBJ_FROM_PTR(obj), MP_QSTR___del__, dest);
                        if (dest[0] != MP_OBJ_NULL) {
                            // load_method returned a method
                            mp_call_method_n_kw(0, 0, dest);
                        }
                    }
                    // clear finaliser flag
                    FTB_CLEAR(block);
                }
#endif
                free_tail = 1;
                DEBUG_printf("gc_sweep(%x)\n", PTR_FROM_BLOCK(block));
                #if MICROPY_PY_GC_COLLECT_RETVAL
                MP_STATE_MEM(gc_collected)++;
                #endif
                // fall through to free the head

            case AT_TAIL:
                if (free_tail) {
                    ATB_ANY_TO_FREE(block);
                }
                break;

            case AT_MARK:
                // block survived this collection: clear its mark bit
                ATB_MARK_TO_HEAD(block);
                free_tail = 0;
                break;
        }
    }
}
+
+// Begin a collection: lock the GC (so nothing allocates/frees mid-trace),
+// reset the mark stack, and mark everything reachable from the root-pointer
+// region at the start of the global state structure.
+void gc_collect_start(void) {
+    gc_lock();
+    MP_STATE_MEM(gc_stack_overflow) = 0;
+    MP_STATE_MEM(gc_sp) = MP_STATE_MEM(gc_stack);
+    // Trace root pointers. This relies on the root pointers being organised
+    // correctly in the mp_state_ctx structure. We scan nlr_top, dict_locals,
+    // dict_globals, then the root pointer section of mp_state_vm.
+    void **ptrs = (void**)(void*)&mp_state_ctx;
+    gc_collect_root(ptrs, offsetof(mp_state_ctx_t, vm.stack_top) / sizeof(void*));
+}
+
+// Mark every heap block reachable from the given array of candidate root
+// pointers.  Each entry is filtered by VERIFY_MARK_AND_PUSH (values that do
+// not point into the heap are ignored) and the mark stack is drained after
+// each push so it stays as shallow as possible.
+void gc_collect_root(void **ptrs, size_t len) {
+    for (void **p = ptrs, **end = ptrs + len; p < end; p++) {
+        void *ptr = *p;
+        VERIFY_MARK_AND_PUSH(ptr);
+        gc_drain_stack();
+    }
+}
+
+// Finish a collection: re-trace anything missed due to mark-stack overflow,
+// sweep unmarked blocks back to the free list, reset the allocator's
+// first-free search hint to the start of the heap, and unlock the GC.
+void gc_collect_end(void) {
+    gc_deal_with_stack_overflow();
+    gc_sweep();
+    MP_STATE_MEM(gc_last_free_atb_index) = 0;
+    gc_unlock();
+}
+
+// Walk the allocation table and fill *info with heap statistics: total heap
+// size, used/free byte counts, how many 1-block and 2-block runs were seen,
+// and the longest run of blocks encountered.
+void gc_info(gc_info_t *info) {
+    info->total = MP_STATE_MEM(gc_pool_end) - MP_STATE_MEM(gc_pool_start);
+    info->used = 0;
+    info->free = 0;
+    info->num_1block = 0;
+    info->num_2block = 0;
+    info->max_block = 0;
+
+    size_t run = 0; // length (in blocks) of the run currently being scanned
+    size_t total_blocks = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
+    for (size_t block = 0; block < total_blocks; block++) {
+        size_t kind = ATB_GET_KIND(block);
+
+        // a FREE or HEAD block ends the previous run; record its length
+        if (kind == AT_FREE || kind == AT_HEAD) {
+            if (run == 1) {
+                info->num_1block += 1;
+            } else if (run == 2) {
+                info->num_2block += 1;
+            }
+            if (run > info->max_block) {
+                info->max_block = run;
+            }
+        }
+
+        if (kind == AT_FREE) {
+            info->free += 1;
+            run = 0;
+        } else if (kind == AT_HEAD) {
+            info->used += 1;
+            run = 1;
+        } else if (kind == AT_TAIL) {
+            info->used += 1;
+            run += 1;
+        } else {
+            // AT_MARK: shouldn't be present outside of an active collection;
+            // leave the counters untouched, matching the original behaviour
+        }
+    }
+
+    // convert block counts into byte counts
+    info->used *= BYTES_PER_BLOCK;
+    info->free *= BYTES_PER_BLOCK;
+}
+
+// Allocate n_bytes from the GC heap, rounded up to a whole number of blocks.
+// If has_finaliser is true the chunk is flagged in the finaliser table so that
+// gc_sweep will call the object's __del__ before freeing it.  Returns NULL if
+// the GC is locked, n_bytes is 0, or no run of free blocks can be found even
+// after triggering a collection.
+void *gc_alloc(size_t n_bytes, bool has_finaliser) {
+    size_t n_blocks = ((n_bytes + BYTES_PER_BLOCK - 1) & (~(BYTES_PER_BLOCK - 1))) / BYTES_PER_BLOCK;
+    DEBUG_printf("gc_alloc(" UINT_FMT " bytes -> " UINT_FMT " blocks)\n", n_bytes, n_blocks);
+
+    // check if GC is locked
+    if (MP_STATE_MEM(gc_lock_depth) > 0) {
+        return NULL;
+    }
+
+    // check for 0 allocation
+    if (n_blocks == 0) {
+        return NULL;
+    }
+
+    size_t i;
+    size_t end_block;
+    size_t start_block;
+    size_t n_free = 0;
+    // if auto-collection is disabled, pretend a collection has already run so
+    // the search below is only attempted once
+    int collected = !MP_STATE_MEM(gc_auto_collect_enabled);
+    for (;;) {
+
+        // look for a run of n_blocks available blocks, scanning the ATB one
+        // byte (= BLOCKS_PER_ATB blocks) at a time
+        for (i = MP_STATE_MEM(gc_last_free_atb_index); i < MP_STATE_MEM(gc_alloc_table_byte_len); i++) {
+            byte a = MP_STATE_MEM(gc_alloc_table_start)[i];
+            if (ATB_0_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 0; goto found; } } else { n_free = 0; }
+            if (ATB_1_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 1; goto found; } } else { n_free = 0; }
+            if (ATB_2_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 2; goto found; } } else { n_free = 0; }
+            if (ATB_3_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 3; goto found; } } else { n_free = 0; }
+        }
+
+        // nothing found!
+        if (collected) {
+            return NULL;
+        }
+        DEBUG_printf("gc_alloc(" UINT_FMT "): no free mem, triggering GC\n", n_bytes);
+        gc_collect();
+        collected = 1;
+    }
+
+    // found, ending at block i inclusive
+found:
+    // get starting and end blocks, both inclusive
+    end_block = i;
+    start_block = i - n_free + 1;
+
+    // Set last free ATB index to block after last block we found, for start of
+    // next scan. To reduce fragmentation, we only do this if we were looking
+    // for a single free block, which guarantees that there are no free blocks
+    // before this one. Also, whenever we free or shrink a block we must check
+    // if this index needs adjusting (see gc_realloc and gc_free).
+    if (n_free == 1) {
+        MP_STATE_MEM(gc_last_free_atb_index) = (i + 1) / BLOCKS_PER_ATB;
+    }
+
+    // mark first block as used head
+    ATB_FREE_TO_HEAD(start_block);
+
+    // mark rest of blocks as used tail
+    // TODO for a run of many blocks can make this more efficient
+    for (size_t bl = start_block + 1; bl <= end_block; bl++) {
+        ATB_FREE_TO_TAIL(bl);
+    }
+
+    // get pointer to first block
+    void *ret_ptr = (void*)(MP_STATE_MEM(gc_pool_start) + start_block * BYTES_PER_BLOCK);
+    DEBUG_printf("gc_alloc(%p)\n", ret_ptr);
+
+    // zero out the additional bytes of the newly allocated blocks
+    // This is needed because the blocks may have previously held pointers
+    // to the heap and will not be set to something else if the caller
+    // doesn't actually use the entire block. As such they will continue
+    // to point to the heap and may prevent other blocks from being reclaimed.
+    memset((byte*)ret_ptr + n_bytes, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK - n_bytes);
+
+    #if MICROPY_ENABLE_FINALISER
+    if (has_finaliser) {
+        // clear type pointer in case it is never set
+        ((mp_obj_base_t*)ret_ptr)->type = NULL;
+        // set mp_obj flag only if it has a finaliser
+        FTB_SET(start_block);
+    }
+    #else
+    (void)has_finaliser;
+    #endif
+
+    #if EXTENSIVE_HEAP_PROFILING
+    gc_dump_alloc_table();
+    #endif
+
+    return ret_ptr;
+}
+
+/*
+void *gc_alloc(mp_uint_t n_bytes) {
+ return _gc_alloc(n_bytes, false);
+}
+
+void *gc_alloc_with_finaliser(mp_uint_t n_bytes) {
+ return _gc_alloc(n_bytes, true);
+}
+*/
+
+// force the freeing of a piece of memory
+// Returns the chunk (HEAD block plus its TAIL blocks) to the free list.
+// NOTE: does not run the finaliser, it only clears the finaliser flag.
+// Freeing NULL is a no-op; any other invalid pointer trips an assert in
+// debug builds.  Silently does nothing while the GC is locked.
+// TODO: freeing here does not call finaliser
+void gc_free(void *ptr) {
+    if (MP_STATE_MEM(gc_lock_depth) > 0) {
+        // TODO how to deal with this error?
+        return;
+    }
+
+    DEBUG_printf("gc_free(%p)\n", ptr);
+
+    if (VERIFY_PTR(ptr)) {
+        size_t block = BLOCK_FROM_PTR(ptr);
+        if (ATB_GET_KIND(block) == AT_HEAD) {
+            #if MICROPY_ENABLE_FINALISER
+            FTB_CLEAR(block);
+            #endif
+            // set the last_free pointer to this block if it's earlier in the heap,
+            // so the next gc_alloc scan starts no later than this freed space
+            if (block / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) {
+                MP_STATE_MEM(gc_last_free_atb_index) = block / BLOCKS_PER_ATB;
+            }
+
+            // free head and all of its tail blocks
+            do {
+                ATB_ANY_TO_FREE(block);
+                block += 1;
+            } while (ATB_GET_KIND(block) == AT_TAIL);
+
+            #if EXTENSIVE_HEAP_PROFILING
+            gc_dump_alloc_table();
+            #endif
+        } else {
+            // pointer is inside the heap but not at the start of a chunk
+            assert(!"bad free");
+        }
+    } else if (ptr != NULL) {
+        // non-NULL pointer that is not within the managed heap
+        assert(!"bad free");
+    }
+}
+
+// Return the allocated size in bytes of the chunk that ptr points to, or 0
+// if ptr is not a valid pointer to the head of an allocated chunk.
+size_t gc_nbytes(const void *ptr) {
+    if (!VERIFY_PTR(ptr)) {
+        // not a pointer into the managed heap
+        return 0;
+    }
+    size_t head = BLOCK_FROM_PTR(ptr);
+    if (ATB_GET_KIND(head) != AT_HEAD) {
+        // interior or free block: not the start of an allocation
+        return 0;
+    }
+    // count the HEAD block plus every consecutive TAIL block that follows it
+    size_t count = 1;
+    while (ATB_GET_KIND(head + count) == AT_TAIL) {
+        count += 1;
+    }
+    return count * BYTES_PER_BLOCK;
+}
+
+#if 0
+// old, simple realloc that didn't expand memory in place
+void *gc_realloc(void *ptr, mp_uint_t n_bytes) {
+ mp_uint_t n_existing = gc_nbytes(ptr);
+ if (n_bytes <= n_existing) {
+ return ptr;
+ } else {
+ bool has_finaliser;
+ if (ptr == NULL) {
+ has_finaliser = false;
+ } else {
+#if MICROPY_ENABLE_FINALISER
+ has_finaliser = FTB_GET(BLOCK_FROM_PTR((mp_uint_t)ptr));
+#else
+ has_finaliser = false;
+#endif
+ }
+ void *ptr2 = gc_alloc(n_bytes, has_finaliser);
+ if (ptr2 == NULL) {
+ return ptr2;
+ }
+ memcpy(ptr2, ptr, n_existing);
+ gc_free(ptr);
+ return ptr2;
+ }
+}
+
+#else // Alternative gc_realloc impl
+
+// Resize the chunk at ptr_in to n_bytes.  Tries, in order: no-op (size
+// already sufficient in blocks), in-place shrink, in-place grow into the
+// free blocks that follow, and finally — only if allow_move is true — a
+// fresh allocation plus copy plus free of the old chunk.  Returns the
+// (possibly moved) pointer, or NULL on failure / locked GC / bad pointer.
+// NULL input behaves like gc_alloc; n_bytes == 0 behaves like gc_free.
+void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
+    if (MP_STATE_MEM(gc_lock_depth) > 0) {
+        return NULL;
+    }
+
+    // check for pure allocation
+    if (ptr_in == NULL) {
+        return gc_alloc(n_bytes, false);
+    }
+
+    // check for pure free
+    if (n_bytes == 0) {
+        gc_free(ptr_in);
+        return NULL;
+    }
+
+    void *ptr = ptr_in;
+
+    // sanity check the ptr
+    if (!VERIFY_PTR(ptr)) {
+        return NULL;
+    }
+
+    // get first block
+    size_t block = BLOCK_FROM_PTR(ptr);
+
+    // sanity check the ptr is pointing to the head of a block
+    if (ATB_GET_KIND(block) != AT_HEAD) {
+        return NULL;
+    }
+
+    // compute number of new blocks that are requested
+    size_t new_blocks = (n_bytes + BYTES_PER_BLOCK - 1) / BYTES_PER_BLOCK;
+
+    // Get the total number of consecutive blocks that are already allocated to
+    // this chunk of memory, and then count the number of free blocks following
+    // it. Stop if we reach the end of the heap, or if we find enough extra
+    // free blocks to satisfy the realloc. Note that we need to compute the
+    // total size of the existing memory chunk so we can correctly and
+    // efficiently shrink it (see below for shrinking code).
+    size_t n_free = 0;
+    size_t n_blocks = 1; // counting HEAD block
+    size_t max_block = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
+    for (size_t bl = block + n_blocks; bl < max_block; bl++) {
+        byte block_type = ATB_GET_KIND(bl);
+        if (block_type == AT_TAIL) {
+            n_blocks++;
+            continue;
+        }
+        if (block_type == AT_FREE) {
+            n_free++;
+            if (n_blocks + n_free >= new_blocks) {
+                // stop as soon as we find enough blocks for n_bytes
+                break;
+            }
+            continue;
+        }
+        // next chunk's HEAD (or a MARK): end of usable space
+        break;
+    }
+
+    // return original ptr if it already has the requested number of blocks
+    if (new_blocks == n_blocks) {
+        return ptr_in;
+    }
+
+    // check if we can shrink the allocated area
+    if (new_blocks < n_blocks) {
+        // free unneeded tail blocks
+        for (size_t bl = block + new_blocks, count = n_blocks - new_blocks; count > 0; bl++, count--) {
+            ATB_ANY_TO_FREE(bl);
+        }
+
+        // set the last_free pointer to end of this block if it's earlier in the heap
+        if ((block + new_blocks) / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) {
+            MP_STATE_MEM(gc_last_free_atb_index) = (block + new_blocks) / BLOCKS_PER_ATB;
+        }
+
+        #if EXTENSIVE_HEAP_PROFILING
+        gc_dump_alloc_table();
+        #endif
+
+        return ptr_in;
+    }
+
+    // check if we can expand in place
+    if (new_blocks <= n_blocks + n_free) {
+        // mark few more blocks as used tail
+        for (size_t bl = block + n_blocks; bl < block + new_blocks; bl++) {
+            assert(ATB_GET_KIND(bl) == AT_FREE);
+            ATB_FREE_TO_TAIL(bl);
+        }
+
+        // zero out the additional bytes of the newly allocated blocks (see comment above in gc_alloc)
+        memset((byte*)ptr_in + n_bytes, 0, new_blocks * BYTES_PER_BLOCK - n_bytes);
+
+        #if EXTENSIVE_HEAP_PROFILING
+        gc_dump_alloc_table();
+        #endif
+
+        return ptr_in;
+    }
+
+    if (!allow_move) {
+        // not allowed to move memory block so return failure
+        return NULL;
+    }
+
+    // can't resize inplace; try to find a new contiguous chain,
+    // carrying over the finaliser flag of the old chunk
+    void *ptr_out = gc_alloc(n_bytes,
+#if MICROPY_ENABLE_FINALISER
+        FTB_GET(block)
+#else
+        false
+#endif
+    );
+
+    // check that the alloc succeeded
+    if (ptr_out == NULL) {
+        return NULL;
+    }
+
+    DEBUG_printf("gc_realloc(%p -> %p)\n", ptr_in, ptr_out);
+    memcpy(ptr_out, ptr_in, n_blocks * BYTES_PER_BLOCK);
+    gc_free(ptr_in);
+    return ptr_out;
+}
+#endif // Alternative gc_realloc impl
+
+// Print a two-line summary of heap statistics (from gc_info) to the
+// platform's standard printer.  The casts to uint keep the %u format
+// specifiers valid regardless of the port's size_t width.
+void gc_dump_info(void) {
+    gc_info_t info;
+    gc_info(&info);
+    mp_printf(&mp_plat_print, "GC: total: %u, used: %u, free: %u\n",
+        (uint)info.total, (uint)info.used, (uint)info.free);
+    mp_printf(&mp_plat_print, " No. of 1-blocks: %u, 2-blocks: %u, max blk sz: %u\n",
+        (uint)info.num_1block, (uint)info.num_2block, (uint)info.max_block);
+}
+
+// Dump the allocation table as an ASCII map, one character per block:
+// '.' free, 'h' head, 't' tail, 'm' marked, or a letter for recognised
+// object types (T=tuple, L=list, D=dict, F=float, B=bytecode fn, M=module).
+// Long runs of free lines are abbreviated.  Used for heap debugging and
+// (with EXTENSIVE_HEAP_PROFILING) for comparing heap layouts between runs.
+void gc_dump_alloc_table(void) {
+    static const size_t DUMP_BYTES_PER_LINE = 64;
+    #if !EXTENSIVE_HEAP_PROFILING
+    // When comparing heap output we don't want to print the starting
+    // pointer of the heap because it changes from run to run.
+    mp_printf(&mp_plat_print, "GC memory layout; from %p:", MP_STATE_MEM(gc_pool_start));
+    #endif
+    for (size_t bl = 0; bl < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; bl++) {
+        if (bl % DUMP_BYTES_PER_LINE == 0) {
+            // a new line of blocks
+            {
+                // check if this line contains only free blocks
+                size_t bl2 = bl;
+                while (bl2 < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB && ATB_GET_KIND(bl2) == AT_FREE) {
+                    bl2++;
+                }
+                if (bl2 - bl >= 2 * DUMP_BYTES_PER_LINE) {
+                    // there are at least 2 lines containing only free blocks, so abbreviate their printing
+                    mp_printf(&mp_plat_print, "\n        (%u lines all free)", (uint)(bl2 - bl) / DUMP_BYTES_PER_LINE);
+                    // resume at the start of the line containing bl2
+                    bl = bl2 & (~(DUMP_BYTES_PER_LINE - 1));
+                    if (bl >= MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB) {
+                        // got to end of heap
+                        break;
+                    }
+                }
+            }
+            // print header for new line of blocks
+            // (the cast to uint32_t is for 16-bit ports)
+            #if EXTENSIVE_HEAP_PROFILING
+            mp_printf(&mp_plat_print, "\n%05x: ", (uint)((bl * BYTES_PER_BLOCK) & (uint32_t)0xfffff));
+            #else
+            mp_printf(&mp_plat_print, "\n%05x: ", (uint)(PTR_FROM_BLOCK(bl) & (uint32_t)0xfffff));
+            #endif
+        }
+        int c = ' ';
+        switch (ATB_GET_KIND(bl)) {
+            case AT_FREE: c = '.'; break;
+            /* this prints out if the object is reachable from BSS or STACK (for unix only)
+            case AT_HEAD: {
+                c = 'h';
+                void **ptrs = (void**)(void*)&mp_state_ctx;
+                mp_uint_t len = offsetof(mp_state_ctx_t, vm.stack_top) / sizeof(mp_uint_t);
+                for (mp_uint_t i = 0; i < len; i++) {
+                    mp_uint_t ptr = (mp_uint_t)ptrs[i];
+                    if (VERIFY_PTR(ptr) && BLOCK_FROM_PTR(ptr) == bl) {
+                        c = 'B';
+                        break;
+                    }
+                }
+                if (c == 'h') {
+                    ptrs = (void**)&c;
+                    len = ((mp_uint_t)MP_STATE_VM(stack_top) - (mp_uint_t)&c) / sizeof(mp_uint_t);
+                    for (mp_uint_t i = 0; i < len; i++) {
+                        mp_uint_t ptr = (mp_uint_t)ptrs[i];
+                        if (VERIFY_PTR(ptr) && BLOCK_FROM_PTR(ptr) == bl) {
+                            c = 'S';
+                            break;
+                        }
+                    }
+                }
+                break;
+            }
+            */
+            /* this prints the uPy object type of the head block */
+            case AT_HEAD: {
+                // the first word of a heap object is its type pointer
+                void **ptr = (void**)(MP_STATE_MEM(gc_pool_start) + bl * BYTES_PER_BLOCK);
+                if (*ptr == &mp_type_tuple) { c = 'T'; }
+                else if (*ptr == &mp_type_list) { c = 'L'; }
+                else if (*ptr == &mp_type_dict) { c = 'D'; }
+                #if MICROPY_PY_BUILTINS_FLOAT
+                else if (*ptr == &mp_type_float) { c = 'F'; }
+                #endif
+                else if (*ptr == &mp_type_fun_bc) { c = 'B'; }
+                else if (*ptr == &mp_type_module) { c = 'M'; }
+                else {
+                    c = 'h';
+                    #if 0
+                    // This code prints "Q" for qstr-pool data, and "q" for qstr-str
+                    // data. It can be useful to see how qstrs are being allocated,
+                    // but is disabled by default because it is very slow.
+                    for (qstr_pool_t *pool = MP_STATE_VM(last_pool); c == 'h' && pool != NULL; pool = pool->prev) {
+                        if ((qstr_pool_t*)ptr == pool) {
+                            c = 'Q';
+                            break;
+                        }
+                        for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
+                            if ((const byte*)ptr == *q) {
+                                c = 'q';
+                                break;
+                            }
+                        }
+                    }
+                    #endif
+                }
+                break;
+            }
+            case AT_TAIL: c = 't'; break;
+            case AT_MARK: c = 'm'; break;
+        }
+        mp_printf(&mp_plat_print, "%c", c);
+    }
+    mp_print_str(&mp_plat_print, "\n");
+}
+
+#if DEBUG_PRINT
+// Simple self-test of the allocator and collector (DEBUG builds only).
+// Builds a small rooted object graph, makes some unrooted allocations, then
+// runs a full collect and dumps the allocation table before/after so the
+// sweep can be inspected by eye.
+void gc_test(void) {
+    mp_uint_t len = 500;
+    mp_uint_t *heap = malloc(len);
+    if (heap == NULL) {
+        // can't run the test without a backing buffer
+        return;
+    }
+    gc_init(heap, heap + len / sizeof(mp_uint_t));
+    // Root array passed to gc_collect_root below.  It must be fully
+    // initialised: previously the unused entries held indeterminate stack
+    // values, which is undefined behaviour to read and made the collection
+    // result non-deterministic (garbage could keep dead blocks alive).
+    void *ptrs[100] = {0};
+    {
+        mp_uint_t **p = gc_alloc(16, false);
+        p[0] = gc_alloc(64, false);
+        p[1] = gc_alloc(1, false);
+        p[2] = gc_alloc(1, false);
+        p[3] = gc_alloc(1, false);
+        mp_uint_t ***p2 = gc_alloc(16, false);
+        p2[0] = p;
+        p2[1] = p;
+        // the only live root: everything reachable from p2 must survive
+        ptrs[0] = p2;
+    }
+    // allocate a spread of sizes; these are deliberately left unrooted so
+    // the collector should reclaim them (i == 0 yields NULL by design)
+    for (int i = 0; i < 25; i+=2) {
+        mp_uint_t *p = gc_alloc(i, false);
+        printf("p=%p\n", p);
+        if (i & 3) {
+            //ptrs[i] = p;
+        }
+    }
+
+    printf("Before GC:\n");
+    gc_dump_alloc_table();
+    printf("Starting GC...\n");
+    gc_collect_start();
+    gc_collect_root(ptrs, sizeof(ptrs) / sizeof(void*));
+    gc_collect_end();
+    printf("After GC:\n");
+    gc_dump_alloc_table();
+}
+#endif
+
+#endif // MICROPY_ENABLE_GC
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/gc.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,66 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_GC_H__
+#define __MICROPY_INCLUDED_PY_GC_H__
+
+#include <stdint.h>
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+
+// Initialise the GC to manage the memory between start and end.
+void gc_init(void *start, void *end);
+
+// These lock/unlock functions can be nested.
+// They can be used to prevent the GC from allocating/freeing.
+void gc_lock(void);
+void gc_unlock(void);
+bool gc_is_locked(void);
+
+// A given port must implement gc_collect by using the other collect functions.
+// Typical sequence: gc_collect_start, gc_collect_root over the port's extra
+// roots (registers/stack), then gc_collect_end.
+void gc_collect(void);
+void gc_collect_start(void);
+// Mark everything reachable from the len pointers starting at ptrs.
+void gc_collect_root(void **ptrs, size_t len);
+void gc_collect_end(void);
+
+// Allocate n_bytes (rounded up to whole blocks); returns NULL on failure or
+// while the GC is locked.  has_finaliser requests a __del__ call on sweep.
+void *gc_alloc(size_t n_bytes, bool has_finaliser);
+void gc_free(void *ptr); // does not call finaliser
+// Size in bytes of the chunk at ptr, or 0 if ptr is not a valid chunk head.
+size_t gc_nbytes(const void *ptr);
+// Resize a chunk; if allow_move is false it is only ever resized in place.
+void *gc_realloc(void *ptr, size_t n_bytes, bool allow_move);
+
+// Heap statistics as filled in by gc_info.
+typedef struct _gc_info_t {
+    size_t total;      // total heap size (pool end - pool start)
+    size_t used;       // bytes occupied by allocated blocks
+    size_t free;       // bytes in free blocks
+    size_t num_1block; // count of single-block runs seen
+    size_t num_2block; // count of two-block runs seen
+    size_t max_block;  // longest run of blocks seen
+} gc_info_t;
+
+void gc_info(gc_info_t *info);
+void gc_dump_info(void);
+void gc_dump_alloc_table(void);
+
+#endif // __MICROPY_INCLUDED_PY_GC_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/grammar.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,338 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2015 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+// rules for writing rules:
+// - zero_or_more is implemented using opt_rule around a one_or_more rule
+// - don't put opt_rule in arguments of or rule; instead, wrap the call to this or rule in opt_rule
+
+// # Start symbols for the grammar:
+// # single_input is a single interactive statement;
+// # file_input is a module or sequence of commands read from an input file;
+// # eval_input is the input for the eval() functions.
+// # NB: compound_stmt in single_input is followed by extra NEWLINE! --> not in Micro Python
+// single_input: NEWLINE | simple_stmt | compound_stmt
+// file_input: (NEWLINE | stmt)* ENDMARKER
+// eval_input: testlist NEWLINE* ENDMARKER
+
+DEF_RULE(single_input, nc, or(3), tok(NEWLINE), rule(simple_stmt), rule(compound_stmt))
+DEF_RULE(file_input, c(generic_all_nodes), and(1), opt_rule(file_input_2))
+DEF_RULE(file_input_2, c(generic_all_nodes), one_or_more, rule(file_input_3))
+DEF_RULE(file_input_3, nc, or(2), tok(NEWLINE), rule(stmt))
+DEF_RULE(eval_input, nc, and(2), rule(testlist), opt_rule(eval_input_2))
+DEF_RULE(eval_input_2, nc, and(1), tok(NEWLINE))
+
+// decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+// decorators: decorator+
+// decorated: decorators (classdef | funcdef)
+// funcdef: 'def' NAME parameters ['->' test] ':' suite
+// parameters: '(' [typedargslist] ')'
+// typedargslist: tfpdef ['=' test] (',' tfpdef ['=' test])* [',' ['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]] | '*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef
+// tfpdef: NAME [':' test]
+// varargslist: vfpdef ['=' test] (',' vfpdef ['=' test])* [',' ['*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef]] | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef
+// vfpdef: NAME
+
+DEF_RULE(decorator, nc, and(4), tok(DEL_AT), rule(dotted_name), opt_rule(trailer_paren), tok(NEWLINE))
+DEF_RULE(decorators, nc, one_or_more, rule(decorator))
+DEF_RULE(decorated, c(decorated), and(2), rule(decorators), rule(decorated_body))
+DEF_RULE(decorated_body, nc, or(2), rule(classdef), rule(funcdef))
+DEF_RULE(funcdef, c(funcdef), blank | and(8), tok(KW_DEF), tok(NAME), tok(DEL_PAREN_OPEN), opt_rule(typedargslist), tok(DEL_PAREN_CLOSE), opt_rule(funcdefrettype), tok(DEL_COLON), rule(suite))
+DEF_RULE(funcdefrettype, nc, ident | and(2), tok(DEL_MINUS_MORE), rule(test))
+// note: typedargslist lets through more than is allowed, compiler does further checks
+DEF_RULE(typedargslist, nc, list_with_end, rule(typedargslist_item), tok(DEL_COMMA))
+DEF_RULE(typedargslist_item, nc, or(3), rule(typedargslist_name), rule(typedargslist_star), rule(typedargslist_dbl_star))
+DEF_RULE(typedargslist_name, nc, ident | and(3), tok(NAME), opt_rule(typedargslist_colon), opt_rule(typedargslist_equal))
+DEF_RULE(typedargslist_star, nc, and(2), tok(OP_STAR), opt_rule(tfpdef))
+DEF_RULE(typedargslist_dbl_star, nc, and(3), tok(OP_DBL_STAR), tok(NAME), opt_rule(typedargslist_colon))
+DEF_RULE(typedargslist_colon, nc, ident | and(2), tok(DEL_COLON), rule(test))
+DEF_RULE(typedargslist_equal, nc, ident | and(2), tok(DEL_EQUAL), rule(test))
+DEF_RULE(tfpdef, nc, and(2), tok(NAME), opt_rule(typedargslist_colon))
+// note: varargslist lets through more than is allowed, compiler does further checks
+DEF_RULE(varargslist, nc, list_with_end, rule(varargslist_item), tok(DEL_COMMA))
+DEF_RULE(varargslist_item, nc, or(3), rule(varargslist_name), rule(varargslist_star), rule(varargslist_dbl_star))
+DEF_RULE(varargslist_name, nc, ident | and(2), tok(NAME), opt_rule(varargslist_equal))
+DEF_RULE(varargslist_star, nc, and(2), tok(OP_STAR), opt_rule(vfpdef))
+DEF_RULE(varargslist_dbl_star, nc, and(2), tok(OP_DBL_STAR), tok(NAME))
+DEF_RULE(varargslist_equal, nc, ident | and(2), tok(DEL_EQUAL), rule(test))
+DEF_RULE(vfpdef, nc, ident | and(1), tok(NAME))
+
+// stmt: compound_stmt | simple_stmt
+
+DEF_RULE(stmt, nc, or(2), rule(compound_stmt), rule(simple_stmt))
+
+// simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+
+DEF_RULE(simple_stmt, nc, and(2), rule(simple_stmt_2), tok(NEWLINE))
+DEF_RULE(simple_stmt_2, c(generic_all_nodes), list_with_end, rule(small_stmt), tok(DEL_SEMICOLON))
+
+// small_stmt: expr_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt
+// expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))*)
+// testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
+// augassign: '+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=' | '**=' | '//='
+// # For normal assignments, additional restrictions enforced by the interpreter
+
+DEF_RULE(small_stmt, nc, or(8), rule(del_stmt), rule(pass_stmt), rule(flow_stmt), rule(import_stmt), rule(global_stmt), rule(nonlocal_stmt), rule(assert_stmt), rule(expr_stmt))
+DEF_RULE(expr_stmt, c(expr_stmt), and(2), rule(testlist_star_expr), opt_rule(expr_stmt_2))
+DEF_RULE(expr_stmt_2, nc, or(2), rule(expr_stmt_augassign), rule(expr_stmt_assign_list))
+DEF_RULE(expr_stmt_augassign, nc, and(2), rule(augassign), rule(expr_stmt_6))
+DEF_RULE(expr_stmt_assign_list, nc, one_or_more, rule(expr_stmt_assign))
+DEF_RULE(expr_stmt_assign, nc, ident | and(2), tok(DEL_EQUAL), rule(expr_stmt_6))
+DEF_RULE(expr_stmt_6, nc, or(2), rule(yield_expr), rule(testlist_star_expr))
+DEF_RULE(testlist_star_expr, c(generic_tuple), list_with_end, rule(testlist_star_expr_2), tok(DEL_COMMA))
+DEF_RULE(testlist_star_expr_2, nc, or(2), rule(star_expr), rule(test))
+DEF_RULE(augassign, nc, or(12), tok(DEL_PLUS_EQUAL), tok(DEL_MINUS_EQUAL), tok(DEL_STAR_EQUAL), tok(DEL_SLASH_EQUAL), tok(DEL_PERCENT_EQUAL), tok(DEL_AMPERSAND_EQUAL), tok(DEL_PIPE_EQUAL), tok(DEL_CARET_EQUAL), tok(DEL_DBL_LESS_EQUAL), tok(DEL_DBL_MORE_EQUAL), tok(DEL_DBL_STAR_EQUAL), tok(DEL_DBL_SLASH_EQUAL))
+
+// del_stmt: 'del' exprlist
+// pass_stmt: 'pass'
+// flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+// break_stmt: 'break'
+// continue_stmt: 'continue'
+// return_stmt: 'return' [testlist]
+// yield_stmt: yield_expr
+// raise_stmt: 'raise' [test ['from' test]]
+
+DEF_RULE(del_stmt, c(del_stmt), and(2), tok(KW_DEL), rule(exprlist))
+DEF_RULE(pass_stmt, c(generic_all_nodes), and(1), tok(KW_PASS))
+DEF_RULE(flow_stmt, nc, or(5), rule(break_stmt), rule(continue_stmt), rule(return_stmt), rule(raise_stmt), rule(yield_stmt))
+DEF_RULE(break_stmt, c(break_stmt), and(1), tok(KW_BREAK))
+DEF_RULE(continue_stmt, c(continue_stmt), and(1), tok(KW_CONTINUE))
+DEF_RULE(return_stmt, c(return_stmt), and(2), tok(KW_RETURN), opt_rule(testlist))
+DEF_RULE(yield_stmt, c(yield_stmt), and(1), rule(yield_expr))
+DEF_RULE(raise_stmt, c(raise_stmt), and(2), tok(KW_RAISE), opt_rule(raise_stmt_arg))
+DEF_RULE(raise_stmt_arg, nc, and(2), rule(test), opt_rule(raise_stmt_from))
+DEF_RULE(raise_stmt_from, nc, ident | and(2), tok(KW_FROM), rule(test))
+
+// import_stmt: import_name | import_from
+// import_name: 'import' dotted_as_names
+// import_from: 'from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names)
+// import_as_name: NAME ['as' NAME]
+// dotted_as_name: dotted_name ['as' NAME]
+// import_as_names: import_as_name (',' import_as_name)* [',']
+// dotted_as_names: dotted_as_name (',' dotted_as_name)*
+// dotted_name: NAME ('.' NAME)*
+// global_stmt: 'global' NAME (',' NAME)*
+// nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
+// assert_stmt: 'assert' test [',' test]
+
+DEF_RULE(import_stmt, nc, or(2), rule(import_name), rule(import_from))
+DEF_RULE(import_name, c(import_name), and(2), tok(KW_IMPORT), rule(dotted_as_names))
+DEF_RULE(import_from, c(import_from), and(4), tok(KW_FROM), rule(import_from_2), tok(KW_IMPORT), rule(import_from_3))
+DEF_RULE(import_from_2, nc, or(2), rule(dotted_name), rule(import_from_2b))
+DEF_RULE(import_from_2b, nc, and(2), rule(one_or_more_period_or_ellipsis), opt_rule(dotted_name))
+DEF_RULE(import_from_3, nc, or(3), tok(OP_STAR), rule(import_as_names_paren), rule(import_as_names))
+DEF_RULE(import_as_names_paren, nc, ident | and(3), tok(DEL_PAREN_OPEN), rule(import_as_names), tok(DEL_PAREN_CLOSE))
+DEF_RULE(one_or_more_period_or_ellipsis, nc, one_or_more, rule(period_or_ellipsis))
+DEF_RULE(period_or_ellipsis, nc, or(2), tok(DEL_PERIOD), tok(ELLIPSIS))
+DEF_RULE(import_as_name, nc, and(2), tok(NAME), opt_rule(as_name))
+DEF_RULE(dotted_as_name, nc, and(2), rule(dotted_name), opt_rule(as_name))
+DEF_RULE(as_name, nc, ident | and(2), tok(KW_AS), tok(NAME))
+DEF_RULE(import_as_names, nc, list_with_end, rule(import_as_name), tok(DEL_COMMA))
+DEF_RULE(dotted_as_names, nc, list, rule(dotted_as_name), tok(DEL_COMMA))
+DEF_RULE(dotted_name, nc, list, tok(NAME), tok(DEL_PERIOD))
+DEF_RULE(global_stmt, c(global_stmt), and(2), tok(KW_GLOBAL), rule(name_list))
+DEF_RULE(nonlocal_stmt, c(nonlocal_stmt), and(2), tok(KW_NONLOCAL), rule(name_list))
+DEF_RULE(name_list, nc, list, tok(NAME), tok(DEL_COMMA))
+DEF_RULE(assert_stmt, c(assert_stmt), and(3), tok(KW_ASSERT), rule(test), opt_rule(assert_stmt_extra))
+DEF_RULE(assert_stmt_extra, nc, ident | and(2), tok(DEL_COMMA), rule(test))
+
+// compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated
+// if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+// while_stmt: 'while' test ':' suite ['else' ':' suite]
+// for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+// try_stmt: 'try' ':' suite ((except_clause ':' suite)+ ['else' ':' suite] ['finally' ':' suite] | 'finally' ':' suite)
+// # NB compile.c makes sure that the default except clause is last
+// except_clause: 'except' [test ['as' NAME]]
+// with_stmt: 'with' with_item (',' with_item)* ':' suite
+// with_item: test ['as' expr]
+// suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+
+DEF_RULE(compound_stmt, nc, or(8), rule(if_stmt), rule(while_stmt), rule(for_stmt), rule(try_stmt), rule(with_stmt), rule(funcdef), rule(classdef), rule(decorated))
+DEF_RULE(if_stmt, c(if_stmt), and(6), tok(KW_IF), rule(test), tok(DEL_COLON), rule(suite), opt_rule(if_stmt_elif_list), opt_rule(else_stmt))
+DEF_RULE(if_stmt_elif_list, nc, one_or_more, rule(if_stmt_elif))
+DEF_RULE(if_stmt_elif, nc, and(4), tok(KW_ELIF), rule(test), tok(DEL_COLON), rule(suite))
+DEF_RULE(while_stmt, c(while_stmt), and(5), tok(KW_WHILE), rule(test), tok(DEL_COLON), rule(suite), opt_rule(else_stmt))
+DEF_RULE(for_stmt, c(for_stmt), and(7), tok(KW_FOR), rule(exprlist), tok(KW_IN), rule(testlist), tok(DEL_COLON), rule(suite), opt_rule(else_stmt))
+DEF_RULE(try_stmt, c(try_stmt), and(4), tok(KW_TRY), tok(DEL_COLON), rule(suite), rule(try_stmt_2))
+DEF_RULE(try_stmt_2, nc, or(2), rule(try_stmt_except_and_more), rule(try_stmt_finally))
+// grammar rules (continued): try/except/finally, with-statement and suite
+DEF_RULE(try_stmt_except_and_more, nc, and(3), rule(try_stmt_except_list), opt_rule(else_stmt), opt_rule(try_stmt_finally))
+DEF_RULE(try_stmt_except, nc, and(4), tok(KW_EXCEPT), opt_rule(try_stmt_as_name), tok(DEL_COLON), rule(suite))
+DEF_RULE(try_stmt_as_name, nc, and(2), rule(test), opt_rule(as_name))
+DEF_RULE(try_stmt_except_list, nc, one_or_more, rule(try_stmt_except))
+DEF_RULE(try_stmt_finally, nc, and(3), tok(KW_FINALLY), tok(DEL_COLON), rule(suite))
+DEF_RULE(else_stmt, nc, ident | and(3), tok(KW_ELSE), tok(DEL_COLON), rule(suite))
+DEF_RULE(with_stmt, c(with_stmt), and(4), tok(KW_WITH), rule(with_stmt_list), tok(DEL_COLON), rule(suite))
+DEF_RULE(with_stmt_list, nc, list, rule(with_item), tok(DEL_COMMA))
+DEF_RULE(with_item, nc, and(2), rule(test), opt_rule(with_item_as))
+DEF_RULE(with_item_as, nc, ident | and(2), tok(KW_AS), rule(expr))
+DEF_RULE(suite, nc, or(2), rule(suite_block), rule(simple_stmt))
+DEF_RULE(suite_block, nc, and(4), tok(NEWLINE), tok(INDENT), rule(suite_block_stmts), tok(DEDENT))
+DEF_RULE(suite_block_stmts, c(generic_all_nodes), one_or_more, rule(stmt))
+
+// test: or_test ['if' or_test 'else' test] | lambdef
+// test_nocond: or_test | lambdef_nocond
+// lambdef: 'lambda' [varargslist] ':' test
+// lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
+
+DEF_RULE(test, nc, or(2), rule(lambdef), rule(test_if_expr))
+DEF_RULE(test_if_expr, c(test_if_expr), and(2), rule(or_test), opt_rule(test_if_else))
+DEF_RULE(test_if_else, nc, and(4), tok(KW_IF), rule(or_test), tok(KW_ELSE), rule(test))
+DEF_RULE(test_nocond, nc, or(2), rule(lambdef_nocond), rule(or_test))
+DEF_RULE(lambdef, c(lambdef), blank | and(4), tok(KW_LAMBDA), opt_rule(varargslist), tok(DEL_COLON), rule(test))
+DEF_RULE(lambdef_nocond, c(lambdef), blank | and(4), tok(KW_LAMBDA), opt_rule(varargslist), tok(DEL_COLON), rule(test_nocond))
+
+// or_test: and_test ('or' and_test)*
+// and_test: not_test ('and' not_test)*
+// not_test: 'not' not_test | comparison
+// comparison: expr (comp_op expr)*
+// comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+// star_expr: '*' expr
+// expr: xor_expr ('|' xor_expr)*
+// xor_expr: and_expr ('^' and_expr)*
+// and_expr: shift_expr ('&' shift_expr)*
+// shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+// arith_expr: term (('+'|'-') term)*
+// term: factor (('*'|'/'|'%'|'//') factor)*
+// factor: ('+'|'-'|'~') factor | power
+// power: atom trailer* ['**' factor]
+
+DEF_RULE(or_test, c(or_test), list, rule(and_test), tok(KW_OR))
+DEF_RULE(and_test, c(and_test), list, rule(not_test), tok(KW_AND))
+DEF_RULE(not_test, nc, or(2), rule(not_test_2), rule(comparison))
+DEF_RULE(not_test_2, c(not_test_2), and(2), tok(KW_NOT), rule(not_test))
+DEF_RULE(comparison, c(comparison), list, rule(expr), rule(comp_op))
+DEF_RULE(comp_op, nc, or(9), tok(OP_LESS), tok(OP_MORE), tok(OP_DBL_EQUAL), tok(OP_LESS_EQUAL), tok(OP_MORE_EQUAL), tok(OP_NOT_EQUAL), tok(KW_IN), rule(comp_op_not_in), rule(comp_op_is))
+DEF_RULE(comp_op_not_in, nc, and(2), tok(KW_NOT), tok(KW_IN))
+DEF_RULE(comp_op_is, nc, and(2), tok(KW_IS), opt_rule(comp_op_is_not))
+DEF_RULE(comp_op_is_not, nc, and(1), tok(KW_NOT))
+DEF_RULE(star_expr, c(star_expr), and(2), tok(OP_STAR), rule(expr))
+DEF_RULE(expr, c(expr), list, rule(xor_expr), tok(OP_PIPE))
+DEF_RULE(xor_expr, c(xor_expr), list, rule(and_expr), tok(OP_CARET))
+DEF_RULE(and_expr, c(and_expr), list, rule(shift_expr), tok(OP_AMPERSAND))
+DEF_RULE(shift_expr, c(shift_expr), list, rule(arith_expr), rule(shift_op))
+DEF_RULE(shift_op, nc, or(2), tok(OP_DBL_LESS), tok(OP_DBL_MORE))
+DEF_RULE(arith_expr, c(arith_expr), list, rule(term), rule(arith_op))
+DEF_RULE(arith_op, nc, or(2), tok(OP_PLUS), tok(OP_MINUS))
+DEF_RULE(term, c(term), list, rule(factor), rule(term_op))
+DEF_RULE(term_op, nc, or(4), tok(OP_STAR), tok(OP_SLASH), tok(OP_PERCENT), tok(OP_DBL_SLASH))
+DEF_RULE(factor, nc, or(2), rule(factor_2), rule(power))
+DEF_RULE(factor_2, c(factor_2), and(2), rule(factor_op), rule(factor))
+DEF_RULE(factor_op, nc, or(3), tok(OP_PLUS), tok(OP_MINUS), tok(OP_TILDE))
+DEF_RULE(power, c(power), and(3), rule(atom), opt_rule(power_trailers), opt_rule(power_dbl_star))
+DEF_RULE(power_trailers, c(power_trailers), one_or_more, rule(trailer))
+DEF_RULE(power_dbl_star, nc, ident | and(2), tok(OP_DBL_STAR), rule(factor))
+
+// atom: '(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictorsetmaker] '}' | NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False'
+// testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+// trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+
+DEF_RULE(atom, nc, or(11), tok(NAME), tok(INTEGER), tok(FLOAT_OR_IMAG), rule(atom_string), tok(ELLIPSIS), tok(KW_NONE), tok(KW_TRUE), tok(KW_FALSE), rule(atom_paren), rule(atom_bracket), rule(atom_brace))
+DEF_RULE(atom_string, c(atom_string), one_or_more, rule(string_or_bytes))
+DEF_RULE(string_or_bytes, nc, or(2), tok(STRING), tok(BYTES))
+DEF_RULE(atom_paren, c(atom_paren), and(3), tok(DEL_PAREN_OPEN), opt_rule(atom_2b), tok(DEL_PAREN_CLOSE))
+DEF_RULE(atom_2b, nc, or(2), rule(yield_expr), rule(testlist_comp))
+DEF_RULE(atom_bracket, c(atom_bracket), and(3), tok(DEL_BRACKET_OPEN), opt_rule(testlist_comp), tok(DEL_BRACKET_CLOSE))
+DEF_RULE(atom_brace, c(atom_brace), and(3), tok(DEL_BRACE_OPEN), opt_rule(dictorsetmaker), tok(DEL_BRACE_CLOSE))
+DEF_RULE(testlist_comp, nc, and(2), rule(testlist_comp_2), opt_rule(testlist_comp_3))
+DEF_RULE(testlist_comp_2, nc, or(2), rule(star_expr), rule(test))
+DEF_RULE(testlist_comp_3, nc, or(2), rule(comp_for), rule(testlist_comp_3b))
+DEF_RULE(testlist_comp_3b, nc, ident | and(2), tok(DEL_COMMA), opt_rule(testlist_comp_3c))
+DEF_RULE(testlist_comp_3c, nc, list_with_end, rule(testlist_comp_2), tok(DEL_COMMA))
+DEF_RULE(trailer, nc, or(3), rule(trailer_paren), rule(trailer_bracket), rule(trailer_period))
+DEF_RULE(trailer_paren, c(trailer_paren), and(3), tok(DEL_PAREN_OPEN), opt_rule(arglist), tok(DEL_PAREN_CLOSE))
+DEF_RULE(trailer_bracket, c(trailer_bracket), and(3), tok(DEL_BRACKET_OPEN), rule(subscriptlist), tok(DEL_BRACKET_CLOSE))
+DEF_RULE(trailer_period, c(trailer_period), and(2), tok(DEL_PERIOD), tok(NAME))
+
+// subscriptlist: subscript (',' subscript)* [',']
+// subscript: test | [test] ':' [test] [sliceop]
+// sliceop: ':' [test]
+
+// without slice support, only plain 'test' subscripts are parsed
+#if MICROPY_PY_BUILTINS_SLICE
+DEF_RULE(subscriptlist, c(generic_tuple), list_with_end, rule(subscript), tok(DEL_COMMA))
+DEF_RULE(subscript, nc, or(2), rule(subscript_3), rule(subscript_2))
+DEF_RULE(subscript_2, c(subscript_2), and(2), rule(test), opt_rule(subscript_3))
+DEF_RULE(subscript_3, c(subscript_3), and(2), tok(DEL_COLON), opt_rule(subscript_3b))
+DEF_RULE(subscript_3b, nc, or(2), rule(subscript_3c), rule(subscript_3d))
+DEF_RULE(subscript_3c, nc, and(2), tok(DEL_COLON), opt_rule(test))
+DEF_RULE(subscript_3d, nc, and(2), rule(test), opt_rule(sliceop))
+DEF_RULE(sliceop, nc, and(2), tok(DEL_COLON), opt_rule(test))
+#else
+DEF_RULE(subscriptlist, c(generic_tuple), list_with_end, rule(test), tok(DEL_COMMA))
+#endif
+
+// exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
+// testlist: test (',' test)* [',']
+// dictorsetmaker: (test ':' test (comp_for | (',' test ':' test)* [','])) | (test (comp_for | (',' test)* [',']))
+
+DEF_RULE(exprlist, nc, list_with_end, rule(exprlist_2), tok(DEL_COMMA))
+DEF_RULE(exprlist_2, nc, or(2), rule(star_expr), rule(expr))
+DEF_RULE(testlist, c(generic_tuple), list_with_end, rule(test), tok(DEL_COMMA))
+// TODO dictorsetmaker lets through more than is allowed
+DEF_RULE(dictorsetmaker, nc, and(2), rule(dictorsetmaker_item), opt_rule(dictorsetmaker_tail))
+// with sets enabled, the ':' part of each item is optional (set literals allowed);
+// otherwise every item must be 'test : test' (dict displays only)
+#if MICROPY_PY_BUILTINS_SET
+DEF_RULE(dictorsetmaker_item, c(dictorsetmaker_item), and(2), rule(test), opt_rule(dictorsetmaker_colon))
+DEF_RULE(dictorsetmaker_colon, nc, ident | and(2), tok(DEL_COLON), rule(test))
+#else
+DEF_RULE(dictorsetmaker_item, c(dictorsetmaker_item), and(3), rule(test), tok(DEL_COLON), rule(test))
+#endif
+DEF_RULE(dictorsetmaker_tail, nc, or(2), rule(comp_for), rule(dictorsetmaker_list))
+DEF_RULE(dictorsetmaker_list, nc, and(2), tok(DEL_COMMA), opt_rule(dictorsetmaker_list2))
+DEF_RULE(dictorsetmaker_list2, nc, list_with_end, rule(dictorsetmaker_item), tok(DEL_COMMA))
+
+// classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+DEF_RULE(classdef, c(classdef), blank | and(5), tok(KW_CLASS), tok(NAME), opt_rule(classdef_2), tok(DEL_COLON), rule(suite))
+DEF_RULE(classdef_2, nc, ident | and(3), tok(DEL_PAREN_OPEN), opt_rule(arglist), tok(DEL_PAREN_CLOSE))
+
+// arglist: (argument ',')* (argument [','] | '*' test (',' argument)* [',' '**' test] | '**' test)
+
+// TODO arglist lets through more than is allowed, compiler needs to do further verification
+DEF_RULE(arglist, nc, list_with_end, rule(arglist_2), tok(DEL_COMMA))
+DEF_RULE(arglist_2, nc, or(3), rule(arglist_star), rule(arglist_dbl_star), rule(argument))
+DEF_RULE(arglist_star, nc, and(2), tok(OP_STAR), rule(test))
+DEF_RULE(arglist_dbl_star, nc, and(2), tok(OP_DBL_STAR), rule(test))
+
+// # The reason that keywords are test nodes instead of NAME is that using NAME
+// # results in an ambiguity. ast.c makes sure it's a NAME.
+// argument: test [comp_for] | test '=' test # Really [keyword '='] test
+// comp_iter: comp_for | comp_if
+// comp_for: 'for' exprlist 'in' or_test [comp_iter]
+// comp_if: 'if' test_nocond [comp_iter]
+
+DEF_RULE(argument, nc, and(2), rule(test), opt_rule(argument_2))
+DEF_RULE(argument_2, nc, or(2), rule(comp_for), rule(argument_3))
+DEF_RULE(argument_3, nc, ident | and(2), tok(DEL_EQUAL), rule(test))
+DEF_RULE(comp_iter, nc, or(2), rule(comp_for), rule(comp_if))
+DEF_RULE(comp_for, nc, blank | and(5), tok(KW_FOR), rule(exprlist), tok(KW_IN), rule(or_test), opt_rule(comp_iter))
+DEF_RULE(comp_if, nc, and(3), tok(KW_IF), rule(test_nocond), opt_rule(comp_iter))
+
+// # not used in grammar, but may appear in "node" passed from Parser to Compiler
+// encoding_decl: NAME
+
+// yield_expr: 'yield' [yield_arg]
+// yield_arg: 'from' test | testlist
+
+DEF_RULE(yield_expr, c(yield_expr), and(2), tok(KW_YIELD), opt_rule(yield_arg))
+DEF_RULE(yield_arg, nc, or(2), rule(yield_arg_from), rule(testlist))
+DEF_RULE(yield_arg_from, nc, and(2), tok(KW_FROM), rule(test))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/lexer.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,794 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+
+#include "py/mpstate.h"
+#include "py/lexer.h"
+#include "py/runtime.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+#define TAB_SIZE (8)
+
+// TODO seems that CPython allows NULL byte in the input stream
+// don't know if that's intentional or not, but we don't allow it
+
+// TODO replace with a call to a standard function
+// Return true iff the NUL-terminated string str is exactly equal to the
+// length-delimited string strn of length len (strn need not be NUL-terminated).
+// Note: both the length AND the terminator of str must match, so a prefix
+// match alone returns false.
+STATIC bool str_strn_equal(const char *str, const char *strn, mp_uint_t len) {
+ mp_uint_t i = 0;
+
+ while (i < len && *str == *strn) {
+ ++i;
+ ++str;
+ ++strn;
+ }
+
+ return i == len && *str == 0;
+}
+
+// The lexer keeps a 3-character lookahead window: chr0 is the current
+// character, chr1 and chr2 the next two. The predicates below inspect
+// that window without consuming input.
+#define CUR_CHAR(lex) ((lex)->chr0)
+
+// true when the current character is the end-of-stream sentinel
+STATIC bool is_end(mp_lexer_t *lex) {
+ return lex->chr0 == MP_LEXER_EOF;
+}
+
+// true on a physical newline (next_char has already normalised CR/CRLF to LF)
+STATIC bool is_physical_newline(mp_lexer_t *lex) {
+ return lex->chr0 == '\n';
+}
+
+STATIC bool is_char(mp_lexer_t *lex, byte c) {
+ return lex->chr0 == c;
+}
+
+STATIC bool is_char_or(mp_lexer_t *lex, byte c1, byte c2) {
+ return lex->chr0 == c1 || lex->chr0 == c2;
+}
+
+STATIC bool is_char_or3(mp_lexer_t *lex, byte c1, byte c2, byte c3) {
+ return lex->chr0 == c1 || lex->chr0 == c2 || lex->chr0 == c3;
+}
+
+/*
+STATIC bool is_char_following(mp_lexer_t *lex, byte c) {
+ return lex->chr1 == c;
+}
+*/
+
+// test the next character (one-character lookahead)
+STATIC bool is_char_following_or(mp_lexer_t *lex, byte c1, byte c2) {
+ return lex->chr1 == c1 || lex->chr1 == c2;
+}
+
+// test the character after next (two-character lookahead)
+STATIC bool is_char_following_following_or(mp_lexer_t *lex, byte c1, byte c2) {
+ return lex->chr2 == c1 || lex->chr2 == c2;
+}
+
+// test current and next character together
+STATIC bool is_char_and(mp_lexer_t *lex, byte c1, byte c2) {
+ return lex->chr0 == c1 && lex->chr1 == c2;
+}
+
+STATIC bool is_whitespace(mp_lexer_t *lex) {
+ return unichar_isspace(lex->chr0);
+}
+
+STATIC bool is_letter(mp_lexer_t *lex) {
+ return unichar_isalpha(lex->chr0);
+}
+
+STATIC bool is_digit(mp_lexer_t *lex) {
+ return unichar_isdigit(lex->chr0);
+}
+
+STATIC bool is_following_digit(mp_lexer_t *lex) {
+ return unichar_isdigit(lex->chr1);
+}
+
+// true if the next char is a radix prefix letter: b/B, o/O or x/X
+// (the | 0x20 forces ASCII letters to lower case)
+STATIC bool is_following_base_char(mp_lexer_t *lex) {
+ const unichar chr1 = lex->chr1 | 0x20;
+ return chr1 == 'b' || chr1 == 'o' || chr1 == 'x';
+}
+
+// true if the next char is an octal digit '0'..'7'
+STATIC bool is_following_odigit(mp_lexer_t *lex) {
+ return lex->chr1 >= '0' && lex->chr1 <= '7';
+}
+
+// to easily parse utf-8 identifiers we allow any raw byte with high bit set
+STATIC bool is_head_of_identifier(mp_lexer_t *lex) {
+ return is_letter(lex) || lex->chr0 == '_' || lex->chr0 >= 0x80;
+}
+
+STATIC bool is_tail_of_identifier(mp_lexer_t *lex) {
+ return is_head_of_identifier(lex) || is_digit(lex);
+}
+
+// Consume one character from the stream, maintaining line/column counters.
+// Normalisations performed here so the rest of the lexer only ever sees LF:
+// - CR and CR LF are both converted to a single LF
+// - a LF is synthesised at end of stream if the input does not end in one
+// No-op once EOF has been reached.
+STATIC void next_char(mp_lexer_t *lex) {
+ if (lex->chr0 == MP_LEXER_EOF) {
+ return;
+ }
+
+ if (lex->chr0 == '\n') {
+ // a new line
+ ++lex->line;
+ lex->column = 1;
+ } else if (lex->chr0 == '\t') {
+ // a tab: advance column to the next multiple-of-TAB_SIZE stop (1-based)
+ lex->column = (((lex->column - 1 + TAB_SIZE) / TAB_SIZE) * TAB_SIZE) + 1;
+ } else {
+ // a character worth one column
+ ++lex->column;
+ }
+
+ // shift the 3-character lookahead window and pull one byte from the stream
+ lex->chr0 = lex->chr1;
+ lex->chr1 = lex->chr2;
+ lex->chr2 = lex->stream_next_byte(lex->stream_data);
+
+ if (lex->chr0 == '\r') {
+ // CR is a new line, converted to LF
+ lex->chr0 = '\n';
+ if (lex->chr1 == '\n') {
+ // CR LF is a single new line
+ lex->chr1 = lex->chr2;
+ lex->chr2 = lex->stream_next_byte(lex->stream_data);
+ }
+ }
+
+ if (lex->chr2 == MP_LEXER_EOF) {
+ // EOF, check if we need to insert a newline at end of file
+ if (lex->chr1 != MP_LEXER_EOF && lex->chr1 != '\n') {
+ // if lex->chr1 == '\r' then this makes a CR LF which will be converted to LF above
+ // otherwise it just inserts a LF
+ lex->chr2 = '\n';
+ }
+ }
+}
+
+// Push a new indentation level onto the lexer's indent stack, growing the
+// stack storage if needed.
+STATIC void indent_push(mp_lexer_t *lex, mp_uint_t indent) {
+ if (lex->num_indent_level >= lex->alloc_indent_level) {
+ // TODO use m_renew_maybe and somehow indicate an error if it fails... probably by using MP_TOKEN_MEMORY_ERROR
+ lex->indent_level = m_renew(uint16_t, lex->indent_level, lex->alloc_indent_level, lex->alloc_indent_level + MICROPY_ALLOC_LEXEL_INDENT_INC);
+ lex->alloc_indent_level += MICROPY_ALLOC_LEXEL_INDENT_INC;
+ }
+ lex->indent_level[lex->num_indent_level++] = indent;
+}
+
+// Current (innermost) indentation level; the stack is never empty because a
+// sentinel level 0 is stored at creation time.
+STATIC mp_uint_t indent_top(mp_lexer_t *lex) {
+ return lex->indent_level[lex->num_indent_level - 1];
+}
+
+// Pop the innermost indentation level (storage is retained for reuse).
+STATIC void indent_pop(mp_lexer_t *lex) {
+ lex->num_indent_level -= 1;
+}
+
+// some tricky operator encoding:
+// <op> = begin with <op>, if this opchar matches then begin here
+// e<op> = end with <op>, if this opchar matches then end
+// E<op> = mandatory end with <op>, this opchar must match, then end
+// c<op> = continue with <op>, if this opchar matches then continue matching
+// this means if the start of two ops are the same then they are equal til the last char
+
+// decoded by the operator-matching loop in mp_lexer_next_token_into; each
+// start/end/continue position corresponds 1:1 to an entry of tok_enc_kind below
+STATIC const char *tok_enc =
+ "()[]{},:;@~" // singles
+ "<e=c<e=" // < <= << <<=
+ ">e=c>e=" // > >= >> >>=
+ "*e=c*e=" // * *= ** **=
+ "+e=" // + +=
+ "-e=e>" // - -= ->
+ "&e=" // & &=
+ "|e=" // | |=
+ "/e=c/e=" // / /= // //=
+ "%e=" // % %=
+ "^e=" // ^ ^=
+ "=e=" // = ==
+ "!E="; // !=
+
+// token kinds for each encoded operator above, in encoding order
+// TODO static assert that number of tokens is less than 256 so we can safely make this table with byte sized entries
+STATIC const uint8_t tok_enc_kind[] = {
+ MP_TOKEN_DEL_PAREN_OPEN, MP_TOKEN_DEL_PAREN_CLOSE,
+ MP_TOKEN_DEL_BRACKET_OPEN, MP_TOKEN_DEL_BRACKET_CLOSE,
+ MP_TOKEN_DEL_BRACE_OPEN, MP_TOKEN_DEL_BRACE_CLOSE,
+ MP_TOKEN_DEL_COMMA, MP_TOKEN_DEL_COLON, MP_TOKEN_DEL_SEMICOLON, MP_TOKEN_DEL_AT, MP_TOKEN_OP_TILDE,
+
+ MP_TOKEN_OP_LESS, MP_TOKEN_OP_LESS_EQUAL, MP_TOKEN_OP_DBL_LESS, MP_TOKEN_DEL_DBL_LESS_EQUAL,
+ MP_TOKEN_OP_MORE, MP_TOKEN_OP_MORE_EQUAL, MP_TOKEN_OP_DBL_MORE, MP_TOKEN_DEL_DBL_MORE_EQUAL,
+ MP_TOKEN_OP_STAR, MP_TOKEN_DEL_STAR_EQUAL, MP_TOKEN_OP_DBL_STAR, MP_TOKEN_DEL_DBL_STAR_EQUAL,
+ MP_TOKEN_OP_PLUS, MP_TOKEN_DEL_PLUS_EQUAL,
+ MP_TOKEN_OP_MINUS, MP_TOKEN_DEL_MINUS_EQUAL, MP_TOKEN_DEL_MINUS_MORE,
+ MP_TOKEN_OP_AMPERSAND, MP_TOKEN_DEL_AMPERSAND_EQUAL,
+ MP_TOKEN_OP_PIPE, MP_TOKEN_DEL_PIPE_EQUAL,
+ MP_TOKEN_OP_SLASH, MP_TOKEN_DEL_SLASH_EQUAL, MP_TOKEN_OP_DBL_SLASH, MP_TOKEN_DEL_DBL_SLASH_EQUAL,
+ MP_TOKEN_OP_PERCENT, MP_TOKEN_DEL_PERCENT_EQUAL,
+ MP_TOKEN_OP_CARET, MP_TOKEN_DEL_CARET_EQUAL,
+ MP_TOKEN_DEL_EQUAL, MP_TOKEN_OP_DBL_EQUAL,
+ MP_TOKEN_OP_NOT_EQUAL,
+};
+
+// must have the same order as enum in lexer.h
+// (a matched keyword at index i becomes token MP_TOKEN_KW_FALSE + i; the
+// final entry "__debug__" is special-cased in mp_lexer_next_token_into)
+STATIC const char *tok_kw[] = {
+ "False",
+ "None",
+ "True",
+ "and",
+ "as",
+ "assert",
+ "break",
+ "class",
+ "continue",
+ "def",
+ "del",
+ "elif",
+ "else",
+ "except",
+ "finally",
+ "for",
+ "from",
+ "global",
+ "if",
+ "import",
+ "in",
+ "is",
+ "lambda",
+ "nonlocal",
+ "not",
+ "or",
+ "pass",
+ "raise",
+ "return",
+ "try",
+ "while",
+ "with",
+ "yield",
+ "__debug__",
+};
+
+// This is called with CUR_CHAR() before first hex digit, and should return with
+// it pointing to last hex digit
+// num_digits must be greater than zero
+// Reads exactly num_digits hex digits into *result; returns false (leaving
+// *result untouched) as soon as a non-hex character is encountered.
+STATIC bool get_hex(mp_lexer_t *lex, mp_uint_t num_digits, mp_uint_t *result) {
+ mp_uint_t num = 0;
+ while (num_digits-- != 0) {
+ next_char(lex);
+ unichar c = CUR_CHAR(lex);
+ if (!unichar_isxdigit(c)) {
+ return false;
+ }
+ num = (num << 4) + unichar_xdigit_value(c);
+ }
+ *result = num;
+ return true;
+}
+
+// Lex one token from the input stream into lex->tok_kind / tok_line /
+// tok_column, accumulating any token text in lex->vstr.
+// first_token is true only for the very first token of the stream, where an
+// unexpected initial indent is reported via an INDENT token.
+// Pending INDENT/DEDENT tokens are tracked in lex->emit_dent and emitted one
+// per call before any further input is consumed.
+STATIC void mp_lexer_next_token_into(mp_lexer_t *lex, bool first_token) {
+ // start new token text
+ vstr_reset(&lex->vstr);
+
+ // skip white space and comments
+ bool had_physical_newline = false;
+ while (!is_end(lex)) {
+ if (is_physical_newline(lex)) {
+ had_physical_newline = true;
+ next_char(lex);
+ } else if (is_whitespace(lex)) {
+ next_char(lex);
+ } else if (is_char(lex, '#')) {
+ // comment: skip to end of line
+ next_char(lex);
+ while (!is_end(lex) && !is_physical_newline(lex)) {
+ next_char(lex);
+ }
+ // had_physical_newline will be set on next loop
+ } else if (is_char(lex, '\\')) {
+ // backslash (outside string literals) must appear just before a physical newline
+ next_char(lex);
+ if (!is_physical_newline(lex)) {
+ // SyntaxError: unexpected character after line continuation character
+ lex->tok_line = lex->line;
+ lex->tok_column = lex->column;
+ lex->tok_kind = MP_TOKEN_BAD_LINE_CONTINUATION;
+ return;
+ } else {
+ next_char(lex);
+ }
+ } else {
+ break;
+ }
+ }
+
+ // set token source information
+ lex->tok_line = lex->line;
+ lex->tok_column = lex->column;
+
+ if (first_token && lex->line == 1 && lex->column != 1) {
+ // check that the first token is in the first column
+ // if first token is not on first line, we get a physical newline and
+ // this check is done as part of normal indent/dedent checking below
+ // (done to get equivalence with CPython)
+ lex->tok_kind = MP_TOKEN_INDENT;
+
+ } else if (lex->emit_dent < 0) {
+ // emit one pending DEDENT
+ lex->tok_kind = MP_TOKEN_DEDENT;
+ lex->emit_dent += 1;
+
+ } else if (lex->emit_dent > 0) {
+ // emit one pending INDENT
+ lex->tok_kind = MP_TOKEN_INDENT;
+ lex->emit_dent -= 1;
+
+ } else if (had_physical_newline && lex->nested_bracket_level == 0) {
+ // a newline outside brackets: emit NEWLINE and compute indent change
+ lex->tok_kind = MP_TOKEN_NEWLINE;
+
+ mp_uint_t num_spaces = lex->column - 1;
+ lex->emit_dent = 0;
+ if (num_spaces == indent_top(lex)) {
+ // same indentation level: nothing to do
+ } else if (num_spaces > indent_top(lex)) {
+ indent_push(lex, num_spaces);
+ lex->emit_dent += 1;
+ } else {
+ while (num_spaces < indent_top(lex)) {
+ indent_pop(lex);
+ lex->emit_dent -= 1;
+ }
+ if (num_spaces != indent_top(lex)) {
+ // dedent does not line up with any outer indentation level
+ lex->tok_kind = MP_TOKEN_DEDENT_MISMATCH;
+ }
+ }
+
+ } else if (is_end(lex)) {
+ // end of input: close any open indentation levels before MP_TOKEN_END
+ if (indent_top(lex) > 0) {
+ lex->tok_kind = MP_TOKEN_NEWLINE;
+ lex->emit_dent = 0;
+ while (indent_top(lex) > 0) {
+ indent_pop(lex);
+ lex->emit_dent -= 1;
+ }
+ } else {
+ lex->tok_kind = MP_TOKEN_END;
+ }
+
+ } else if (is_char_or(lex, '\'', '\"')
+ || (is_char_or3(lex, 'r', 'u', 'b') && is_char_following_or(lex, '\'', '\"'))
+ || ((is_char_and(lex, 'r', 'b') || is_char_and(lex, 'b', 'r')) && is_char_following_following_or(lex, '\'', '\"'))) {
+ // a string or bytes literal
+
+ // parse type codes (u is accepted and ignored; r/b/rb/br set flags)
+ bool is_raw = false;
+ bool is_bytes = false;
+ if (is_char(lex, 'u')) {
+ next_char(lex);
+ } else if (is_char(lex, 'b')) {
+ is_bytes = true;
+ next_char(lex);
+ if (is_char(lex, 'r')) {
+ is_raw = true;
+ next_char(lex);
+ }
+ } else if (is_char(lex, 'r')) {
+ is_raw = true;
+ next_char(lex);
+ if (is_char(lex, 'b')) {
+ is_bytes = true;
+ next_char(lex);
+ }
+ }
+
+ // set token kind
+ if (is_bytes) {
+ lex->tok_kind = MP_TOKEN_BYTES;
+ } else {
+ lex->tok_kind = MP_TOKEN_STRING;
+ }
+
+ // get first quoting character
+ char quote_char = '\'';
+ if (is_char(lex, '\"')) {
+ quote_char = '\"';
+ }
+ next_char(lex);
+
+ // work out if it's a single or triple quoted literal
+ mp_uint_t num_quotes;
+ if (is_char_and(lex, quote_char, quote_char)) {
+ // triple quotes
+ next_char(lex);
+ next_char(lex);
+ num_quotes = 3;
+ } else {
+ // single quotes
+ num_quotes = 1;
+ }
+
+ // parse the literal
+ // n_closing counts consecutive quote chars seen; the literal ends when
+ // it reaches num_quotes (closing quotes are trimmed from vstr below)
+ mp_uint_t n_closing = 0;
+ while (!is_end(lex) && (num_quotes > 1 || !is_char(lex, '\n')) && n_closing < num_quotes) {
+ if (is_char(lex, quote_char)) {
+ n_closing += 1;
+ vstr_add_char(&lex->vstr, CUR_CHAR(lex));
+ } else {
+ n_closing = 0;
+ if (is_char(lex, '\\')) {
+ next_char(lex);
+ unichar c = CUR_CHAR(lex);
+ if (is_raw) {
+ // raw strings allow escaping of quotes, but the backslash is also emitted
+ vstr_add_char(&lex->vstr, '\\');
+ } else {
+ // c == MP_LEXER_EOF after this switch means "emit nothing"
+ switch (c) {
+ case MP_LEXER_EOF: break; // TODO a proper error message?
+ case '\n': c = MP_LEXER_EOF; break; // TODO check this works correctly (we are supposed to ignore it
+ case '\\': break;
+ case '\'': break;
+ case '"': break;
+ case 'a': c = 0x07; break;
+ case 'b': c = 0x08; break;
+ case 't': c = 0x09; break;
+ case 'n': c = 0x0a; break;
+ case 'v': c = 0x0b; break;
+ case 'f': c = 0x0c; break;
+ case 'r': c = 0x0d; break;
+ case 'u':
+ case 'U':
+ if (is_bytes) {
+ // b'\u1234' == b'\\u1234'
+ vstr_add_char(&lex->vstr, '\\');
+ break;
+ }
+ // Otherwise fall through.
+ case 'x':
+ {
+ // \xhh, \uhhhh or \Uhhhhhhhh
+ mp_uint_t num = 0;
+ if (!get_hex(lex, (c == 'x' ? 2 : c == 'u' ? 4 : 8), &num)) {
+ // not enough hex chars for escape sequence
+ lex->tok_kind = MP_TOKEN_INVALID;
+ }
+ c = num;
+ break;
+ }
+ case 'N':
+ // Supporting '\N{LATIN SMALL LETTER A}' == 'a' would require keeping the
+ // entire Unicode name table in the core. As of Unicode 6.3.0, that's nearly
+ // 3MB of text; even gzip-compressed and with minimal structure, it'll take
+ // roughly half a meg of storage. This form of Unicode escape may be added
+ // later on, but it's definitely not a priority right now. -- CJA 20140607
+ mp_not_implemented("unicode name escapes");
+ break;
+ default:
+ if (c >= '0' && c <= '7') {
+ // Octal sequence, 1-3 chars
+ mp_uint_t digits = 3;
+ mp_uint_t num = c - '0';
+ while (is_following_odigit(lex) && --digits != 0) {
+ next_char(lex);
+ num = num * 8 + (CUR_CHAR(lex) - '0');
+ }
+ c = num;
+ } else {
+ // unrecognised escape character; CPython lets this through verbatim as '\' and then the character
+ vstr_add_char(&lex->vstr, '\\');
+ }
+ break;
+ }
+ }
+ if (c != MP_LEXER_EOF) {
+ if (MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC) {
+ if (c < 0x110000 && !is_bytes) {
+ vstr_add_char(&lex->vstr, c);
+ } else if (c < 0x100 && is_bytes) {
+ vstr_add_byte(&lex->vstr, c);
+ } else {
+ // unicode character out of range
+ // this raises a generic SyntaxError; could provide more info
+ lex->tok_kind = MP_TOKEN_INVALID;
+ }
+ } else {
+ // without unicode everything is just added as an 8-bit byte
+ if (c < 0x100) {
+ vstr_add_byte(&lex->vstr, c);
+ } else {
+ // 8-bit character out of range
+ // this raises a generic SyntaxError; could provide more info
+ lex->tok_kind = MP_TOKEN_INVALID;
+ }
+ }
+ }
+ } else {
+ // Add the "character" as a byte so that we remain 8-bit clean.
+ // This way, strings are parsed correctly whether or not they contain utf-8 chars.
+ vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
+ }
+ }
+ next_char(lex);
+ }
+
+ // check we got the required end quotes
+ if (n_closing < num_quotes) {
+ lex->tok_kind = MP_TOKEN_LONELY_STRING_OPEN;
+ }
+
+ // cut off the end quotes from the token text
+ vstr_cut_tail_bytes(&lex->vstr, n_closing);
+
+ } else if (is_head_of_identifier(lex)) {
+ lex->tok_kind = MP_TOKEN_NAME;
+
+ // get first char (add as byte to remain 8-bit clean and support utf-8)
+ vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
+ next_char(lex);
+
+ // get tail chars
+ while (!is_end(lex) && is_tail_of_identifier(lex)) {
+ vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
+ next_char(lex);
+ }
+
+ } else if (is_digit(lex) || (is_char(lex, '.') && is_following_digit(lex))) {
+ // a number literal; forced_integer means a 0b/0o/0x prefix was seen,
+ // which suppresses float detection on a following 'e'/'E'
+ bool forced_integer = false;
+ if (is_char(lex, '.')) {
+ lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
+ } else {
+ lex->tok_kind = MP_TOKEN_INTEGER;
+ if (is_char(lex, '0') && is_following_base_char(lex)) {
+ forced_integer = true;
+ }
+ }
+
+ // get first char
+ vstr_add_char(&lex->vstr, CUR_CHAR(lex));
+ next_char(lex);
+
+ // get tail chars
+ while (!is_end(lex)) {
+ if (!forced_integer && is_char_or(lex, 'e', 'E')) {
+ // exponent part marks a float; sign may follow
+ lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
+ vstr_add_char(&lex->vstr, 'e');
+ next_char(lex);
+ if (is_char(lex, '+') || is_char(lex, '-')) {
+ vstr_add_char(&lex->vstr, CUR_CHAR(lex));
+ next_char(lex);
+ }
+ } else if (is_letter(lex) || is_digit(lex) || is_char(lex, '.')) {
+ if (is_char_or3(lex, '.', 'j', 'J')) {
+ lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
+ }
+ vstr_add_char(&lex->vstr, CUR_CHAR(lex));
+ next_char(lex);
+ } else {
+ break;
+ }
+ }
+
+ } else if (is_char(lex, '.')) {
+ // special handling for . and ... operators, because .. is not a valid operator
+
+ // get first char
+ vstr_add_char(&lex->vstr, '.');
+ next_char(lex);
+
+ if (is_char_and(lex, '.', '.')) {
+ vstr_add_char(&lex->vstr, '.');
+ vstr_add_char(&lex->vstr, '.');
+ next_char(lex);
+ next_char(lex);
+ lex->tok_kind = MP_TOKEN_ELLIPSIS;
+ } else {
+ lex->tok_kind = MP_TOKEN_DEL_PERIOD;
+ }
+
+ } else {
+ // search for encoded delimiter or operator
+ // (see the tok_enc encoding description above the table)
+
+ const char *t = tok_enc;
+ mp_uint_t tok_enc_index = 0;
+ for (; *t != 0 && !is_char(lex, *t); t += 1) {
+ if (*t == 'e' || *t == 'c') {
+ t += 1;
+ } else if (*t == 'E') {
+ tok_enc_index -= 1;
+ t += 1;
+ }
+ tok_enc_index += 1;
+ }
+
+ next_char(lex);
+
+ if (*t == 0) {
+ // didn't match any delimiter or operator characters
+ lex->tok_kind = MP_TOKEN_INVALID;
+
+ } else {
+ // matched a delimiter or operator character
+
+ // get the maximum characters for a valid token
+ t += 1;
+ mp_uint_t t_index = tok_enc_index;
+ for (;;) {
+ for (; *t == 'e'; t += 1) {
+ t += 1;
+ t_index += 1;
+ if (is_char(lex, *t)) {
+ next_char(lex);
+ tok_enc_index = t_index;
+ break;
+ }
+ }
+
+ if (*t == 'E') {
+ // mandatory-end opchar: must match or the token is invalid
+ t += 1;
+ if (is_char(lex, *t)) {
+ next_char(lex);
+ tok_enc_index = t_index;
+ } else {
+ lex->tok_kind = MP_TOKEN_INVALID;
+ goto tok_enc_no_match;
+ }
+ break;
+ }
+
+ if (*t == 'c') {
+ t += 1;
+ t_index += 1;
+ if (is_char(lex, *t)) {
+ next_char(lex);
+ tok_enc_index = t_index;
+ t += 1;
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+
+ // set token kind
+ lex->tok_kind = tok_enc_kind[tok_enc_index];
+
+ tok_enc_no_match:
+
+ // compute bracket level for implicit line joining
+ if (lex->tok_kind == MP_TOKEN_DEL_PAREN_OPEN || lex->tok_kind == MP_TOKEN_DEL_BRACKET_OPEN || lex->tok_kind == MP_TOKEN_DEL_BRACE_OPEN) {
+ lex->nested_bracket_level += 1;
+ } else if (lex->tok_kind == MP_TOKEN_DEL_PAREN_CLOSE || lex->tok_kind == MP_TOKEN_DEL_BRACKET_CLOSE || lex->tok_kind == MP_TOKEN_DEL_BRACE_CLOSE) {
+ lex->nested_bracket_level -= 1;
+ }
+ }
+ }
+
+ // check for keywords
+ if (lex->tok_kind == MP_TOKEN_NAME) {
+ // We check for __debug__ here and convert it to its value. This is so
+ // the parser gives a syntax error on, eg, x.__debug__. Otherwise, we
+ // need to check for this special token in many places in the compiler.
+ // TODO improve speed of these string comparisons
+ //for (mp_int_t i = 0; tok_kw[i] != NULL; i++) {
+ for (size_t i = 0; i < MP_ARRAY_SIZE(tok_kw); i++) {
+ if (str_strn_equal(tok_kw[i], lex->vstr.buf, lex->vstr.len)) {
+ if (i == MP_ARRAY_SIZE(tok_kw) - 1) {
+ // tok_kw[MP_ARRAY_SIZE(tok_kw) - 1] == "__debug__"
+ lex->tok_kind = (MP_STATE_VM(mp_optimise_value) == 0 ? MP_TOKEN_KW_TRUE : MP_TOKEN_KW_FALSE);
+ } else {
+ // keyword index maps directly onto the token enum
+ lex->tok_kind = MP_TOKEN_KW_FALSE + i;
+ }
+ break;
+ }
+ }
+ }
+}
+
+// Create a new lexer reading from the given byte stream.
+// Takes ownership of stream_data: on any allocation failure the stream is
+// closed via stream_close (if non-NULL) and NULL is returned.
+// On success the first token is already loaded into the lex->tok_* fields.
+mp_lexer_t *mp_lexer_new(qstr src_name, void *stream_data, mp_lexer_stream_next_byte_t stream_next_byte, mp_lexer_stream_close_t stream_close) {
+ mp_lexer_t *lex = m_new_obj_maybe(mp_lexer_t);
+
+ // check for memory allocation error
+ if (lex == NULL) {
+ if (stream_close) {
+ stream_close(stream_data);
+ }
+ return NULL;
+ }
+
+ lex->source_name = src_name;
+ lex->stream_data = stream_data;
+ lex->stream_next_byte = stream_next_byte;
+ lex->stream_close = stream_close;
+ lex->line = 1;
+ lex->column = 1;
+ lex->emit_dent = 0;
+ lex->nested_bracket_level = 0;
+ lex->alloc_indent_level = MICROPY_ALLOC_LEXER_INDENT_INIT;
+ lex->num_indent_level = 1;
+ lex->indent_level = m_new_maybe(uint16_t, lex->alloc_indent_level);
+ vstr_init(&lex->vstr, 32);
+
+ // check for memory allocation error
+ if (lex->indent_level == NULL || vstr_had_error(&lex->vstr)) {
+ // mp_lexer_free also closes the stream
+ mp_lexer_free(lex);
+ return NULL;
+ }
+
+ // store sentinel for first indentation level
+ lex->indent_level[0] = 0;
+
+ // preload characters
+ lex->chr0 = stream_next_byte(stream_data);
+ lex->chr1 = stream_next_byte(stream_data);
+ lex->chr2 = stream_next_byte(stream_data);
+
+ // if input stream is 0, 1 or 2 characters long and doesn't end in a newline, then insert a newline at the end
+ // (streams of 3 or more characters are handled incrementally by next_char)
+ if (lex->chr0 == MP_LEXER_EOF) {
+ lex->chr0 = '\n';
+ } else if (lex->chr1 == MP_LEXER_EOF) {
+ if (lex->chr0 == '\r') {
+ lex->chr0 = '\n';
+ } else if (lex->chr0 != '\n') {
+ lex->chr1 = '\n';
+ }
+ } else if (lex->chr2 == MP_LEXER_EOF) {
+ if (lex->chr1 == '\r') {
+ lex->chr1 = '\n';
+ } else if (lex->chr1 != '\n') {
+ lex->chr2 = '\n';
+ }
+ }
+
+ // preload first token
+ mp_lexer_next_token_into(lex, true);
+
+ return lex;
+}
+
+// Free a lexer created by mp_lexer_new, closing its stream and releasing the
+// token buffer and indent stack. Safe to call with NULL.
+void mp_lexer_free(mp_lexer_t *lex) {
+ if (lex) {
+ if (lex->stream_close) {
+ lex->stream_close(lex->stream_data);
+ }
+ vstr_clear(&lex->vstr);
+ m_del(uint16_t, lex->indent_level, lex->alloc_indent_level);
+ m_del_obj(mp_lexer_t, lex);
+ }
+}
+
+// Advance to the next token; the result is stored in the lex->tok_* fields.
+void mp_lexer_to_next(mp_lexer_t *lex) {
+ mp_lexer_next_token_into(lex, false);
+}
+
+#if MICROPY_DEBUG_PRINTERS
+// Debug helper: print the current token's position, kind and text
+// (non-printable characters are shown as '?').
+void mp_lexer_show_token(const mp_lexer_t *lex) {
+ printf("(" UINT_FMT ":" UINT_FMT ") kind:%u str:%p len:%zu", lex->tok_line, lex->tok_column, lex->tok_kind, lex->vstr.buf, lex->vstr.len);
+ if (lex->vstr.len > 0) {
+ const byte *i = (const byte *)lex->vstr.buf;
+ const byte *j = (const byte *)i + lex->vstr.len;
+ printf(" ");
+ while (i < j) {
+ unichar c = utf8_get_char(i);
+ i = utf8_next_char(i);
+ if (unichar_isprint(c)) {
+ printf("%c", (int)c);
+ } else {
+ printf("?");
+ }
+ }
+ }
+ printf("\n");
+}
+#endif
+
+#endif // MICROPY_ENABLE_COMPILER
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/lexer.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,201 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_LEXER_H__
+#define __MICROPY_INCLUDED_PY_LEXER_H__
+
+#include <stdint.h>
+
+#include "py/mpconfig.h"
+#include "py/qstr.h"
+
+/* lexer.h -- simple tokeniser for Micro Python
+ *
+ * Uses (byte) length instead of null termination.
+ * Tokens are the same - UTF-8 with (byte) length.
+ */
+
+// Token kinds produced by the lexer.  The inline numbers are reference values
+// for debugging only; the grouping is: errors, layout, literals, keywords,
+// operators, then delimiters/augmented-assignment tokens.
+typedef enum _mp_token_kind_t {
+    MP_TOKEN_END, // 0
+
+    // error tokens
+    MP_TOKEN_INVALID,
+    MP_TOKEN_DEDENT_MISMATCH,
+    MP_TOKEN_LONELY_STRING_OPEN,
+    MP_TOKEN_BAD_LINE_CONTINUATION,
+
+    // layout tokens
+    MP_TOKEN_NEWLINE, // 5
+    MP_TOKEN_INDENT,  // 6
+    MP_TOKEN_DEDENT,  // 7
+
+    // names and literals
+    MP_TOKEN_NAME, // 8
+    MP_TOKEN_INTEGER,
+    MP_TOKEN_FLOAT_OR_IMAG,
+    MP_TOKEN_STRING,
+    MP_TOKEN_BYTES,
+
+    MP_TOKEN_ELLIPSIS,
+
+    // keywords
+    MP_TOKEN_KW_FALSE, // 14
+    MP_TOKEN_KW_NONE,
+    MP_TOKEN_KW_TRUE,
+    MP_TOKEN_KW_AND,
+    MP_TOKEN_KW_AS,
+    MP_TOKEN_KW_ASSERT,
+    MP_TOKEN_KW_BREAK,
+    MP_TOKEN_KW_CLASS,
+    MP_TOKEN_KW_CONTINUE,
+    MP_TOKEN_KW_DEF, // 23
+    MP_TOKEN_KW_DEL,
+    MP_TOKEN_KW_ELIF,
+    MP_TOKEN_KW_ELSE,
+    MP_TOKEN_KW_EXCEPT,
+    MP_TOKEN_KW_FINALLY,
+    MP_TOKEN_KW_FOR,
+    MP_TOKEN_KW_FROM,
+    MP_TOKEN_KW_GLOBAL,
+    MP_TOKEN_KW_IF,
+    MP_TOKEN_KW_IMPORT, // 33
+    MP_TOKEN_KW_IN,
+    MP_TOKEN_KW_IS,
+    MP_TOKEN_KW_LAMBDA,
+    MP_TOKEN_KW_NONLOCAL,
+    MP_TOKEN_KW_NOT,
+    MP_TOKEN_KW_OR,
+    MP_TOKEN_KW_PASS,
+    MP_TOKEN_KW_RAISE,
+    MP_TOKEN_KW_RETURN,
+    MP_TOKEN_KW_TRY, // 43
+    MP_TOKEN_KW_WHILE,
+    MP_TOKEN_KW_WITH,
+    MP_TOKEN_KW_YIELD,
+
+    // operators
+    MP_TOKEN_OP_PLUS, // 47
+    MP_TOKEN_OP_MINUS,
+    MP_TOKEN_OP_STAR,
+    MP_TOKEN_OP_DBL_STAR,
+    MP_TOKEN_OP_SLASH,
+    MP_TOKEN_OP_DBL_SLASH,
+    MP_TOKEN_OP_PERCENT,
+    MP_TOKEN_OP_LESS,
+    MP_TOKEN_OP_DBL_LESS,
+    MP_TOKEN_OP_MORE,
+    MP_TOKEN_OP_DBL_MORE, // 57
+    MP_TOKEN_OP_AMPERSAND,
+    MP_TOKEN_OP_PIPE,
+    MP_TOKEN_OP_CARET,
+    MP_TOKEN_OP_TILDE,
+    MP_TOKEN_OP_LESS_EQUAL,
+    MP_TOKEN_OP_MORE_EQUAL,
+    MP_TOKEN_OP_DBL_EQUAL,
+    MP_TOKEN_OP_NOT_EQUAL,
+
+    // delimiters and augmented assignment
+    MP_TOKEN_DEL_PAREN_OPEN, // 66
+    MP_TOKEN_DEL_PAREN_CLOSE,
+    MP_TOKEN_DEL_BRACKET_OPEN,
+    MP_TOKEN_DEL_BRACKET_CLOSE,
+    MP_TOKEN_DEL_BRACE_OPEN,
+    MP_TOKEN_DEL_BRACE_CLOSE,
+    MP_TOKEN_DEL_COMMA,
+    MP_TOKEN_DEL_COLON,
+    MP_TOKEN_DEL_PERIOD,
+    MP_TOKEN_DEL_SEMICOLON,
+    MP_TOKEN_DEL_AT, // 76
+    MP_TOKEN_DEL_EQUAL,
+    MP_TOKEN_DEL_PLUS_EQUAL,
+    MP_TOKEN_DEL_MINUS_EQUAL,
+    MP_TOKEN_DEL_STAR_EQUAL,
+    MP_TOKEN_DEL_SLASH_EQUAL,
+    MP_TOKEN_DEL_DBL_SLASH_EQUAL,
+    MP_TOKEN_DEL_PERCENT_EQUAL,
+    MP_TOKEN_DEL_AMPERSAND_EQUAL,
+    MP_TOKEN_DEL_PIPE_EQUAL,
+    MP_TOKEN_DEL_CARET_EQUAL, // 86
+    MP_TOKEN_DEL_DBL_MORE_EQUAL,
+    MP_TOKEN_DEL_DBL_LESS_EQUAL,
+    MP_TOKEN_DEL_DBL_STAR_EQUAL,
+    MP_TOKEN_DEL_MINUS_MORE,
+} mp_token_kind_t;
+
+// the next-byte function must return the next byte in the stream
+// it must return MP_LEXER_EOF if end of stream
+// it can be called again after returning MP_LEXER_EOF, and in that case must return MP_LEXER_EOF
+#define MP_LEXER_EOF ((unichar)(-1))
+
+typedef mp_uint_t (*mp_lexer_stream_next_byte_t)(void*);
+typedef void (*mp_lexer_stream_close_t)(void*);
+
+// this data structure is exposed for efficiency
+// public members are: source_name, tok_line, tok_column, tok_kind, vstr
+typedef struct _mp_lexer_t {
+    qstr source_name;                             // name of source
+    void *stream_data;                            // data for stream
+    mp_lexer_stream_next_byte_t stream_next_byte; // stream callback to get next byte
+    mp_lexer_stream_close_t stream_close;         // stream callback to free
+
+    unichar chr0, chr1, chr2;  // current cached characters from source (3-char lookahead)
+
+    mp_uint_t line;            // current source line
+    mp_uint_t column;          // current source column
+
+    mp_int_t emit_dent;        // non-zero when there are INDENT/DEDENT tokens to emit
+    mp_int_t nested_bracket_level; // >0 when there are nested brackets over multiple lines
+
+    // stack of indentation levels; entry 0 is a sentinel of 0
+    mp_uint_t alloc_indent_level;  // allocated capacity of indent_level
+    mp_uint_t num_indent_level;    // number of entries in use
+    uint16_t *indent_level;
+
+    mp_uint_t tok_line;        // token source line
+    mp_uint_t tok_column;      // token source column
+    mp_token_kind_t tok_kind;  // token kind
+    vstr_t vstr;               // token data
+} mp_lexer_t;
+
+mp_lexer_t *mp_lexer_new(qstr src_name, void *stream_data, mp_lexer_stream_next_byte_t stream_next_byte, mp_lexer_stream_close_t stream_close);
+mp_lexer_t *mp_lexer_new_from_str_len(qstr src_name, const char *str, mp_uint_t len, mp_uint_t free_len);
+
+void mp_lexer_free(mp_lexer_t *lex);
+void mp_lexer_to_next(mp_lexer_t *lex);
+void mp_lexer_show_token(const mp_lexer_t *lex);
+
+/******************************************************************/
+// platform specific import function; must be implemented for a specific port
+// TODO tidy up, rename, or put elsewhere
+
+//mp_lexer_t *mp_import_open_file(qstr mod_name);
+
+typedef enum {
+ MP_IMPORT_STAT_NO_EXIST,
+ MP_IMPORT_STAT_DIR,
+ MP_IMPORT_STAT_FILE,
+} mp_import_stat_t;
+
+mp_import_stat_t mp_import_stat(const char *path);
+mp_lexer_t *mp_lexer_new_from_file(const char *filename);
+
+#if MICROPY_HELPER_LEXER_UNIX
+mp_lexer_t *mp_lexer_new_from_fd(qstr filename, int fd, bool close_fd);
+#endif
+
+#endif // __MICROPY_INCLUDED_PY_LEXER_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/lexerstr.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,65 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/lexer.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+// Stream state for lexing from an in-memory string.
+typedef struct _mp_lexer_str_buf_t {
+    mp_uint_t free_len;  // if > 0, src_beg will be freed when done by: m_free(src_beg, free_len)
+    const char *src_beg; // beginning of source
+    const char *src_cur; // current location in source
+    const char *src_end; // end (exclusive) of source
+} mp_lexer_str_buf_t;
+
+// Stream callback: return the next byte of the string, or MP_LEXER_EOF at the end.
+// Repeated calls past the end keep returning MP_LEXER_EOF, as the lexer requires.
+STATIC mp_uint_t str_buf_next_byte(mp_lexer_str_buf_t *sb) {
+    if (sb->src_cur < sb->src_end) {
+        return *sb->src_cur++;
+    } else {
+        return MP_LEXER_EOF;
+    }
+}
+
+// Stream close callback: free the source text if this buffer owns it
+// (free_len > 0), then free the buffer struct itself.
+STATIC void str_buf_free(mp_lexer_str_buf_t *sb) {
+    if (sb->free_len > 0) {
+        m_del(char, (char*)sb->src_beg, sb->free_len);
+    }
+    m_del_obj(mp_lexer_str_buf_t, sb);
+}
+
+// Create a lexer over an in-memory string of length len.
+// If free_len > 0, ownership of str transfers to the lexer and it is freed
+// (via m_free(str, free_len)) when the lexer is closed.
+// Returns NULL on allocation failure.
+// NOTE(review): if the buffer struct allocation fails, str is NOT freed even
+// when free_len > 0 — confirm callers handle a NULL return accordingly.
+mp_lexer_t *mp_lexer_new_from_str_len(qstr src_name, const char *str, mp_uint_t len, mp_uint_t free_len) {
+    mp_lexer_str_buf_t *sb = m_new_obj_maybe(mp_lexer_str_buf_t);
+    if (sb == NULL) {
+        return NULL;
+    }
+    sb->free_len = free_len;
+    sb->src_beg = str;
+    sb->src_cur = str;
+    sb->src_end = str + len;
+    return mp_lexer_new(src_name, sb, (mp_lexer_stream_next_byte_t)str_buf_next_byte, (mp_lexer_stream_close_t)str_buf_free);
+}
+
+#endif // MICROPY_ENABLE_COMPILER
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/lexerunix.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,96 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpconfig.h"
+
+#if MICROPY_HELPER_LEXER_UNIX
+
+#include <stdio.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "py/lexer.h"
+
+// Stream state for lexing from a file descriptor, with a small read-ahead buffer.
+typedef struct _mp_lexer_file_buf_t {
+    int fd;        // descriptor to read from
+    bool close_fd; // whether to close fd when the lexer is freed
+    byte buf[20];  // read-ahead buffer
+    mp_uint_t len; // number of valid bytes in buf (0 means EOF reached)
+    mp_uint_t pos; // next unread position in buf
+} mp_lexer_file_buf_t;
+
+// Stream callback: return the next byte from the buffered file, refilling the
+// buffer from fd as needed.  Returns MP_LEXER_EOF at end of file or on a read
+// error, and keeps returning it on subsequent calls.
+STATIC mp_uint_t file_buf_next_byte(mp_lexer_file_buf_t *fb) {
+    if (fb->pos >= fb->len) {
+        if (fb->len == 0) {
+            // a previous refill hit EOF (or failed); stay at EOF
+            return MP_LEXER_EOF;
+        } else {
+            int n = read(fb->fd, fb->buf, sizeof(fb->buf));
+            if (n <= 0) {
+                fb->len = 0;
+                return MP_LEXER_EOF;
+            }
+            fb->len = n;
+            fb->pos = 0;
+        }
+    }
+    return fb->buf[fb->pos++];
+}
+
+// Stream close callback: close the descriptor if we own it, then free the buffer.
+STATIC void file_buf_close(mp_lexer_file_buf_t *fb) {
+    if (fb->close_fd) {
+        close(fb->fd);
+    }
+    m_del_obj(mp_lexer_file_buf_t, fb);
+}
+
+// Create a lexer that reads from an already-open file descriptor.
+// If close_fd is true the descriptor is closed when the lexer is freed
+// (or immediately, if allocation fails here).  Returns NULL on failure.
+mp_lexer_t *mp_lexer_new_from_fd(qstr filename, int fd, bool close_fd) {
+    mp_lexer_file_buf_t *fb = m_new_obj_maybe(mp_lexer_file_buf_t);
+    if (fb == NULL) {
+        if (close_fd) {
+            close(fd);
+        }
+        return NULL;
+    }
+    fb->fd = fd;
+    fb->close_fd = close_fd;
+    int n = read(fb->fd, fb->buf, sizeof(fb->buf));
+    // read() returns -1 on error; storing that in the unsigned fb->len would
+    // make the stream appear to hold a huge amount of uninitialised data, so
+    // clamp to 0 (== immediate EOF) instead
+    fb->len = (n > 0) ? n : 0;
+    fb->pos = 0;
+    return mp_lexer_new(filename, fb, (mp_lexer_stream_next_byte_t)file_buf_next_byte, (mp_lexer_stream_close_t)file_buf_close);
+}
+
+// Open filename read-only and create a lexer over it; the descriptor is owned
+// by the lexer.  Returns NULL if the file cannot be opened.
+mp_lexer_t *mp_lexer_new_from_file(const char *filename) {
+    int fd = open(filename, O_RDONLY);
+    if (fd < 0) {
+        return NULL;
+    }
+    return mp_lexer_new_from_fd(qstr_from_str(filename), fd, true);
+}
+
+#endif // MICROPY_HELPER_LEXER_UNIX
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/makeqstrdata.py Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,113 @@
+"""
+Process raw qstr file and output qstr data with length, hash and data bytes.
+
+This script works with Python 2.6, 2.7, 3.3 and 3.4.
+"""
+
+from __future__ import print_function
+
+import re
+import sys
+
+# codepoint2name is different in Python 2 to Python 3
+import platform
+if platform.python_version_tuple()[0] == '2':
+ from htmlentitydefs import codepoint2name
+elif platform.python_version_tuple()[0] == '3':
+ from html.entities import codepoint2name
+codepoint2name[ord('-')] = 'hyphen';
+
+# add some custom names to map characters that aren't in HTML
+codepoint2name[ord(' ')] = 'space'
+codepoint2name[ord('\'')] = 'squot'
+codepoint2name[ord(',')] = 'comma'
+codepoint2name[ord('.')] = 'dot'
+codepoint2name[ord(':')] = 'colon'
+codepoint2name[ord('/')] = 'slash'
+codepoint2name[ord('%')] = 'percent'
+codepoint2name[ord('#')] = 'hash'
+codepoint2name[ord('(')] = 'paren_open'
+codepoint2name[ord(')')] = 'paren_close'
+codepoint2name[ord('[')] = 'bracket_open'
+codepoint2name[ord(']')] = 'bracket_close'
+codepoint2name[ord('{')] = 'brace_open'
+codepoint2name[ord('}')] = 'brace_close'
+codepoint2name[ord('*')] = 'star'
+codepoint2name[ord('!')] = 'bang'
+codepoint2name[ord('\\')] = 'backslash'
+codepoint2name[ord('+')] = 'plus'
+
+# this must match the equivalent function in qstr.c
+def compute_hash(qstr, bytes_hash):
+    # djb2-style string hash, truncated to bytes_hash bytes
+    hash = 5381
+    for char in qstr:
+        hash = (hash * 33) ^ ord(char)
+    # Make sure that valid hash is never zero, zero means "hash not computed"
+    return (hash & ((1 << (8 * bytes_hash)) - 1)) or 1
+
+def do_work(infiles):
+    """Read QCFG(...) config lines and Q(...) qstr lines from infiles and
+    print the generated C header (QDEF entries) to stdout.
+    Exits with status 1 if no config lines were found (empty preprocessor output).
+    """
+    # read the qstrs in from the input files
+    qcfgs = {}  # config name -> value (e.g. BYTES_IN_LEN, BYTES_IN_HASH)
+    qstrs = {}  # C identifier -> (order, ident, qstr text)
+    for infile in infiles:
+        with open(infile, 'rt') as f:
+            for line in f:
+                line = line.strip()
+
+                # is this a config line?
+                match = re.match(r'^QCFG\((.+), (.+)\)', line)
+                if match:
+                    value = match.group(2)
+                    if value[0] == '(' and value[-1] == ')':
+                        # strip parenthesis from config value
+                        value = value[1:-1]
+                    qcfgs[match.group(1)] = value
+                    continue
+
+                # is this a QSTR line?
+                match = re.match(r'^Q\((.*)\)$', line)
+                if not match:
+                    continue
+
+                # get the qstr value
+                qstr = match.group(1)
+                # build a C identifier: non-alphanumeric chars become _name_ via codepoint2name
+                ident = re.sub(r'[^A-Za-z0-9_]', lambda s: "_" + codepoint2name[ord(s.group(0))] + "_", qstr)
+
+                # don't add duplicates
+                if ident in qstrs:
+                    continue
+
+                # add the qstr to the list, with order number to retain original order in file
+                qstrs[ident] = (len(qstrs), ident, qstr)
+
+    if not qcfgs:
+        sys.stderr.write("ERROR: Empty preprocessor output - check for errors above\n")
+        sys.exit(1)
+
+    # get config variables
+    cfg_bytes_len = int(qcfgs['BYTES_IN_LEN'])
+    cfg_bytes_hash = int(qcfgs['BYTES_IN_HASH'])
+    cfg_max_len = 1 << (8 * cfg_bytes_len)
+
+    # print out the starter of the generated C header file
+    print('// This file was automatically generated by makeqstrdata.py')
+    print('')
+
+    # add NULL qstr with no hash or data
+    print('QDEF(MP_QSTR_NULL, (const byte*)"%s%s" "")' % ('\\x00' * cfg_bytes_hash, '\\x00' * cfg_bytes_len))
+
+    # go through each qstr and print it out (in original file order)
+    for order, ident, qstr in sorted(qstrs.values(), key=lambda x: x[0]):
+        qhash = compute_hash(qstr, cfg_bytes_hash)
+        # Calculate len of str, taking escapes into account
+        qlen = len(qstr.replace("\\\\", "-").replace("\\", ""))
+        qdata = qstr.replace('"', '\\"')
+        if qlen >= cfg_max_len:
+            print('qstr is too long:', qstr)
+            assert False
+        # hash and length are emitted little-endian as \xNN escape sequences
+        qlen_str = ('\\x%02x' * cfg_bytes_len) % tuple(((qlen >> (8 * i)) & 0xff) for i in range(cfg_bytes_len))
+        qhash_str = ('\\x%02x' * cfg_bytes_hash) % tuple(((qhash >> (8 * i)) & 0xff) for i in range(cfg_bytes_hash))
+        print('QDEF(MP_QSTR_%s, (const byte*)"%s%s" "%s")' % (ident, qhash_str, qlen_str, qdata))
+
+if __name__ == "__main__":
+    # command line: one or more preprocessed qstr input files
+    do_work(sys.argv[1:])
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/makeversionhdr.py Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,107 @@
+"""
+Generate header file with macros defining MicroPython version info.
+
+This script works with Python 2.6, 2.7, 3.3 and 3.4.
+"""
+
+from __future__ import print_function
+
+import sys
+import os
+import datetime
+import subprocess
+
+def get_version_info_from_git():
+    """Return (git_tag, git_hash, ver) for the current git checkout, where ver
+    is [major, minor, micro] as strings.  Returns None when git information is
+    unavailable (old Python without check_output, no git, or no repository).
+    """
+    # Python 2.6 doesn't have check_output, so check for that
+    try:
+        subprocess.check_output
+        subprocess.check_call
+    except AttributeError:
+        return None
+
+    # Note: git describe doesn't work if no tag is available
+    try:
+        git_tag = subprocess.check_output(["git", "describe", "--dirty", "--always"], stderr=subprocess.STDOUT, universal_newlines=True).strip()
+    except subprocess.CalledProcessError as er:
+        if er.returncode == 128:
+            # git exit code of 128 means no repository found
+            return None
+        git_tag = ""
+    except OSError:
+        # git binary not found
+        return None
+    try:
+        git_hash = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"], stderr=subprocess.STDOUT, universal_newlines=True).strip()
+    except subprocess.CalledProcessError:
+        git_hash = "unknown"
+    except OSError:
+        return None
+
+    try:
+        # Check if there are any modified files.
+        subprocess.check_call(["git", "diff", "--no-ext-diff", "--quiet", "--exit-code"], stderr=subprocess.STDOUT)
+        # Check if there are any staged files.
+        subprocess.check_call(["git", "diff-index", "--cached", "--quiet", "HEAD", "--"], stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError:
+        git_hash += "-dirty"
+    except OSError:
+        return None
+
+    # Try to extract MicroPython version from git tag (vX.Y or vX.Y.Z)
+    if git_tag.startswith("v"):
+        ver = git_tag[1:].split("-")[0].split(".")
+        if len(ver) == 2:
+            ver.append("0")
+    else:
+        ver = ["0", "0", "1"]
+
+    return git_tag, git_hash, ver
+
+def get_version_info_from_docs_conf():
+    """Fallback version lookup: parse the "release = '...'" line in
+    ../docs/conf.py.  Returns (git_tag, "<no hash>", ver) or None if the
+    release line is not found.
+    """
+    with open(os.path.join(os.path.dirname(sys.argv[0]), "..", "docs", "conf.py")) as f:
+        for line in f:
+            if line.startswith("release = '"):
+                # strip "release = " prefix and the surrounding quotes
+                ver = line.strip()[10:].strip("'")
+                git_tag = "v" + ver
+                ver = ver.split(".")
+                if len(ver) == 2:
+                    ver.append("0")
+                return git_tag, "<no hash>", ver
+    return None
+
+def make_version_header(filename):
+    """Generate the mpversion header at filename, rewriting it only when its
+    contents would change (avoids needless rebuilds).
+    """
+    # Get version info using git, with fallback to docs/conf.py
+    info = get_version_info_from_git()
+    if info is None:
+        info = get_version_info_from_docs_conf()
+
+    # NOTE(review): if both lookups return None this unpack raises TypeError —
+    # confirm that failing loudly here is the intended behaviour.
+    git_tag, git_hash, ver = info
+
+    # Generate the file with the git and version info
+    file_data = """\
+// This file was generated by py/makeversionhdr.py
+#define MICROPY_GIT_TAG "%s"
+#define MICROPY_GIT_HASH "%s"
+#define MICROPY_BUILD_DATE "%s"
+#define MICROPY_VERSION_MAJOR (%s)
+#define MICROPY_VERSION_MINOR (%s)
+#define MICROPY_VERSION_MICRO (%s)
+#define MICROPY_VERSION_STRING "%s.%s.%s"
+""" % (git_tag, git_hash, datetime.date.today().strftime("%Y-%m-%d"),
+    ver[0], ver[1], ver[2], ver[0], ver[1], ver[2])
+
+    # Check if the file contents changed from last time
+    write_file = True
+    if os.path.isfile(filename):
+        with open(filename, 'r') as f:
+            existing_data = f.read()
+        if existing_data == file_data:
+            write_file = False
+
+    # Only write the file if we need to
+    if write_file:
+        print("Generating %s" % filename)
+        with open(filename, 'w') as f:
+            f.write(file_data)
+
+if __name__ == "__main__":
+    # command line: the output header filename
+    make_version_header(sys.argv[1])
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/malloc.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,196 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/mpstate.h"
+
+#if 0 // print debugging info
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#endif
+
+#if MICROPY_MEM_STATS
+#define UPDATE_PEAK() { if (MP_STATE_MEM(current_bytes_allocated) > MP_STATE_MEM(peak_bytes_allocated)) MP_STATE_MEM(peak_bytes_allocated) = MP_STATE_MEM(current_bytes_allocated); }
+#endif
+
+#if MICROPY_ENABLE_GC
+#include "py/gc.h"
+
+// We redirect standard alloc functions to GC heap - just for the rest of
+// this module. In the rest of micropython source, system malloc can be
+// freely accessed - for interfacing with system and 3rd-party libs for
+// example. On the other hand, some (e.g. bare-metal) ports may use GC
+// heap as system heap, so, to avoid warnings, we do undef's first.
+#undef malloc
+#undef free
+#undef realloc
+#define malloc(b) gc_alloc((b), false)
+#define malloc_with_finaliser(b) gc_alloc((b), true)
+#define free gc_free
+#define realloc(ptr, n) gc_realloc(ptr, n, true)
+#define realloc_ext(ptr, n, mv) gc_realloc(ptr, n, mv)
+#else
+// Non-GC fallback for realloc with an allow_move flag; mirrors gc_realloc's
+// signature so callers can be compiled either way.
+STATIC void *realloc_ext(void *ptr, size_t n_bytes, bool allow_move) {
+    if (allow_move) {
+        return realloc(ptr, n_bytes);
+    } else {
+        // We are asked to resize, but without moving the memory region pointed to
+        // by ptr. Unless the underlying memory manager has special provision for
+        // this behaviour there is nothing we can do except fail to resize.
+        return NULL;
+    }
+}
+#endif // MICROPY_ENABLE_GC
+
+// Allocate num_bytes; on failure (for non-zero sizes) delegates to
+// m_malloc_fail rather than returning NULL.  Updates memory stats if enabled.
+void *m_malloc(size_t num_bytes) {
+    void *ptr = malloc(num_bytes);
+    if (ptr == NULL && num_bytes != 0) {
+        return m_malloc_fail(num_bytes);
+    }
+#if MICROPY_MEM_STATS
+    MP_STATE_MEM(total_bytes_allocated) += num_bytes;
+    MP_STATE_MEM(current_bytes_allocated) += num_bytes;
+    UPDATE_PEAK();
+#endif
+    DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
+    return ptr;
+}
+
+// Like m_malloc but returns NULL on failure instead of calling m_malloc_fail.
+// NOTE(review): stats are updated even when the allocation fails — confirm
+// whether that skew is acceptable.
+void *m_malloc_maybe(size_t num_bytes) {
+    void *ptr = malloc(num_bytes);
+#if MICROPY_MEM_STATS
+    MP_STATE_MEM(total_bytes_allocated) += num_bytes;
+    MP_STATE_MEM(current_bytes_allocated) += num_bytes;
+    UPDATE_PEAK();
+#endif
+    DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
+    return ptr;
+}
+
+#if MICROPY_ENABLE_FINALISER
+// Allocate num_bytes with a GC finaliser slot; fails hard like m_malloc.
+void *m_malloc_with_finaliser(size_t num_bytes) {
+    void *ptr = malloc_with_finaliser(num_bytes);
+    if (ptr == NULL && num_bytes != 0) {
+        return m_malloc_fail(num_bytes);
+    }
+#if MICROPY_MEM_STATS
+    MP_STATE_MEM(total_bytes_allocated) += num_bytes;
+    MP_STATE_MEM(current_bytes_allocated) += num_bytes;
+    UPDATE_PEAK();
+#endif
+    DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
+    return ptr;
+}
+#endif
+
+// Allocate num_bytes and zero-fill; fails hard like m_malloc.
+// NOTE(review): the NULL check is redundant if m_malloc_fail never returns
+// (m_malloc already handles failure) — harmless but worth confirming.
+void *m_malloc0(size_t num_bytes) {
+    void *ptr = m_malloc(num_bytes);
+    if (ptr == NULL && num_bytes != 0) {
+        return m_malloc_fail(num_bytes);
+    }
+    memset(ptr, 0, num_bytes);
+    return ptr;
+}
+
+// Resize ptr to new_num_bytes; fails hard (m_malloc_fail) for non-zero sizes.
+// When MICROPY_MALLOC_USES_ALLOCATED_SIZE is set the caller must pass the old
+// size so memory stats can be kept accurate.
+#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
+void *m_realloc(void *ptr, size_t old_num_bytes, size_t new_num_bytes) {
+#else
+void *m_realloc(void *ptr, size_t new_num_bytes) {
+#endif
+    void *new_ptr = realloc(ptr, new_num_bytes);
+    if (new_ptr == NULL && new_num_bytes != 0) {
+        return m_malloc_fail(new_num_bytes);
+    }
+#if MICROPY_MEM_STATS
+    // At first thought, "Total bytes allocated" should only grow,
+    // after all, it's *total*. But consider for example 2K block
+    // shrunk to 1K and then grown to 2K again. It's still 2K
+    // allocated total. If we process only positive increments,
+    // we'll count 3K.
+    size_t diff = new_num_bytes - old_num_bytes;
+    MP_STATE_MEM(total_bytes_allocated) += diff;
+    MP_STATE_MEM(current_bytes_allocated) += diff;
+    UPDATE_PEAK();
+#endif
+    DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, old_num_bytes, new_num_bytes, new_ptr);
+    return new_ptr;
+}
+
+// Like m_realloc but returns NULL on failure; if allow_move is false the
+// region must not be relocated (may return NULL even when memory is available).
+#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
+void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes, bool allow_move) {
+#else
+void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move) {
+#endif
+    void *new_ptr = realloc_ext(ptr, new_num_bytes, allow_move);
+#if MICROPY_MEM_STATS
+    // At first thought, "Total bytes allocated" should only grow,
+    // after all, it's *total*. But consider for example 2K block
+    // shrunk to 1K and then grown to 2K again. It's still 2K
+    // allocated total. If we process only positive increments,
+    // we'll count 3K.
+    // Also, don't count failed reallocs.
+    if (!(new_ptr == NULL && new_num_bytes != 0)) {
+        size_t diff = new_num_bytes - old_num_bytes;
+        MP_STATE_MEM(total_bytes_allocated) += diff;
+        MP_STATE_MEM(current_bytes_allocated) += diff;
+        UPDATE_PEAK();
+    }
+#endif
+    DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, old_num_bytes, new_num_bytes, new_ptr);
+    return new_ptr;
+}
+
+// Free ptr; with MICROPY_MALLOC_USES_ALLOCATED_SIZE the caller passes the
+// allocation size so current_bytes_allocated can be decremented.
+#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
+void m_free(void *ptr, size_t num_bytes) {
+#else
+void m_free(void *ptr) {
+#endif
+    free(ptr);
+#if MICROPY_MEM_STATS
+    MP_STATE_MEM(current_bytes_allocated) -= num_bytes;
+#endif
+    DEBUG_printf("free %p, %d\n", ptr, num_bytes);
+}
+
+#if MICROPY_MEM_STATS
+// Accessors for the memory statistics kept in MP_STATE_MEM.
+
+// Total bytes ever allocated (adjusted for shrinking reallocs; see m_realloc).
+size_t m_get_total_bytes_allocated(void) {
+    return MP_STATE_MEM(total_bytes_allocated);
+}
+
+// Bytes currently allocated (allocations minus frees).
+size_t m_get_current_bytes_allocated(void) {
+    return MP_STATE_MEM(current_bytes_allocated);
+}
+
+// High-water mark of current_bytes_allocated.
+size_t m_get_peak_bytes_allocated(void) {
+    return MP_STATE_MEM(peak_bytes_allocated);
+}
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/map.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,424 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+
// Fixed empty map. Useful when need to call kw-receiving functions
// without any keywords from C, etc.
// is_fixed=1 makes any add/remove lookup fail fast; is_ordered=1 selects
// the linear-scan path, which is trivially empty here.
const mp_map_t mp_const_empty_map = {
    .all_keys_are_qstrs = 0,
    .is_fixed = 1,
    .is_ordered = 1,
    .used = 0,
    .alloc = 0,
    .table = NULL,
};
+
+// approximatelly doubling primes; made with Mathematica command: Table[Prime[Floor[(1.7)^n]], {n, 3, 24}]
+// prefixed with zero for the empty case.
+STATIC uint32_t doubling_primes[] = {0, 7, 19, 43, 89, 179, 347, 647, 1229, 2297, 4243, 7829, 14347, 26017, 47149, 84947, 152443, 273253, 488399, 869927, 1547173, 2745121, 4861607};
+
+STATIC mp_uint_t get_doubling_prime_greater_or_equal_to(mp_uint_t x) {
+ for (size_t i = 0; i < MP_ARRAY_SIZE(doubling_primes); i++) {
+ if (doubling_primes[i] >= x) {
+ return doubling_primes[i];
+ }
+ }
+ // ran out of primes in the table!
+ // return something sensible, at least make it odd
+ return x | 1;
+}
+
+/******************************************************************************/
+/* map */
+
+void mp_map_init(mp_map_t *map, mp_uint_t n) {
+ if (n == 0) {
+ map->alloc = 0;
+ map->table = NULL;
+ } else {
+ map->alloc = n;
+ map->table = m_new0(mp_map_elem_t, map->alloc);
+ }
+ map->used = 0;
+ map->all_keys_are_qstrs = 1;
+ map->is_fixed = 0;
+ map->is_ordered = 0;
+}
+
+void mp_map_init_fixed_table(mp_map_t *map, mp_uint_t n, const mp_obj_t *table) {
+ map->alloc = n;
+ map->used = n;
+ map->all_keys_are_qstrs = 1;
+ map->is_fixed = 1;
+ map->is_ordered = 1;
+ map->table = (mp_map_elem_t*)table;
+}
+
+mp_map_t *mp_map_new(mp_uint_t n) {
+ mp_map_t *map = m_new(mp_map_t, 1);
+ mp_map_init(map, n);
+ return map;
+}
+
+// Differentiate from mp_map_clear() - semantics is different
+void mp_map_deinit(mp_map_t *map) {
+ if (!map->is_fixed) {
+ m_del(mp_map_elem_t, map->table, map->alloc);
+ }
+ map->used = map->alloc = 0;
+}
+
+void mp_map_free(mp_map_t *map) {
+ mp_map_deinit(map);
+ m_del_obj(mp_map_t, map);
+}
+
+void mp_map_clear(mp_map_t *map) {
+ if (!map->is_fixed) {
+ m_del(mp_map_elem_t, map->table, map->alloc);
+ }
+ map->alloc = 0;
+ map->used = 0;
+ map->all_keys_are_qstrs = 1;
+ map->is_fixed = 0;
+ map->table = NULL;
+}
+
// Grow the hash table to the next doubling-prime size and re-insert all
// live entries. The new table is allocated *before* any field of the map
// is modified, so if the allocation raises the old map is left intact.
STATIC void mp_map_rehash(mp_map_t *map) {
    mp_uint_t old_alloc = map->alloc;
    mp_uint_t new_alloc = get_doubling_prime_greater_or_equal_to(map->alloc + 1);
    mp_map_elem_t *old_table = map->table;
    mp_map_elem_t *new_table = m_new0(mp_map_elem_t, new_alloc);
    // If we reach this point, table resizing succeeded, now we can edit the old map.
    map->alloc = new_alloc;
    map->used = 0;
    map->all_keys_are_qstrs = 1;
    map->table = new_table;
    for (mp_uint_t i = 0; i < old_alloc; i++) {
        // Skip empty slots and deleted-entry sentinels; rehashing thereby
        // compacts away any accumulated tombstones.
        if (old_table[i].key != MP_OBJ_NULL && old_table[i].key != MP_OBJ_SENTINEL) {
            mp_map_lookup(map, old_table[i].key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = old_table[i].value;
        }
    }
    m_del(mp_map_elem_t, old_table, old_alloc);
}
+
// MP_MAP_LOOKUP behaviour:
//  - returns NULL if not found, else the slot it was found in with key,value non-null
// MP_MAP_LOOKUP_ADD_IF_NOT_FOUND behaviour:
//  - returns slot, with key non-null and value=MP_OBJ_NULL if it was added
// MP_MAP_LOOKUP_REMOVE_IF_FOUND behaviour:
//  - returns NULL if not found, else the slot it was found in, with key set to
//    MP_OBJ_SENTINEL (or MP_OBJ_NULL) and value left intact for the caller
mp_map_elem_t *mp_map_lookup(mp_map_t *map, mp_obj_t index, mp_map_lookup_kind_t lookup_kind) {

    if (map->is_fixed && lookup_kind != MP_MAP_LOOKUP) {
        // can't add/remove from a fixed array
        return NULL;
    }

    // Work out if we can compare just pointers
    bool compare_only_ptrs = map->all_keys_are_qstrs;
    if (compare_only_ptrs) {
        if (MP_OBJ_IS_QSTR(index)) {
            // Index is a qstr, so can just do ptr comparison.
        } else if (MP_OBJ_IS_TYPE(index, &mp_type_str)) {
            // Index is a non-interned string.
            // We can either intern the string, or force a full equality comparison.
            // We chose the latter, since interning costs time and potentially RAM,
            // and it won't necessarily benefit subsequent calls because these calls
            // most likely won't pass the newly-interned string.
            compare_only_ptrs = false;
        } else if (lookup_kind != MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
            // If we are not adding, then we can return straight away a failed
            // lookup because we know that the index will never be found.
            return NULL;
        }
    }

    // if the map is an ordered array then we must do a brute force linear search
    if (map->is_ordered) {
        for (mp_map_elem_t *elem = &map->table[0], *top = &map->table[map->used]; elem < top; elem++) {
            if (elem->key == index || (!compare_only_ptrs && mp_obj_equal(elem->key, index))) {
                if (MP_UNLIKELY(lookup_kind == MP_MAP_LOOKUP_REMOVE_IF_FOUND)) {
                    elem->key = MP_OBJ_SENTINEL;
                    // keep elem->value so that caller can access it if needed
                }
                return elem;
            }
        }
        if (MP_LIKELY(lookup_kind != MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)) {
            return NULL;
        }
        // Adding to an ordered array: append at the end, growing if full.
        // TODO shrink array down over any previously-freed slots
        if (map->used == map->alloc) {
            // TODO: Alloc policy
            map->alloc += 4;
            map->table = m_renew(mp_map_elem_t, map->table, map->used, map->alloc);
            mp_seq_clear(map->table, map->used, map->alloc, sizeof(*map->table));
        }
        mp_map_elem_t *elem = map->table + map->used++;
        elem->key = index;
        if (!MP_OBJ_IS_QSTR(index)) {
            map->all_keys_are_qstrs = 0;
        }
        // NOTE(review): elem->value is presumed already MP_OBJ_NULL because
        // unused slots are cleared by mp_seq_clear/m_new0 -- confirm.
        return elem;
    }

    // map is a hash table (not an ordered array), so do a hash lookup

    if (map->alloc == 0) {
        if (lookup_kind == MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
            mp_map_rehash(map);
        } else {
            return NULL;
        }
    }

    // get hash of index, with fast path for common case of qstr
    mp_uint_t hash;
    if (MP_OBJ_IS_QSTR(index)) {
        hash = qstr_hash(MP_OBJ_QSTR_VALUE(index));
    } else {
        hash = MP_OBJ_SMALL_INT_VALUE(mp_unary_op(MP_UNARY_OP_HASH, index));
    }

    // Open addressing with linear probing (stride 1). avail_slot remembers
    // the first reusable slot (a tombstone) seen along the probe chain.
    mp_uint_t pos = hash % map->alloc;
    mp_uint_t start_pos = pos;
    mp_map_elem_t *avail_slot = NULL;
    for (;;) {
        mp_map_elem_t *slot = &map->table[pos];
        if (slot->key == MP_OBJ_NULL) {
            // found NULL slot, so index is not in table
            if (lookup_kind == MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
                map->used += 1;
                if (avail_slot == NULL) {
                    avail_slot = slot;
                }
                avail_slot->key = index;
                avail_slot->value = MP_OBJ_NULL;
                if (!MP_OBJ_IS_QSTR(index)) {
                    map->all_keys_are_qstrs = 0;
                }
                return avail_slot;
            } else {
                return NULL;
            }
        } else if (slot->key == MP_OBJ_SENTINEL) {
            // found deleted slot, remember for later
            if (avail_slot == NULL) {
                avail_slot = slot;
            }
        } else if (slot->key == index || (!compare_only_ptrs && mp_obj_equal(slot->key, index))) {
            // found index
            // Note: CPython does not replace the index; try x={True:'true'};x[1]='one';x
            if (lookup_kind == MP_MAP_LOOKUP_REMOVE_IF_FOUND) {
                // delete element in this slot
                map->used--;
                if (map->table[(pos + 1) % map->alloc].key == MP_OBJ_NULL) {
                    // optimisation if next slot is empty: no probe chain can
                    // pass through here, so a real empty marker is safe
                    slot->key = MP_OBJ_NULL;
                } else {
                    slot->key = MP_OBJ_SENTINEL;
                }
                // keep slot->value so that caller can access it if needed
            }
            return slot;
        }

        // not yet found, keep searching in this table
        pos = (pos + 1) % map->alloc;

        if (pos == start_pos) {
            // search got back to starting position, so index is not in table
            if (lookup_kind == MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
                if (avail_slot != NULL) {
                    // there was an available slot, so use that
                    map->used++;
                    avail_slot->key = index;
                    avail_slot->value = MP_OBJ_NULL;
                    if (!MP_OBJ_IS_QSTR(index)) {
                        map->all_keys_are_qstrs = 0;
                    }
                    return avail_slot;
                } else {
                    // not enough room in table, rehash it
                    mp_map_rehash(map);
                    // restart the search for the new element
                    start_pos = pos = hash % map->alloc;
                }
            } else {
                return NULL;
            }
        }
    }
}
+
+/******************************************************************************/
+/* set */
+
+#if MICROPY_PY_BUILTINS_SET
+
+void mp_set_init(mp_set_t *set, mp_uint_t n) {
+ set->alloc = n;
+ set->used = 0;
+ set->table = m_new0(mp_obj_t, set->alloc);
+}
+
// Grow the set's table to the next doubling-prime size and re-insert all
// live elements (tombstones are dropped in the process).
// NOTE(review): unlike mp_map_rehash, alloc/used are updated *before* the
// m_new0 call; if that allocation raises, the set is left inconsistent --
// confirm whether callers guard against this.
STATIC void mp_set_rehash(mp_set_t *set) {
    mp_uint_t old_alloc = set->alloc;
    mp_obj_t *old_table = set->table;
    set->alloc = get_doubling_prime_greater_or_equal_to(set->alloc + 1);
    set->used = 0;
    set->table = m_new0(mp_obj_t, set->alloc);
    for (mp_uint_t i = 0; i < old_alloc; i++) {
        // Only live elements are carried over; NULL (empty) and SENTINEL
        // (deleted) slots are skipped.
        if (old_table[i] != MP_OBJ_NULL && old_table[i] != MP_OBJ_SENTINEL) {
            mp_set_lookup(set, old_table[i], MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
        }
    }
    m_del(mp_obj_t, old_table, old_alloc);
}
+
// Look up index in the set, using open addressing with linear probing.
// Returns the stored element (which may compare equal to, but not be
// identical to, index) or MP_OBJ_NULL if not found/removed-nothing.
mp_obj_t mp_set_lookup(mp_set_t *set, mp_obj_t index, mp_map_lookup_kind_t lookup_kind) {
    // Note: lookup_kind can be MP_MAP_LOOKUP_ADD_IF_NOT_FOUND_OR_REMOVE_IF_FOUND which
    // is handled by using bitwise operations.

    if (set->alloc == 0) {
        if (lookup_kind & MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
            mp_set_rehash(set);
        } else {
            return MP_OBJ_NULL;
        }
    }
    mp_uint_t hash = MP_OBJ_SMALL_INT_VALUE(mp_unary_op(MP_UNARY_OP_HASH, index));
    mp_uint_t pos = hash % set->alloc;
    mp_uint_t start_pos = pos;
    // avail_slot remembers the first tombstone seen, so adds reuse it.
    mp_obj_t *avail_slot = NULL;
    for (;;) {
        mp_obj_t elem = set->table[pos];
        if (elem == MP_OBJ_NULL) {
            // found NULL slot, so index is not in table
            if (lookup_kind & MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
                if (avail_slot == NULL) {
                    avail_slot = &set->table[pos];
                }
                set->used++;
                *avail_slot = index;
                return index;
            } else {
                return MP_OBJ_NULL;
            }
        } else if (elem == MP_OBJ_SENTINEL) {
            // found deleted slot, remember for later
            if (avail_slot == NULL) {
                avail_slot = &set->table[pos];
            }
        } else if (mp_obj_equal(elem, index)) {
            // found index
            if (lookup_kind & MP_MAP_LOOKUP_REMOVE_IF_FOUND) {
                // delete element
                set->used--;
                if (set->table[(pos + 1) % set->alloc] == MP_OBJ_NULL) {
                    // optimisation if next slot is empty: no probe chain
                    // passes through, so a real empty marker is safe
                    set->table[pos] = MP_OBJ_NULL;
                } else {
                    set->table[pos] = MP_OBJ_SENTINEL;
                }
            }
            return elem;
        }

        // not yet found, keep searching in this table
        pos = (pos + 1) % set->alloc;

        if (pos == start_pos) {
            // search got back to starting position, so index is not in table
            if (lookup_kind & MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
                if (avail_slot != NULL) {
                    // there was an available slot, so use that
                    set->used++;
                    *avail_slot = index;
                    return index;
                } else {
                    // not enough room in table, rehash it
                    mp_set_rehash(set);
                    // restart the search for the new element
                    start_pos = pos = hash % set->alloc;
                }
            } else {
                return MP_OBJ_NULL;
            }
        }
    }
}
+
+mp_obj_t mp_set_remove_first(mp_set_t *set) {
+ for (mp_uint_t pos = 0; pos < set->alloc; pos++) {
+ if (MP_SET_SLOT_IS_FILLED(set, pos)) {
+ mp_obj_t elem = set->table[pos];
+ // delete element
+ set->used--;
+ if (set->table[(pos + 1) % set->alloc] == MP_OBJ_NULL) {
+ // optimisation if next slot is empty
+ set->table[pos] = MP_OBJ_NULL;
+ } else {
+ set->table[pos] = MP_OBJ_SENTINEL;
+ }
+ return elem;
+ }
+ }
+ return MP_OBJ_NULL;
+}
+
+void mp_set_clear(mp_set_t *set) {
+ m_del(mp_obj_t, set->table, set->alloc);
+ set->alloc = 0;
+ set->used = 0;
+ set->table = NULL;
+}
+
+#endif // MICROPY_PY_BUILTINS_SET
+
#if defined(DEBUG_PRINT) && DEBUG_PRINT
// Debug helper: print every slot of the map (including empty ones) as
// "key: value-pointer" lines, terminated by "---".
void mp_map_dump(mp_map_t *map) {
    for (mp_uint_t i = 0; i < map->alloc; i++) {
        // Compare against MP_OBJ_NULL (not the C NULL) for the empty-slot
        // check, consistent with the slot handling in the rest of this file;
        // the two are not guaranteed to be the same value in every object
        // representation.
        if (map->table[i].key != MP_OBJ_NULL) {
            mp_obj_print(map->table[i].key, PRINT_REPR);
        } else {
            printf("(nil)");
        }
        printf(": %p\n", map->table[i].value);
    }
    printf("---\n");
}
#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/misc.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,225 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_MISC_H__
+#define __MICROPY_INCLUDED_PY_MISC_H__
+
+// a mini library of useful types and functions
+
+/** types *******************************************************/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stddef.h>
+
+typedef unsigned char byte;
+typedef unsigned int uint;
+
+/** generic ops *************************************************/
+
+#ifndef MIN
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+#endif
+#ifndef MAX
+#define MAX(x, y) ((x) > (y) ? (x) : (y))
+#endif
+
+/** memory allocation ******************************************/
+
+// TODO make a lazy m_renew that can increase by a smaller amount than requested (but by at least 1 more element)
+
+#define m_new(type, num) ((type*)(m_malloc(sizeof(type) * (num))))
+#define m_new_maybe(type, num) ((type*)(m_malloc_maybe(sizeof(type) * (num))))
+#define m_new0(type, num) ((type*)(m_malloc0(sizeof(type) * (num))))
+#define m_new_obj(type) (m_new(type, 1))
+#define m_new_obj_maybe(type) (m_new_maybe(type, 1))
+#define m_new_obj_var(obj_type, var_type, var_num) ((obj_type*)m_malloc(sizeof(obj_type) + sizeof(var_type) * (var_num)))
+#define m_new_obj_var_maybe(obj_type, var_type, var_num) ((obj_type*)m_malloc_maybe(sizeof(obj_type) + sizeof(var_type) * (var_num)))
+#if MICROPY_ENABLE_FINALISER
+#define m_new_obj_with_finaliser(type) ((type*)(m_malloc_with_finaliser(sizeof(type))))
+#else
+#define m_new_obj_with_finaliser(type) m_new_obj(type)
+#endif
+#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
+#define m_renew(type, ptr, old_num, new_num) ((type*)(m_realloc((ptr), sizeof(type) * (old_num), sizeof(type) * (new_num))))
+#define m_renew_maybe(type, ptr, old_num, new_num, allow_move) ((type*)(m_realloc_maybe((ptr), sizeof(type) * (old_num), sizeof(type) * (new_num), (allow_move))))
+#define m_del(type, ptr, num) m_free(ptr, sizeof(type) * (num))
+#define m_del_var(obj_type, var_type, var_num, ptr) (m_free(ptr, sizeof(obj_type) + sizeof(var_type) * (var_num)))
+#else
+#define m_renew(type, ptr, old_num, new_num) ((type*)(m_realloc((ptr), sizeof(type) * (new_num))))
+#define m_renew_maybe(type, ptr, old_num, new_num, allow_move) ((type*)(m_realloc_maybe((ptr), sizeof(type) * (new_num), (allow_move))))
+#define m_del(type, ptr, num) ((void)(num), m_free(ptr))
+#define m_del_var(obj_type, var_type, var_num, ptr) ((void)(var_num), m_free(ptr))
+#endif
+#define m_del_obj(type, ptr) (m_del(type, ptr, 1))
+
+void *m_malloc(size_t num_bytes);
+void *m_malloc_maybe(size_t num_bytes);
+void *m_malloc_with_finaliser(size_t num_bytes);
+void *m_malloc0(size_t num_bytes);
+#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
+void *m_realloc(void *ptr, size_t old_num_bytes, size_t new_num_bytes);
+void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes, bool allow_move);
+void m_free(void *ptr, size_t num_bytes);
+#else
+void *m_realloc(void *ptr, size_t new_num_bytes);
+void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move);
+void m_free(void *ptr);
+#endif
+void *m_malloc_fail(size_t num_bytes);
+
+#if MICROPY_MEM_STATS
+size_t m_get_total_bytes_allocated(void);
+size_t m_get_current_bytes_allocated(void);
+size_t m_get_peak_bytes_allocated(void);
+#endif
+
+/** array helpers ***********************************************/
+
+// get the number of elements in a fixed-size array
+#define MP_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+// align ptr to the nearest multiple of "alignment"
+#define MP_ALIGN(ptr, alignment) (void*)(((uintptr_t)(ptr) + ((alignment) - 1)) & ~((alignment) - 1))
+
+/** unichar / UTF-8 *********************************************/
+
+#if MICROPY_PY_BUILTINS_STR_UNICODE
+// with unicode enabled we need a type which can fit chars up to 0x10ffff
+typedef uint32_t unichar;
+#else
+// without unicode enabled we only need to fit chars up to 0xff
+// (on 16-bit archs uint is 16-bits and more efficient than uint32_t)
+typedef uint unichar;
+#endif
+
+unichar utf8_get_char(const byte *s);
+const byte *utf8_next_char(const byte *s);
+
+bool unichar_isspace(unichar c);
+bool unichar_isalpha(unichar c);
+bool unichar_isprint(unichar c);
+bool unichar_isdigit(unichar c);
+bool unichar_isxdigit(unichar c);
+bool unichar_isident(unichar c);
+bool unichar_isupper(unichar c);
+bool unichar_islower(unichar c);
+unichar unichar_tolower(unichar c);
+unichar unichar_toupper(unichar c);
+mp_uint_t unichar_xdigit_value(unichar c);
+mp_uint_t unichar_charlen(const char *str, mp_uint_t len);
+#define UTF8_IS_NONASCII(ch) ((ch) & 0x80)
+#define UTF8_IS_CONT(ch) (((ch) & 0xC0) == 0x80)
+
+/** variable string *********************************************/
+
// Dynamically-sized byte/character buffer used throughout the core.
typedef struct _vstr_t {
    size_t alloc;        // number of bytes allocated in buf
    size_t len;          // number of bytes currently in use
    char *buf;
    bool had_error : 1;  // NOTE(review): presumably set when an operation failed -- confirm in vstr.c
    bool fixed_buf : 1;  // NOTE(review): buf appears caller-supplied (see vstr_init_fixed_buf); must not be grown/freed -- confirm in vstr.c
} vstr_t;
+
+// convenience macro to declare a vstr with a fixed size buffer on the stack
+#define VSTR_FIXED(vstr, alloc) vstr_t vstr; char vstr##_buf[(alloc)]; vstr_init_fixed_buf(&vstr, (alloc), vstr##_buf);
+
+void vstr_init(vstr_t *vstr, size_t alloc);
+void vstr_init_len(vstr_t *vstr, size_t len);
+void vstr_init_fixed_buf(vstr_t *vstr, size_t alloc, char *buf);
+struct _mp_print_t;
+void vstr_init_print(vstr_t *vstr, size_t alloc, struct _mp_print_t *print);
+void vstr_clear(vstr_t *vstr);
+vstr_t *vstr_new(void);
+vstr_t *vstr_new_size(size_t alloc);
+void vstr_free(vstr_t *vstr);
+void vstr_reset(vstr_t *vstr);
+bool vstr_had_error(vstr_t *vstr);
+char *vstr_str(vstr_t *vstr);
+size_t vstr_len(vstr_t *vstr);
+void vstr_hint_size(vstr_t *vstr, size_t size);
+char *vstr_extend(vstr_t *vstr, size_t size);
+char *vstr_add_len(vstr_t *vstr, size_t len);
+char *vstr_null_terminated_str(vstr_t *vstr);
+void vstr_add_byte(vstr_t *vstr, byte v);
+void vstr_add_char(vstr_t *vstr, unichar chr);
+void vstr_add_str(vstr_t *vstr, const char *str);
+void vstr_add_strn(vstr_t *vstr, const char *str, size_t len);
+void vstr_ins_byte(vstr_t *vstr, size_t byte_pos, byte b);
+void vstr_ins_char(vstr_t *vstr, size_t char_pos, unichar chr);
+void vstr_cut_head_bytes(vstr_t *vstr, size_t bytes_to_cut);
+void vstr_cut_tail_bytes(vstr_t *vstr, size_t bytes_to_cut);
+void vstr_cut_out_bytes(vstr_t *vstr, size_t byte_pos, size_t bytes_to_cut);
+void vstr_printf(vstr_t *vstr, const char *fmt, ...);
+
+/** non-dynamic size-bounded variable buffer/string *************/
+
+#define CHECKBUF(buf, max_size) char buf[max_size + 1]; size_t buf##_len = max_size; char *buf##_p = buf;
+#define CHECKBUF_RESET(buf, max_size) buf##_len = max_size; buf##_p = buf;
+#define CHECKBUF_APPEND(buf, src, src_len) \
+ { size_t l = MIN(src_len, buf##_len); \
+ memcpy(buf##_p, src, l); \
+ buf##_len -= l; \
+ buf##_p += l; }
+#define CHECKBUF_APPEND_0(buf) { *buf##_p = 0; }
+#define CHECKBUF_LEN(buf) (buf##_p - buf)
+
+#ifdef va_start
+void vstr_vprintf(vstr_t *vstr, const char *fmt, va_list ap);
+#endif
+
+// Debugging helpers
+int DEBUG_printf(const char *fmt, ...);
+
+extern mp_uint_t mp_verbose_flag;
+
+// This is useful for unicode handling. Some CPU archs have
+// special instructions for efficient implementation of this
+// function (e.g. CLZ on ARM).
+// NOTE: this function is unused at the moment
+#ifndef count_lead_ones
+static inline mp_uint_t count_lead_ones(byte val) {
+ mp_uint_t c = 0;
+ for (byte mask = 0x80; val & mask; mask >>= 1) {
+ c++;
+ }
+ return c;
+}
+#endif
+
+/** float internals *************/
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+#define MP_FLOAT_EXP_BITS (11)
+#define MP_FLOAT_FRAC_BITS (52)
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+#define MP_FLOAT_EXP_BITS (8)
+#define MP_FLOAT_FRAC_BITS (23)
+#endif
+#define MP_FLOAT_EXP_BIAS ((1 << (MP_FLOAT_EXP_BITS - 1)) - 1)
+#endif // MICROPY_PY_BUILTINS_FLOAT
+
+#endif // __MICROPY_INCLUDED_PY_MISC_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/mkenv.mk Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,66 @@
+ifneq ($(lastword a b),b)
+$(error These Makefiles require make 3.81 or newer)
+endif
+
+# Set TOP to be the path to get from the current directory (where make was
+# invoked) to the top of the tree. $(lastword $(MAKEFILE_LIST)) returns
+# the name of this makefile relative to where make was invoked.
+#
+# We assume that this file is in the py directory so we use $(dir ) twice
+# to get to the top of the tree.
+
+THIS_MAKEFILE := $(lastword $(MAKEFILE_LIST))
+TOP := $(patsubst %/py/mkenv.mk,%,$(THIS_MAKEFILE))
+
+# Turn on increased build verbosity by defining BUILD_VERBOSE in your main
+# Makefile or in your environment. You can also use V=1 on the make command
+# line.
+
+ifeq ("$(origin V)", "command line")
+BUILD_VERBOSE=$(V)
+endif
+ifndef BUILD_VERBOSE
+BUILD_VERBOSE = 0
+endif
+ifeq ($(BUILD_VERBOSE),0)
+Q = @
+else
+Q =
+endif
+# Since this is a new feature, advertise it
+ifeq ($(BUILD_VERBOSE),0)
+$(info Use make V=1 or set BUILD_VERBOSE in your environment to increase build verbosity.)
+endif
+
+# default settings; can be overridden in main Makefile
+
+PY_SRC ?= $(TOP)/py
+BUILD ?= build
+
+RM = rm
+ECHO = @echo
+CP = cp
+MKDIR = mkdir
+SED = sed
+PYTHON = python
+
+AS = $(CROSS_COMPILE)as
+CC = $(CROSS_COMPILE)gcc
+CXX = $(CROSS_COMPILE)g++
+LD = $(CROSS_COMPILE)ld
+OBJCOPY = $(CROSS_COMPILE)objcopy
+SIZE = $(CROSS_COMPILE)size
+STRIP = $(CROSS_COMPILE)strip
+AR = $(CROSS_COMPILE)ar
+ifeq ($(MICROPY_FORCE_32BIT),1)
+CC += -m32
+CXX += -m32
+LD += -m32
+endif
+
+all:
+.PHONY: all
+
+.DELETE_ON_ERROR:
+
+MKENV_INCLUDED = 1
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/mkrules.mk Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,119 @@ +ifneq ($(MKENV_INCLUDED),1) +# We assume that mkenv is in the same directory as this file. +THIS_MAKEFILE = $(lastword $(MAKEFILE_LIST)) +include $(dir $(THIS_MAKEFILE))mkenv.mk +endif + +# This file expects that OBJ contains a list of all of the object files. +# The directory portion of each object file is used to locate the source +# and should not contain any ..'s but rather be relative to the top of the +# tree. +# +# So for example, py/map.c would have an object file name py/map.o +# The object files will go into the build directory and mantain the same +# directory structure as the source tree. So the final dependency will look +# like this: +# +# build/py/map.o: py/map.c +# +# We set vpath to point to the top of the tree so that the source files +# can be located. By following this scheme, it allows a single build rule +# to be used to compile all .c files. + +vpath %.S . $(TOP) +$(BUILD)/%.o: %.S + $(ECHO) "CC $<" + $(Q)$(CC) $(CFLAGS) -c -o $@ $< + +vpath %.s . $(TOP) +$(BUILD)/%.o: %.s + $(ECHO) "AS $<" + $(Q)$(AS) -o $@ $< + +define compile_c +$(ECHO) "CC $<" +$(Q)$(CC) $(CFLAGS) -c -MD -o $@ $< +@# The following fixes the dependency file. +@# See http://make.paulandlesley.org/autodep.html for details. +@# Regex adjusted from the above to play better with Windows paths, etc. +@$(CP) $(@:.o=.d) $(@:.o=.P); \ + $(SED) -e 's/#.*//' -e 's/^.*: *//' -e 's/ *\\$$//' \ + -e '/^$$/ d' -e 's/$$/ :/' < $(@:.o=.d) >> $(@:.o=.P); \ + $(RM) -f $(@:.o=.d) +endef + +vpath %.c . $(TOP) +$(BUILD)/%.o: %.c + $(call compile_c) + +$(BUILD)/%.pp: %.c + $(ECHO) "PreProcess $<" + $(Q)$(CC) $(CFLAGS) -E -Wp,-C,-dD,-dI -o $@ $< + +# The following rule uses | to create an order only prereuisite. Order only +# prerequisites only get built if they don't exist. They don't cause timestamp +# checking to be performed. 
+# +# We don't know which source files actually need the generated.h (since +# it is #included from str.h). The compiler generated dependencies will cause +# the right .o's to get recompiled if the generated.h file changes. Adding +# an order-only dependendency to all of the .o's will cause the generated .h +# to get built before we try to compile any of them. +$(OBJ): | $(HEADER_BUILD)/qstrdefs.generated.h $(HEADER_BUILD)/mpversion.h + +# $(sort $(var)) removes duplicates +# +# The net effect of this, is it causes the objects to depend on the +# object directories (but only for existence), and the object directories +# will be created if they don't exist. +OBJ_DIRS = $(sort $(dir $(OBJ))) +$(OBJ): | $(OBJ_DIRS) +$(OBJ_DIRS): + $(MKDIR) -p $@ + +$(HEADER_BUILD): + $(MKDIR) -p $@ + +ifneq ($(PROG),) +# Build a standalone executable (unix does this) + +all: $(PROG) + +$(PROG): $(OBJ) + $(ECHO) "LINK $@" +# Do not pass COPT here - it's *C* compiler optimizations. For example, +# we may want to compile using Thumb, but link with non-Thumb libc. + $(Q)$(CC) -o $@ $^ $(LIB) $(LDFLAGS) +ifndef DEBUG + $(Q)$(STRIP) $(STRIPFLAGS_EXTRA) $(PROG) +endif + $(Q)$(SIZE) $(PROG) + +lib: $(OBJ) + $(AR) rcs libmicropython.a $^ + +clean: clean-prog +clean-prog: + $(RM) -f $(PROG) + $(RM) -f $(PROG).map + +.PHONY: clean-prog +endif + +clean: + $(RM) -rf $(BUILD) +.PHONY: clean + +print-cfg: + $(ECHO) "PY_SRC = $(PY_SRC)" + $(ECHO) "BUILD = $(BUILD)" + $(ECHO) "OBJ = $(OBJ)" +.PHONY: print-cfg + +print-def: + @$(ECHO) "The following defines are built into the $(CC) compiler" + touch __empty__.c + @$(CC) -E -Wp,-dM __empty__.c + @$(RM) -f __empty__.c + +-include $(OBJ:.o=.P)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/modarray.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,44 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/builtin.h"
+
+#if MICROPY_PY_ARRAY
+
// Globals table for the 'array' module: just __name__ and the array type.
STATIC const mp_rom_map_elem_t mp_module_array_globals_table[] = {
    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_array) },
    { MP_ROM_QSTR(MP_QSTR_array), MP_ROM_PTR(&mp_type_array) },
};

STATIC MP_DEFINE_CONST_DICT(mp_module_array_globals, mp_module_array_globals_table);

// The module object registered under the name 'array'.
const mp_obj_module_t mp_module_array = {
    .base = { &mp_type_module },
    .name = MP_QSTR_array,
    .globals = (mp_obj_dict_t*)&mp_module_array_globals,
};
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/modbuiltins.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,731 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/smallint.h"
+#include "py/objint.h"
+#include "py/objstr.h"
+#include "py/objtype.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/stream.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#include <math.h>
+#endif
+
+#if MICROPY_PY_IO
+extern struct _mp_dummy_t mp_sys_stdout_obj; // type is irrelevant, just need pointer
+#endif
+
+// args[0] is function from class body
+// args[1] is class name
+// args[2:] are base objects
+// Executes the class body function in a fresh locals dict, then calls the
+// metaclass to create the class object.  Returns the new class.
+STATIC mp_obj_t mp_builtin___build_class__(size_t n_args, const mp_obj_t *args) {
+    assert(2 <= n_args);
+
+    // set the new class's __locals__ object; class-body assignments go here
+    mp_obj_dict_t *old_locals = mp_locals_get();
+    mp_obj_t class_locals = mp_obj_new_dict(0);
+    mp_locals_set(MP_OBJ_TO_PTR(class_locals));
+
+    // call the class code (may return a cell for __class__/super support)
+    mp_obj_t cell = mp_call_function_0(args[0]);
+
+    // restore old __locals__ object
+    mp_locals_set(old_locals);
+
+    // get the class type (meta object) from the base objects
+    mp_obj_t meta;
+    if (n_args == 2) {
+        // no explicit bases, so use 'type'
+        meta = MP_OBJ_FROM_PTR(&mp_type_type);
+    } else {
+        // use type of first base object
+        meta = MP_OBJ_FROM_PTR(mp_obj_get_type(args[2]));
+    }
+
+    // TODO do proper metaclass resolution for multiple base objects
+
+    // create the new class using a call to the meta object:
+    // meta(name, bases, namespace)
+    mp_obj_t meta_args[3];
+    meta_args[0] = args[1]; // class name
+    meta_args[1] = mp_obj_new_tuple(n_args - 2, args + 2); // tuple of bases
+    meta_args[2] = class_locals; // dict of members
+    mp_obj_t new_class = mp_call_function_n_kw(meta, 3, 0, meta_args);
+
+    // store into cell if needed
+    if (cell != mp_const_none) {
+        mp_obj_cell_set(cell, new_class);
+    }
+
+    return new_class;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR(mp_builtin___build_class___obj, 2, mp_builtin___build_class__);
+
+// abs(x) for floats, complex numbers (magnitude) and integers.
+STATIC mp_obj_t mp_builtin_abs(mp_obj_t o_in) {
+    if (0) {
+        // dummy branch, so the conditionally-compiled cases below can all
+        // start uniformly with "} else if"
+#if MICROPY_PY_BUILTINS_FLOAT
+    } else if (mp_obj_is_float(o_in)) {
+        mp_float_t value = mp_obj_float_get(o_in);
+        // TODO check for NaN etc
+        if (value < 0) {
+            return mp_obj_new_float(-value);
+        } else {
+            // already non-negative: return the object itself, no allocation
+            return o_in;
+        }
+#if MICROPY_PY_BUILTINS_COMPLEX
+    } else if (MP_OBJ_IS_TYPE(o_in, &mp_type_complex)) {
+        // complex magnitude: sqrt(re*re + im*im)
+        mp_float_t real, imag;
+        mp_obj_complex_get(o_in, &real, &imag);
+        return mp_obj_new_float(MICROPY_FLOAT_C_FUN(sqrt)(real*real + imag*imag));
+#endif
+#endif
+    } else {
+        // this will raise a TypeError if the argument is not integral
+        return mp_obj_int_abs(o_in);
+    }
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_abs_obj, mp_builtin_abs);
+
+// all(iterable): true iff every item is truthy; empty iterable -> True.
+STATIC mp_obj_t mp_builtin_all(mp_obj_t o_in) {
+    mp_obj_t iter = mp_getiter(o_in);
+    for (mp_obj_t next; (next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION;) {
+        if (!mp_obj_is_true(next)) {
+            // short-circuit on the first falsy item
+            return mp_const_false;
+        }
+    }
+    return mp_const_true;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_all_obj, mp_builtin_all);
+
+// any(iterable): true iff at least one item is truthy; empty -> False.
+STATIC mp_obj_t mp_builtin_any(mp_obj_t o_in) {
+    mp_obj_t iter = mp_getiter(o_in);
+    for (mp_obj_t next; (next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION;) {
+        if (mp_obj_is_true(next)) {
+            // short-circuit on the first truthy item
+            return mp_const_true;
+        }
+    }
+    return mp_const_false;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_any_obj, mp_builtin_any);
+
+// bin(x), implemented as str.format with the QSTR-encoded spec "{:#b}".
+STATIC mp_obj_t mp_builtin_bin(mp_obj_t o_in) {
+    mp_obj_t args[] = { MP_OBJ_NEW_QSTR(MP_QSTR__brace_open__colon__hash_b_brace_close_), o_in };
+    return mp_obj_str_format(MP_ARRAY_SIZE(args), args, NULL);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_bin_obj, mp_builtin_bin);
+
+// callable(obj): map the C-level predicate onto the Python bool singletons.
+STATIC mp_obj_t mp_builtin_callable(mp_obj_t o_in) {
+    return mp_obj_is_callable(o_in) ? mp_const_true : mp_const_false;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_callable_obj, mp_builtin_callable);
+
+// chr(i): return the 1-character string for code point i.
+STATIC mp_obj_t mp_builtin_chr(mp_obj_t o_in) {
+    #if MICROPY_PY_BUILTINS_STR_UNICODE
+    // hand-rolled UTF-8 encoder: 1 to 4 bytes depending on the code point
+    mp_uint_t c = mp_obj_get_int(o_in);
+    char str[4];
+    int len = 0;
+    if (c < 0x80) {
+        // ASCII, single byte
+        *str = c; len = 1;
+    } else if (c < 0x800) {
+        // 2-byte sequence: 110xxxxx 10xxxxxx
+        str[0] = (c >> 6) | 0xC0;
+        str[1] = (c & 0x3F) | 0x80;
+        len = 2;
+    } else if (c < 0x10000) {
+        // 3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx
+        str[0] = (c >> 12) | 0xE0;
+        str[1] = ((c >> 6) & 0x3F) | 0x80;
+        str[2] = (c & 0x3F) | 0x80;
+        len = 3;
+    } else if (c < 0x110000) {
+        // 4-byte sequence: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+        str[0] = (c >> 18) | 0xF0;
+        str[1] = ((c >> 12) & 0x3F) | 0x80;
+        str[2] = ((c >> 6) & 0x3F) | 0x80;
+        str[3] = (c & 0x3F) | 0x80;
+        len = 4;
+    } else {
+        // beyond the Unicode range
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "chr() arg not in range(0x110000)"));
+    }
+    return mp_obj_new_str(str, len, true);
+    #else
+    // non-unicode build: only 8-bit values are representable
+    mp_int_t ord = mp_obj_get_int(o_in);
+    if (0 <= ord && ord <= 0xff) {
+        char str[1] = {ord};
+        return mp_obj_new_str(str, 1, true);
+    } else {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "chr() arg not in range(256)"));
+    }
+    #endif
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_chr_obj, mp_builtin_chr);
+
+// dir([obj]): with no argument, list names in the current local scope;
+// with an argument, list names from the object's type dict (or a module's
+// globals) plus, for instances, the per-instance members map.
+STATIC mp_obj_t mp_builtin_dir(size_t n_args, const mp_obj_t *args) {
+    // TODO make this function more general and less of a hack
+
+    mp_obj_dict_t *dict = NULL;
+    mp_map_t *members = NULL;
+    if (n_args == 0) {
+        // make a list of names in the local name space
+        dict = mp_locals_get();
+    } else { // n_args == 1
+        // make a list of names in the given object
+        if (MP_OBJ_IS_TYPE(args[0], &mp_type_module)) {
+            dict = mp_obj_module_get_globals(args[0]);
+        } else {
+            mp_obj_type_t *type;
+            if (MP_OBJ_IS_TYPE(args[0], &mp_type_type)) {
+                // a class: use its own locals dict directly
+                type = MP_OBJ_TO_PTR(args[0]);
+            } else {
+                // an instance: use its type's locals dict
+                type = mp_obj_get_type(args[0]);
+            }
+            // locals_dict may be absent, or not a real dict for some types
+            if (type->locals_dict != NULL && type->locals_dict->base.type == &mp_type_dict) {
+                dict = type->locals_dict;
+            }
+        }
+        if (mp_obj_is_instance_type(mp_obj_get_type(args[0]))) {
+            // also include attributes stored directly on the instance
+            mp_obj_instance_t *inst = MP_OBJ_TO_PTR(args[0]);
+            members = &inst->members;
+        }
+    }
+
+    // walk the hash tables and collect the keys of all filled slots
+    mp_obj_t dir = mp_obj_new_list(0, NULL);
+    if (dict != NULL) {
+        for (mp_uint_t i = 0; i < dict->map.alloc; i++) {
+            if (MP_MAP_SLOT_IS_FILLED(&dict->map, i)) {
+                mp_obj_list_append(dir, dict->map.table[i].key);
+            }
+        }
+    }
+    if (members != NULL) {
+        for (mp_uint_t i = 0; i < members->alloc; i++) {
+            if (MP_MAP_SLOT_IS_FILLED(members, i)) {
+                mp_obj_list_append(dir, members->table[i].key);
+            }
+        }
+    }
+    return dir;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_dir_obj, 0, 1, mp_builtin_dir);
+
+// divmod(a, b): delegate to the binary-op dispatcher; returns a 2-tuple.
+STATIC mp_obj_t mp_builtin_divmod(mp_obj_t o1_in, mp_obj_t o2_in) {
+    return mp_binary_op(MP_BINARY_OP_DIVMOD, o1_in, o2_in);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_builtin_divmod_obj, mp_builtin_divmod);
+
+// hash(obj): delegate to the unary-op dispatcher.
+STATIC mp_obj_t mp_builtin_hash(mp_obj_t o_in) {
+    // result is guaranteed to be a (small) int
+    return mp_unary_op(MP_UNARY_OP_HASH, o_in);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_hash_obj, mp_builtin_hash);
+
+// hex(x), implemented as "%#x" % x (the QSTR encodes the format string).
+STATIC mp_obj_t mp_builtin_hex(mp_obj_t o_in) {
+    return mp_binary_op(MP_BINARY_OP_MODULO, MP_OBJ_NEW_QSTR(MP_QSTR__percent__hash_x), o_in);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_hex_obj, mp_builtin_hex);
+
+// iter(obj): thin wrapper over the runtime's iterator constructor.
+STATIC mp_obj_t mp_builtin_iter(mp_obj_t o_in) {
+    return mp_getiter(o_in);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_iter_obj, mp_builtin_iter);
+
+#if MICROPY_PY_BUILTINS_MIN_MAX
+
+// Common implementation of min() and max().  `op` is MP_BINARY_OP_LESS for
+// min and MP_BINARY_OP_MORE for max.  Supports key= and (for the single
+// iterable form) default= keyword arguments.
+STATIC mp_obj_t mp_builtin_min_max(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs, mp_uint_t op) {
+    mp_map_elem_t *key_elem = mp_map_lookup(kwargs, MP_OBJ_NEW_QSTR(MP_QSTR_key), MP_MAP_LOOKUP);
+    mp_map_elem_t *default_elem;
+    // key_fn == MP_OBJ_NULL means "compare the items themselves"
+    mp_obj_t key_fn = key_elem == NULL ? MP_OBJ_NULL : key_elem->value;
+    if (n_args == 1) {
+        // given an iterable
+        mp_obj_t iterable = mp_getiter(args[0]);
+        mp_obj_t best_key = MP_OBJ_NULL;
+        mp_obj_t best_obj = MP_OBJ_NULL;
+        mp_obj_t item;
+        while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+            mp_obj_t key = key_fn == MP_OBJ_NULL ? item : mp_call_function_1(key_fn, item);
+            // first item always wins; later items win only if op holds
+            if (best_obj == MP_OBJ_NULL || (mp_binary_op(op, key, best_key) == mp_const_true)) {
+                best_key = key;
+                best_obj = item;
+            }
+        }
+        if (best_obj == MP_OBJ_NULL) {
+            // empty iterable: fall back to default= or raise ValueError
+            default_elem = mp_map_lookup(kwargs, MP_OBJ_NEW_QSTR(MP_QSTR_default), MP_MAP_LOOKUP);
+            if (default_elem != NULL) {
+                best_obj = default_elem->value;
+            } else {
+                nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "arg is an empty sequence"));
+            }
+        }
+        return best_obj;
+    } else {
+        // given many args; n_args >= 2 so the result is never empty
+        mp_obj_t best_key = MP_OBJ_NULL;
+        mp_obj_t best_obj = MP_OBJ_NULL;
+        for (mp_uint_t i = 0; i < n_args; i++) {
+            mp_obj_t key = key_fn == MP_OBJ_NULL ? args[i] : mp_call_function_1(key_fn, args[i]);
+            if (best_obj == MP_OBJ_NULL || (mp_binary_op(op, key, best_key) == mp_const_true)) {
+                best_key = key;
+                best_obj = args[i];
+            }
+        }
+        return best_obj;
+    }
+}
+
+// max(...): shared helper with the "greater than" comparison.
+STATIC mp_obj_t mp_builtin_max(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
+    return mp_builtin_min_max(n_args, args, kwargs, MP_BINARY_OP_MORE);
+}
+MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_max_obj, 1, mp_builtin_max);
+
+// min(...): shared helper with the "less than" comparison.
+STATIC mp_obj_t mp_builtin_min(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
+    return mp_builtin_min_max(n_args, args, kwargs, MP_BINARY_OP_LESS);
+}
+MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_min_obj, 1, mp_builtin_min);
+
+#endif
+
+// next(iterator): advance the iterator; raise StopIteration when exhausted.
+STATIC mp_obj_t mp_builtin_next(mp_obj_t o) {
+    mp_obj_t item = mp_iternext_allow_raise(o);
+    if (item == MP_OBJ_STOP_ITERATION) {
+        // exhausted: surface the sentinel as a real Python exception
+        nlr_raise(mp_obj_new_exception(&mp_type_StopIteration));
+    }
+    return item;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_next_obj, mp_builtin_next);
+
+// oct(x), implemented as "%#o" % x (the QSTR encodes the format string).
+STATIC mp_obj_t mp_builtin_oct(mp_obj_t o_in) {
+    return mp_binary_op(MP_BINARY_OP_MODULO, MP_OBJ_NEW_QSTR(MP_QSTR__percent__hash_o), o_in);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_oct_obj, mp_builtin_oct);
+
+// ord(c): return the integer code point of a 1-character string, or the
+// byte value of a 1-byte bytes object.  Raises TypeError otherwise.
+STATIC mp_obj_t mp_builtin_ord(mp_obj_t o_in) {
+    mp_uint_t len;
+    const char *str = mp_obj_str_get_data(o_in, &len);
+    #if MICROPY_PY_BUILTINS_STR_UNICODE
+    if (MP_OBJ_IS_STR(o_in)) {
+        // length in characters, not bytes
+        len = unichar_charlen(str, len);
+        if (len == 1) {
+            if (!UTF8_IS_NONASCII(*str)) {
+                // single ASCII byte: shared fast path with the bytes case
+                goto return_first_byte;
+            }
+            // decode one multi-byte UTF-8 sequence: strip the length bits
+            // from the lead byte, then fold in each continuation byte
+            mp_int_t ord = *str++ & 0x7F;
+            for (mp_int_t mask = 0x40; ord & mask; mask >>= 1) {
+                ord &= ~mask;
+            }
+            while (UTF8_IS_CONT(*str)) {
+                ord = (ord << 6) | (*str++ & 0x3F);
+            }
+            return mp_obj_new_int(ord);
+        }
+    } else {
+        // a bytes object
+        if (len == 1) {
+        return_first_byte:
+            return MP_OBJ_NEW_SMALL_INT(((const byte*)str)[0]);
+        }
+    }
+    #else
+    if (len == 1) {
+        // don't sign extend when converting to ord
+        return mp_obj_new_int(((const byte*)str)[0]);
+    }
+    #endif
+
+    // wrong length: error message detail depends on the reporting level
+    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+            "ord expects a character"));
+    } else {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+            "ord() expected a character, but string of length %d found", (int)len));
+    }
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_ord_obj, mp_builtin_ord);
+
+// pow(x, y) or pow(x, y, z); the 3-arg form is computed as (x ** y) % z.
+STATIC mp_obj_t mp_builtin_pow(size_t n_args, const mp_obj_t *args) {
+    assert(2 <= n_args && n_args <= 3);
+    mp_obj_t result = mp_binary_op(MP_BINARY_OP_POWER, args[0], args[1]);
+    if (n_args == 3) {
+        // TODO optimise: modular exponentiation would avoid the full power
+        result = mp_binary_op(MP_BINARY_OP_MODULO, result, args[2]);
+    }
+    return result;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_pow_obj, 2, 3, mp_builtin_pow);
+
+// print(*args, sep=' ', end='\n'[, file=...]): the file= keyword is only
+// honoured when MICROPY_PY_IO is enabled; otherwise output goes to the
+// platform print stream.
+STATIC mp_obj_t mp_builtin_print(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
+    mp_map_elem_t *sep_elem = mp_map_lookup(kwargs, MP_OBJ_NEW_QSTR(MP_QSTR_sep), MP_MAP_LOOKUP);
+    mp_map_elem_t *end_elem = mp_map_lookup(kwargs, MP_OBJ_NEW_QSTR(MP_QSTR_end), MP_MAP_LOOKUP);
+    // defaults match CPython: sep=" ", end="\n"; None also selects the default
+    const char *sep_data = " ";
+    mp_uint_t sep_len = 1;
+    const char *end_data = "\n";
+    mp_uint_t end_len = 1;
+    if (sep_elem != NULL && sep_elem->value != mp_const_none) {
+        sep_data = mp_obj_str_get_data(sep_elem->value, &sep_len);
+    }
+    if (end_elem != NULL && end_elem->value != mp_const_none) {
+        end_data = mp_obj_str_get_data(end_elem->value, &end_len);
+    }
+    #if MICROPY_PY_IO
+    void *stream_obj = &mp_sys_stdout_obj;
+    mp_map_elem_t *file_elem = mp_map_lookup(kwargs, MP_OBJ_NEW_QSTR(MP_QSTR_file), MP_MAP_LOOKUP);
+    if (file_elem != NULL && file_elem->value != mp_const_none) {
+        stream_obj = MP_OBJ_TO_PTR(file_elem->value); // XXX may not be a concrete object
+    }
+
+    mp_print_t print = {stream_obj, mp_stream_write_adaptor};
+    #endif
+    // print each argument, separated by sep, then finish with end
+    for (mp_uint_t i = 0; i < n_args; i++) {
+        if (i > 0) {
+            #if MICROPY_PY_IO
+            mp_stream_write_adaptor(stream_obj, sep_data, sep_len);
+            #else
+            mp_print_strn(&mp_plat_print, sep_data, sep_len, 0, 0, 0);
+            #endif
+        }
+        #if MICROPY_PY_IO
+        mp_obj_print_helper(&print, args[i], PRINT_STR);
+        #else
+        mp_obj_print_helper(&mp_plat_print, args[i], PRINT_STR);
+        #endif
+    }
+    #if MICROPY_PY_IO
+    mp_stream_write_adaptor(stream_obj, end_data, end_len);
+    #else
+    mp_print_strn(&mp_plat_print, end_data, end_len, 0, 0, 0);
+    #endif
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_print_obj, 0, mp_builtin_print);
+
+// REPL result printer: prints the repr of a non-None value and, when
+// builtins can be overridden, stores it into the builtin variable `_`.
+STATIC mp_obj_t mp_builtin___repl_print__(mp_obj_t o) {
+    if (o != mp_const_none) {
+        #if MICROPY_PY_IO
+        mp_obj_print_helper(&mp_sys_stdout_print, o, PRINT_REPR);
+        mp_print_str(&mp_sys_stdout_print, "\n");
+        #else
+        mp_obj_print_helper(&mp_plat_print, o, PRINT_REPR);
+        mp_print_str(&mp_plat_print, "\n");
+        #endif
+        #if MICROPY_CAN_OVERRIDE_BUILTINS
+        // store result in "_" via the module attr protocol (store form)
+        mp_obj_t dest[2] = {MP_OBJ_SENTINEL, o};
+        mp_type_module.attr(MP_OBJ_FROM_PTR(&mp_module_builtins), MP_QSTR__, dest);
+        #endif
+    }
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin___repl_print___obj, mp_builtin___repl_print__);
+
+// repr(obj): render the object's repr into a growable string buffer.
+STATIC mp_obj_t mp_builtin_repr(mp_obj_t o_in) {
+    vstr_t buf;
+    mp_print_t printer;
+    vstr_init_print(&buf, 16, &printer);
+    mp_obj_print_helper(&printer, o_in, PRINT_REPR);
+    return mp_obj_new_str_from_vstr(&mp_type_str, &buf);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_repr_obj, mp_builtin_repr);
+
+// round(x[, ndigits]).  Ints are returned unchanged.  With ndigits, the
+// float is scaled, rounded and rescaled, returning a float.  Without
+// ndigits, the result is an int with ties rounded to the even neighbour
+// (banker's rounding, matching CPython).
+STATIC mp_obj_t mp_builtin_round(size_t n_args, const mp_obj_t *args) {
+    mp_obj_t o_in = args[0];
+    if (MP_OBJ_IS_INT(o_in)) {
+        // rounding an int is a no-op for any number of digits
+        return o_in;
+    }
+#if MICROPY_PY_BUILTINS_FLOAT
+    if (n_args > 1) {
+        mp_int_t num_dig = mp_obj_get_int(args[1]);
+        mp_float_t val = mp_obj_get_float(o_in);
+        mp_float_t mult = MICROPY_FLOAT_C_FUN(pow)(10, num_dig);
+        // TODO may lead to overflow
+        mp_float_t rounded = MICROPY_FLOAT_C_FUN(round)(val * mult) / mult;
+        return mp_obj_new_float(rounded);
+    }
+    mp_float_t val = mp_obj_get_float(o_in);
+    mp_float_t rounded = MICROPY_FLOAT_C_FUN(round)(val);
+    mp_int_t r = rounded;
+    // round() rounds halfway cases away from zero; adjust so that values
+    // exactly halfway between two ints round to the even one instead
+    if (val - rounded == 0.5) {
+        r = (r + 1) & (~1);
+    } else if (val - rounded == -0.5) {
+        r &= ~1;
+    }
+    // NOTE(review): removed an unreachable `if (n_args > 1) return
+    // mp_obj_new_float(r);` that followed here -- the n_args > 1 case
+    // already returned above, so it could never execute.
+#else
+    mp_int_t r = mp_obj_get_int(o_in);
+#endif
+    return mp_obj_new_int(r);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_round_obj, 1, 2, mp_builtin_round);
+
+// sum(iterable[, start]): fold with MP_BINARY_OP_ADD; start defaults to 0.
+STATIC mp_obj_t mp_builtin_sum(size_t n_args, const mp_obj_t *args) {
+    assert(1 <= n_args && n_args <= 2);
+    mp_obj_t total = (n_args == 2) ? args[1] : MP_OBJ_NEW_SMALL_INT(0);
+    mp_obj_t iter = mp_getiter(args[0]);
+    for (mp_obj_t item; (item = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION;) {
+        total = mp_binary_op(MP_BINARY_OP_ADD, total, item);
+    }
+    return total;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_sum_obj, 1, 2, mp_builtin_sum);
+
+// sorted(iterable, *, key=..., reverse=...): build a new list from the
+// iterable and sort it in place.  The key function must be passed by
+// keyword (as in CPython 3).
+STATIC mp_obj_t mp_builtin_sorted(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
+    assert(n_args >= 1);
+    if (n_args > 1) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+            "must use keyword argument for key function"));
+    }
+    // list(iterable), then delegate keyword handling to list.sort
+    mp_obj_t self = mp_type_list.make_new(&mp_type_list, 1, 0, args);
+    mp_obj_list_sort(1, &self, kwargs);
+
+    return self;
+}
+MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_sorted_obj, 1, mp_builtin_sorted);
+
+// See mp_load_attr() if making any changes
+// Load attribute `attr` from `base`, returning `defval` when it is missing
+// (defval == MP_OBJ_NULL means "raise AttributeError instead").
+// See mp_load_attr() if making any changes
+STATIC inline mp_obj_t mp_load_attr_default(mp_obj_t base, qstr attr, mp_obj_t defval) {
+    mp_obj_t dest[2];
+    // use load_method, raising or not raising exception
+    ((defval == MP_OBJ_NULL) ? mp_load_method : mp_load_method_maybe)(base, attr, dest);
+    if (dest[0] == MP_OBJ_NULL) {
+        // attribute not found and no exception requested
+        return defval;
+    } else if (dest[1] == MP_OBJ_NULL) {
+        // load_method returned just a normal attribute
+        return dest[0];
+    } else {
+        // load_method returned a method, so build a bound method object
+        return mp_obj_new_bound_meth(dest[0], dest[1]);
+    }
+}
+
+// getattr(obj, name[, default]); MP_OBJ_NULL as default means "raise".
+STATIC mp_obj_t mp_builtin_getattr(size_t n_args, const mp_obj_t *args) {
+    mp_obj_t defval = (n_args == 3) ? args[2] : MP_OBJ_NULL;
+    return mp_load_attr_default(args[0], mp_obj_str_get_qstr(args[1]), defval);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_getattr_obj, 2, 3, mp_builtin_getattr);
+
+// setattr(obj, name, value): name is interned to a qstr, then stored.
+STATIC mp_obj_t mp_builtin_setattr(mp_obj_t base, mp_obj_t attr, mp_obj_t value) {
+    mp_store_attr(base, mp_obj_str_get_qstr(attr), value);
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_3(mp_builtin_setattr_obj, mp_builtin_setattr);
+
+// hasattr(obj, name): true if the attribute lookup succeeds.
+STATIC mp_obj_t mp_builtin_hasattr(mp_obj_t object_in, mp_obj_t attr_in) {
+    qstr attr = mp_obj_str_get_qstr(attr_in);
+
+    mp_obj_t dest[2];
+    // TODO: https://docs.python.org/3/library/functions.html?highlight=hasattr#hasattr
+    // explicitly says "This is implemented by calling getattr(object, name) and seeing
+    // whether it raises an AttributeError or not.", so we should explicitly wrap this
+    // in nlr_push and handle exception.
+    mp_load_method_maybe(object_in, attr, dest);
+
+    // dest[0] is MP_OBJ_NULL when the attribute was not found
+    return mp_obj_new_bool(dest[0] != MP_OBJ_NULL);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_builtin_hasattr_obj, mp_builtin_hasattr);
+
+// globals(): return the current global namespace dict.
+STATIC mp_obj_t mp_builtin_globals(void) {
+    return MP_OBJ_FROM_PTR(mp_globals_get());
+}
+MP_DEFINE_CONST_FUN_OBJ_0(mp_builtin_globals_obj, mp_builtin_globals);
+
+// locals(): return the current local namespace dict.
+STATIC mp_obj_t mp_builtin_locals(void) {
+    return MP_OBJ_FROM_PTR(mp_locals_get());
+}
+MP_DEFINE_CONST_FUN_OBJ_0(mp_builtin_locals_obj, mp_builtin_locals);
+
+// These are defined in terms of MicroPython API functions right away
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_id_obj, mp_obj_id);
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_len_obj, mp_obj_len);
+
+// ROM table of the `builtins` module: core dunders, built-in types,
+// singletons, user-facing functions and exception types.  Entries guarded
+// by #if are only present when the corresponding feature is compiled in.
+STATIC const mp_rom_map_elem_t mp_module_builtins_globals_table[] = {
+    // built-in core functions
+    { MP_ROM_QSTR(MP_QSTR___build_class__), MP_ROM_PTR(&mp_builtin___build_class___obj) },
+    { MP_ROM_QSTR(MP_QSTR___import__), MP_ROM_PTR(&mp_builtin___import___obj) },
+    { MP_ROM_QSTR(MP_QSTR___repl_print__), MP_ROM_PTR(&mp_builtin___repl_print___obj) },
+
+    // built-in types
+    { MP_ROM_QSTR(MP_QSTR_bool), MP_ROM_PTR(&mp_type_bool) },
+    { MP_ROM_QSTR(MP_QSTR_bytes), MP_ROM_PTR(&mp_type_bytes) },
+    #if MICROPY_PY_BUILTINS_BYTEARRAY
+    { MP_ROM_QSTR(MP_QSTR_bytearray), MP_ROM_PTR(&mp_type_bytearray) },
+    #endif
+    #if MICROPY_PY_BUILTINS_COMPLEX
+    { MP_ROM_QSTR(MP_QSTR_complex), MP_ROM_PTR(&mp_type_complex) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_dict), MP_ROM_PTR(&mp_type_dict) },
+    #if MICROPY_PY_BUILTINS_ENUMERATE
+    { MP_ROM_QSTR(MP_QSTR_enumerate), MP_ROM_PTR(&mp_type_enumerate) },
+    #endif
+    #if MICROPY_PY_BUILTINS_FILTER
+    { MP_ROM_QSTR(MP_QSTR_filter), MP_ROM_PTR(&mp_type_filter) },
+    #endif
+    #if MICROPY_PY_BUILTINS_FLOAT
+    { MP_ROM_QSTR(MP_QSTR_float), MP_ROM_PTR(&mp_type_float) },
+    #endif
+    #if MICROPY_PY_BUILTINS_SET && MICROPY_PY_BUILTINS_FROZENSET
+    { MP_ROM_QSTR(MP_QSTR_frozenset), MP_ROM_PTR(&mp_type_frozenset) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_int), MP_ROM_PTR(&mp_type_int) },
+    { MP_ROM_QSTR(MP_QSTR_list), MP_ROM_PTR(&mp_type_list) },
+    { MP_ROM_QSTR(MP_QSTR_map), MP_ROM_PTR(&mp_type_map) },
+    #if MICROPY_PY_BUILTINS_MEMORYVIEW
+    { MP_ROM_QSTR(MP_QSTR_memoryview), MP_ROM_PTR(&mp_type_memoryview) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_object), MP_ROM_PTR(&mp_type_object) },
+    #if MICROPY_PY_BUILTINS_PROPERTY
+    { MP_ROM_QSTR(MP_QSTR_property), MP_ROM_PTR(&mp_type_property) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_range), MP_ROM_PTR(&mp_type_range) },
+    #if MICROPY_PY_BUILTINS_REVERSED
+    { MP_ROM_QSTR(MP_QSTR_reversed), MP_ROM_PTR(&mp_type_reversed) },
+    #endif
+    #if MICROPY_PY_BUILTINS_SET
+    { MP_ROM_QSTR(MP_QSTR_set), MP_ROM_PTR(&mp_type_set) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_str), MP_ROM_PTR(&mp_type_str) },
+    { MP_ROM_QSTR(MP_QSTR_super), MP_ROM_PTR(&mp_type_super) },
+    { MP_ROM_QSTR(MP_QSTR_tuple), MP_ROM_PTR(&mp_type_tuple) },
+    { MP_ROM_QSTR(MP_QSTR_type), MP_ROM_PTR(&mp_type_type) },
+    { MP_ROM_QSTR(MP_QSTR_zip), MP_ROM_PTR(&mp_type_zip) },
+
+    { MP_ROM_QSTR(MP_QSTR_classmethod), MP_ROM_PTR(&mp_type_classmethod) },
+    { MP_ROM_QSTR(MP_QSTR_staticmethod), MP_ROM_PTR(&mp_type_staticmethod) },
+
+    // built-in objects
+    { MP_ROM_QSTR(MP_QSTR_Ellipsis), MP_ROM_PTR(&mp_const_ellipsis_obj) },
+    #if MICROPY_PY_BUILTINS_NOTIMPLEMENTED
+    { MP_ROM_QSTR(MP_QSTR_NotImplemented), MP_ROM_PTR(&mp_const_notimplemented_obj) },
+    #endif
+
+    // built-in user functions
+    { MP_ROM_QSTR(MP_QSTR_abs), MP_ROM_PTR(&mp_builtin_abs_obj) },
+    { MP_ROM_QSTR(MP_QSTR_all), MP_ROM_PTR(&mp_builtin_all_obj) },
+    { MP_ROM_QSTR(MP_QSTR_any), MP_ROM_PTR(&mp_builtin_any_obj) },
+    { MP_ROM_QSTR(MP_QSTR_bin), MP_ROM_PTR(&mp_builtin_bin_obj) },
+    { MP_ROM_QSTR(MP_QSTR_callable), MP_ROM_PTR(&mp_builtin_callable_obj) },
+    #if MICROPY_PY_BUILTINS_COMPILE
+    { MP_ROM_QSTR(MP_QSTR_compile), MP_ROM_PTR(&mp_builtin_compile_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_chr), MP_ROM_PTR(&mp_builtin_chr_obj) },
+    { MP_ROM_QSTR(MP_QSTR_dir), MP_ROM_PTR(&mp_builtin_dir_obj) },
+    { MP_ROM_QSTR(MP_QSTR_divmod), MP_ROM_PTR(&mp_builtin_divmod_obj) },
+    #if MICROPY_PY_BUILTINS_EVAL_EXEC
+    { MP_ROM_QSTR(MP_QSTR_eval), MP_ROM_PTR(&mp_builtin_eval_obj) },
+    { MP_ROM_QSTR(MP_QSTR_exec), MP_ROM_PTR(&mp_builtin_exec_obj) },
+    #endif
+    #if MICROPY_PY_BUILTINS_EXECFILE
+    { MP_ROM_QSTR(MP_QSTR_execfile), MP_ROM_PTR(&mp_builtin_execfile_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_getattr), MP_ROM_PTR(&mp_builtin_getattr_obj) },
+    { MP_ROM_QSTR(MP_QSTR_setattr), MP_ROM_PTR(&mp_builtin_setattr_obj) },
+    { MP_ROM_QSTR(MP_QSTR_globals), MP_ROM_PTR(&mp_builtin_globals_obj) },
+    { MP_ROM_QSTR(MP_QSTR_hasattr), MP_ROM_PTR(&mp_builtin_hasattr_obj) },
+    { MP_ROM_QSTR(MP_QSTR_hash), MP_ROM_PTR(&mp_builtin_hash_obj) },
+    { MP_ROM_QSTR(MP_QSTR_hex), MP_ROM_PTR(&mp_builtin_hex_obj) },
+    { MP_ROM_QSTR(MP_QSTR_id), MP_ROM_PTR(&mp_builtin_id_obj) },
+    { MP_ROM_QSTR(MP_QSTR_isinstance), MP_ROM_PTR(&mp_builtin_isinstance_obj) },
+    { MP_ROM_QSTR(MP_QSTR_issubclass), MP_ROM_PTR(&mp_builtin_issubclass_obj) },
+    { MP_ROM_QSTR(MP_QSTR_iter), MP_ROM_PTR(&mp_builtin_iter_obj) },
+    { MP_ROM_QSTR(MP_QSTR_len), MP_ROM_PTR(&mp_builtin_len_obj) },
+    { MP_ROM_QSTR(MP_QSTR_locals), MP_ROM_PTR(&mp_builtin_locals_obj) },
+    #if MICROPY_PY_BUILTINS_MIN_MAX
+    { MP_ROM_QSTR(MP_QSTR_max), MP_ROM_PTR(&mp_builtin_max_obj) },
+    { MP_ROM_QSTR(MP_QSTR_min), MP_ROM_PTR(&mp_builtin_min_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_next), MP_ROM_PTR(&mp_builtin_next_obj) },
+    { MP_ROM_QSTR(MP_QSTR_oct), MP_ROM_PTR(&mp_builtin_oct_obj) },
+    { MP_ROM_QSTR(MP_QSTR_ord), MP_ROM_PTR(&mp_builtin_ord_obj) },
+    { MP_ROM_QSTR(MP_QSTR_pow), MP_ROM_PTR(&mp_builtin_pow_obj) },
+    { MP_ROM_QSTR(MP_QSTR_print), MP_ROM_PTR(&mp_builtin_print_obj) },
+    { MP_ROM_QSTR(MP_QSTR_repr), MP_ROM_PTR(&mp_builtin_repr_obj) },
+    { MP_ROM_QSTR(MP_QSTR_round), MP_ROM_PTR(&mp_builtin_round_obj) },
+    { MP_ROM_QSTR(MP_QSTR_sorted), MP_ROM_PTR(&mp_builtin_sorted_obj) },
+    { MP_ROM_QSTR(MP_QSTR_sum), MP_ROM_PTR(&mp_builtin_sum_obj) },
+
+    // built-in exceptions
+    { MP_ROM_QSTR(MP_QSTR_BaseException), MP_ROM_PTR(&mp_type_BaseException) },
+    { MP_ROM_QSTR(MP_QSTR_ArithmeticError), MP_ROM_PTR(&mp_type_ArithmeticError) },
+    { MP_ROM_QSTR(MP_QSTR_AssertionError), MP_ROM_PTR(&mp_type_AssertionError) },
+    { MP_ROM_QSTR(MP_QSTR_AttributeError), MP_ROM_PTR(&mp_type_AttributeError) },
+    { MP_ROM_QSTR(MP_QSTR_EOFError), MP_ROM_PTR(&mp_type_EOFError) },
+    { MP_ROM_QSTR(MP_QSTR_Exception), MP_ROM_PTR(&mp_type_Exception) },
+    { MP_ROM_QSTR(MP_QSTR_GeneratorExit), MP_ROM_PTR(&mp_type_GeneratorExit) },
+    { MP_ROM_QSTR(MP_QSTR_ImportError), MP_ROM_PTR(&mp_type_ImportError) },
+    { MP_ROM_QSTR(MP_QSTR_IndentationError), MP_ROM_PTR(&mp_type_IndentationError) },
+    { MP_ROM_QSTR(MP_QSTR_IndexError), MP_ROM_PTR(&mp_type_IndexError) },
+    { MP_ROM_QSTR(MP_QSTR_KeyboardInterrupt), MP_ROM_PTR(&mp_type_KeyboardInterrupt) },
+    { MP_ROM_QSTR(MP_QSTR_KeyError), MP_ROM_PTR(&mp_type_KeyError) },
+    { MP_ROM_QSTR(MP_QSTR_LookupError), MP_ROM_PTR(&mp_type_LookupError) },
+    { MP_ROM_QSTR(MP_QSTR_MemoryError), MP_ROM_PTR(&mp_type_MemoryError) },
+    { MP_ROM_QSTR(MP_QSTR_NameError), MP_ROM_PTR(&mp_type_NameError) },
+    { MP_ROM_QSTR(MP_QSTR_NotImplementedError), MP_ROM_PTR(&mp_type_NotImplementedError) },
+    { MP_ROM_QSTR(MP_QSTR_OSError), MP_ROM_PTR(&mp_type_OSError) },
+    { MP_ROM_QSTR(MP_QSTR_OverflowError), MP_ROM_PTR(&mp_type_OverflowError) },
+    { MP_ROM_QSTR(MP_QSTR_RuntimeError), MP_ROM_PTR(&mp_type_RuntimeError) },
+    { MP_ROM_QSTR(MP_QSTR_StopIteration), MP_ROM_PTR(&mp_type_StopIteration) },
+    { MP_ROM_QSTR(MP_QSTR_SyntaxError), MP_ROM_PTR(&mp_type_SyntaxError) },
+    { MP_ROM_QSTR(MP_QSTR_SystemExit), MP_ROM_PTR(&mp_type_SystemExit) },
+    { MP_ROM_QSTR(MP_QSTR_TypeError), MP_ROM_PTR(&mp_type_TypeError) },
+    #if MICROPY_PY_BUILTINS_STR_UNICODE
+    { MP_ROM_QSTR(MP_QSTR_UnicodeError), MP_ROM_PTR(&mp_type_UnicodeError) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_ValueError), MP_ROM_PTR(&mp_type_ValueError) },
+    #if MICROPY_EMIT_NATIVE
+    { MP_ROM_QSTR(MP_QSTR_ViperTypeError), MP_ROM_PTR(&mp_type_ViperTypeError) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_ZeroDivisionError), MP_ROM_PTR(&mp_type_ZeroDivisionError) },
+    // Somehow CPython managed to have OverflowError not inherit from ValueError ;-/
+    // TODO: For MICROPY_CPYTHON_COMPAT==0 use ValueError to avoid exc proliferation
+
+    // Extra builtins as defined by a port
+    MICROPY_PORT_BUILTINS
+};
+
+MP_DEFINE_CONST_DICT(mp_module_builtins_globals, mp_module_builtins_globals_table);
+
+// Module object; globals dict lives in ROM, hence the const-removing cast.
+const mp_obj_module_t mp_module_builtins = {
+    .base = { &mp_type_module },
+    .name = MP_QSTR_builtins,
+    .globals = (mp_obj_dict_t*)&mp_module_builtins_globals,
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/modcmath.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,167 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/builtin.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT && MICROPY_PY_BUILTINS_COMPLEX && MICROPY_PY_CMATH
+
+#include <math.h>
+
+/// \module cmath - mathematical functions for complex numbers
+///
+/// The `cmath` module provides some basic mathematical functions for
+/// working with complex numbers.
+
+/// \function phase(z)
+/// Returns the phase of the number `z`, in the range (-pi, +pi].
+STATIC mp_obj_t mp_cmath_phase(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    // phase(z) = atan2(im, re)
+    return mp_obj_new_float(MICROPY_FLOAT_C_FUN(atan2)(imag, real));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_phase_obj, mp_cmath_phase);
+
+/// \function polar(z)
+/// Returns, as a tuple, the polar form of `z`: (modulus, phase).
+STATIC mp_obj_t mp_cmath_polar(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    mp_obj_t tuple[2] = {
+        // |z| = sqrt(re*re + im*im), phase = atan2(im, re)
+        mp_obj_new_float(MICROPY_FLOAT_C_FUN(sqrt)(real*real + imag*imag)),
+        mp_obj_new_float(MICROPY_FLOAT_C_FUN(atan2)(imag, real)),
+    };
+    return mp_obj_new_tuple(2, tuple);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_polar_obj, mp_cmath_polar);
+
+/// \function rect(r, phi)
+/// Returns the complex number with modulus `r` and phase `phi`:
+/// r*cos(phi) + r*sin(phi)*j.
+STATIC mp_obj_t mp_cmath_rect(mp_obj_t r_obj, mp_obj_t phi_obj) {
+    mp_float_t r = mp_obj_get_float(r_obj);
+    mp_float_t phi = mp_obj_get_float(phi_obj);
+    return mp_obj_new_complex(r * MICROPY_FLOAT_C_FUN(cos)(phi), r * MICROPY_FLOAT_C_FUN(sin)(phi));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(mp_cmath_rect_obj, mp_cmath_rect);
+
+/// \function exp(z)
+/// Return the exponential of `z`:
+/// exp(re) * (cos(im) + sin(im)*j).
+STATIC mp_obj_t mp_cmath_exp(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    mp_float_t exp_real = MICROPY_FLOAT_C_FUN(exp)(real);
+    return mp_obj_new_complex(exp_real * MICROPY_FLOAT_C_FUN(cos)(imag), exp_real * MICROPY_FLOAT_C_FUN(sin)(imag));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_exp_obj, mp_cmath_exp);
+
+/// \function log(z)
+/// Return the natural logarithm of `z`. The branch cut is along the negative real axis.
+// TODO can take second argument, being the base
+STATIC mp_obj_t mp_cmath_log(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    // log(z) = ln|z| + phase(z)*j; 0.5*ln(re^2+im^2) == ln(sqrt(...)) = ln|z|
+    return mp_obj_new_complex(0.5 * MICROPY_FLOAT_C_FUN(log)(real*real + imag*imag), MICROPY_FLOAT_C_FUN(atan2)(imag, real));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_log_obj, mp_cmath_log);
+
+#if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+/// \function log10(z)
+/// Return the base-10 logarithm of `z`. The branch cut is along the negative real axis.
+STATIC mp_obj_t mp_cmath_log10(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    // log10(z) = log10|z| + phase(z)/ln(10)*j; 0.4342944819032518 = 1/ln(10)
+    return mp_obj_new_complex(0.5 * MICROPY_FLOAT_C_FUN(log10)(real*real + imag*imag), 0.4342944819032518 * MICROPY_FLOAT_C_FUN(atan2)(imag, real));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_log10_obj, mp_cmath_log10);
+#endif
+
+/// \function sqrt(z)
+/// Return the square-root of `z`.
+STATIC mp_obj_t mp_cmath_sqrt(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    // in polar form: sqrt(z) has modulus sqrt(|z|) = (re^2+im^2)^(1/4)
+    // and half the phase of z
+    mp_float_t sqrt_abs = MICROPY_FLOAT_C_FUN(pow)(real*real + imag*imag, 0.25);
+    mp_float_t theta = 0.5 * MICROPY_FLOAT_C_FUN(atan2)(imag, real);
+    return mp_obj_new_complex(sqrt_abs * MICROPY_FLOAT_C_FUN(cos)(theta), sqrt_abs * MICROPY_FLOAT_C_FUN(sin)(theta));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_sqrt_obj, mp_cmath_sqrt);
+
+/// \function cos(z)
+/// Return the cosine of `z`:
+/// cos(re)*cosh(im) - sin(re)*sinh(im)*j.
+STATIC mp_obj_t mp_cmath_cos(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    return mp_obj_new_complex(MICROPY_FLOAT_C_FUN(cos)(real) * MICROPY_FLOAT_C_FUN(cosh)(imag), -MICROPY_FLOAT_C_FUN(sin)(real) * MICROPY_FLOAT_C_FUN(sinh)(imag));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_cos_obj, mp_cmath_cos);
+
+/// \function sin(z)
+/// Return the sine of `z`:
+/// sin(re)*cosh(im) + cos(re)*sinh(im)*j.
+STATIC mp_obj_t mp_cmath_sin(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    return mp_obj_new_complex(MICROPY_FLOAT_C_FUN(sin)(real) * MICROPY_FLOAT_C_FUN(cosh)(imag), MICROPY_FLOAT_C_FUN(cos)(real) * MICROPY_FLOAT_C_FUN(sinh)(imag));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_sin_obj, mp_cmath_sin);
+
+// Globals for the `cmath` module.  Commented-out entries are CPython
+// functions not (yet) implemented here.
+STATIC const mp_rom_map_elem_t mp_module_cmath_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_cmath) },
+    { MP_ROM_QSTR(MP_QSTR_e), mp_const_float_e },
+    { MP_ROM_QSTR(MP_QSTR_pi), mp_const_float_pi },
+    { MP_ROM_QSTR(MP_QSTR_phase), MP_ROM_PTR(&mp_cmath_phase_obj) },
+    { MP_ROM_QSTR(MP_QSTR_polar), MP_ROM_PTR(&mp_cmath_polar_obj) },
+    { MP_ROM_QSTR(MP_QSTR_rect), MP_ROM_PTR(&mp_cmath_rect_obj) },
+    { MP_ROM_QSTR(MP_QSTR_exp), MP_ROM_PTR(&mp_cmath_exp_obj) },
+    { MP_ROM_QSTR(MP_QSTR_log), MP_ROM_PTR(&mp_cmath_log_obj) },
+    #if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+    { MP_ROM_QSTR(MP_QSTR_log10), MP_ROM_PTR(&mp_cmath_log10_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_sqrt), MP_ROM_PTR(&mp_cmath_sqrt_obj) },
+    //{ MP_ROM_QSTR(MP_QSTR_acos), MP_ROM_PTR(&mp_cmath_acos_obj) },
+    //{ MP_ROM_QSTR(MP_QSTR_asin), MP_ROM_PTR(&mp_cmath_asin_obj) },
+    //{ MP_ROM_QSTR(MP_QSTR_atan), MP_ROM_PTR(&mp_cmath_atan_obj) },
+    { MP_ROM_QSTR(MP_QSTR_cos), MP_ROM_PTR(&mp_cmath_cos_obj) },
+    { MP_ROM_QSTR(MP_QSTR_sin), MP_ROM_PTR(&mp_cmath_sin_obj) },
+    //{ MP_ROM_QSTR(MP_QSTR_tan), MP_ROM_PTR(&mp_cmath_tan_obj) },
+    //{ MP_ROM_QSTR(MP_QSTR_acosh), MP_ROM_PTR(&mp_cmath_acosh_obj) },
+    //{ MP_ROM_QSTR(MP_QSTR_asinh), MP_ROM_PTR(&mp_cmath_asinh_obj) },
+    //{ MP_ROM_QSTR(MP_QSTR_atanh), MP_ROM_PTR(&mp_cmath_atanh_obj) },
+    //{ MP_ROM_QSTR(MP_QSTR_cosh), MP_ROM_PTR(&mp_cmath_cosh_obj) },
+    //{ MP_ROM_QSTR(MP_QSTR_sinh), MP_ROM_PTR(&mp_cmath_sinh_obj) },
+    //{ MP_ROM_QSTR(MP_QSTR_tanh), MP_ROM_PTR(&mp_cmath_tanh_obj) },
+    //{ MP_ROM_QSTR(MP_QSTR_isfinite), MP_ROM_PTR(&mp_cmath_isfinite_obj) },
+    //{ MP_ROM_QSTR(MP_QSTR_isinf), MP_ROM_PTR(&mp_cmath_isinf_obj) },
+    //{ MP_ROM_QSTR(MP_QSTR_isnan), MP_ROM_PTR(&mp_cmath_isnan_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_cmath_globals, mp_module_cmath_globals_table);
+
+// Module object; globals dict lives in ROM, hence the const-removing cast.
+const mp_obj_module_t mp_module_cmath = {
+    .base = { &mp_type_module },
+    .name = MP_QSTR_cmath,
+    .globals = (mp_obj_dict_t*)&mp_module_cmath_globals,
+};
+
+#endif // MICROPY_PY_BUILTINS_FLOAT && MICROPY_PY_BUILTINS_COMPLEX && MICROPY_PY_CMATH
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/modcollections.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,47 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/builtin.h"
+
+#if MICROPY_PY_COLLECTIONS
+
+// Globals of the `_collections` module (underscore-prefixed so a Python-level
+// `collections` module can wrap it for CPython compatibility).
+STATIC const mp_rom_map_elem_t mp_module_collections_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR__collections) },
+    { MP_ROM_QSTR(MP_QSTR_namedtuple), MP_ROM_PTR(&mp_namedtuple_obj) },
+    #if MICROPY_PY_COLLECTIONS_ORDEREDDICT
+    { MP_ROM_QSTR(MP_QSTR_OrderedDict), MP_ROM_PTR(&mp_type_ordereddict) },
+    #endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_collections_globals, mp_module_collections_globals_table);
+
+const mp_obj_module_t mp_module_collections = {
+    .base = { &mp_type_module },
+    .name = MP_QSTR__collections,
+    // Const cast is safe: this dict lives in ROM and is never mutated.
+    .globals = (mp_obj_dict_t*)&mp_module_collections_globals,
+};
+
+#endif // MICROPY_PY_COLLECTIONS
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/modgc.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,104 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+#include "py/obj.h"
+#include "py/gc.h"
+
+#if MICROPY_PY_GC && MICROPY_ENABLE_GC
+
+/// \module gc - control the garbage collector
+
+/// \function collect()
+/// Run a garbage collection.
+// Named py_gc_collect (not gc_collect) to avoid clashing with the C-level
+// gc_collect() from py/gc.h that it wraps.
+STATIC mp_obj_t py_gc_collect(void) {
+    gc_collect();
+#if MICROPY_PY_GC_COLLECT_RETVAL
+    // Optionally report MP_STATE_MEM(gc_collected) -- presumably the count
+    // of blocks reclaimed by this pass; exact units defined in py/gc.c.
+    return MP_OBJ_NEW_SMALL_INT(MP_STATE_MEM(gc_collected));
+#else
+    return mp_const_none;
+#endif
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_collect_obj, py_gc_collect);
+
+/// \function disable()
+/// Disable automatic garbage collection.  gc.collect() can still be
+/// called manually while disabled.
+STATIC mp_obj_t gc_disable(void) {
+    // Only the automatic-collection flag is cleared; the heap stays usable.
+    MP_STATE_MEM(gc_auto_collect_enabled) = 0;
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_disable_obj, gc_disable);
+
+/// \function enable()
+/// (Re-)enable automatic garbage collection.
+STATIC mp_obj_t gc_enable(void) {
+    MP_STATE_MEM(gc_auto_collect_enabled) = 1;
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_enable_obj, gc_enable);
+
+/// \function isenabled()
+/// Return True if automatic garbage collection is currently enabled.
+STATIC mp_obj_t gc_isenabled(void) {
+    return mp_obj_new_bool(MP_STATE_MEM(gc_auto_collect_enabled));
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_isenabled_obj, gc_isenabled);
+
+/// \function mem_free()
+/// Return the number of bytes of available heap RAM.
+STATIC mp_obj_t gc_mem_free(void) {
+    // Query the collector for current heap statistics.
+    gc_info_t stats;
+    gc_info(&stats);
+    return MP_OBJ_NEW_SMALL_INT(stats.free);
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_mem_free_obj, gc_mem_free);
+
+/// \function mem_alloc()
+/// Return the number of bytes of heap RAM that are allocated.
+STATIC mp_obj_t gc_mem_alloc(void) {
+    // Query the collector for current heap statistics.
+    gc_info_t stats;
+    gc_info(&stats);
+    return MP_OBJ_NEW_SMALL_INT(stats.used);
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_mem_alloc_obj, gc_mem_alloc);
+
+// Globals of the `gc` module.
+STATIC const mp_rom_map_elem_t mp_module_gc_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_gc) },
+    { MP_ROM_QSTR(MP_QSTR_collect), MP_ROM_PTR(&gc_collect_obj) },
+    { MP_ROM_QSTR(MP_QSTR_disable), MP_ROM_PTR(&gc_disable_obj) },
+    { MP_ROM_QSTR(MP_QSTR_enable), MP_ROM_PTR(&gc_enable_obj) },
+    { MP_ROM_QSTR(MP_QSTR_isenabled), MP_ROM_PTR(&gc_isenabled_obj) },
+    { MP_ROM_QSTR(MP_QSTR_mem_free), MP_ROM_PTR(&gc_mem_free_obj) },
+    { MP_ROM_QSTR(MP_QSTR_mem_alloc), MP_ROM_PTR(&gc_mem_alloc_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_gc_globals, mp_module_gc_globals_table);
+
+// Module object for `gc`; registered via the port's builtin-module table.
+const mp_obj_module_t mp_module_gc = {
+    .base = { &mp_type_module },
+    .name = MP_QSTR_gc,
+    // Const cast is safe: this dict lives in ROM and is never mutated.
+    .globals = (mp_obj_dict_t*)&mp_module_gc_globals,
+};
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/modio.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,154 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/stream.h"
+
+#if MICROPY_PY_IO
+
+extern const mp_obj_type_t mp_type_fileio;
+extern const mp_obj_type_t mp_type_textio;
+
+#if MICROPY_PY_IO_BUFFEREDWRITER
+// Instance data for _io.BufferedWriter: a wrapped stream plus an inline
+// write buffer allocated as part of the object.
+typedef struct _mp_obj_bufwriter_t {
+    mp_obj_base_t base;
+    mp_obj_t stream;    // underlying stream that buffered data is flushed to
+    size_t alloc;       // capacity of buf[] in bytes
+    size_t len;         // number of bytes currently buffered
+    // C99 flexible array member (instead of the non-standard zero-length
+    // array extension `buf[0]`); storage comes from m_new_obj_var() in
+    // bufwriter_make_new, which allocates sizeof(struct) + alloc bytes.
+    byte buf[];
+} mp_obj_bufwriter_t;
+
+// Constructor: BufferedWriter(stream, alloc).  Wraps `stream` with a write
+// buffer of `alloc` bytes (buffer storage is allocated inline with the
+// object via m_new_obj_var).
+STATIC mp_obj_t bufwriter_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_arg_check_num(n_args, n_kw, 2, 2, false);
+    size_t alloc = mp_obj_get_int(args[1]);
+    mp_obj_bufwriter_t *o = m_new_obj_var(mp_obj_bufwriter_t, byte, alloc);
+    o->base.type = type;
+    o->stream = args[0];
+    o->alloc = alloc;
+    o->len = 0;
+    // Use MP_OBJ_FROM_PTR for consistency with MP_OBJ_TO_PTR used by the
+    // stream methods below; returning the raw pointer is only correct for
+    // pointer-based object representations.
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Stream-protocol write: accumulate `size` bytes from `buf`, flushing a
+// whole buffer-full to the wrapped stream each time the buffer fills.
+// Returns the number of bytes consumed (always all of them on success),
+// or MP_STREAM_ERROR with *errcode set by the underlying stream.
+STATIC mp_uint_t bufwriter_write(mp_obj_t self_in, const void *buf, mp_uint_t size, int *errcode) {
+    mp_obj_bufwriter_t *self = MP_OBJ_TO_PTR(self_in);
+
+    mp_uint_t org_size = size;
+
+    while (size > 0) {
+        // Space remaining in the internal buffer.
+        mp_uint_t rem = self->alloc - self->len;
+        if (size < rem) {
+            // Everything fits (with room to spare): just buffer it.
+            memcpy(self->buf + self->len, buf, size);
+            self->len += size;
+            return org_size;
+        }
+
+        // Buffer flushing policy here is to flush entire buffer all the time.
+        // This allows e.g. to have a block device as backing storage and write
+        // entire block to it. memcpy below is not ideal and could be optimized
+        // in some cases. But the way it is now it at least ensures that buffer
+        // is word-aligned, to guard against obscure cases when it matters, e.g.
+        // https://github.com/micropython/micropython/issues/1863
+        memcpy(self->buf + self->len, buf, rem);
+        buf = (byte*)buf + rem;
+        size -= rem;
+        mp_uint_t out_sz = mp_stream_writeall(self->stream, self->buf, self->alloc, errcode);
+        if (out_sz == MP_STREAM_ERROR) {
+            // NOTE(review): on error self->len is left stale (not updated to
+            // reflect the bytes memcpy'd above) -- verify intended behaviour.
+            return MP_STREAM_ERROR;
+        }
+        // Entire buffer was written; start filling it again.
+        self->len = 0;
+    }
+
+    return org_size;
+}
+
+// flush(): write any buffered bytes to the underlying stream.  The buffer
+// is emptied even on failure; a stream error raises OSError(errno).
+STATIC mp_obj_t bufwriter_flush(mp_obj_t self_in) {
+    mp_obj_bufwriter_t *self = MP_OBJ_TO_PTR(self_in);
+
+    if (self->len == 0) {
+        // Nothing buffered; nothing to do.
+        return mp_const_none;
+    }
+
+    int error;
+    mp_uint_t n_written = mp_stream_writeall(self->stream, self->buf, self->len, &error);
+    self->len = 0;
+    if (n_written == MP_STREAM_ERROR) {
+        nlr_raise(mp_obj_new_exception_arg1(&mp_type_OSError, MP_OBJ_NEW_SMALL_INT(error)));
+    }
+
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(bufwriter_flush_obj, bufwriter_flush);
+
+// Methods of BufferedWriter.  Uses ROM-table entries (mp_rom_map_elem_t /
+// MP_ROM_QSTR / MP_ROM_PTR) for consistency with every other table in this
+// file, instead of the older mp_map_elem_t style.
+STATIC const mp_rom_map_elem_t bufwriter_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_write), MP_ROM_PTR(&mp_stream_write_obj) },
+    { MP_ROM_QSTR(MP_QSTR_flush), MP_ROM_PTR(&bufwriter_flush_obj) },
+};
+STATIC MP_DEFINE_CONST_DICT(bufwriter_locals_dict, bufwriter_locals_dict_table);
+
+// Stream protocol for BufferedWriter: write-only (no .read, no .ioctl).
+STATIC const mp_stream_p_t bufwriter_stream_p = {
+    .write = bufwriter_write,
+};
+
+// Type object for _io.BufferedWriter.
+STATIC const mp_obj_type_t bufwriter_type = {
+    { &mp_type_type },
+    .name = MP_QSTR_BufferedWriter,
+    .make_new = bufwriter_make_new,
+    .stream_p = &bufwriter_stream_p,
+    .locals_dict = (mp_obj_t)&bufwriter_locals_dict,
+};
+#endif // MICROPY_PY_IO_BUFFEREDWRITER
+
+// Globals of the `_io` module.  Which classes appear depends on the
+// port's MICROPY_PY_IO_* configuration; StringIO is always present.
+STATIC const mp_rom_map_elem_t mp_module_io_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR__io) },
+    // Note: mp_builtin_open_obj should be defined by port, it's not
+    // part of the core.
+    { MP_ROM_QSTR(MP_QSTR_open), MP_ROM_PTR(&mp_builtin_open_obj) },
+    #if MICROPY_PY_IO_FILEIO
+    { MP_ROM_QSTR(MP_QSTR_FileIO), MP_ROM_PTR(&mp_type_fileio) },
+    #if MICROPY_CPYTHON_COMPAT
+    { MP_ROM_QSTR(MP_QSTR_TextIOWrapper), MP_ROM_PTR(&mp_type_textio) },
+    #endif
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_StringIO), MP_ROM_PTR(&mp_type_stringio) },
+    #if MICROPY_PY_IO_BYTESIO
+    { MP_ROM_QSTR(MP_QSTR_BytesIO), MP_ROM_PTR(&mp_type_bytesio) },
+    #endif
+    #if MICROPY_PY_IO_BUFFEREDWRITER
+    { MP_ROM_QSTR(MP_QSTR_BufferedWriter), MP_ROM_PTR(&bufwriter_type) },
+    #endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_io_globals, mp_module_io_globals_table);
+
+// Module object for `_io`; registered via the port's builtin-module table.
+const mp_obj_module_t mp_module_io = {
+    .base = { &mp_type_module },
+    .name = MP_QSTR__io,
+    // Const cast is safe: this dict lives in ROM and is never mutated.
+    .globals = (mp_obj_dict_t*)&mp_module_io_globals,
+};
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/modmath.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,270 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/builtin.h"
+#include "py/nlr.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT && MICROPY_PY_MATH
+
+#include <math.h>
+
+/// \module math - mathematical functions
+///
+/// The `math` module provides some basic mathematical functions for
+/// working with floating-point numbers.
+
+STATIC NORETURN void math_error(void) {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError, "math domain error"));
+}
+
+// Helper macros: each expands to a wrapper function plus its function
+// object for one math routine.  MICROPY_FLOAT_C_FUN selects the float or
+// double variant of the C library function to match mp_float_t.
+
+// One float argument -> float result.
+#define MATH_FUN_1(py_name, c_name) \
+    STATIC mp_obj_t mp_math_ ## py_name(mp_obj_t x_obj) { return mp_obj_new_float(MICROPY_FLOAT_C_FUN(c_name)(mp_obj_get_float(x_obj))); } \
+    STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_## py_name ## _obj, mp_math_ ## py_name);
+
+// Two float arguments -> float result.
+#define MATH_FUN_2(py_name, c_name) \
+    STATIC mp_obj_t mp_math_ ## py_name(mp_obj_t x_obj, mp_obj_t y_obj) { return mp_obj_new_float(MICROPY_FLOAT_C_FUN(c_name)(mp_obj_get_float(x_obj), mp_obj_get_float(y_obj))); } \
+    STATIC MP_DEFINE_CONST_FUN_OBJ_2(mp_math_## py_name ## _obj, mp_math_ ## py_name);
+
+// One float argument -> bool result.  c_name is used directly (not via
+// MICROPY_FLOAT_C_FUN) so it also works with the isinf/isnan/isfinite
+// classification macros from <math.h>.
+#define MATH_FUN_1_TO_BOOL(py_name, c_name) \
+    STATIC mp_obj_t mp_math_ ## py_name(mp_obj_t x_obj) { return mp_obj_new_bool(c_name(mp_obj_get_float(x_obj))); } \
+    STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_## py_name ## _obj, mp_math_ ## py_name);
+
+// One float argument -> int result (e.g. floor/ceil/trunc).
+#define MATH_FUN_1_TO_INT(py_name, c_name) \
+    STATIC mp_obj_t mp_math_ ## py_name(mp_obj_t x_obj) { mp_int_t x = MICROPY_FLOAT_C_FUN(c_name)(mp_obj_get_float(x_obj)); return mp_obj_new_int(x); } \
+    STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_## py_name ## _obj, mp_math_ ## py_name);
+
+// One float argument with a domain check: raises ValueError when
+// error_condition (with the argument bound to `x`) holds.
+#define MATH_FUN_1_ERRCOND(py_name, c_name, error_condition) \
+    STATIC mp_obj_t mp_math_ ## py_name(mp_obj_t x_obj) { \
+        mp_float_t x = mp_obj_get_float(x_obj); \
+        if (error_condition) { \
+            math_error(); \
+        } \
+        return mp_obj_new_float(MICROPY_FLOAT_C_FUN(c_name)(x)); \
+    } \
+    STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_## py_name ## _obj, mp_math_ ## py_name);
+
+#if MP_NEED_LOG2
+// Fallback for C libraries lacking log2(): log2(x) = ln(x) * (1/ln 2).
+// 1.442695040888963407354163704 is 1/M_LN2
+#define log2(x) (log(x) * 1.442695040888963407354163704)
+#endif
+
+// Generate the module's function objects.  Each invocation below expands
+// (via the MATH_FUN_* macros above) to a wrapper plus its const fun object.
+
+/// \function sqrt(x)
+/// Returns the square root of `x`.
+MATH_FUN_1_ERRCOND(sqrt, sqrt, (x < (mp_float_t)0.0))
+/// \function pow(x, y)
+/// Returns `x` to the power of `y`.
+MATH_FUN_2(pow, pow)
+/// \function exp(x)
+MATH_FUN_1(exp, exp)
+#if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+/// \function expm1(x)
+MATH_FUN_1(expm1, expm1)
+/// \function log2(x)
+MATH_FUN_1_ERRCOND(log2, log2, (x <= (mp_float_t)0.0))
+/// \function log10(x)
+MATH_FUN_1_ERRCOND(log10, log10, (x <= (mp_float_t)0.0))
+/// \function cosh(x)
+MATH_FUN_1(cosh, cosh)
+/// \function sinh(x)
+MATH_FUN_1(sinh, sinh)
+/// \function tanh(x)
+MATH_FUN_1(tanh, tanh)
+/// \function acosh(x)
+MATH_FUN_1(acosh, acosh)
+/// \function asinh(x)
+MATH_FUN_1(asinh, asinh)
+/// \function atanh(x)
+MATH_FUN_1(atanh, atanh)
+#endif
+/// \function cos(x)
+MATH_FUN_1(cos, cos)
+/// \function sin(x)
+MATH_FUN_1(sin, sin)
+/// \function tan(x)
+MATH_FUN_1(tan, tan)
+/// \function acos(x)
+MATH_FUN_1(acos, acos)
+/// \function asin(x)
+MATH_FUN_1(asin, asin)
+/// \function atan(x)
+MATH_FUN_1(atan, atan)
+/// \function atan2(y, x)
+MATH_FUN_2(atan2, atan2)
+/// \function ceil(x)
+MATH_FUN_1_TO_INT(ceil, ceil)
+/// \function copysign(x, y)
+MATH_FUN_2(copysign, copysign)
+/// \function fabs(x)
+MATH_FUN_1(fabs, fabs)
+/// \function floor(x)
+MATH_FUN_1_TO_INT(floor, floor) //TODO: delegate to x.__floor__() if x is not a float
+/// \function fmod(x, y)
+MATH_FUN_2(fmod, fmod)
+/// \function isfinite(x)
+MATH_FUN_1_TO_BOOL(isfinite, isfinite)
+/// \function isinf(x)
+MATH_FUN_1_TO_BOOL(isinf, isinf)
+/// \function isnan(x)
+MATH_FUN_1_TO_BOOL(isnan, isnan)
+/// \function trunc(x)
+MATH_FUN_1_TO_INT(trunc, trunc)
+/// \function ldexp(x, exp)
+MATH_FUN_2(ldexp, ldexp)
+#if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+/// \function erf(x)
+/// Return the error function of `x`.
+MATH_FUN_1(erf, erf)
+/// \function erfc(x)
+/// Return the complementary error function of `x`.
+MATH_FUN_1(erfc, erfc)
+/// \function gamma(x)
+/// Return the gamma function of `x`.
+MATH_FUN_1(gamma, tgamma)
+/// \function lgamma(x)
+/// return the natural logarithm of the gamma function of `x`.
+MATH_FUN_1(lgamma, lgamma)
+#endif
+//TODO: factorial, fsum
+
+// Function that takes a variable number of arguments
+
+/// \function log(x[, base])
+/// Natural logarithm of `x`, or logarithm of `x` to the given `base`.
+STATIC mp_obj_t mp_math_log(size_t n_args, const mp_obj_t *args) {
+    mp_float_t x = mp_obj_get_float(args[0]);
+    if (x <= (mp_float_t)0.0) {
+        math_error();
+    }
+    mp_float_t ln_x = MICROPY_FLOAT_C_FUN(log)(x);
+    if (n_args == 1) {
+        // One-argument form: natural logarithm.
+        return mp_obj_new_float(ln_x);
+    }
+    // Two-argument form: log(x, base) == ln(x) / ln(base).
+    mp_float_t base = mp_obj_get_float(args[1]);
+    if (base <= (mp_float_t)0.0) {
+        math_error();
+    }
+    return mp_obj_new_float(ln_x / MICROPY_FLOAT_C_FUN(log)(base));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_math_log_obj, 1, 2, mp_math_log);
+
+// Functions that return a tuple
+
+/// \function frexp(x)
+/// Decompose `x` into a (mantissa, exponent) pair, per C frexp().
+STATIC mp_obj_t mp_math_frexp(mp_obj_t x_obj) {
+    int exponent = 0;
+    mp_float_t mantissa = MICROPY_FLOAT_C_FUN(frexp)(mp_obj_get_float(x_obj), &exponent);
+    mp_obj_t items[2] = {
+        mp_obj_new_float(mantissa),
+        mp_obj_new_int(exponent),
+    };
+    return mp_obj_new_tuple(2, items);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_frexp_obj, mp_math_frexp);
+
+/// \function modf(x)
+/// Split `x` into a (fractional part, integer part) pair, per C modf().
+STATIC mp_obj_t mp_math_modf(mp_obj_t x_obj) {
+    mp_float_t integral = 0.0;
+    mp_float_t fraction = MICROPY_FLOAT_C_FUN(modf)(mp_obj_get_float(x_obj), &integral);
+    mp_obj_t items[2] = {
+        mp_obj_new_float(fraction),
+        mp_obj_new_float(integral),
+    };
+    return mp_obj_new_tuple(2, items);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_modf_obj, mp_math_modf);
+
+// Angular conversions
+
+/// \function radians(x)
+/// Convert `x` from degrees to radians.
+STATIC mp_obj_t mp_math_radians(mp_obj_t x_obj) {
+    mp_float_t degrees = mp_obj_get_float(x_obj);
+    return mp_obj_new_float(degrees * M_PI / 180.0);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_radians_obj, mp_math_radians);
+
+/// \function degrees(x)
+/// Convert `x` from radians to degrees.
+STATIC mp_obj_t mp_math_degrees(mp_obj_t x_obj) {
+    mp_float_t radians = mp_obj_get_float(x_obj);
+    return mp_obj_new_float(radians * 180.0 / M_PI);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_degrees_obj, mp_math_degrees);
+
+// Globals of the `math` module.  The special-function entries are compiled
+// in only when MICROPY_PY_MATH_SPECIAL_FUNCTIONS is enabled.
+STATIC const mp_rom_map_elem_t mp_module_math_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_math) },
+    { MP_ROM_QSTR(MP_QSTR_e), mp_const_float_e },
+    { MP_ROM_QSTR(MP_QSTR_pi), mp_const_float_pi },
+    { MP_ROM_QSTR(MP_QSTR_sqrt), MP_ROM_PTR(&mp_math_sqrt_obj) },
+    { MP_ROM_QSTR(MP_QSTR_pow), MP_ROM_PTR(&mp_math_pow_obj) },
+    { MP_ROM_QSTR(MP_QSTR_exp), MP_ROM_PTR(&mp_math_exp_obj) },
+    #if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+    { MP_ROM_QSTR(MP_QSTR_expm1), MP_ROM_PTR(&mp_math_expm1_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_log), MP_ROM_PTR(&mp_math_log_obj) },
+    #if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+    { MP_ROM_QSTR(MP_QSTR_log2), MP_ROM_PTR(&mp_math_log2_obj) },
+    { MP_ROM_QSTR(MP_QSTR_log10), MP_ROM_PTR(&mp_math_log10_obj) },
+    { MP_ROM_QSTR(MP_QSTR_cosh), MP_ROM_PTR(&mp_math_cosh_obj) },
+    { MP_ROM_QSTR(MP_QSTR_sinh), MP_ROM_PTR(&mp_math_sinh_obj) },
+    { MP_ROM_QSTR(MP_QSTR_tanh), MP_ROM_PTR(&mp_math_tanh_obj) },
+    { MP_ROM_QSTR(MP_QSTR_acosh), MP_ROM_PTR(&mp_math_acosh_obj) },
+    { MP_ROM_QSTR(MP_QSTR_asinh), MP_ROM_PTR(&mp_math_asinh_obj) },
+    { MP_ROM_QSTR(MP_QSTR_atanh), MP_ROM_PTR(&mp_math_atanh_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_cos), MP_ROM_PTR(&mp_math_cos_obj) },
+    { MP_ROM_QSTR(MP_QSTR_sin), MP_ROM_PTR(&mp_math_sin_obj) },
+    { MP_ROM_QSTR(MP_QSTR_tan), MP_ROM_PTR(&mp_math_tan_obj) },
+    { MP_ROM_QSTR(MP_QSTR_acos), MP_ROM_PTR(&mp_math_acos_obj) },
+    { MP_ROM_QSTR(MP_QSTR_asin), MP_ROM_PTR(&mp_math_asin_obj) },
+    { MP_ROM_QSTR(MP_QSTR_atan), MP_ROM_PTR(&mp_math_atan_obj) },
+    { MP_ROM_QSTR(MP_QSTR_atan2), MP_ROM_PTR(&mp_math_atan2_obj) },
+    { MP_ROM_QSTR(MP_QSTR_ceil), MP_ROM_PTR(&mp_math_ceil_obj) },
+    { MP_ROM_QSTR(MP_QSTR_copysign), MP_ROM_PTR(&mp_math_copysign_obj) },
+    { MP_ROM_QSTR(MP_QSTR_fabs), MP_ROM_PTR(&mp_math_fabs_obj) },
+    { MP_ROM_QSTR(MP_QSTR_floor), MP_ROM_PTR(&mp_math_floor_obj) },
+    { MP_ROM_QSTR(MP_QSTR_fmod), MP_ROM_PTR(&mp_math_fmod_obj) },
+    { MP_ROM_QSTR(MP_QSTR_frexp), MP_ROM_PTR(&mp_math_frexp_obj) },
+    { MP_ROM_QSTR(MP_QSTR_ldexp), MP_ROM_PTR(&mp_math_ldexp_obj) },
+    { MP_ROM_QSTR(MP_QSTR_modf), MP_ROM_PTR(&mp_math_modf_obj) },
+    { MP_ROM_QSTR(MP_QSTR_isfinite), MP_ROM_PTR(&mp_math_isfinite_obj) },
+    { MP_ROM_QSTR(MP_QSTR_isinf), MP_ROM_PTR(&mp_math_isinf_obj) },
+    { MP_ROM_QSTR(MP_QSTR_isnan), MP_ROM_PTR(&mp_math_isnan_obj) },
+    { MP_ROM_QSTR(MP_QSTR_trunc), MP_ROM_PTR(&mp_math_trunc_obj) },
+    { MP_ROM_QSTR(MP_QSTR_radians), MP_ROM_PTR(&mp_math_radians_obj) },
+    { MP_ROM_QSTR(MP_QSTR_degrees), MP_ROM_PTR(&mp_math_degrees_obj) },
+    #if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+    { MP_ROM_QSTR(MP_QSTR_erf), MP_ROM_PTR(&mp_math_erf_obj) },
+    { MP_ROM_QSTR(MP_QSTR_erfc), MP_ROM_PTR(&mp_math_erfc_obj) },
+    { MP_ROM_QSTR(MP_QSTR_gamma), MP_ROM_PTR(&mp_math_gamma_obj) },
+    { MP_ROM_QSTR(MP_QSTR_lgamma), MP_ROM_PTR(&mp_math_lgamma_obj) },
+    #endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_math_globals, mp_module_math_globals_table);
+
+// Module object for `math`; registered via the port's builtin-module table.
+const mp_obj_module_t mp_module_math = {
+    .base = { &mp_type_module },
+    .name = MP_QSTR_math,
+    // Const cast is safe: this dict lives in ROM and is never mutated.
+    .globals = (mp_obj_dict_t*)&mp_module_math_globals,
+};
+
+#endif // MICROPY_PY_BUILTINS_FLOAT && MICROPY_PY_MATH
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/modmicropython.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,150 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+
+#include "py/mpstate.h"
+#include "py/builtin.h"
+#include "py/stackctrl.h"
+#include "py/gc.h"
+
+// Various builtins specific to MicroPython runtime,
+// living in micropython module
+
+#if MICROPY_PY_MICROPYTHON_MEM_INFO
+
+#if MICROPY_MEM_STATS
+// Heap accounting getters.  The m_get_*_bytes_allocated() counters are
+// maintained by the allocator when MICROPY_MEM_STATS is enabled.
+STATIC mp_obj_t mp_micropython_mem_total(void) {
+    return MP_OBJ_NEW_SMALL_INT(m_get_total_bytes_allocated());
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_mem_total_obj, mp_micropython_mem_total);
+
+STATIC mp_obj_t mp_micropython_mem_current(void) {
+    return MP_OBJ_NEW_SMALL_INT(m_get_current_bytes_allocated());
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_mem_current_obj, mp_micropython_mem_current);
+
+STATIC mp_obj_t mp_micropython_mem_peak(void) {
+    return MP_OBJ_NEW_SMALL_INT(m_get_peak_bytes_allocated());
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_mem_peak_obj, mp_micropython_mem_peak);
+#endif
+
+// Print a RAM-usage summary: heap counters (if MICROPY_MEM_STATS), stack
+// usage, and GC info.  Any argument additionally dumps the GC allocation
+// table.  Not STATIC -- presumably also referenced directly by ports;
+// confirm before changing linkage.
+mp_obj_t mp_micropython_mem_info(size_t n_args, const mp_obj_t *args) {
+    (void)args; // argument values are unused; only n_args matters
+#if MICROPY_MEM_STATS
+    mp_printf(&mp_plat_print, "mem: total=" UINT_FMT ", current=" UINT_FMT ", peak=" UINT_FMT "\n",
+        (mp_uint_t)m_get_total_bytes_allocated(), (mp_uint_t)m_get_current_bytes_allocated(), (mp_uint_t)m_get_peak_bytes_allocated());
+#endif
+#if MICROPY_STACK_CHECK
+    mp_printf(&mp_plat_print, "stack: " UINT_FMT " out of " INT_FMT "\n", mp_stack_usage(), MP_STATE_VM(stack_limit));
+#else
+    mp_printf(&mp_plat_print, "stack: " UINT_FMT "\n", mp_stack_usage());
+#endif
+#if MICROPY_ENABLE_GC
+    gc_dump_info();
+    if (n_args == 1) {
+        // arg given means dump gc allocation table
+        gc_dump_alloc_table();
+    }
+#else
+    (void)n_args;
+#endif
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_micropython_mem_info_obj, 0, 1, mp_micropython_mem_info);
+
+// Print qstr pool statistics; any argument additionally dumps the qstr data.
+STATIC mp_obj_t mp_micropython_qstr_info(size_t n_args, const mp_obj_t *args) {
+    (void)args; // argument values are unused; only n_args matters
+    size_t n_pool, n_qstr, n_str_data_bytes, n_total_bytes;
+    qstr_pool_info(&n_pool, &n_qstr, &n_str_data_bytes, &n_total_bytes);
+    // NOTE(review): %u with size_t arguments assumes unsigned == size_t on
+    // the target (mp_printf has no %zu) -- verify on 64-bit ports.
+    mp_printf(&mp_plat_print, "qstr pool: n_pool=%u, n_qstr=%u, n_str_data_bytes=%u, n_total_bytes=%u\n",
+        n_pool, n_qstr, n_str_data_bytes, n_total_bytes);
+    if (n_args == 1) {
+        // arg given means dump qstr data
+        qstr_dump_data();
+    }
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_micropython_qstr_info_obj, 0, 1, mp_micropython_qstr_info);
+
+#if MICROPY_STACK_CHECK
+// Return the current stack usage in bytes (as reported by py/stackctrl.h).
+STATIC mp_obj_t mp_micropython_stack_use(void) {
+    return MP_OBJ_NEW_SMALL_INT(mp_stack_usage());
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_stack_use_obj, mp_micropython_stack_use);
+#endif
+
+#endif // MICROPY_PY_MICROPYTHON_MEM_INFO
+
+#if MICROPY_ENABLE_GC
+// Thin wrappers around gc_lock()/gc_unlock() from py/gc.h (heap locking;
+// exact semantics are defined there).
+STATIC mp_obj_t mp_micropython_heap_lock(void) {
+    gc_lock();
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_heap_lock_obj, mp_micropython_heap_lock);
+
+STATIC mp_obj_t mp_micropython_heap_unlock(void) {
+    gc_unlock();
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_heap_unlock_obj, mp_micropython_heap_unlock);
+#endif
+
+#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && (MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0)
+// Expose mp_alloc_emergency_exception_buf() to Python only when the
+// emergency buffer is runtime-sized (compile-time size configured as 0).
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_alloc_emergency_exception_buf_obj, mp_alloc_emergency_exception_buf);
+#endif
+
+// Globals of the `micropython` module.  Preprocessor guards are indented
+// uniformly inside the table, matching the other tables in this codebase.
+STATIC const mp_rom_map_elem_t mp_module_micropython_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_micropython) },
+    #if MICROPY_PY_MICROPYTHON_MEM_INFO
+    #if MICROPY_MEM_STATS
+    { MP_ROM_QSTR(MP_QSTR_mem_total), MP_ROM_PTR(&mp_micropython_mem_total_obj) },
+    { MP_ROM_QSTR(MP_QSTR_mem_current), MP_ROM_PTR(&mp_micropython_mem_current_obj) },
+    { MP_ROM_QSTR(MP_QSTR_mem_peak), MP_ROM_PTR(&mp_micropython_mem_peak_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_mem_info), MP_ROM_PTR(&mp_micropython_mem_info_obj) },
+    { MP_ROM_QSTR(MP_QSTR_qstr_info), MP_ROM_PTR(&mp_micropython_qstr_info_obj) },
+    #if MICROPY_STACK_CHECK
+    { MP_ROM_QSTR(MP_QSTR_stack_use), MP_ROM_PTR(&mp_micropython_stack_use_obj) },
+    #endif
+    #endif
+    #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && (MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0)
+    { MP_ROM_QSTR(MP_QSTR_alloc_emergency_exception_buf), MP_ROM_PTR(&mp_alloc_emergency_exception_buf_obj) },
+    #endif
+    #if MICROPY_ENABLE_GC
+    { MP_ROM_QSTR(MP_QSTR_heap_lock), MP_ROM_PTR(&mp_micropython_heap_lock_obj) },
+    { MP_ROM_QSTR(MP_QSTR_heap_unlock), MP_ROM_PTR(&mp_micropython_heap_unlock_obj) },
+    #endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_micropython_globals, mp_module_micropython_globals_table);
+
+// Module object for `micropython`; always available (no #if guard).
+const mp_obj_module_t mp_module_micropython = {
+    .base = { &mp_type_module },
+    .name = MP_QSTR_micropython,
+    // Const cast is safe: this dict lives in ROM and is never mutated.
+    .globals = (mp_obj_dict_t*)&mp_module_micropython_globals,
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/modstruct.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,277 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/objtuple.h"
+#include "py/binary.h"
+#include "py/parsenum.h"
+
+#if MICROPY_PY_STRUCT
+
+/*
+ This module implements most of character typecodes from CPython, with
+ some extensions:
+
+ O - (Pointer to) an arbitrary Python object. This is useful for callback
+ data, etc. Note that you must keep reference to passed object in
+ your Python application, otherwise it may be garbage-collected,
+ and then when you get back this value from callback it may be
+ invalid (and lead to crash).
+ S - Pointer to a string (returned as a Python string). Note the
+ difference from "Ns": the latter means "at this position in the
+ structure there are up to N bytes of character data", while "S"
+ means "at this position in the structure there is a pointer to
+ zero-terminated character data".
+ */
+
+// Read an optional byte-order/size prefix from *fmt. Returns the effective
+// order character ('@', '=', '<' or '>'); '!' (network order) maps to '>'.
+// *fmt is advanced past the prefix only when one was actually present.
+STATIC char get_fmt_type(const char **fmt) {
+ char ch = **fmt;
+ if (ch == '!') {
+ ch = '>';
+ } else if (ch != '@' && ch != '=' && ch != '<' && ch != '>') {
+ // No explicit prefix: native order, leave *fmt untouched.
+ return '@';
+ }
+ // Consume the prefix character
+ (*fmt)++;
+ return ch;
+}
+
+// Parse a decimal repeat count at *p and advance *p past the digits.
+// NOTE(review): the parsed value goes through MP_OBJ_SMALL_INT_VALUE, so a
+// count too large for a small int would be misread -- assumed not to occur
+// for realistic format strings; confirm if huge counts must be rejected.
+STATIC mp_uint_t get_fmt_num(const char **p) {
+ const char *num = *p;
+ uint len = 1;
+ while (unichar_isdigit(*++num)) {
+ len++;
+ }
+ mp_uint_t val = (mp_uint_t)MP_OBJ_SMALL_INT_VALUE(mp_parse_num_integer(*p, len, 10, NULL));
+ *p = num;
+ return val;
+}
+
+// Count how many Python objects the format string produces when unpacked.
+// A repeat count before 's' still yields a single bytes object, so it
+// contributes 1; before any other type code it contributes `num` items.
+STATIC uint calcsize_items(const char *fmt) {
+ uint cnt = 0;
+ while (*fmt) {
+ int num = 1;
+ if (unichar_isdigit(*fmt)) {
+ num = get_fmt_num(&fmt);
+ if (*fmt == 's') {
+ num = 1;
+ }
+ }
+ cnt += num;
+ fmt++;
+ }
+ return cnt;
+}
+
+// ustruct.calcsize(fmt): return the number of bytes needed to hold the
+// given format, including any alignment padding for '@' (native) layouts.
+STATIC mp_obj_t struct_calcsize(mp_obj_t fmt_in) {
+ const char *fmt = mp_obj_str_get_str(fmt_in);
+ char fmt_type = get_fmt_type(&fmt);
+ mp_uint_t size;
+ for (size = 0; *fmt; fmt++) {
+ mp_uint_t align = 1;
+ mp_uint_t cnt = 1;
+ if (unichar_isdigit(*fmt)) {
+ cnt = get_fmt_num(&fmt);
+ }
+
+ mp_uint_t sz = 0;
+ // 'Ns' is a single field of N bytes, not N separate items
+ if (*fmt == 's') {
+ sz = cnt;
+ cnt = 1;
+ }
+
+ while (cnt--) {
+ // If we already have size for 's' case, don't set it again
+ if (sz == 0) {
+ sz = (mp_uint_t)mp_binary_get_size(fmt_type, *fmt, &align);
+ if (sz == (mp_uint_t)-1) {
+ // Unknown format char. The previous assert() compiled out
+ // in release builds and silently produced a wrong size;
+ // raise a proper error instead.
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "bad typecode"));
+ }
+ }
+ // Apply alignment
+ size = (size + align - 1) & ~(align - 1);
+ size += sz;
+ sz = 0;
+ }
+ }
+ return MP_OBJ_NEW_SMALL_INT(size);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(struct_calcsize_obj, struct_calcsize);
+
+// Implements both ustruct.unpack and ustruct.unpack_from:
+// (fmt, buffer[, offset]) -> tuple of unpacked values.
+STATIC mp_obj_t struct_unpack_from(size_t n_args, const mp_obj_t *args) {
+ // unpack requires that the buffer be exactly the right size.
+ // unpack_from requires that the buffer be "big enough".
+ // Since we implement unpack and unpack_from using the same function
+ // we relax the "exact" requirement, and only implement "big enough".
+ const char *fmt = mp_obj_str_get_str(args[0]);
+ char fmt_type = get_fmt_type(&fmt);
+ uint num_items = calcsize_items(fmt);
+ mp_obj_tuple_t *res = MP_OBJ_TO_PTR(mp_obj_new_tuple(num_items, NULL));
+ mp_buffer_info_t bufinfo;
+ mp_get_buffer_raise(args[1], &bufinfo, MP_BUFFER_READ);
+ byte *p = bufinfo.buf;
+ byte *end_p = &p[bufinfo.len];
+ mp_int_t offset = 0;
+
+ if (n_args > 2) {
+ // offset arg provided
+ offset = mp_obj_get_int(args[2]);
+ if (offset < 0) {
+ // negative offsets are relative to the end of the buffer
+ offset = bufinfo.len + offset;
+ if (offset < 0) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "buffer too small"));
+ }
+ }
+ // NOTE(review): a positive offset past the end is not rejected here;
+ // it is caught by the per-field bounds check below.
+ p += offset;
+ }
+
+ for (uint i = 0; i < num_items;) {
+ if (*fmt == '\0') {
+ break;
+ }
+ mp_uint_t sz = 1;
+ if (unichar_isdigit(*fmt)) {
+ sz = get_fmt_num(&fmt);
+ }
+ // Bounds check in BYTES. 'Ns' consumes sz bytes; any other code
+ // consumes sz elements of that code's element size. The old check
+ // compared sz (an element count) against a byte range, which let
+ // multi-byte repeats like "2i" read past the end of the buffer.
+ mp_uint_t align;
+ mp_uint_t nbytes = sz;
+ if (*fmt != 's') {
+ nbytes = sz * (mp_uint_t)mp_binary_get_size(fmt_type, *fmt, &align);
+ }
+ if (p + nbytes > end_p) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "buffer too small"));
+ }
+ mp_obj_t item;
+ if (*fmt == 's') {
+ item = mp_obj_new_bytes(p, sz);
+ p += sz;
+ res->items[i++] = item;
+ } else {
+ while (sz--) {
+ item = mp_binary_get_val(fmt_type, *fmt, &p);
+ res->items[i++] = item;
+ }
+ }
+ fmt++;
+ }
+ return MP_OBJ_FROM_PTR(res);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(struct_unpack_from_obj, 2, 3, struct_unpack_from);
+
+// Pack n_args values from args into the byte range [p, end_p) according to
+// fmt_in. 'Ns' fields are truncated or zero-padded to exactly N bytes.
+// Raises ValueError if a field would extend past end_p.
+STATIC void struct_pack_into_internal(mp_obj_t fmt_in, byte *p, byte* end_p, size_t n_args, const mp_obj_t *args) {
+ const char *fmt = mp_obj_str_get_str(fmt_in);
+ char fmt_type = get_fmt_type(&fmt);
+
+ size_t i;
+ for (i = 0; i < n_args;) {
+ mp_uint_t sz = 1;
+ if (*fmt == '\0') {
+ break;
+ }
+ if (unichar_isdigit(*fmt)) {
+ sz = get_fmt_num(&fmt);
+ }
+ // Bounds check in BYTES. The old check compared sz (an element count)
+ // against a byte range, which let multi-byte repeats like "2i" write
+ // past the end of the buffer.
+ mp_uint_t align;
+ mp_uint_t nbytes = sz;
+ if (*fmt != 's') {
+ nbytes = sz * (mp_uint_t)mp_binary_get_size(fmt_type, *fmt, &align);
+ }
+ if (p + nbytes > end_p) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "buffer too small"));
+ }
+
+ if (*fmt == 's') {
+ mp_buffer_info_t bufinfo;
+ mp_get_buffer_raise(args[i++], &bufinfo, MP_BUFFER_READ);
+ // Truncate to the field width, then zero-pad the remainder
+ mp_uint_t to_copy = sz;
+ if (bufinfo.len < to_copy) {
+ to_copy = bufinfo.len;
+ }
+ memcpy(p, bufinfo.buf, to_copy);
+ memset(p + to_copy, 0, sz - to_copy);
+ p += sz;
+ } else {
+ while (sz--) {
+ mp_binary_set_val(fmt_type, *fmt, args[i++], &p);
+ }
+ }
+ fmt++;
+ }
+}
+
+// ustruct.pack(fmt, v1, v2, ...): return a new bytes object containing the
+// packed values. The buffer is zeroed first so alignment padding bytes are
+// deterministically 0.
+STATIC mp_obj_t struct_pack(size_t n_args, const mp_obj_t *args) {
+ // TODO: "The arguments must match the values required by the format exactly."
+ mp_int_t size = MP_OBJ_SMALL_INT_VALUE(struct_calcsize(args[0]));
+ vstr_t vstr;
+ vstr_init_len(&vstr, size);
+ byte *p = (byte*)vstr.buf;
+ memset(p, 0, size);
+ byte *end_p = &p[size];
+ struct_pack_into_internal(args[0], p, end_p, n_args - 1, &args[1]);
+ return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(struct_pack_obj, 1, MP_OBJ_FUN_ARGS_MAX, struct_pack);
+
+// ustruct.pack_into(fmt, buffer, offset, v1, ...): pack values into an
+// existing writable buffer starting at offset. A negative offset counts
+// from the end of the buffer, as in CPython.
+STATIC mp_obj_t struct_pack_into(size_t n_args, const mp_obj_t *args) {
+ mp_buffer_info_t bufinfo;
+ mp_get_buffer_raise(args[1], &bufinfo, MP_BUFFER_WRITE);
+ mp_int_t offset = mp_obj_get_int(args[2]);
+ if (offset < 0) {
+ // negative offsets are relative to the end of the buffer
+ offset = (mp_int_t)bufinfo.len + offset;
+ if (offset < 0) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "buffer too small"));
+ }
+ }
+ byte *p = (byte *)bufinfo.buf;
+ byte *end_p = &p[bufinfo.len];
+ // NOTE(review): a positive offset larger than the buffer is not rejected
+ // here; it relies on the bounds check inside pack_into_internal.
+ p += offset;
+
+ struct_pack_into_internal(args[0], p, end_p, n_args - 3, &args[3]);
+ return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(struct_pack_into_obj, 3, MP_OBJ_FUN_ARGS_MAX, struct_pack_into);
+
+STATIC const mp_rom_map_elem_t mp_module_struct_globals_table[] = {
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_ustruct) },
+ { MP_ROM_QSTR(MP_QSTR_calcsize), MP_ROM_PTR(&struct_calcsize_obj) },
+ { MP_ROM_QSTR(MP_QSTR_pack), MP_ROM_PTR(&struct_pack_obj) },
+ { MP_ROM_QSTR(MP_QSTR_pack_into), MP_ROM_PTR(&struct_pack_into_obj) },
+ // unpack deliberately shares the unpack_from implementation, so it only
+ // requires the buffer to be "big enough", not exactly sized (CPython
+ // differs here).
+ { MP_ROM_QSTR(MP_QSTR_unpack), MP_ROM_PTR(&struct_unpack_from_obj) },
+ { MP_ROM_QSTR(MP_QSTR_unpack_from), MP_ROM_PTR(&struct_unpack_from_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_struct_globals, mp_module_struct_globals_table);
+
+// The ustruct module object, picked up by the runtime's builtin-module table.
+const mp_obj_module_t mp_module_ustruct = {
+ .base = { &mp_type_module },
+ .name = MP_QSTR_ustruct,
+ .globals = (mp_obj_dict_t*)&mp_module_struct_globals,
+};
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/modsys.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,210 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+#include "py/nlr.h"
+#include "py/builtin.h"
+#include "py/objlist.h"
+#include "py/objtuple.h"
+#include "py/objstr.h"
+#include "py/objint.h"
+#include "py/stream.h"
+
+#if MICROPY_PY_SYS
+
+#include "genhdr/mpversion.h"
+
+/// \module sys - system specific functions
+
+// defined per port; type of these is irrelevant, just need pointer
+extern struct _mp_dummy_t mp_sys_stdin_obj;
+extern struct _mp_dummy_t mp_sys_stdout_obj;
+extern struct _mp_dummy_t mp_sys_stderr_obj;
+
+#if MICROPY_PY_IO
+// Printer that routes output through sys.stdout's stream write method.
+const mp_print_t mp_sys_stdout_print = {&mp_sys_stdout_obj, mp_stream_write_adaptor};
+#endif
+
+/// \constant version - Python language version that this implementation conforms to, as a string
+STATIC const MP_DEFINE_STR_OBJ(version_obj, "3.4.0");
+
+/// \constant version_info - Python language version that this implementation conforms to, as a tuple of ints
+#define I(n) MP_OBJ_NEW_SMALL_INT(n)
+// TODO: CPython's version_info has 5 elements; we keep only 3 here to save space.
+STATIC const mp_obj_tuple_t mp_sys_version_info_obj = {{&mp_type_tuple}, 3, {I(3), I(4), I(0)}};
+
+// sys.implementation object
+// this holds the MicroPython version
+STATIC const mp_obj_tuple_t mp_sys_implementation_version_info_obj = {
+ {&mp_type_tuple},
+ 3,
+ { I(MICROPY_VERSION_MAJOR), I(MICROPY_VERSION_MINOR), I(MICROPY_VERSION_MICRO) }
+};
+#if MICROPY_PY_ATTRTUPLE
+// With attrtuple support, sys.implementation exposes named fields
+// (name, version) as CPython does.
+STATIC const qstr impl_fields[] = { MP_QSTR_name, MP_QSTR_version };
+STATIC MP_DEFINE_ATTRTUPLE(
+ mp_sys_implementation_obj,
+ impl_fields,
+ 2,
+ MP_ROM_QSTR(MP_QSTR_micropython),
+ MP_ROM_PTR(&mp_sys_implementation_version_info_obj)
+);
+#else
+// Fallback: a plain 2-tuple (name, version) when attrtuple is disabled.
+STATIC const mp_obj_tuple_t mp_sys_implementation_obj = {
+ {&mp_type_tuple},
+ 2,
+ {
+ MP_OBJ_NEW_QSTR(MP_QSTR_micropython),
+ (mp_obj_t)&mp_sys_implementation_version_info_obj,
+ }
+};
+#endif
+
+#undef I
+
+#ifdef MICROPY_PY_SYS_PLATFORM
+/// \constant platform - the platform that Micro Python is running on
+STATIC const MP_DEFINE_STR_OBJ(platform_obj, MICROPY_PY_SYS_PLATFORM);
+#endif
+
+/// \function exit([retval])
+/// Raise a `SystemExit` exception. If an argument is given, it is the
+/// value given to `SystemExit`.
+STATIC mp_obj_t mp_sys_exit(size_t n_args, const mp_obj_t *args) {
+ // Build the exception with or without a payload, then raise it;
+ // nlr_raise does not return.
+ mp_obj_t exc = (n_args == 0)
+ ? mp_obj_new_exception(&mp_type_SystemExit)
+ : mp_obj_new_exception_arg1(&mp_type_SystemExit, args[0]);
+ nlr_raise(exc);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_sys_exit_obj, 0, 1, mp_sys_exit);
+
+// sys.print_exception(exc[, file]): print an exception (with traceback if
+// available) to the given stream, defaulting to sys.stdout. This is a
+// MicroPython extension to CPython's sys module.
+STATIC mp_obj_t mp_sys_print_exception(size_t n_args, const mp_obj_t *args) {
+ #if MICROPY_PY_IO
+ void *stream_obj = &mp_sys_stdout_obj;
+ if (n_args > 1) {
+ stream_obj = MP_OBJ_TO_PTR(args[1]); // XXX may fail
+ }
+
+ mp_print_t print = {stream_obj, mp_stream_write_adaptor};
+ mp_obj_print_exception(&print, args[0]);
+ #else
+ // Without MICROPY_PY_IO there are no stream objects; always use the
+ // platform's default printer and ignore any file argument.
+ (void)n_args;
+ mp_obj_print_exception(&mp_plat_print, args[0]);
+ #endif
+
+ return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_sys_print_exception_obj, 1, 2, mp_sys_print_exception);
+
+#if MICROPY_PY_SYS_EXC_INFO
+// sys.exc_info(): return (type, value, traceback) for the currently handled
+// exception, or (None, None, None) if there is none. The traceback slot is
+// always None in this implementation.
+STATIC mp_obj_t mp_sys_exc_info(void) {
+ mp_obj_t cur_exc = MP_OBJ_FROM_PTR(MP_STATE_VM(cur_exception));
+ mp_obj_tuple_t *t = MP_OBJ_TO_PTR(mp_obj_new_tuple(3, NULL));
+
+ if (cur_exc == MP_OBJ_NULL) {
+ t->items[0] = mp_const_none;
+ t->items[1] = mp_const_none;
+ t->items[2] = mp_const_none;
+ return MP_OBJ_FROM_PTR(t);
+ }
+
+ t->items[0] = MP_OBJ_FROM_PTR(mp_obj_get_type(cur_exc));
+ t->items[1] = cur_exc;
+ t->items[2] = mp_const_none;
+ return MP_OBJ_FROM_PTR(t);
+}
+MP_DEFINE_CONST_FUN_OBJ_0(mp_sys_exc_info_obj, mp_sys_exc_info);
+#endif
+
+// Module globals table: entries are compiled in/out under the feature macros
+// that guard the corresponding definitions above.
+STATIC const mp_rom_map_elem_t mp_module_sys_globals_table[] = {
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_sys) },
+
+ { MP_ROM_QSTR(MP_QSTR_path), MP_ROM_PTR(&MP_STATE_VM(mp_sys_path_obj)) },
+ { MP_ROM_QSTR(MP_QSTR_argv), MP_ROM_PTR(&MP_STATE_VM(mp_sys_argv_obj)) },
+ { MP_ROM_QSTR(MP_QSTR_version), MP_ROM_PTR(&version_obj) },
+ { MP_ROM_QSTR(MP_QSTR_version_info), MP_ROM_PTR(&mp_sys_version_info_obj) },
+ { MP_ROM_QSTR(MP_QSTR_implementation), MP_ROM_PTR(&mp_sys_implementation_obj) },
+ #ifdef MICROPY_PY_SYS_PLATFORM
+ { MP_ROM_QSTR(MP_QSTR_platform), MP_ROM_PTR(&platform_obj) },
+ #endif
+ /// \constant byteorder - the byte order of the system ("little" or "big")
+ #if MP_ENDIANNESS_LITTLE
+ { MP_ROM_QSTR(MP_QSTR_byteorder), MP_ROM_QSTR(MP_QSTR_little) },
+ #else
+ { MP_ROM_QSTR(MP_QSTR_byteorder), MP_ROM_QSTR(MP_QSTR_big) },
+ #endif
+
+ #if MICROPY_PY_SYS_MAXSIZE
+ #if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_NONE
+ // INT_MAX is not representable as small int, as we know that small int
+ // takes one bit for tag. So, we have little choice but to provide this
+ // value. Apps also should be careful to not try to compare sys.maxsize
+ // with some number (which may not fit in available int size), but instead
+ // count number of significant bits in sys.maxsize.
+ { MP_ROM_QSTR(MP_QSTR_maxsize), MP_OBJ_NEW_SMALL_INT(INT_MAX >> 1) },
+ #else
+ { MP_ROM_QSTR(MP_QSTR_maxsize), MP_ROM_PTR(&mp_maxsize_obj) },
+ #endif
+ #endif
+
+ #if MICROPY_PY_SYS_EXIT
+ // documented per-port
+ { MP_ROM_QSTR(MP_QSTR_exit), MP_ROM_PTR(&mp_sys_exit_obj) },
+ #endif
+
+ #if MICROPY_PY_SYS_STDFILES
+ // documented per-port
+ { MP_ROM_QSTR(MP_QSTR_stdin), MP_ROM_PTR(&mp_sys_stdin_obj) },
+ { MP_ROM_QSTR(MP_QSTR_stdout), MP_ROM_PTR(&mp_sys_stdout_obj) },
+ { MP_ROM_QSTR(MP_QSTR_stderr), MP_ROM_PTR(&mp_sys_stderr_obj) },
+ #endif
+
+ #if MICROPY_PY_SYS_MODULES
+ { MP_ROM_QSTR(MP_QSTR_modules), MP_ROM_PTR(&MP_STATE_VM(mp_loaded_modules_dict)) },
+ #endif
+ #if MICROPY_PY_SYS_EXC_INFO
+ { MP_ROM_QSTR(MP_QSTR_exc_info), MP_ROM_PTR(&mp_sys_exc_info_obj) },
+ #endif
+
+ /*
+ * Extensions to CPython
+ */
+
+ { MP_ROM_QSTR(MP_QSTR_print_exception), MP_ROM_PTR(&mp_sys_print_exception_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_sys_globals, mp_module_sys_globals_table);
+
+// The sys module object, picked up by the runtime's builtin-module table.
+const mp_obj_module_t mp_module_sys = {
+ .base = { &mp_type_module },
+ .name = MP_QSTR_sys,
+ .globals = (mp_obj_dict_t*)&mp_module_sys_globals,
+};
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/mpconfig.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,1001 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_MPCONFIG_H__
+#define __MICROPY_INCLUDED_PY_MPCONFIG_H__
+
+// This file contains default configuration settings for MicroPython.
+// You can override any of the options below using mpconfigport.h file
+// located in a directory of your port.
+
+// mpconfigport.h is a file containing configuration settings for a
+// particular port. mpconfigport.h is actually a default name for
+// such config, and it can be overriden using MP_CONFIGFILE preprocessor
+// define (you can do that by passing CFLAGS_EXTRA='-DMP_CONFIGFILE="<file.h>"'
+// argument to make when using standard MicroPython makefiles).
+// This is useful to have more than one config per port, for example,
+// release vs debug configs, etc. Note that if you switch from one config
+// to another, you must rebuild from scratch using "-B" switch to make.
+
+#ifdef MP_CONFIGFILE
+#include MP_CONFIGFILE
+#else
+#include <mpconfigport.h>
+#endif
+
+// Any options not explicitly set in mpconfigport.h will get default
+// values below.
+
+/*****************************************************************************/
+/* Object representation */
+
+// A Micro Python object is a machine word having the following form:
+// - xxxx...xxx1 : a small int, bits 1 and above are the value
+// - xxxx...xx10 : a qstr, bits 2 and above are the value
+// - xxxx...xx00 : a pointer to an mp_obj_base_t (unless a fake object)
+#define MICROPY_OBJ_REPR_A (0)
+
+// A Micro Python object is a machine word having the following form:
+// - xxxx...xx01 : a small int, bits 2 and above are the value
+// - xxxx...xx11 : a qstr, bits 2 and above are the value
+// - xxxx...xxx0 : a pointer to an mp_obj_base_t (unless a fake object)
+#define MICROPY_OBJ_REPR_B (1)
+
+// A MicroPython object is a machine word having the following form (called R):
+// - iiiiiiii iiiiiiii iiiiiiii iiiiiii1 small int with 31-bit signed value
+// - 01111111 1qqqqqqq qqqqqqqq qqqqq110 str with 20-bit qstr value
+// - s1111111 10000000 00000000 00000010 +/- inf
+// - s1111111 1xxxxxxx xxxxxxxx xxxxx010 nan, x != 0
+// - seeeeeee efffffff ffffffff ffffff10 30-bit fp, e != 0xff
+// - pppppppp pppppppp pppppppp pppppp00 ptr (4 byte alignment)
+// Str and float stored as O = R + 0x80800000, retrieved as R = O - 0x80800000.
+// This makes strs easier to encode/decode as they have zeros in the top 9 bits.
+// This scheme only works with 32-bit word size and float enabled.
+
+#define MICROPY_OBJ_REPR_C (2)
+
+// A MicroPython object is a 64-bit word having the following form (called R):
+// - seeeeeee eeeeffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff 64-bit fp, e != 0x7ff
+// - s1111111 11110000 00000000 00000000 00000000 00000000 00000000 00000000 +/- inf
+// - 01111111 11111000 00000000 00000000 00000000 00000000 00000000 00000000 normalised nan
+// - 01111111 11111101 00000000 00000000 iiiiiiii iiiiiiii iiiiiiii iiiiiii1 small int
+// - 01111111 11111110 00000000 00000000 qqqqqqqq qqqqqqqq qqqqqqqq qqqqqqq1 str
+// - 01111111 11111100 00000000 00000000 pppppppp pppppppp pppppppp pppppp00 ptr (4 byte alignment)
+// Stored as O = R + 0x8004000000000000, retrieved as R = O - 0x8004000000000000.
+// This makes pointers have all zeros in the top 32 bits.
+// Small-ints and strs have 1 as LSB to make sure they don't look like pointers
+// to the garbage collector.
+#define MICROPY_OBJ_REPR_D (3)
+
+#ifndef MICROPY_OBJ_REPR
+#define MICROPY_OBJ_REPR (MICROPY_OBJ_REPR_A)
+#endif
+
+/*****************************************************************************/
+/* Memory allocation policy */
+
+// Number of bytes in memory allocation/GC block. Any size allocated will be
+// rounded up to be multiples of this.
+#ifndef MICROPY_BYTES_PER_GC_BLOCK
+#define MICROPY_BYTES_PER_GC_BLOCK (4 * BYTES_PER_WORD)
+#endif
+
+// Number of words allocated (in BSS) to the GC stack (minimum is 1)
+#ifndef MICROPY_ALLOC_GC_STACK_SIZE
+#define MICROPY_ALLOC_GC_STACK_SIZE (64)
+#endif
+
+// Number of bytes to allocate initially when creating new chunks to store
+// interned string data. Smaller numbers lead to more chunks being needed
+// and more wastage at the end of the chunk. Larger numbers lead to wasted
+// space at the end when no more strings need interning.
+#ifndef MICROPY_ALLOC_QSTR_CHUNK_INIT
+#define MICROPY_ALLOC_QSTR_CHUNK_INIT (128)
+#endif
+
+// Initial amount for lexer indentation level
+#ifndef MICROPY_ALLOC_LEXER_INDENT_INIT
+#define MICROPY_ALLOC_LEXER_INDENT_INIT (10)
+#endif
+
+// Increment for lexer indentation level
+#ifndef MICROPY_ALLOC_LEXEL_INDENT_INC
+#define MICROPY_ALLOC_LEXEL_INDENT_INC (8)
+#endif
+
+// Initial amount for parse rule stack
+#ifndef MICROPY_ALLOC_PARSE_RULE_INIT
+#define MICROPY_ALLOC_PARSE_RULE_INIT (64)
+#endif
+
+// Increment for parse rule stack
+#ifndef MICROPY_ALLOC_PARSE_RULE_INC
+#define MICROPY_ALLOC_PARSE_RULE_INC (16)
+#endif
+
+// Initial amount for parse result stack
+#ifndef MICROPY_ALLOC_PARSE_RESULT_INIT
+#define MICROPY_ALLOC_PARSE_RESULT_INIT (32)
+#endif
+
+// Increment for parse result stack
+#ifndef MICROPY_ALLOC_PARSE_RESULT_INC
+#define MICROPY_ALLOC_PARSE_RESULT_INC (16)
+#endif
+
+// Strings this length or less will be interned by the parser
+#ifndef MICROPY_ALLOC_PARSE_INTERN_STRING_LEN
+#define MICROPY_ALLOC_PARSE_INTERN_STRING_LEN (10)
+#endif
+
+// Number of bytes to allocate initially when creating new chunks to store
+// parse nodes. Small leads to fragmentation, large leads to excess use.
+#ifndef MICROPY_ALLOC_PARSE_CHUNK_INIT
+#define MICROPY_ALLOC_PARSE_CHUNK_INIT (128)
+#endif
+
+// Initial amount for ids in a scope
+#ifndef MICROPY_ALLOC_SCOPE_ID_INIT
+#define MICROPY_ALLOC_SCOPE_ID_INIT (4)
+#endif
+
+// Increment for ids in a scope
+#ifndef MICROPY_ALLOC_SCOPE_ID_INC
+#define MICROPY_ALLOC_SCOPE_ID_INC (6)
+#endif
+
+// Maximum length of a path in the filesystem
+// So we can allocate a buffer on the stack for path manipulation in import
+#ifndef MICROPY_ALLOC_PATH_MAX
+#define MICROPY_ALLOC_PATH_MAX (512)
+#endif
+
+// Initial size of module dict
+#ifndef MICROPY_MODULE_DICT_SIZE
+#define MICROPY_MODULE_DICT_SIZE (1)
+#endif
+
+// Whether realloc/free should be passed allocated memory region size
+// You must enable this if MICROPY_MEM_STATS is enabled
+#ifndef MICROPY_MALLOC_USES_ALLOCATED_SIZE
+#define MICROPY_MALLOC_USES_ALLOCATED_SIZE (0)
+#endif
+
+// Number of bytes used to store qstr length
+// Dictates hard limit on maximum Python identifier length, but 1 byte
+// (limit of 255 bytes in an identifier) should be enough for everyone
+#ifndef MICROPY_QSTR_BYTES_IN_LEN
+#define MICROPY_QSTR_BYTES_IN_LEN (1)
+#endif
+
+// Number of bytes used to store qstr hash
+#ifndef MICROPY_QSTR_BYTES_IN_HASH
+#define MICROPY_QSTR_BYTES_IN_HASH (2)
+#endif
+
+// Avoid using C stack when making Python function calls. C stack still
+// may be used if there's no free heap.
+#ifndef MICROPY_STACKLESS
+#define MICROPY_STACKLESS (0)
+#endif
+
+// Never use C stack when making Python function calls. This may break
+// testsuite as will subtly change which exception is thrown in case
+// of too deep recursion and other similar cases.
+#ifndef MICROPY_STACKLESS_STRICT
+#define MICROPY_STACKLESS_STRICT (0)
+#endif
+
+// Don't use alloca calls. As alloca() is not part of ANSI C, this
+// workaround option is provided for compilers lacking this de-facto
+// standard function. The way it works is allocating from heap, and
+// relying on garbage collection to free it eventually. This is of
+// course much less optimal than real alloca().
+#if defined(MICROPY_NO_ALLOCA) && MICROPY_NO_ALLOCA
+#undef alloca
+#define alloca(x) m_malloc(x)
+#endif
+
+/*****************************************************************************/
+/* Micro Python emitters */
+
+// Whether to support loading of persistent code
+#ifndef MICROPY_PERSISTENT_CODE_LOAD
+#define MICROPY_PERSISTENT_CODE_LOAD (0)
+#endif
+
+// Whether to support saving of persistent code
+#ifndef MICROPY_PERSISTENT_CODE_SAVE
+#define MICROPY_PERSISTENT_CODE_SAVE (0)
+#endif
+
+// Whether generated code can persist independently of the VM/runtime instance
+// This is enabled automatically when needed by other features
+#ifndef MICROPY_PERSISTENT_CODE
+#define MICROPY_PERSISTENT_CODE (MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE)
+#endif
+
+// Whether to emit x64 native code
+#ifndef MICROPY_EMIT_X64
+#define MICROPY_EMIT_X64 (0)
+#endif
+
+// Whether to emit x86 native code
+#ifndef MICROPY_EMIT_X86
+#define MICROPY_EMIT_X86 (0)
+#endif
+
+// Whether to emit thumb native code
+#ifndef MICROPY_EMIT_THUMB
+#define MICROPY_EMIT_THUMB (0)
+#endif
+
+// Whether to enable the thumb inline assembler
+#ifndef MICROPY_EMIT_INLINE_THUMB
+#define MICROPY_EMIT_INLINE_THUMB (0)
+#endif
+
+// Whether to enable ARMv7-M instruction support in the Thumb2 inline assembler
+#ifndef MICROPY_EMIT_INLINE_THUMB_ARMV7M
+#define MICROPY_EMIT_INLINE_THUMB_ARMV7M (1)
+#endif
+
+// Whether to enable float support in the Thumb2 inline assembler
+#ifndef MICROPY_EMIT_INLINE_THUMB_FLOAT
+#define MICROPY_EMIT_INLINE_THUMB_FLOAT (1)
+#endif
+
+// Whether to emit ARM native code
+#ifndef MICROPY_EMIT_ARM
+#define MICROPY_EMIT_ARM (0)
+#endif
+
+// Convenience definition for whether any native emitter is enabled
+#define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_X86 || MICROPY_EMIT_THUMB || MICROPY_EMIT_ARM)
+
+/*****************************************************************************/
+/* Compiler configuration */
+
+// Whether to include the compiler
+#ifndef MICROPY_ENABLE_COMPILER
+#define MICROPY_ENABLE_COMPILER (1)
+#endif
+
+// Whether the compiler is dynamically configurable (ie at runtime)
+#ifndef MICROPY_DYNAMIC_COMPILER
+#define MICROPY_DYNAMIC_COMPILER (0)
+#endif
+
+// Configure dynamic compiler macros
+#if MICROPY_DYNAMIC_COMPILER
+#define MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC (mp_dynamic_compiler.opt_cache_map_lookup_in_bytecode)
+#define MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC (mp_dynamic_compiler.py_builtins_str_unicode)
+#else
+#define MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
+#define MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC MICROPY_PY_BUILTINS_STR_UNICODE
+#endif
+
+// Whether to enable constant folding; eg 1+2 rewritten as 3
+#ifndef MICROPY_COMP_CONST_FOLDING
+#define MICROPY_COMP_CONST_FOLDING (1)
+#endif
+
+// Whether to enable lookup of constants in modules; eg module.CONST
+#ifndef MICROPY_COMP_MODULE_CONST
+#define MICROPY_COMP_MODULE_CONST (0)
+#endif
+
+// Whether to enable constant optimisation; id = const(value)
+#ifndef MICROPY_COMP_CONST
+#define MICROPY_COMP_CONST (1)
+#endif
+
+// Whether to enable optimisation of: a, b = c, d
+// Costs 124 bytes (Thumb2)
+#ifndef MICROPY_COMP_DOUBLE_TUPLE_ASSIGN
+#define MICROPY_COMP_DOUBLE_TUPLE_ASSIGN (1)
+#endif
+
+// Whether to enable optimisation of: a, b, c = d, e, f
+// Cost 156 bytes (Thumb2)
+#ifndef MICROPY_COMP_TRIPLE_TUPLE_ASSIGN
+#define MICROPY_COMP_TRIPLE_TUPLE_ASSIGN (0)
+#endif
+
+/*****************************************************************************/
+/* Internal debugging stuff */
+
+// Whether to collect memory allocation stats
+#ifndef MICROPY_MEM_STATS
+#define MICROPY_MEM_STATS (0)
+#endif
+
+// Whether to build functions that print debugging info:
+// mp_lexer_show_token
+// mp_bytecode_print
+// mp_parse_node_print
+#ifndef MICROPY_DEBUG_PRINTERS
+#define MICROPY_DEBUG_PRINTERS (0)
+#endif
+
+/*****************************************************************************/
+/* Optimisations */
+
+// Whether to use computed gotos in the VM, or a switch
+// Computed gotos are roughly 10% faster, and increase VM code size by a little
+#ifndef MICROPY_OPT_COMPUTED_GOTO
+#define MICROPY_OPT_COMPUTED_GOTO (0)
+#endif
+
+// Whether to cache result of map lookups in LOAD_NAME, LOAD_GLOBAL, LOAD_ATTR,
+// STORE_ATTR bytecodes. Uses 1 byte extra RAM for each of these opcodes and
+// uses a bit of extra code ROM, but greatly improves lookup speed.
+#ifndef MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
+#define MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE (0)
+#endif
+
+// Whether to use fast versions of bitwise operations (and, or, xor) when the
+// arguments are both positive. Increases Thumb2 code size by about 250 bytes.
+#ifndef MICROPY_OPT_MPZ_BITWISE
+#define MICROPY_OPT_MPZ_BITWISE (0)
+#endif
+
+/*****************************************************************************/
+/* Python internal features */
+
+// Hook for the VM at the start of the opcode loop (can contain variable
+// definitions usable by the other hook functions)
+#ifndef MICROPY_VM_HOOK_INIT
+#define MICROPY_VM_HOOK_INIT
+#endif
+
+// Hook for the VM during the opcode loop (but only after jump opcodes)
+#ifndef MICROPY_VM_HOOK_LOOP
+#define MICROPY_VM_HOOK_LOOP
+#endif
+
+// Hook for the VM just before return opcode is finished being interpreted
+#ifndef MICROPY_VM_HOOK_RETURN
+#define MICROPY_VM_HOOK_RETURN
+#endif
+
+// Whether to include the garbage collector
+#ifndef MICROPY_ENABLE_GC
+#define MICROPY_ENABLE_GC (0)
+#endif
+
+// Whether to enable finalisers in the garbage collector (ie call __del__)
+#ifndef MICROPY_ENABLE_FINALISER
+#define MICROPY_ENABLE_FINALISER (0)
+#endif
+
+// Whether to check C stack usage. C stack used for calling Python functions,
+// etc. Not checking means segfault on overflow.
+#ifndef MICROPY_STACK_CHECK
+#define MICROPY_STACK_CHECK (0)
+#endif
+
+// Whether to have an emergency exception buffer
+#ifndef MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+#define MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF (0)
+#endif
+#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+# ifndef MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE
+# define MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE (0) // 0 - implies dynamic allocation
+# endif
+#endif
+
+// Prefer to raise KeyboardInterrupt asynchronously (from signal or interrupt
+// handler) - if supported by a particular port.
+#ifndef MICROPY_ASYNC_KBD_INTR
+#define MICROPY_ASYNC_KBD_INTR (0)
+#endif
+
+// Whether to include REPL helper function
+#ifndef MICROPY_HELPER_REPL
+#define MICROPY_HELPER_REPL (0)
+#endif
+
+// Whether to include emacs-style readline behavior in REPL
+#ifndef MICROPY_REPL_EMACS_KEYS
+#define MICROPY_REPL_EMACS_KEYS (0)
+#endif
+
+// Whether to implement auto-indent in REPL
+#ifndef MICROPY_REPL_AUTO_INDENT
+#define MICROPY_REPL_AUTO_INDENT (0)
+#endif
+
+// Whether port requires event-driven REPL functions
+#ifndef MICROPY_REPL_EVENT_DRIVEN
+#define MICROPY_REPL_EVENT_DRIVEN (0)
+#endif
+
+// Whether to include lexer helper function for unix
+#ifndef MICROPY_HELPER_LEXER_UNIX
+#define MICROPY_HELPER_LEXER_UNIX (0)
+#endif
+
+// Long int implementation
+#define MICROPY_LONGINT_IMPL_NONE (0)
+#define MICROPY_LONGINT_IMPL_LONGLONG (1)
+#define MICROPY_LONGINT_IMPL_MPZ (2)
+
+#ifndef MICROPY_LONGINT_IMPL
+#define MICROPY_LONGINT_IMPL (MICROPY_LONGINT_IMPL_NONE)
+#endif
+
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG
+typedef long long mp_longint_impl_t;
+#endif
+
+// Whether to include information in the byte code to determine source
+// line number (increases RAM usage, but doesn't slow byte code execution)
+#ifndef MICROPY_ENABLE_SOURCE_LINE
+#define MICROPY_ENABLE_SOURCE_LINE (0)
+#endif
+
+// Whether to include doc strings (increases RAM usage)
+#ifndef MICROPY_ENABLE_DOC_STRING
+#define MICROPY_ENABLE_DOC_STRING (0)
+#endif
+
+// Exception messages are short static strings
+#define MICROPY_ERROR_REPORTING_TERSE (1)
+// Exception messages provide basic error details
+#define MICROPY_ERROR_REPORTING_NORMAL (2)
+// Exception messages provide full info, e.g. object names
+#define MICROPY_ERROR_REPORTING_DETAILED (3)
+
+#ifndef MICROPY_ERROR_REPORTING
+#define MICROPY_ERROR_REPORTING (MICROPY_ERROR_REPORTING_NORMAL)
+#endif
+
+// Whether issue warnings during compiling/execution
+#ifndef MICROPY_WARNINGS
+#define MICROPY_WARNINGS (0)
+#endif
+
+// Float and complex implementation
+#define MICROPY_FLOAT_IMPL_NONE (0)
+#define MICROPY_FLOAT_IMPL_FLOAT (1)
+#define MICROPY_FLOAT_IMPL_DOUBLE (2)
+
+#ifndef MICROPY_FLOAT_IMPL
+#define MICROPY_FLOAT_IMPL (MICROPY_FLOAT_IMPL_NONE)
+#endif
+
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+#define MICROPY_PY_BUILTINS_FLOAT (1)
+#define MICROPY_FLOAT_C_FUN(fun) fun##f
+typedef float mp_float_t;
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+#define MICROPY_PY_BUILTINS_FLOAT (1)
+#define MICROPY_FLOAT_C_FUN(fun) fun
+typedef double mp_float_t;
+#else
+#define MICROPY_PY_BUILTINS_FLOAT (0)
+#endif
+
+#ifndef MICROPY_PY_BUILTINS_COMPLEX
+#define MICROPY_PY_BUILTINS_COMPLEX (MICROPY_PY_BUILTINS_FLOAT)
+#endif
+
+// Enable features which improve CPython compatibility
+// but may lead to more code size/memory usage.
+// TODO: Originally intended as generic category to not
+// add bunch of once-off options. May need refactoring later
+#ifndef MICROPY_CPYTHON_COMPAT
+#define MICROPY_CPYTHON_COMPAT (1)
+#endif
+
+// Whether POSIX-semantics non-blocking streams are supported
+#ifndef MICROPY_STREAMS_NON_BLOCK
+#define MICROPY_STREAMS_NON_BLOCK (0)
+#endif
+
+// Whether to call __init__ when importing builtin modules for the first time
+#ifndef MICROPY_MODULE_BUILTIN_INIT
+#define MICROPY_MODULE_BUILTIN_INIT (0)
+#endif
+
+// Whether module weak links are supported
+#ifndef MICROPY_MODULE_WEAK_LINKS
+#define MICROPY_MODULE_WEAK_LINKS (0)
+#endif
+
+// Whether frozen modules are supported
+#ifndef MICROPY_MODULE_FROZEN
+#define MICROPY_MODULE_FROZEN (0)
+#endif
+
+// Whether you can override builtins in the builtins module
+#ifndef MICROPY_CAN_OVERRIDE_BUILTINS
+#define MICROPY_CAN_OVERRIDE_BUILTINS (0)
+#endif
+
+// Whether to check that the "self" argument of a builtin method has the
+// correct type. Such an explicit check is only needed if a builtin
+// method escapes to Python land without a first argument, eg
+// list.append([], 1). Without this check such calls will have undefined
+// behaviour (usually segfault) if the first argument is the wrong type.
+#ifndef MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG
+#define MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG (1)
+#endif
+
+// Support for user-space VFS mount (selected ports)
+#ifndef MICROPY_FSUSERMOUNT
+#define MICROPY_FSUSERMOUNT (0)
+#endif
+
+/*****************************************************************************/
+/* Fine control over Python builtins, classes, modules, etc */
+
+// Whether to implement attributes on functions
+#ifndef MICROPY_PY_FUNCTION_ATTRS
+#define MICROPY_PY_FUNCTION_ATTRS (0)
+#endif
+
+// Whether to support descriptors (__get__ and __set__)
+// This costs some code size and makes all load attrs and store attrs slow
+#ifndef MICROPY_PY_DESCRIPTORS
+#define MICROPY_PY_DESCRIPTORS (0)
+#endif
+
+// Whether str object is proper unicode
+#ifndef MICROPY_PY_BUILTINS_STR_UNICODE
+#define MICROPY_PY_BUILTINS_STR_UNICODE (0)
+#endif
+
+// Whether str.splitlines() method provided
+#ifndef MICROPY_PY_BUILTINS_STR_SPLITLINES
+#define MICROPY_PY_BUILTINS_STR_SPLITLINES (0)
+#endif
+
+// Whether to support bytearray object
+#ifndef MICROPY_PY_BUILTINS_BYTEARRAY
+#define MICROPY_PY_BUILTINS_BYTEARRAY (1)
+#endif
+
+// Whether to support memoryview object
+#ifndef MICROPY_PY_BUILTINS_MEMORYVIEW
+#define MICROPY_PY_BUILTINS_MEMORYVIEW (0)
+#endif
+
+// Whether to support set object
+#ifndef MICROPY_PY_BUILTINS_SET
+#define MICROPY_PY_BUILTINS_SET (1)
+#endif
+
+// Whether to support slice subscript operators and slice object
+#ifndef MICROPY_PY_BUILTINS_SLICE
+#define MICROPY_PY_BUILTINS_SLICE (1)
+#endif
+
+// Whether to support slice attribute read access,
+// i.e. slice.start, slice.stop, slice.step
+#ifndef MICROPY_PY_BUILTINS_SLICE_ATTRS
+#define MICROPY_PY_BUILTINS_SLICE_ATTRS (0)
+#endif
+
+// Whether to support frozenset object
+#ifndef MICROPY_PY_BUILTINS_FROZENSET
+#define MICROPY_PY_BUILTINS_FROZENSET (0)
+#endif
+
+// Whether to support property object
+#ifndef MICROPY_PY_BUILTINS_PROPERTY
+#define MICROPY_PY_BUILTINS_PROPERTY (1)
+#endif
+
+// Whether to implement the start/stop/step attributes (readback) on
+// the "range" builtin type. Rarely used, and costs ~60 bytes (x86).
+#ifndef MICROPY_PY_BUILTINS_RANGE_ATTRS
+#define MICROPY_PY_BUILTINS_RANGE_ATTRS (1)
+#endif
+
+// Whether to support timeout exceptions (like socket.timeout)
+#ifndef MICROPY_PY_BUILTINS_TIMEOUTERROR
+#define MICROPY_PY_BUILTINS_TIMEOUTERROR (0)
+#endif
+
+// Whether to support complete set of special methods
+// for user classes, otherwise only the most used
+#ifndef MICROPY_PY_ALL_SPECIAL_METHODS
+#define MICROPY_PY_ALL_SPECIAL_METHODS (0)
+#endif
+
+// Whether to support compile function
+#ifndef MICROPY_PY_BUILTINS_COMPILE
+#define MICROPY_PY_BUILTINS_COMPILE (0)
+#endif
+
+// Whether to support enumerate function(type)
+#ifndef MICROPY_PY_BUILTINS_ENUMERATE
+#define MICROPY_PY_BUILTINS_ENUMERATE (1)
+#endif
+
+// Whether to support eval and exec functions
+// By default they are supported if the compiler is enabled
+#ifndef MICROPY_PY_BUILTINS_EVAL_EXEC
+#define MICROPY_PY_BUILTINS_EVAL_EXEC (MICROPY_ENABLE_COMPILER)
+#endif
+
+// Whether to support the Python 2 execfile function
+#ifndef MICROPY_PY_BUILTINS_EXECFILE
+#define MICROPY_PY_BUILTINS_EXECFILE (0)
+#endif
+
+// Whether to support filter function(type)
+#ifndef MICROPY_PY_BUILTINS_FILTER
+#define MICROPY_PY_BUILTINS_FILTER (1)
+#endif
+
+// Whether to support reversed function(type)
+#ifndef MICROPY_PY_BUILTINS_REVERSED
+#define MICROPY_PY_BUILTINS_REVERSED (1)
+#endif
+
+// Whether to define "NotImplemented" special constant
+#ifndef MICROPY_PY_BUILTINS_NOTIMPLEMENTED
+#define MICROPY_PY_BUILTINS_NOTIMPLEMENTED (0)
+#endif
+
+// Whether to support min/max functions
+#ifndef MICROPY_PY_BUILTINS_MIN_MAX
+#define MICROPY_PY_BUILTINS_MIN_MAX (1)
+#endif
+
+// Whether to set __file__ for imported modules
+#ifndef MICROPY_PY___FILE__
+#define MICROPY_PY___FILE__ (1)
+#endif
+
+// Whether to provide mem-info related functions in micropython module
+#ifndef MICROPY_PY_MICROPYTHON_MEM_INFO
+#define MICROPY_PY_MICROPYTHON_MEM_INFO (0)
+#endif
+
+// Whether to provide "array" module. Note that large chunk of the
+// underlying code is shared with "bytearray" builtin type, so to
+// get real savings, it should be disabled too.
+#ifndef MICROPY_PY_ARRAY
+#define MICROPY_PY_ARRAY (1)
+#endif
+
+// Whether to support slice assignments for array (and bytearray).
+// This is rarely used, but adds ~0.5K of code.
+#ifndef MICROPY_PY_ARRAY_SLICE_ASSIGN
+#define MICROPY_PY_ARRAY_SLICE_ASSIGN (0)
+#endif
+
+// Whether to support attrtuple type (MicroPython extension)
+// It provides space-efficient tuples with attribute access
+#ifndef MICROPY_PY_ATTRTUPLE
+#define MICROPY_PY_ATTRTUPLE (1)
+#endif
+
+// Whether to provide "collections" module
+#ifndef MICROPY_PY_COLLECTIONS
+#define MICROPY_PY_COLLECTIONS (1)
+#endif
+
+// Whether to provide "collections.OrderedDict" type
+#ifndef MICROPY_PY_COLLECTIONS_ORDEREDDICT
+#define MICROPY_PY_COLLECTIONS_ORDEREDDICT (0)
+#endif
+
+// Whether to provide "math" module
+#ifndef MICROPY_PY_MATH
+#define MICROPY_PY_MATH (1)
+#endif
+
+// Whether to provide special math functions: math.{erf,erfc,gamma,lgamma}
+#ifndef MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+#define MICROPY_PY_MATH_SPECIAL_FUNCTIONS (0)
+#endif
+
+// Whether to provide "cmath" module
+#ifndef MICROPY_PY_CMATH
+#define MICROPY_PY_CMATH (0)
+#endif
+
+// Whether to provide "gc" module
+#ifndef MICROPY_PY_GC
+#define MICROPY_PY_GC (1)
+#endif
+
+// Whether to return number of collected objects from gc.collect()
+#ifndef MICROPY_PY_GC_COLLECT_RETVAL
+#define MICROPY_PY_GC_COLLECT_RETVAL (0)
+#endif
+
+// Whether to provide "io" module
+#ifndef MICROPY_PY_IO
+#define MICROPY_PY_IO (1)
+#endif
+
+// Whether to provide "io.FileIO" class
+#ifndef MICROPY_PY_IO_FILEIO
+#define MICROPY_PY_IO_FILEIO (0)
+#endif
+
+// Whether to provide "io.BytesIO" class
+#ifndef MICROPY_PY_IO_BYTESIO
+#define MICROPY_PY_IO_BYTESIO (1)
+#endif
+
+// Whether to provide "io.BufferedWriter" class
+#ifndef MICROPY_PY_IO_BUFFEREDWRITER
+#define MICROPY_PY_IO_BUFFEREDWRITER (0)
+#endif
+
+// Whether to provide "struct" module
+#ifndef MICROPY_PY_STRUCT
+#define MICROPY_PY_STRUCT (1)
+#endif
+
+// Whether to provide "sys" module
+#ifndef MICROPY_PY_SYS
+#define MICROPY_PY_SYS (1)
+#endif
+
+// Whether to provide "sys.maxsize" constant
+#ifndef MICROPY_PY_SYS_MAXSIZE
+#define MICROPY_PY_SYS_MAXSIZE (0)
+#endif
+
+// Whether to provide "sys.modules" dictionary
+#ifndef MICROPY_PY_SYS_MODULES
+#define MICROPY_PY_SYS_MODULES (1)
+#endif
+
+// Whether to provide "sys.exc_info" function
+// Avoid enabling this, this function is Python2 heritage
+#ifndef MICROPY_PY_SYS_EXC_INFO
+#define MICROPY_PY_SYS_EXC_INFO (0)
+#endif
+
+// Whether to provide "sys.exit" function
+#ifndef MICROPY_PY_SYS_EXIT
+#define MICROPY_PY_SYS_EXIT (0)
+#endif
+
+// Whether to provide sys.{stdin,stdout,stderr} objects
+#ifndef MICROPY_PY_SYS_STDFILES
+#define MICROPY_PY_SYS_STDFILES (0)
+#endif
+
+// Whether to provide sys.{stdin,stdout,stderr}.buffer object
+// This is implemented per-port
+#ifndef MICROPY_PY_SYS_STDIO_BUFFER
+#define MICROPY_PY_SYS_STDIO_BUFFER (0)
+#endif
+
+// Extended modules
+
+#ifndef MICROPY_PY_UCTYPES
+#define MICROPY_PY_UCTYPES (0)
+#endif
+
+#ifndef MICROPY_PY_UZLIB
+#define MICROPY_PY_UZLIB (0)
+#endif
+
+#ifndef MICROPY_PY_UJSON
+#define MICROPY_PY_UJSON (0)
+#endif
+
+#ifndef MICROPY_PY_URE
+#define MICROPY_PY_URE (0)
+#endif
+
+#ifndef MICROPY_PY_UHEAPQ
+#define MICROPY_PY_UHEAPQ (0)
+#endif
+
+#ifndef MICROPY_PY_UHASHLIB
+#define MICROPY_PY_UHASHLIB (0)
+#endif
+
+#ifndef MICROPY_PY_UBINASCII
+#define MICROPY_PY_UBINASCII (0)
+#endif
+
+#ifndef MICROPY_PY_URANDOM
+#define MICROPY_PY_URANDOM (0)
+#endif
+
+// Whether to include: randrange, randint, choice, random, uniform
+#ifndef MICROPY_PY_URANDOM_EXTRA_FUNCS
+#define MICROPY_PY_URANDOM_EXTRA_FUNCS (0)
+#endif
+
+#ifndef MICROPY_PY_MACHINE
+#define MICROPY_PY_MACHINE (0)
+#endif
+
+#ifndef MICROPY_PY_USSL
+#define MICROPY_PY_USSL (0)
+#endif
+
+#ifndef MICROPY_PY_WEBSOCKET
+#define MICROPY_PY_WEBSOCKET (0)
+#endif
+
+/*****************************************************************************/
+/* Hooks for a port to add builtins */
+
+// Additional builtin function definitions - see builtintables.c:builtin_object_table for format.
+#ifndef MICROPY_PORT_BUILTINS
+#define MICROPY_PORT_BUILTINS
+#endif
+
+// Additional builtin module definitions - see builtintables.c:builtin_module_table for format.
+#ifndef MICROPY_PORT_BUILTIN_MODULES
+#define MICROPY_PORT_BUILTIN_MODULES
+#endif
+
+// Any module weak links - see builtintables.c:mp_builtin_module_weak_links_table.
+#ifndef MICROPY_PORT_BUILTIN_MODULE_WEAK_LINKS
+#define MICROPY_PORT_BUILTIN_MODULE_WEAK_LINKS
+#endif
+
+// Additional constant definitions for the compiler - see compile.c:mp_constants_table.
+#ifndef MICROPY_PORT_CONSTANTS
+#define MICROPY_PORT_CONSTANTS
+#endif
+
+// Any root pointers for GC scanning - see mpstate.c
+#ifndef MICROPY_PORT_ROOT_POINTERS
+#define MICROPY_PORT_ROOT_POINTERS
+#endif
+
+/*****************************************************************************/
+/* Miscellaneous settings */
+
+// All uPy objects in ROM must be aligned on at least a 4 byte boundary
+// so that the small-int/qstr/pointer distinction can be made. For machines
+// that don't do this (eg 16-bit CPU), define the following macro to something
+// like __attribute__((aligned(4))).
+#ifndef MICROPY_OBJ_BASE_ALIGNMENT
+#define MICROPY_OBJ_BASE_ALIGNMENT
+#endif
+
+// On embedded platforms, these will typically enable/disable irqs.
+#ifndef MICROPY_BEGIN_ATOMIC_SECTION
+#define MICROPY_BEGIN_ATOMIC_SECTION() (0)
+#endif
+#ifndef MICROPY_END_ATOMIC_SECTION
+#define MICROPY_END_ATOMIC_SECTION(state) (void)(state)
+#endif
+
+// Allow to override static modifier for global objects, e.g. to use with
+// object code analysis tools which don't support static symbols.
+#ifndef STATIC
+#define STATIC static
+#endif
+
+#define BITS_PER_BYTE (8)
+#define BITS_PER_WORD (BITS_PER_BYTE * BYTES_PER_WORD)
+// mp_int_t value with most significant bit set
+#define WORD_MSBIT_HIGH (((mp_uint_t)1) << (BYTES_PER_WORD * 8 - 1))
+
+// Make sure both MP_ENDIANNESS_LITTLE and MP_ENDIANNESS_BIG are
+// defined and that they are the opposite of each other.
+#if defined(MP_ENDIANNESS_LITTLE)
+#define MP_ENDIANNESS_BIG (!MP_ENDIANNESS_LITTLE)
+#elif defined(MP_ENDIANNESS_BIG)
+#define MP_ENDIANNESS_LITTLE (!MP_ENDIANNESS_BIG)
+#else
+ // Endianness not defined by port so try to autodetect it.
+ #if defined(__BYTE_ORDER__)
+ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ #define MP_ENDIANNESS_LITTLE (1)
+ #else
+ #define MP_ENDIANNESS_LITTLE (0)
+ #endif
+ #elif defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined (_LITTLE_ENDIAN)
+ #define MP_ENDIANNESS_LITTLE (1)
+ #elif defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined (_BIG_ENDIAN)
+ #define MP_ENDIANNESS_LITTLE (0)
+ #else
+ #include <endian.h>
+ #if defined(__BYTE_ORDER)
+ #if __BYTE_ORDER == __LITTLE_ENDIAN
+ #define MP_ENDIANNESS_LITTLE (1)
+ #else
+ #define MP_ENDIANNESS_LITTLE (0)
+ #endif
+ #else
+ #error endianness not defined and cannot detect it
+ #endif
+ #endif
+ #define MP_ENDIANNESS_BIG (!MP_ENDIANNESS_LITTLE)
+#endif
+
+// Make a pointer to RAM callable (eg set lower bit for Thumb code)
+// (This scheme won't work if we want to mix Thumb and normal ARM code.)
+#ifndef MICROPY_MAKE_POINTER_CALLABLE
+#define MICROPY_MAKE_POINTER_CALLABLE(p) (p)
+#endif
+
+// If these MP_PLAT_*_EXEC macros are overridden then the memory allocated by them
+// must be somehow reachable for marking by the GC, since the native code
+// generators store pointers to GC managed memory in the code.
+#ifndef MP_PLAT_ALLOC_EXEC
+#define MP_PLAT_ALLOC_EXEC(min_size, ptr, size) do { *ptr = m_new(byte, min_size); *size = min_size; } while (0)
+#endif
+
+#ifndef MP_PLAT_FREE_EXEC
+#define MP_PLAT_FREE_EXEC(ptr, size) m_del(byte, ptr, size)
+#endif
+
+// This macro is used to do all output (except when MICROPY_PY_IO is defined)
+#ifndef MP_PLAT_PRINT_STRN
+#define MP_PLAT_PRINT_STRN(str, len) mp_hal_stdout_tx_strn_cooked(str, len)
+#endif
+
+#ifndef MP_SSIZE_MAX
+#define MP_SSIZE_MAX SSIZE_MAX
+#endif
+
+// printf format spec to use for mp_int_t and friends
+#ifndef INT_FMT
+#if defined(__LP64__)
+// Archs where mp_int_t == long, long != int
+#define UINT_FMT "%lu"
+#define INT_FMT "%ld"
+#elif defined(_WIN64)
+#define UINT_FMT "%llu"
+#define INT_FMT "%lld"
+#else
+// Archs where mp_int_t == int
+#define UINT_FMT "%u"
+#define INT_FMT "%d"
+#endif
+#endif //INT_FMT
+
+// Modifier for function which doesn't return
+#ifndef NORETURN
+#define NORETURN __attribute__((noreturn))
+#endif
+
+// Modifier for weak functions
+#ifndef MP_WEAK
+#define MP_WEAK __attribute__((weak))
+#endif
+
+// Condition is likely to be true, to help branch prediction
+#ifndef MP_LIKELY
+#define MP_LIKELY(x) __builtin_expect((x), 1)
+#endif
+
+// Condition is likely to be false, to help branch prediction
+#ifndef MP_UNLIKELY
+#define MP_UNLIKELY(x) __builtin_expect((x), 0)
+#endif
+
+#endif // __MICROPY_INCLUDED_PY_MPCONFIG_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/mphal.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,61 @@ +/* + * This file is part of the Micro Python project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2015 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef __MICROPY_INCLUDED_PY_MPHAL_H__ +#define __MICROPY_INCLUDED_PY_MPHAL_H__ + +#include "py/mpconfig.h" + +#ifdef MICROPY_MPHALPORT_H +#include MICROPY_MPHALPORT_H +#else +#include <mphalport.h> +#endif + +#ifndef mp_hal_stdin_rx_chr +int mp_hal_stdin_rx_chr(void); +#endif + +#ifndef mp_hal_stdout_tx_str +void mp_hal_stdout_tx_str(const char *str); +#endif + +#ifndef mp_hal_stdout_tx_strn +void mp_hal_stdout_tx_strn(const char *str, size_t len); +#endif + +#ifndef mp_hal_stdout_tx_strn_cooked +void mp_hal_stdout_tx_strn_cooked(const char *str, size_t len); +#endif + +#ifndef mp_hal_delay_ms +void mp_hal_delay_ms(mp_uint_t ms); +#endif + +#ifndef mp_hal_ticks_ms +mp_uint_t mp_hal_ticks_ms(void); +#endif + +#endif // __MICROPY_INCLUDED_PY_MPHAL_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/mpprint.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,551 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2015 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "py/mphal.h"
+#include "py/mpprint.h"
+#include "py/obj.h"
+#include "py/objint.h"
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#include "py/formatfloat.h"
+#endif
+
+static const char pad_spaces[] = " ";
+static const char pad_zeroes[] = "0000000000000000";
+
+STATIC void plat_print_strn(void *env, const char *str, size_t len) {
+ (void)env;
+ MP_PLAT_PRINT_STRN(str, len);
+}
+
+const mp_print_t mp_plat_print = {NULL, plat_print_strn};
+
+int mp_print_str(const mp_print_t *print, const char *str) {
+ size_t len = strlen(str);
+ if (len) {
+ print->print_strn(print->data, str, len);
+ }
+ return len;
+}
+
+int mp_print_strn(const mp_print_t *print, const char *str, size_t len, int flags, char fill, int width) {
+ int left_pad = 0;
+ int right_pad = 0;
+ int pad = width - len;
+ int pad_size;
+ int total_chars_printed = 0;
+ const char *pad_chars;
+
+ if (!fill || fill == ' ') {
+ pad_chars = pad_spaces;
+ pad_size = sizeof(pad_spaces) - 1;
+ } else if (fill == '0') {
+ pad_chars = pad_zeroes;
+ pad_size = sizeof(pad_zeroes) - 1;
+ } else {
+ // Other pad characters are fairly unusual, so we'll take the hit
+ // and output them 1 at a time.
+ pad_chars = &fill;
+ pad_size = 1;
+ }
+
+ if (flags & PF_FLAG_CENTER_ADJUST) {
+ left_pad = pad / 2;
+ right_pad = pad - left_pad;
+ } else if (flags & PF_FLAG_LEFT_ADJUST) {
+ right_pad = pad;
+ } else {
+ left_pad = pad;
+ }
+
+ if (left_pad > 0) {
+ total_chars_printed += left_pad;
+ while (left_pad > 0) {
+ int p = left_pad;
+ if (p > pad_size) {
+ p = pad_size;
+ }
+ print->print_strn(print->data, pad_chars, p);
+ left_pad -= p;
+ }
+ }
+ if (len) {
+ print->print_strn(print->data, str, len);
+ total_chars_printed += len;
+ }
+ if (right_pad > 0) {
+ total_chars_printed += right_pad;
+ while (right_pad > 0) {
+ int p = right_pad;
+ if (p > pad_size) {
+ p = pad_size;
+ }
+ print->print_strn(print->data, pad_chars, p);
+ right_pad -= p;
+ }
+ }
+ return total_chars_printed;
+}
+
+// 32-bits is 10 digits, add 3 for commas, 1 for sign, 1 for terminating null
+// We can use 16 characters for 32-bit and 32 characters for 64-bit
+#define INT_BUF_SIZE (sizeof(mp_int_t) * 4)
+
+// Our mp_vprintf function below does not support the '#' format modifier to
+// print the prefix of a non-base-10 number, so we don't need code for this.
+#define SUPPORT_INT_BASE_PREFIX (0)
+
// This function is used exclusively by mp_vprintf to format ints.
// It needs to be a separate function to mp_print_mp_int, since converting to an mp_int loses the MSB.
// x is the raw value, reinterpreted as signed mp_int_t when sgn != 0.
// base is the radix; base_char ('a' or 'A') selects the case of digits
// above 9. Returns the number of characters printed (padding included).
STATIC int mp_print_int(const mp_print_t *print, mp_uint_t x, int sgn, int base, int base_char, int flags, char fill, int width) {
    // Decide which sign character (if any) must precede the digits.
    char sign = 0;
    if (sgn) {
        if ((mp_int_t)x < 0) {
            sign = '-';
            x = -x;
        } else if (flags & PF_FLAG_SHOW_SIGN) {
            sign = '+';
        } else if (flags & PF_FLAG_SPACE_SIGN) {
            sign = ' ';
        }
    }

    // Convert to digits from the least-significant end, filling the buffer
    // backwards so that `b` ends up pointing at the first digit.
    char buf[INT_BUF_SIZE];
    char *b = buf + INT_BUF_SIZE;

    if (x == 0) {
        *(--b) = '0';
    } else {
        do {
            int c = x % base;
            x /= base;
            if (c >= 10) {
                c += base_char - 10;
            } else {
                c += '0';
            }
            *(--b) = c;
        } while (b > buf && x != 0);
    }

    #if SUPPORT_INT_BASE_PREFIX
    // Optional "0b"/"0o"/"0x" prefix; its letter follows base_char's case.
    char prefix_char = '\0';

    if (flags & PF_FLAG_SHOW_PREFIX) {
        if (base == 2) {
            prefix_char = base_char + 'b' - 'a';
        } else if (base == 8) {
            prefix_char = base_char + 'o' - 'a';
        } else if (base == 16) {
            prefix_char = base_char + 'x' - 'a';
        }
    }
    #endif

    int len = 0;
    if (flags & PF_FLAG_PAD_AFTER_SIGN) {
        // Zero padding goes between the sign/prefix and the digits, so emit
        // the sign (and prefix) now and shrink the remaining field width.
        if (sign) {
            len += mp_print_strn(print, &sign, 1, flags, fill, 1);
            width--;
        }
        #if SUPPORT_INT_BASE_PREFIX
        if (prefix_char) {
            len += mp_print_strn(print, "0", 1, flags, fill, 1);
            len += mp_print_strn(print, &prefix_char, 1, flags, fill, 1);
            width -= 2;
        }
        #endif
    } else {
        // Space padding goes before the sign, so prepend the sign/prefix to
        // the digit buffer instead (bounds-checked against the buffer start).
        #if SUPPORT_INT_BASE_PREFIX
        if (prefix_char && b > &buf[1]) {
            *(--b) = prefix_char;
            *(--b) = '0';
        }
        #endif
        if (sign && b > buf) {
            *(--b) = sign;
        }
    }

    len += mp_print_strn(print, b, buf + INT_BUF_SIZE - b, flags, fill, width);
    return len;
}
+
// Format an arbitrary-precision integer object `x` in the given base.
// base_char ('a' or 'A') selects the case of digits above 9 and of any
// base prefix letter. prec, when > 1, is the minimum number of digits to
// zero-pad the number to inside the field. Non-int objects are coerced via
// mp_obj_get_int (so booleans work; other types raise).
// Returns the number of characters printed.
int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, int base, int base_char, int flags, char fill, int width, int prec) {
    if (!MP_OBJ_IS_INT(x)) {
        // This will convert booleans to int, or raise an error for
        // non-integer types.
        x = MP_OBJ_NEW_SMALL_INT(mp_obj_get_int(x));
    }

    // For right-aligned zero fill, fold the field width into the precision
    // so the zeroes are produced as leading digit padding below.
    if ((flags & (PF_FLAG_LEFT_ADJUST | PF_FLAG_CENTER_ADJUST)) == 0 && fill == '0') {
        if (prec > width) {
            width = prec;
        }
        prec = 0;
    }
    // Build the sign/prefix string: at most a sign char plus "0b"/"0o"/"0x"
    // (or a bare "0" for octal without the letter), then a terminating NUL.
    char prefix_buf[4];
    char *prefix = prefix_buf;

    if (mp_obj_int_sign(x) > 0) {
        if (flags & PF_FLAG_SHOW_SIGN) {
            *prefix++ = '+';
        } else if (flags & PF_FLAG_SPACE_SIGN) {
            *prefix++ = ' ';
        }
    }

    if (flags & PF_FLAG_SHOW_PREFIX) {
        if (base == 2) {
            *prefix++ = '0';
            *prefix++ = base_char + 'b' - 'a';
        } else if (base == 8) {
            *prefix++ = '0';
            if (flags & PF_FLAG_SHOW_OCTAL_LETTER) {
                *prefix++ = base_char + 'o' - 'a';
            }
        } else if (base == 16) {
            *prefix++ = '0';
            *prefix++ = base_char + 'x' - 'a';
        }
    }
    *prefix = '\0';
    int prefix_len = prefix - prefix_buf;
    prefix = prefix_buf;

    char comma = '\0';
    if (flags & PF_FLAG_SHOW_COMMA) {
        comma = ',';
    }

    // The size of this buffer is rather arbitrary. If it's not large
    // enough, a dynamic one will be allocated (by mp_obj_int_formatted,
    // which then updates buf and buf_size; freed at the end of this function).
    char stack_buf[sizeof(mp_int_t) * 4];
    char *buf = stack_buf;
    mp_uint_t buf_size = sizeof(stack_buf);
    mp_uint_t fmt_size = 0;
    char *str;

    if (prec > 1) {
        flags |= PF_FLAG_PAD_AFTER_SIGN;
    }
    char sign = '\0';
    if (flags & PF_FLAG_PAD_AFTER_SIGN) {
        // We add the pad in this function, so since the pad goes after
        // the sign & prefix, we format without a prefix
        str = mp_obj_int_formatted(&buf, &buf_size, &fmt_size,
                                   x, base, NULL, base_char, comma);
        if (*str == '-') {
            // Peel off the '-' so the zero padding can be inserted after it.
            sign = *str++;
            fmt_size--;
        }
    } else {
        str = mp_obj_int_formatted(&buf, &buf_size, &fmt_size,
                                   x, base, prefix, base_char, comma);
    }

    int spaces_before = 0;
    int spaces_after = 0;

    if (prec > 1) {
        // If prec was specified, then prec specifies the width to zero-pad
        // the number to. This zero-padded number then gets left or right
        // aligned in width characters.

        int prec_width = fmt_size; // The digits
        if (prec_width < prec) {
            prec_width = prec;
        }
        if (flags & PF_FLAG_PAD_AFTER_SIGN) {
            if (sign) {
                prec_width++;
            }
            prec_width += prefix_len;
        }
        if (prec_width < width) {
            if (flags & PF_FLAG_LEFT_ADJUST) {
                spaces_after = width - prec_width;
            } else {
                spaces_before = width - prec_width;
            }
        }
        fill = '0';
        flags &= ~PF_FLAG_LEFT_ADJUST;
    }

    int len = 0;
    if (spaces_before) {
        len += mp_print_strn(print, "", 0, 0, ' ', spaces_before);
    }
    if (flags & PF_FLAG_PAD_AFTER_SIGN) {
        // pad after sign implies pad after prefix as well.
        if (sign) {
            len += mp_print_strn(print, &sign, 1, 0, 0, 1);
            width--;
        }
        if (prefix_len) {
            len += mp_print_strn(print, prefix, prefix_len, 0, 0, 1);
            width -= prefix_len;
        }
    }
    if (prec > 1) {
        width = prec;
    }

    len += mp_print_strn(print, str, fmt_size, flags, fill, width);

    if (spaces_after) {
        len += mp_print_strn(print, "", 0, 0, ' ', spaces_after);
    }

    // Release the dynamic buffer if mp_obj_int_formatted had to allocate one.
    if (buf != stack_buf) {
        m_del(char, buf, buf_size);
    }
    return len;
}
+
#if MICROPY_PY_BUILTINS_FLOAT
// Format a floating-point value through the given printer. fmt is one of
// e/E/f/F/g/G; flags, fill and width are handled as in mp_print_strn, and
// prec is forwarded to mp_format_float. Returns the character count.
int mp_print_float(const mp_print_t *print, mp_float_t f, char fmt, int flags, char fill, int width, int prec) {
    char buf[32];
    int nprinted = 0;

    // Choose the explicit sign character requested by the flags, if any.
    char sign = '\0';
    if (flags & PF_FLAG_SHOW_SIGN) {
        sign = '+';
    } else if (flags & PF_FLAG_SPACE_SIGN) {
        sign = ' ';
    }

    int len = mp_format_float(f, buf, sizeof(buf), fmt, prec, sign);
    if (len < 0) {
        len = 0;
    }

    char *body = buf;

    // Optionally append a literal '%' after the digits.
    if ((flags & PF_FLAG_ADD_PERCENT) && (size_t)(len + 1) < sizeof(buf)) {
        buf[len++] = '%';
        buf[len] = '\0';
    }

    // In ASCII, space, '+' and '-' all sort below '0', so buf[0] < '0'
    // detects a leading sign character. When padding goes after the sign,
    // emit that sign first so zero fill lands between it and the digits.
    if ((flags & PF_FLAG_PAD_AFTER_SIGN) && buf[0] < '0') {
        body++;
        nprinted += mp_print_strn(print, &buf[0], 1, 0, 0, 1);
        width--;
        len--;
    }

    nprinted += mp_print_strn(print, body, len, flags, fill, width);

    return nprinted;
}
#endif
+
+int mp_printf(const mp_print_t *print, const char *fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ int ret = mp_vprintf(print, fmt, ap);
+ va_end(ap);
+ return ret;
+}
+
+int mp_vprintf(const mp_print_t *print, const char *fmt, va_list args) {
+ int chrs = 0;
+ for (;;) {
+ {
+ const char *f = fmt;
+ while (*f != '\0' && *f != '%') {
+ ++f; // XXX UTF8 advance char
+ }
+ if (f > fmt) {
+ print->print_strn(print->data, fmt, f - fmt);
+ chrs += f - fmt;
+ fmt = f;
+ }
+ }
+
+ if (*fmt == '\0') {
+ break;
+ }
+
+ // move past % character
+ ++fmt;
+
+ // parse flags, if they exist
+ int flags = 0;
+ char fill = ' ';
+ while (*fmt != '\0') {
+ if (*fmt == '-') flags |= PF_FLAG_LEFT_ADJUST;
+ else if (*fmt == '+') flags |= PF_FLAG_SHOW_SIGN;
+ else if (*fmt == ' ') flags |= PF_FLAG_SPACE_SIGN;
+ else if (*fmt == '!') flags |= PF_FLAG_NO_TRAILZ;
+ else if (*fmt == '0') {
+ flags |= PF_FLAG_PAD_AFTER_SIGN;
+ fill = '0';
+ } else break;
+ ++fmt;
+ }
+
+ // parse width, if it exists
+ int width = 0;
+ for (; '0' <= *fmt && *fmt <= '9'; ++fmt) {
+ width = width * 10 + *fmt - '0';
+ }
+
+ // parse precision, if it exists
+ int prec = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ if (*fmt == '*') {
+ ++fmt;
+ prec = va_arg(args, int);
+ } else {
+ prec = 0;
+ for (; '0' <= *fmt && *fmt <= '9'; ++fmt) {
+ prec = prec * 10 + *fmt - '0';
+ }
+ }
+ if (prec < 0) {
+ prec = 0;
+ }
+ }
+
+ // parse long specifiers (current not used)
+ //bool long_arg = false;
+ if (*fmt == 'l') {
+ ++fmt;
+ //long_arg = true;
+ }
+
+ if (*fmt == '\0') {
+ break;
+ }
+
+ switch (*fmt) {
+ case 'b':
+ if (va_arg(args, int)) {
+ chrs += mp_print_strn(print, "true", 4, flags, fill, width);
+ } else {
+ chrs += mp_print_strn(print, "false", 5, flags, fill, width);
+ }
+ break;
+ case 'c':
+ {
+ char str = va_arg(args, int);
+ chrs += mp_print_strn(print, &str, 1, flags, fill, width);
+ break;
+ }
+ case 'q':
+ {
+ qstr qst = va_arg(args, qstr);
+ size_t len;
+ const char *str = (const char*)qstr_data(qst, &len);
+ if (prec < 0) {
+ prec = len;
+ }
+ chrs += mp_print_strn(print, str, prec, flags, fill, width);
+ break;
+ }
+ case 's':
+ {
+ const char *str = va_arg(args, const char*);
+ if (str) {
+ if (prec < 0) {
+ prec = strlen(str);
+ }
+ chrs += mp_print_strn(print, str, prec, flags, fill, width);
+ } else {
+ chrs += mp_print_strn(print, "(null)", 6, flags, fill, width);
+ }
+ break;
+ }
+ case 'u':
+ chrs += mp_print_int(print, va_arg(args, unsigned int), 0, 10, 'a', flags, fill, width);
+ break;
+ case 'd':
+ chrs += mp_print_int(print, va_arg(args, int), 1, 10, 'a', flags, fill, width);
+ break;
+ case 'x':
+ chrs += mp_print_int(print, va_arg(args, unsigned int), 0, 16, 'a', flags, fill, width);
+ break;
+ case 'X':
+ chrs += mp_print_int(print, va_arg(args, unsigned int), 0, 16, 'A', flags, fill, width);
+ break;
+ case 'p':
+ case 'P': // don't bother to handle upcase for 'P'
+ chrs += mp_print_int(print, va_arg(args, unsigned int), 0, 16, 'a', flags, fill, width);
+ break;
+#if MICROPY_PY_BUILTINS_FLOAT
+ case 'e':
+ case 'E':
+ case 'f':
+ case 'F':
+ case 'g':
+ case 'G':
+ {
+#if ((MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT) || (MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE))
+ mp_float_t f = va_arg(args, double);
+ chrs += mp_print_float(print, f, *fmt, flags, fill, width, prec);
+#else
+#error Unknown MICROPY FLOAT IMPL
+#endif
+ break;
+ }
+#endif
+ // Because 'l' is eaten above, another 'l' means %ll. We need to support
+ // this length specifier for OBJ_REPR_D (64-bit NaN boxing).
+ // TODO Either enable this unconditionally, or provide a specific config var.
+ #if (MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D) || defined(_WIN64)
+ case 'l': {
+ unsigned long long int arg_value = va_arg(args, unsigned long long int);
+ ++fmt;
+ if (*fmt == 'u' || *fmt == 'd') {
+ chrs += mp_print_int(print, arg_value, *fmt == 'd', 10, 'a', flags, fill, width);
+ break;
+ }
+ // fall through to default case to print unknown format char
+ }
+ #endif
+ default:
+ print->print_strn(print->data, fmt, 1);
+ chrs += 1;
+ break;
+ }
+ ++fmt;
+ }
+ return chrs;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/mpprint.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,68 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_MPPRINT_H__
+#define __MICROPY_INCLUDED_PY_MPPRINT_H__
+
+#include "py/mpconfig.h"
+
+// Formatting flags accepted by the mp_print_* functions (bitwise-OR'able).
+#define PF_FLAG_LEFT_ADJUST (0x001)
+#define PF_FLAG_SHOW_SIGN (0x002)
+#define PF_FLAG_SPACE_SIGN (0x004)
+#define PF_FLAG_NO_TRAILZ (0x008)
+#define PF_FLAG_SHOW_PREFIX (0x010)
+#define PF_FLAG_SHOW_COMMA (0x020)
+#define PF_FLAG_PAD_AFTER_SIGN (0x040)
+#define PF_FLAG_CENTER_ADJUST (0x080)
+#define PF_FLAG_ADD_PERCENT (0x100)
+#define PF_FLAG_SHOW_OCTAL_LETTER (0x200)
+
+// Low-level output primitive: writes len bytes of str to the stream's data.
+typedef void (*mp_print_strn_t)(void *data, const char *str, size_t len);
+
+// A print stream: an opaque context pointer plus the function that writes to it.
+typedef struct _mp_print_t {
+ void *data;
+ mp_print_strn_t print_strn;
+} mp_print_t;
+
+// All (non-debug) prints go through one of the two interfaces below.
+// 1) Wrapper for platform print function, which wraps MP_PLAT_PRINT_STRN.
+extern const mp_print_t mp_plat_print;
+#if MICROPY_PY_IO
+// 2) Wrapper for printing to sys.stdout.
+extern const mp_print_t mp_sys_stdout_print;
+#endif
+
+int mp_print_str(const mp_print_t *print, const char *str);
+int mp_print_strn(const mp_print_t *print, const char *str, size_t len, int flags, char fill, int width);
+#if MICROPY_PY_BUILTINS_FLOAT
+int mp_print_float(const mp_print_t *print, mp_float_t f, char fmt, int flags, char fill, int width, int prec);
+#endif
+
+int mp_printf(const mp_print_t *print, const char *fmt, ...);
+#ifdef va_start
+int mp_vprintf(const mp_print_t *print, const char *fmt, va_list args);
+#endif
+
+#endif // __MICROPY_INCLUDED_PY_MPPRINT_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/mpstate.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,33 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+
+#if MICROPY_DYNAMIC_COMPILER
+// Runtime-configurable compiler settings (declared in py/mpstate.h); zero-initialised.
+mp_dynamic_compiler_t mp_dynamic_compiler = {0};
+#endif
+
+// The single global instance of the interpreter state (GC roots, VM state, heap info).
+mp_state_ctx_t mp_state_ctx;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/mpstate.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,199 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_MPSTATE_H__
+#define __MICROPY_INCLUDED_PY_MPSTATE_H__
+
+#include <stdint.h>
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/nlr.h"
+#include "py/obj.h"
+#include "py/objlist.h"
+#include "py/objexcept.h"
+
+// This file contains structures defining the state of the Micro Python
+// memory system, runtime and virtual machine. The state is a global
+// variable, but in the future it is hoped that the state can become local.
+
+// This structure contains dynamic configuration for the compiler.
+#if MICROPY_DYNAMIC_COMPILER
+typedef struct mp_dynamic_compiler_t {
+ uint8_t small_int_bits; // must be <= host small_int_bits
+ // NOTE(review): presumably mirrors MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
+ // for the target being compiled for — confirm at usage sites
+ bool opt_cache_map_lookup_in_bytecode;
+ // NOTE(review): presumably mirrors MICROPY_PY_BUILTINS_STR_UNICODE — confirm
+ bool py_builtins_str_unicode;
+} mp_dynamic_compiler_t;
+extern mp_dynamic_compiler_t mp_dynamic_compiler;
+#endif
+
+// This structure holds information about the memory allocation system.
+typedef struct _mp_state_mem_t {
+ #if MICROPY_MEM_STATS
+ size_t total_bytes_allocated;
+ size_t current_bytes_allocated;
+ size_t peak_bytes_allocated;
+ #endif
+
+ // GC heap layout: allocation table, optional finaliser table, then the pool itself
+ byte *gc_alloc_table_start;
+ size_t gc_alloc_table_byte_len;
+ #if MICROPY_ENABLE_FINALISER
+ byte *gc_finaliser_table_start;
+ #endif
+ byte *gc_pool_start;
+ byte *gc_pool_end;
+
+ // mark-stack used during collection; gc_stack_overflow flags when it overflowed
+ int gc_stack_overflow;
+ size_t gc_stack[MICROPY_ALLOC_GC_STACK_SIZE];
+ size_t *gc_sp;
+ uint16_t gc_lock_depth; // >0 means GC is locked (nested lock count)
+
+ // This variable controls auto garbage collection. If set to 0 then the
+ // GC won't automatically run when gc_alloc can't find enough blocks. But
+ // you can still allocate/free memory and also explicitly call gc_collect.
+ uint16_t gc_auto_collect_enabled;
+
+ // cached search position to speed up subsequent allocations
+ size_t gc_last_free_atb_index;
+
+ #if MICROPY_PY_GC_COLLECT_RETVAL
+ size_t gc_collected;
+ #endif
+} mp_state_mem_t;
+
+// This structure holds runtime and VM information. It includes a section
+// which contains root pointers that must be scanned by the GC.
+typedef struct _mp_state_vm_t {
+ ////////////////////////////////////////////////////////////
+ // START ROOT POINTER SECTION
+ // everything that needs GC scanning must go here
+ // this must start at the start of this structure
+ //
+
+ // Note: nlr asm code has the offset of this hard-coded
+ nlr_buf_t *nlr_top;
+
+ // head of the linked list of interned-string (qstr) pools
+ qstr_pool_t *last_pool;
+
+ // non-heap memory for creating an exception if we can't allocate RAM
+ mp_obj_exception_t mp_emergency_exception_obj;
+
+ // memory for exception arguments if we can't allocate RAM
+ #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+ #if MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE > 0
+ // statically allocated buf
+ byte mp_emergency_exception_buf[MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE];
+ #else
+ // dynamically allocated buf
+ byte *mp_emergency_exception_buf;
+ #endif
+ #endif
+
+ // dictionary with loaded modules (may be exposed as sys.modules)
+ mp_obj_dict_t mp_loaded_modules_dict;
+
+ // pending exception object (MP_OBJ_NULL if not pending)
+ volatile mp_obj_t mp_pending_exception;
+
+ // current exception being handled, for sys.exc_info()
+ #if MICROPY_PY_SYS_EXC_INFO
+ mp_obj_base_t *cur_exception;
+ #endif
+
+ // dictionary for the __main__ module
+ mp_obj_dict_t dict_main;
+
+ // these two lists must be initialised per port, after the call to mp_init
+ mp_obj_list_t mp_sys_path_obj;
+ mp_obj_list_t mp_sys_argv_obj;
+
+ // dictionary for overridden builtins
+ #if MICROPY_CAN_OVERRIDE_BUILTINS
+ mp_obj_dict_t *mp_module_builtins_override_dict;
+ #endif
+
+ // include any root pointers defined by a port
+ MICROPY_PORT_ROOT_POINTERS
+
+ // root pointers for extmod
+
+ #if MICROPY_PY_OS_DUPTERM
+ mp_obj_t term_obj;
+ #endif
+
+ #if MICROPY_PY_LWIP_SLIP
+ mp_obj_t lwip_slip_stream;
+ #endif
+
+ #if MICROPY_FSUSERMOUNT
+ // for user-mountable block device (max fixed at compile time)
+ struct _fs_user_mount_t *fs_user_mount[MICROPY_FATFS_VOLUMES];
+ #endif
+
+ //
+ // END ROOT POINTER SECTION
+ ////////////////////////////////////////////////////////////
+
+ // pointer and sizes to store interned string data
+ // (qstr_last_chunk can be root pointer but is also stored in qstr pool)
+ byte *qstr_last_chunk;
+ size_t qstr_last_alloc;
+ size_t qstr_last_used;
+
+ // Stack top at the start of program
+ // Note: this entry is used to locate the end of the root pointer section.
+ char *stack_top;
+
+ #if MICROPY_STACK_CHECK
+ mp_uint_t stack_limit;
+ #endif
+
+ // NOTE(review): presumably the optimisation level (e.g. from a -O option) —
+ // confirm against the code that reads it
+ mp_uint_t mp_optimise_value;
+
+ // size of the emergency exception buf, if it's dynamically allocated
+ #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0
+ mp_int_t mp_emergency_exception_buf_size;
+ #endif
+} mp_state_vm_t;
+
+// This structure combines the above 2 structures, and adds the local
+// and global dicts.
+// Note: if this structure changes then revisit all nlr asm code since they
+// have the offset of nlr_top hard-coded.
+typedef struct _mp_state_ctx_t {
+ // these must come first for root pointer scanning in GC to work
+ mp_obj_dict_t *dict_locals;
+ mp_obj_dict_t *dict_globals;
+ // this must come next for root pointer scanning in GC to work
+ mp_state_vm_t vm;
+ mp_state_mem_t mem;
+} mp_state_ctx_t;
+
+extern mp_state_ctx_t mp_state_ctx;
+
+// Accessor macros for the three regions of the global state.
+#define MP_STATE_CTX(x) (mp_state_ctx.x)
+#define MP_STATE_VM(x) (mp_state_ctx.vm.x)
+#define MP_STATE_MEM(x) (mp_state_ctx.mem.x)
+#endif // __MICROPY_INCLUDED_PY_MPSTATE_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/mpz.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,1758 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+
+#include "py/mpz.h"
+
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_MPZ
+
+#define DIG_SIZE (MPZ_DIG_SIZE)
+#define DIG_MASK ((MPZ_LONG_1 << DIG_SIZE) - 1)
+#define DIG_MSB (MPZ_LONG_1 << (DIG_SIZE - 1))
+#define DIG_BASE (MPZ_LONG_1 << DIG_SIZE)
+
+/*
+ mpz is an arbitrary precision integer type with a public API.
+
+ mpn functions act on non-negative integers represented by an array of generalised
+ digits (eg a word per digit). You also need to specify separately the length of the
+ array. There is no public API for mpn. Rather, the functions are used by mpz to
+ implement its features.
+
+ Integer values are stored little endian (first digit is first in memory).
+
+ Definition of normalise: a digit array is normalised when it contains no leading
+ (most-significant) zero digits; the value zero is represented by a length of 0.
+*/
+
+/* compares i with j
+ returns sign(i - j)
+ assumes i, j are normalised
+*/
+STATIC int mpn_cmp(const mpz_dig_t *idig, mp_uint_t ilen, const mpz_dig_t *jdig, mp_uint_t jlen) {
+ // a longer normalised number has strictly greater magnitude
+ if (ilen != jlen) {
+ return (ilen < jlen) ? -1 : 1;
+ }
+
+ // same length: compare digit by digit, most significant first
+ for (mp_uint_t k = ilen; k > 0; --k) {
+ mpz_dbl_dig_signed_t diff = (mpz_dbl_dig_t)idig[k - 1] - (mpz_dbl_dig_t)jdig[k - 1];
+ if (diff != 0) {
+ return diff < 0 ? -1 : 1;
+ }
+ }
+
+ return 0;
+}
+
+/* computes i = j << n
+ returns number of digits in i
+ assumes enough memory in i; assumes normalised j; assumes n > 0
+ can have i, j pointing to same memory
+*/
+STATIC mp_uint_t mpn_shl(mpz_dig_t *idig, mpz_dig_t *jdig, mp_uint_t jlen, mp_uint_t n) {
+ // number of whole extra digits the result may occupy (rounded up)
+ mp_uint_t n_whole = (n + DIG_SIZE - 1) / DIG_SIZE;
+ // sub-digit part of the shift; normalised to (0, DIG_SIZE] so that the
+ // "DIG_SIZE - n_part" shifts below are always strictly less than DIG_SIZE
+ mp_uint_t n_part = n % DIG_SIZE;
+ if (n_part == 0) {
+ n_part = DIG_SIZE;
+ }
+
+ // start from the high end of the digit arrays
+ // (working downwards makes in-place operation with i == j safe)
+ idig += jlen + n_whole - 1;
+ jdig += jlen - 1;
+
+ // shift the digits: d carries two adjacent source digits across iterations
+ mpz_dbl_dig_t d = 0;
+ for (mp_uint_t i = jlen; i > 0; i--, idig--, jdig--) {
+ d |= *jdig;
+ *idig = (d >> (DIG_SIZE - n_part)) & DIG_MASK;
+ d <<= DIG_SIZE;
+ }
+
+ // store remaining bits
+ *idig = (d >> (DIG_SIZE - n_part)) & DIG_MASK;
+ idig -= n_whole - 1;
+ // low-order digits of the result are all zero
+ memset(idig, 0, (n_whole - 1) * sizeof(mpz_dig_t));
+
+ // work out length of result (drop a possible leading zero digit)
+ jlen += n_whole;
+ while (jlen != 0 && idig[jlen - 1] == 0) {
+ jlen--;
+ }
+
+ // return length of result
+ return jlen;
+}
+
+/* computes i = j >> n
+ returns number of digits in i
+ assumes enough memory in i; assumes normalised j; assumes n > 0
+ can have i, j pointing to same memory
+*/
+STATIC mp_uint_t mpn_shr(mpz_dig_t *idig, mpz_dig_t *jdig, mp_uint_t jlen, mp_uint_t n) {
+ mp_uint_t n_whole = n / DIG_SIZE; // whole digits shifted out
+ mp_uint_t n_part = n % DIG_SIZE; // remaining sub-digit shift
+
+ // shifting out at least as many digits as j has yields zero
+ if (n_whole >= jlen) {
+ return 0;
+ }
+
+ // skip the digits that are discarded entirely
+ jdig += n_whole;
+ jlen -= n_whole;
+
+ // working upwards makes in-place operation with i == j safe
+ for (mp_uint_t i = jlen; i > 0; i--, idig++, jdig++) {
+ mpz_dbl_dig_t d = *jdig;
+ if (i > 1) {
+ // bring in low bits of the next-higher digit
+ d |= (mpz_dbl_dig_t)jdig[1] << DIG_SIZE;
+ }
+ d >>= n_part;
+ *idig = d & DIG_MASK;
+ }
+
+ // idig now points one past the result; drop a leading zero digit if any
+ if (idig[-1] == 0) {
+ jlen--;
+ }
+
+ return jlen;
+}
+
+/* computes i = j + k
+ returns number of digits in i
+ assumes enough memory in i; assumes normalised j, k; assumes jlen >= klen
+ can have i, j, k pointing to same memory
+*/
+STATIC mp_uint_t mpn_add(mpz_dig_t *idig, const mpz_dig_t *jdig, mp_uint_t jlen, const mpz_dig_t *kdig, mp_uint_t klen) {
+ mpz_dig_t *result_start = idig;
+ mpz_dbl_dig_t acc = 0; // running sum plus carry
+
+ jlen -= klen; // digits where only j contributes
+
+ // digits where both j and k contribute
+ while (klen > 0) {
+ acc += (mpz_dbl_dig_t)*jdig + (mpz_dbl_dig_t)*kdig;
+ *idig = acc & DIG_MASK;
+ acc >>= DIG_SIZE;
+ --klen, ++idig, ++jdig, ++kdig;
+ }
+
+ // remaining high digits of j, propagating any carry
+ while (jlen > 0) {
+ acc += *jdig;
+ *idig = acc & DIG_MASK;
+ acc >>= DIG_SIZE;
+ --jlen, ++idig, ++jdig;
+ }
+
+ // a final carry extends the result by one digit
+ if (acc != 0) {
+ *idig++ = acc;
+ }
+
+ return idig - result_start;
+}
+
+/* computes i = j - k
+ returns number of digits in i
+ assumes enough memory in i; assumes normalised j, k; assumes j >= k
+ can have i, j, k pointing to same memory
+*/
+STATIC mp_uint_t mpn_sub(mpz_dig_t *idig, const mpz_dig_t *jdig, mp_uint_t jlen, const mpz_dig_t *kdig, mp_uint_t klen) {
+ mpz_dig_t *oidig = idig;
+ // NOTE(review): borrow is signed and `borrow >>= DIG_SIZE` below relies on
+ // arithmetic (sign-extending) right shift of a negative value, which is
+ // implementation-defined in C — true on all compilers this targets; confirm.
+ mpz_dbl_dig_signed_t borrow = 0;
+
+ jlen -= klen;
+
+ // digits where both j and k contribute
+ for (; klen > 0; --klen, ++idig, ++jdig, ++kdig) {
+ borrow += (mpz_dbl_dig_t)*jdig - (mpz_dbl_dig_t)*kdig;
+ *idig = borrow & DIG_MASK;
+ borrow >>= DIG_SIZE;
+ }
+
+ // remaining high digits of j, propagating any borrow
+ for (; jlen > 0; --jlen, ++idig, ++jdig) {
+ borrow += *jdig;
+ *idig = borrow & DIG_MASK;
+ borrow >>= DIG_SIZE;
+ }
+
+ // normalise: strip leading zero digits (subtraction can shorten the result)
+ for (--idig; idig >= oidig && *idig == 0; --idig) {
+ }
+
+ return idig + 1 - oidig;
+}
+
+// Return the normalised length of the digit range [oidig, idig):
+// scan down from the top, dropping most-significant zero digits.
+STATIC mp_uint_t mpn_remove_trailing_zeros(mpz_dig_t *oidig, mpz_dig_t *idig) {
+ while (idig > oidig && idig[-1] == 0) {
+ --idig;
+ }
+ return idig - oidig;
+}
+
+#if MICROPY_OPT_MPZ_BITWISE
+
+/* computes i = j & k
+ returns number of digits in i
+ assumes enough memory in i; assumes normalised j, k; assumes jlen >= klen (jlen argument not needed)
+ can have i, j, k pointing to same memory
+*/
+STATIC mp_uint_t mpn_and(mpz_dig_t *idig, const mpz_dig_t *jdig, const mpz_dig_t *kdig, mp_uint_t klen) {
+ mpz_dig_t *result_start = idig;
+
+ // only the low klen digits can be non-zero in the result
+ while (klen > 0) {
+ *idig++ = *jdig++ & *kdig++;
+ --klen;
+ }
+
+ // and-ing can clear leading digits, so re-normalise
+ return mpn_remove_trailing_zeros(result_start, idig);
+}
+
+#endif
+
+/* i = -((-j) & (-k)) = ~((~j + 1) & (~k + 1)) + 1
+ i = (j & (-k)) = (j & (~k + 1)) = ( j & (~k + 1))
+ i = ((-j) & k) = ((~j + 1) & k) = ((~j + 1) & k )
+ computes general form:
+ i = (im ^ (((j ^ jm) + jc) & ((k ^ km) + kc))) + ic where Xm = Xc == 0 ? 0 : DIG_MASK
+ returns number of digits in i
+ assumes enough memory in i; assumes normalised j, k; assumes length j >= length k
+ can have i, j, k pointing to same memory
+*/
+STATIC mp_uint_t mpn_and_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, mp_uint_t jlen, const mpz_dig_t *kdig, mp_uint_t klen,
+ mpz_dbl_dig_t carryi, mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
+ mpz_dig_t *oidig = idig;
+ // each operand that is being two's-complemented gets a full mask and carry 1
+ mpz_dig_t imask = (0 == carryi) ? 0 : DIG_MASK;
+ mpz_dig_t jmask = (0 == carryj) ? 0 : DIG_MASK;
+ mpz_dig_t kmask = (0 == carryk) ? 0 : DIG_MASK;
+
+ for (; jlen > 0; ++idig, ++jdig) {
+ carryj += *jdig ^ jmask;
+ // once k is exhausted --klen wraps to a huge unsigned value, so the
+ // comparison fails and kmask alone (sign extension of k) is used
+ carryk += (--klen <= --jlen) ? (*kdig++ ^ kmask) : kmask;
+ carryi += ((carryj & carryk) ^ imask) & DIG_MASK;
+ *idig = carryi & DIG_MASK;
+ carryk >>= DIG_SIZE;
+ carryj >>= DIG_SIZE;
+ carryi >>= DIG_SIZE;
+ }
+
+ if (0 != carryi) {
+ *idig++ = carryi;
+ }
+
+ return mpn_remove_trailing_zeros(oidig, idig);
+}
+
+#if MICROPY_OPT_MPZ_BITWISE
+
+/* computes i = j | k
+ returns number of digits in i
+ assumes enough memory in i; assumes normalised j, k; assumes jlen >= klen
+ can have i, j, k pointing to same memory
+*/
+STATIC mp_uint_t mpn_or(mpz_dig_t *idig, const mpz_dig_t *jdig, mp_uint_t jlen, const mpz_dig_t *kdig, mp_uint_t klen) {
+ mpz_dig_t *result_start = idig;
+ mp_uint_t extra = jlen - klen; // digits where only j contributes
+
+ // common digits: bitwise-or of j and k
+ while (klen > 0) {
+ *idig++ = *jdig++ | *kdig++;
+ --klen;
+ }
+
+ // remaining high digits of j pass through unchanged
+ while (extra > 0) {
+ *idig++ = *jdig++;
+ --extra;
+ }
+
+ // no re-normalisation needed: or never clears the top digit of j
+ return idig - result_start;
+}
+
+#endif
+
+/* i = -((-j) | (-k)) = ~((~j + 1) | (~k + 1)) + 1
+ i = -(j | (-k)) = -(j | (~k + 1)) = ~( j | (~k + 1)) + 1
+ i = -((-j) | k) = -((~j + 1) | k) = ~((~j + 1) | k ) + 1
+ computes general form:
+ i = ~(((j ^ jm) + jc) | ((k ^ km) + kc)) + 1 where Xm = Xc == 0 ? 0 : DIG_MASK
+ returns number of digits in i
+ assumes enough memory in i; assumes normalised j, k; assumes length j >= length k
+ can have i, j, k pointing to same memory
+*/
+
+#if MICROPY_OPT_MPZ_BITWISE
+
+// Specialised variant: the result is always negated (carryi fixed at 1,
+// imask fixed at DIG_MASK), which is the only way mpn_or_neg is reached
+// when the bitwise optimisation handles the both-positive case separately.
+STATIC mp_uint_t mpn_or_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, mp_uint_t jlen, const mpz_dig_t *kdig, mp_uint_t klen,
+ mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
+ mpz_dig_t *oidig = idig;
+ mpz_dbl_dig_t carryi = 1;
+ mpz_dig_t jmask = (0 == carryj) ? 0 : DIG_MASK;
+ mpz_dig_t kmask = (0 == carryk) ? 0 : DIG_MASK;
+
+ for (; jlen > 0; ++idig, ++jdig) {
+ carryj += *jdig ^ jmask;
+ // once k is exhausted --klen wraps to a huge unsigned value, so the
+ // comparison fails and kmask alone (sign extension of k) is used
+ carryk += (--klen <= --jlen) ? (*kdig++ ^ kmask) : kmask;
+ carryi += ((carryj | carryk) ^ DIG_MASK) & DIG_MASK;
+ *idig = carryi & DIG_MASK;
+ carryk >>= DIG_SIZE;
+ carryj >>= DIG_SIZE;
+ carryi >>= DIG_SIZE;
+ }
+
+ if (0 != carryi) {
+ *idig++ = carryi;
+ }
+
+ return mpn_remove_trailing_zeros(oidig, idig);
+}
+
+#else
+
+// General variant taking all three carries/masks (see formula above).
+STATIC mp_uint_t mpn_or_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, mp_uint_t jlen, const mpz_dig_t *kdig, mp_uint_t klen,
+ mpz_dbl_dig_t carryi, mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
+ mpz_dig_t *oidig = idig;
+ mpz_dig_t imask = (0 == carryi) ? 0 : DIG_MASK;
+ mpz_dig_t jmask = (0 == carryj) ? 0 : DIG_MASK;
+ mpz_dig_t kmask = (0 == carryk) ? 0 : DIG_MASK;
+
+ for (; jlen > 0; ++idig, ++jdig) {
+ carryj += *jdig ^ jmask;
+ // once k is exhausted --klen wraps to a huge unsigned value, so the
+ // comparison fails and kmask alone (sign extension of k) is used
+ carryk += (--klen <= --jlen) ? (*kdig++ ^ kmask) : kmask;
+ carryi += ((carryj | carryk) ^ imask) & DIG_MASK;
+ *idig = carryi & DIG_MASK;
+ carryk >>= DIG_SIZE;
+ carryj >>= DIG_SIZE;
+ carryi >>= DIG_SIZE;
+ }
+
+ if (0 != carryi) {
+ *idig++ = carryi;
+ }
+
+ return mpn_remove_trailing_zeros(oidig, idig);
+}
+
+#endif
+
+#if MICROPY_OPT_MPZ_BITWISE
+
+/* computes i = j ^ k
+ returns number of digits in i
+ assumes enough memory in i; assumes normalised j, k; assumes jlen >= klen
+ can have i, j, k pointing to same memory
+*/
+STATIC mp_uint_t mpn_xor(mpz_dig_t *idig, const mpz_dig_t *jdig, mp_uint_t jlen, const mpz_dig_t *kdig, mp_uint_t klen) {
+ mpz_dig_t *result_start = idig;
+ mp_uint_t extra = jlen - klen; // digits where only j contributes
+
+ // common digits: bitwise-xor of j and k
+ while (klen > 0) {
+ *idig++ = *jdig++ ^ *kdig++;
+ --klen;
+ }
+
+ // remaining high digits of j pass through unchanged
+ while (extra > 0) {
+ *idig++ = *jdig++;
+ --extra;
+ }
+
+ // xor can cancel leading digits, so re-normalise the result
+ return mpn_remove_trailing_zeros(result_start, idig);
+}
+
+#endif
+
+/* i = (-j) ^ (-k) = ~(j - 1) ^ ~(k - 1) = (j - 1) ^ (k - 1)
+ i = -(j ^ (-k)) = -(j ^ ~(k - 1)) = ~(j ^ ~(k - 1)) + 1 = (j ^ (k - 1)) + 1
+ i = -((-j) ^ k) = -(~(j - 1) ^ k) = ~(~(j - 1) ^ k) + 1 = ((j - 1) ^ k) + 1
+ computes general form:
+ i = ((j - 1 + jc) ^ (k - 1 + kc)) + ic
+ returns number of digits in i
+ assumes enough memory in i; assumes normalised j, k; assumes length j >= length k
+ can have i, j, k pointing to same memory
+*/
+STATIC mp_uint_t mpn_xor_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, mp_uint_t jlen, const mpz_dig_t *kdig, mp_uint_t klen,
+ mpz_dbl_dig_t carryi, mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
+ mpz_dig_t *oidig = idig;
+
+ for (; jlen > 0; ++idig, ++jdig) {
+ // adding DIG_MASK is the per-digit form of "subtract 1 with borrow"
+ carryj += *jdig + DIG_MASK;
+ // once k is exhausted --klen wraps to a huge unsigned value, so the
+ // comparison fails and only DIG_MASK (sign extension of k) is added
+ carryk += (--klen <= --jlen) ? (*kdig++ + DIG_MASK) : DIG_MASK;
+ carryi += (carryj ^ carryk) & DIG_MASK;
+ *idig = carryi & DIG_MASK;
+ carryk >>= DIG_SIZE;
+ carryj >>= DIG_SIZE;
+ carryi >>= DIG_SIZE;
+ }
+
+ if (0 != carryi) {
+ *idig++ = carryi;
+ }
+
+ return mpn_remove_trailing_zeros(oidig, idig);
+}
+
+/* computes i = i * dmul + dadd, in place
+ returns number of digits in i
+ assumes enough memory in i; assumes normalised i; assumes dmul != 0
+*/
+STATIC mp_uint_t mpn_mul_dig_add_dig(mpz_dig_t *idig, mp_uint_t ilen, mpz_dig_t dmul, mpz_dig_t dadd) {
+ mpz_dig_t *result_start = idig;
+ mpz_dbl_dig_t acc = dadd; // seed the carry chain with the addend
+
+ while (ilen > 0) {
+ // cannot overflow so long as DIG_SIZE <= 8*sizeof(mpz_dbl_dig_t)/2
+ acc += (mpz_dbl_dig_t)*idig * (mpz_dbl_dig_t)dmul;
+ *idig = acc & DIG_MASK;
+ acc >>= DIG_SIZE;
+ --ilen, ++idig;
+ }
+
+ // a final carry extends the result by one digit
+ if (acc != 0) {
+ *idig++ = acc;
+ }
+
+ return idig - result_start;
+}
+
+/* computes i = j * k (schoolbook multiplication, one digit of k at a time)
+ returns number of digits in i
+ assumes enough memory in i; assumes i is zeroed; assumes normalised j, k
+ can have j, k point to same memory
+*/
+STATIC mp_uint_t mpn_mul(mpz_dig_t *idig, mpz_dig_t *jdig, mp_uint_t jlen, mpz_dig_t *kdig, mp_uint_t klen) {
+ mpz_dig_t *oidig = idig;
+ mp_uint_t ilen = 0;
+
+ // for each digit of k, accumulate j * kdig into i at the right offset
+ for (; klen > 0; --klen, ++idig, ++kdig) {
+ mpz_dig_t *id = idig;
+ mpz_dbl_dig_t carry = 0;
+
+ mp_uint_t jl = jlen;
+ for (mpz_dig_t *jd = jdig; jl > 0; --jl, ++jd, ++id) {
+ carry += (mpz_dbl_dig_t)*id + (mpz_dbl_dig_t)*jd * (mpz_dbl_dig_t)*kdig; // will never overflow so long as DIG_SIZE <= 8*sizeof(mpz_dbl_dig_t)/2
+ *id = carry & DIG_MASK;
+ carry >>= DIG_SIZE;
+ }
+
+ if (carry != 0) {
+ *id++ = carry;
+ }
+
+ // track the running length of the accumulated result
+ ilen = id - oidig;
+ }
+
+ return ilen;
+}
+
+/* natural_div - quo * den + new_num = old_num (ie num is replaced with rem)
+ assumes den != 0
+ assumes num_dig has enough memory to be extended by 1 digit
+ assumes quo_dig has enough memory (as many digits as num)
+ assumes quo_dig is filled with zeros
+ modifies den_dig memory, but restores it to original state at end
+*/
+
+STATIC void mpn_div(mpz_dig_t *num_dig, mp_uint_t *num_len, mpz_dig_t *den_dig, mp_uint_t den_len, mpz_dig_t *quo_dig, mp_uint_t *quo_len) {
+ mpz_dig_t *orig_num_dig = num_dig;
+ mpz_dig_t *orig_quo_dig = quo_dig;
+ mpz_dig_t norm_shift = 0;
+ mpz_dbl_dig_t lead_den_digit;
+
+ // handle simple cases
+ {
+ int cmp = mpn_cmp(num_dig, *num_len, den_dig, den_len);
+ if (cmp == 0) {
+ // num == den: quotient 1, remainder 0
+ *num_len = 0;
+ quo_dig[0] = 1;
+ *quo_len = 1;
+ return;
+ } else if (cmp < 0) {
+ // num < den: quotient 0, numerator remains the same (it is the remainder)
+ *quo_len = 0;
+ return;
+ }
+ }
+
+ // count number of leading zeros in leading digit of denominator
+ {
+ mpz_dig_t d = den_dig[den_len - 1];
+ while ((d & DIG_MSB) == 0) {
+ d <<= 1;
+ ++norm_shift;
+ }
+ }
+
+ // normalise denominator (leading bit of leading digit is 1)
+ // NOTE(review): when norm_shift == 0 the expression d >> (DIG_SIZE - norm_shift)
+ // below shifts the (integer-promoted) digit by DIG_SIZE — benign only while
+ // DIG_SIZE is strictly smaller than the width of int; confirm for all configs.
+ for (mpz_dig_t *den = den_dig, carry = 0; den < den_dig + den_len; ++den) {
+ mpz_dig_t d = *den;
+ *den = ((d << norm_shift) | carry) & DIG_MASK;
+ carry = d >> (DIG_SIZE - norm_shift);
+ }
+
+ // now need to shift numerator by same amount as denominator
+ // first, increase length of numerator in case we need more room to shift
+ num_dig[*num_len] = 0;
+ ++(*num_len);
+ for (mpz_dig_t *num = num_dig, carry = 0; num < num_dig + *num_len; ++num) {
+ mpz_dig_t n = *num;
+ *num = ((n << norm_shift) | carry) & DIG_MASK;
+ carry = n >> (DIG_SIZE - norm_shift);
+ }
+
+ // cache the leading digit of the denominator
+ lead_den_digit = den_dig[den_len - 1];
+
+ // point num_dig to last digit in numerator
+ num_dig += *num_len - 1;
+
+ // calculate number of digits in quotient
+ *quo_len = *num_len - den_len;
+
+ // point to last digit to store for quotient
+ quo_dig += *quo_len - 1;
+
+ // keep going while we have enough digits to divide
+ while (*num_len > den_len) {
+ // estimate the next quotient digit from the top two digits of num
+ mpz_dbl_dig_t quo = ((mpz_dbl_dig_t)*num_dig << DIG_SIZE) | num_dig[-1];
+
+ // get approximate quotient
+ quo /= lead_den_digit;
+
+ // Multiply quo by den and subtract from num to get remainder.
+ // We have different code here to handle different compile-time
+ // configurations of mpz:
+ //
+ // 1. DIG_SIZE is strictly less than half the number of bits
+ // available in mpz_dbl_dig_t. In this case we can use a
+ // slightly more optimal (in time and space) routine that
+ // uses the extra bits in mpz_dbl_dig_signed_t to store a
+ // sign bit.
+ //
+ // 2. DIG_SIZE is exactly half the number of bits available in
+ // mpz_dbl_dig_t. In this (common) case we need to be careful
+ // not to overflow the borrow variable. And the shifting of
+ // borrow needs some special logic (it's a shift right with
+ // round up).
+
+ if (DIG_SIZE < 8 * sizeof(mpz_dbl_dig_t) / 2) {
+ mpz_dbl_dig_signed_t borrow = 0;
+
+ for (mpz_dig_t *n = num_dig - den_len, *d = den_dig; n < num_dig; ++n, ++d) {
+ borrow += (mpz_dbl_dig_t)*n - (mpz_dbl_dig_t)quo * (mpz_dbl_dig_t)*d; // will overflow if DIG_SIZE >= 8*sizeof(mpz_dbl_dig_t)/2
+ *n = borrow & DIG_MASK;
+ borrow >>= DIG_SIZE;
+ }
+ borrow += *num_dig; // will overflow if DIG_SIZE >= 8*sizeof(mpz_dbl_dig_t)/2
+ *num_dig = borrow & DIG_MASK;
+ borrow >>= DIG_SIZE;
+
+ // adjust quotient if it is too big
+ // (the estimate can exceed the true digit; add den back until borrow clears)
+ for (; borrow != 0; --quo) {
+ mpz_dbl_dig_t carry = 0;
+ for (mpz_dig_t *n = num_dig - den_len, *d = den_dig; n < num_dig; ++n, ++d) {
+ carry += (mpz_dbl_dig_t)*n + (mpz_dbl_dig_t)*d;
+ *n = carry & DIG_MASK;
+ carry >>= DIG_SIZE;
+ }
+ carry += *num_dig;
+ *num_dig = carry & DIG_MASK;
+ carry >>= DIG_SIZE;
+
+ borrow += carry;
+ }
+ } else { // DIG_SIZE == 8 * sizeof(mpz_dbl_dig_t) / 2
+ mpz_dbl_dig_t borrow = 0;
+
+ for (mpz_dig_t *n = num_dig - den_len, *d = den_dig; n < num_dig; ++n, ++d) {
+ mpz_dbl_dig_t x = (mpz_dbl_dig_t)quo * (mpz_dbl_dig_t)(*d);
+ if (x >= *n || *n - x <= borrow) {
+ borrow += (mpz_dbl_dig_t)x - (mpz_dbl_dig_t)*n;
+ *n = (-borrow) & DIG_MASK;
+ borrow = (borrow >> DIG_SIZE) + ((borrow & DIG_MASK) == 0 ? 0 : 1); // shift-right with round-up
+ } else {
+ *n = ((mpz_dbl_dig_t)*n - (mpz_dbl_dig_t)x - (mpz_dbl_dig_t)borrow) & DIG_MASK;
+ borrow = 0;
+ }
+ }
+ if (borrow >= *num_dig) {
+ borrow -= (mpz_dbl_dig_t)*num_dig;
+ *num_dig = (-borrow) & DIG_MASK;
+ borrow = (borrow >> DIG_SIZE) + ((borrow & DIG_MASK) == 0 ? 0 : 1); // shift-right with round-up
+ } else {
+ *num_dig = (*num_dig - borrow) & DIG_MASK;
+ borrow = 0;
+ }
+
+ // adjust quotient if it is too big
+ for (; borrow != 0; --quo) {
+ mpz_dbl_dig_t carry = 0;
+ for (mpz_dig_t *n = num_dig - den_len, *d = den_dig; n < num_dig; ++n, ++d) {
+ carry += (mpz_dbl_dig_t)*n + (mpz_dbl_dig_t)*d;
+ *n = carry & DIG_MASK;
+ carry >>= DIG_SIZE;
+ }
+ carry += (mpz_dbl_dig_t)*num_dig;
+ *num_dig = carry & DIG_MASK;
+ carry >>= DIG_SIZE;
+
+ //assert(borrow >= carry); // enable this to check the logic
+ borrow -= carry;
+ }
+ }
+
+ // store this digit of the quotient
+ *quo_dig = quo & DIG_MASK;
+ --quo_dig;
+
+ // move down to next digit of numerator
+ --num_dig;
+ --(*num_len);
+ }
+
+ // unnormalise denominator (restore the caller's digits)
+ for (mpz_dig_t *den = den_dig + den_len - 1, carry = 0; den >= den_dig; --den) {
+ mpz_dig_t d = *den;
+ *den = ((d >> norm_shift) | carry) & DIG_MASK;
+ carry = d << (DIG_SIZE - norm_shift);
+ }
+
+ // unnormalise numerator (remainder now)
+ for (mpz_dig_t *num = orig_num_dig + *num_len - 1, carry = 0; num >= orig_num_dig; --num) {
+ mpz_dig_t n = *num;
+ *num = ((n >> norm_shift) | carry) & DIG_MASK;
+ carry = n << (DIG_SIZE - norm_shift);
+ }
+
+ // strip trailing zeros
+
+ while (*quo_len > 0 && orig_quo_dig[*quo_len - 1] == 0) {
+ --(*quo_len);
+ }
+
+ while (*num_len > 0 && orig_num_dig[*num_len - 1] == 0) {
+ --(*num_len);
+ }
+}
+
+#define MIN_ALLOC (2)
+
+// Lookup table: log_base2_floor[x] == floor(log2(x)) for x in 1..32
+// (index 0 is unused padding).
+STATIC const uint8_t log_base2_floor[] = {
+ 0,
+ 0, 1, 1, 2,
+ 2, 2, 2, 3,
+ 3, 3, 3, 3,
+ 3, 3, 3, 4,
+ 4, 4, 4, 4,
+ 4, 4, 4, 4,
+ 4, 4, 4, 4,
+ 4, 4, 4, 5
+};
+
+// Initialise z to the value 0 with no digit storage allocated.
+void mpz_init_zero(mpz_t *z) {
+ z->dig = NULL;
+ z->alloc = 0;
+ z->len = 0;
+ z->neg = 0;
+ z->fixed_dig = 0;
+}
+
+// Initialise z (zeroed, no storage) and then set it to the signed value val.
+void mpz_init_from_int(mpz_t *z, mp_int_t val) {
+ mpz_init_zero(z);
+ mpz_set_from_int(z, val);
+}
+
+// Initialise z with a caller-provided, non-resizable digit buffer of
+// 'alloc' digits, then set it to val (which must fit in that buffer).
+void mpz_init_fixed_from_int(mpz_t *z, mpz_dig_t *dig, mp_uint_t alloc, mp_int_t val) {
+ z->dig = dig;
+ z->alloc = alloc;
+ z->len = 0;
+ z->neg = 0;
+ z->fixed_dig = 1;
+ mpz_set_from_int(z, val);
+}
+
+// Release z's digit storage if it was heap-allocated; z itself is not freed.
+void mpz_deinit(mpz_t *z) {
+ if (z == NULL || z->fixed_dig) {
+ return; // nothing to free for NULL or fixed-buffer mpz
+ }
+ m_del(mpz_dig_t, z->dig, z->alloc);
+}
+
+#if 0
+// Compiled out: heap-allocating constructors kept for reference only.
+these functions are unused
+
+mpz_t *mpz_zero(void) {
+ mpz_t *z = m_new_obj(mpz_t);
+ mpz_init_zero(z);
+ return z;
+}
+
+mpz_t *mpz_from_int(mp_int_t val) {
+ mpz_t *z = mpz_zero();
+ mpz_set_from_int(z, val);
+ return z;
+}
+
+mpz_t *mpz_from_ll(long long val, bool is_signed) {
+ mpz_t *z = mpz_zero();
+ mpz_set_from_ll(z, val, is_signed);
+ return z;
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+mpz_t *mpz_from_float(mp_float_t val) {
+ mpz_t *z = mpz_zero();
+ mpz_set_from_float(z, val);
+ return z;
+}
+#endif
+
+mpz_t *mpz_from_str(const char *str, mp_uint_t len, bool neg, mp_uint_t base) {
+ mpz_t *z = mpz_zero();
+ mpz_set_from_str(z, str, len, neg, base);
+ return z;
+}
+#endif
+
+// Free both the digit storage and the mpz_t object itself (NULL is a no-op).
+STATIC void mpz_free(mpz_t *z) {
+ if (z == NULL) {
+ return;
+ }
+ m_del(mpz_dig_t, z->dig, z->alloc);
+ m_del_obj(mpz_t, z);
+}
+
+// Ensure z has room for at least 'need' digits, growing the buffer if required.
+STATIC void mpz_need_dig(mpz_t *z, mp_uint_t need) {
+ if (need < MIN_ALLOC) {
+ need = MIN_ALLOC;
+ }
+
+ if (z->dig != NULL && z->alloc >= need) {
+ return; // existing buffer is already big enough
+ }
+
+ if (z->fixed_dig) {
+ // cannot reallocate fixed buffers
+ assert(0);
+ return;
+ }
+
+ z->dig = m_renew(mpz_dig_t, z->dig, z->alloc, need);
+ z->alloc = need;
+}
+
+// Allocate and return a heap copy of src; the copy is never fixed_dig.
+STATIC mpz_t *mpz_clone(const mpz_t *src) {
+ mpz_t *z = m_new_obj(mpz_t);
+ z->neg = src->neg;
+ z->fixed_dig = 0;
+ z->alloc = src->alloc;
+ z->len = src->len;
+ if (src->dig == NULL) {
+ z->dig = NULL;
+ } else {
+ z->dig = m_new(mpz_dig_t, z->alloc);
+ // NOTE(review): copies all alloc digits, not just the len valid ones;
+ // the extra digits may be uninitialised — harmless but worth confirming.
+ memcpy(z->dig, src->dig, src->alloc * sizeof(mpz_dig_t));
+ }
+ return z;
+}
+
+/* sets dest = src
+   can have dest, src the same
+*/
+void mpz_set(mpz_t *dest, const mpz_t *src) {
+    // grow dest's buffer first so the copy below cannot overflow
+    mpz_need_dig(dest, src->len);
+    dest->neg = src->neg;
+    dest->len = src->len;
+    memcpy(dest->dig, src->dig, src->len * sizeof(mpz_dig_t));
+}
+
+// sets z = val, reusing/growing z's digit buffer as needed
+void mpz_set_from_int(mpz_t *z, mp_int_t val) {
+    if (val == 0) {
+        // zero is represented by a zero-length digit array
+        z->len = 0;
+        return;
+    }
+
+    mpz_need_dig(z, MPZ_NUM_DIG_FOR_INT);
+
+    mp_uint_t uval;
+    if (val < 0) {
+        z->neg = 1;
+        // negate in the unsigned domain: plain -val is undefined
+        // behaviour when val is the most negative mp_int_t
+        uval = -(mp_uint_t)val;
+    } else {
+        z->neg = 0;
+        uval = val;
+    }
+
+    // store the magnitude little-endian, DIG_SIZE bits per digit
+    z->len = 0;
+    while (uval > 0) {
+        z->dig[z->len++] = uval & DIG_MASK;
+        uval >>= DIG_SIZE;
+    }
+}
+
+// sets z from a long long; if is_signed is false, val is reinterpreted
+// as an unsigned long long value
+void mpz_set_from_ll(mpz_t *z, long long val, bool is_signed) {
+    mpz_need_dig(z, MPZ_NUM_DIG_FOR_LL);
+
+    unsigned long long uval;
+    if (is_signed && val < 0) {
+        z->neg = 1;
+        // negate in the unsigned domain: plain -val is undefined
+        // behaviour when val == LLONG_MIN
+        uval = -(unsigned long long)val;
+    } else {
+        z->neg = 0;
+        uval = val;
+    }
+
+    // store the magnitude little-endian, DIG_SIZE bits per digit
+    z->len = 0;
+    while (uval > 0) {
+        z->dig[z->len++] = uval & DIG_MASK;
+        uval >>= DIG_SIZE;
+    }
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+// sets z = trunc(src) by extracting the IEEE-754 sign/exponent/fraction
+// fields directly; inf/NaN inputs yield 0 and must be handled by the caller
+void mpz_set_from_float(mpz_t *z, mp_float_t src) {
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+typedef uint64_t mp_float_int_t;
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+typedef uint32_t mp_float_int_t;
+#endif
+    // overlay a bitfield struct on the float to pick apart its fields
+    union {
+        mp_float_t f;
+        #if MP_ENDIANNESS_LITTLE
+        struct { mp_float_int_t frc:MP_FLOAT_FRAC_BITS, exp:MP_FLOAT_EXP_BITS, sgn:1; } p;
+        #else
+        struct { mp_float_int_t sgn:1, exp:MP_FLOAT_EXP_BITS, frc:MP_FLOAT_FRAC_BITS; } p;
+        #endif
+    } u = {src};
+
+    z->neg = u.p.sgn;
+    if (u.p.exp == 0) {
+        // value == 0 || value < 1
+        mpz_set_from_int(z, 0);
+    } else if (u.p.exp == ((1 << MP_FLOAT_EXP_BITS) - 1)) {
+        // u.p.frc == 0 indicates inf, else NaN
+        // should be handled by caller
+        mpz_set_from_int(z, 0);
+    } else {
+        const int adj_exp = (int)u.p.exp - MP_FLOAT_EXP_BIAS;
+        if (adj_exp < 0) {
+            // value < 1 , truncates to 0
+            mpz_set_from_int(z, 0);
+        } else if (adj_exp == 0) {
+            // 1 <= value < 2 , so truncates to 1
+            mpz_set_from_int(z, 1);
+        } else {
+            // 2 <= value
+            const int dig_cnt = (adj_exp + 1 + (DIG_SIZE - 1)) / DIG_SIZE;
+            const unsigned int rem = adj_exp % DIG_SIZE;
+            int dig_ind, shft;
+            // restore the implicit leading 1 bit of the mantissa
+            mp_float_int_t frc = u.p.frc | ((mp_float_int_t)1 << MP_FLOAT_FRAC_BITS);
+
+            if (adj_exp < MP_FLOAT_FRAC_BITS) {
+                // fractional bits below the binary point are truncated away
+                shft = 0;
+                dig_ind = 0;
+                frc >>= MP_FLOAT_FRAC_BITS - adj_exp;
+            } else {
+                // all mantissa bits are integral; low digits are zero-filled
+                shft = (rem - MP_FLOAT_FRAC_BITS) % DIG_SIZE;
+                dig_ind = (adj_exp - MP_FLOAT_FRAC_BITS) / DIG_SIZE;
+            }
+            mpz_need_dig(z, dig_cnt);
+            z->len = dig_cnt;
+            if (dig_ind != 0) {
+                memset(z->dig, 0, dig_ind * sizeof(mpz_dig_t));
+            }
+            if (shft != 0) {
+                z->dig[dig_ind++] = (frc << shft) & DIG_MASK;
+                frc >>= DIG_SIZE - shft;
+            }
+#if DIG_SIZE < (MP_FLOAT_FRAC_BITS + 1)
+            while (dig_ind != dig_cnt) {
+                z->dig[dig_ind++] = frc & DIG_MASK;
+                frc >>= DIG_SIZE;
+            }
+#else
+            if (dig_ind != dig_cnt) {
+                z->dig[dig_ind] = frc;
+            }
+#endif
+        }
+    }
+}
+#endif
+
+// sets z from an ASCII representation in the given base; parsing stops at
+// the first character that is not a valid digit for 'base'
+// returns number of bytes from str that were processed
+mp_uint_t mpz_set_from_str(mpz_t *z, const char *str, mp_uint_t len, bool neg, mp_uint_t base) {
+    // NOTE(review): this allows base up to 35, but mpz_as_str_size only
+    // supports base <= 32 — confirm which limit callers rely on
+    assert(base < 36);
+
+    const char *cur = str;
+    const char *top = str + len;
+
+    // 8 bits per input char is a safe overestimate of the digits needed
+    mpz_need_dig(z, len * 8 / DIG_SIZE + 1);
+
+    if (neg) {
+        z->neg = 1;
+    } else {
+        z->neg = 0;
+    }
+
+    z->len = 0;
+    for (; cur < top; ++cur) { // XXX UTF8 next char
+        //mp_uint_t v = char_to_numeric(cur#); // XXX UTF8 get char
+        mp_uint_t v = *cur;
+        if ('0' <= v && v <= '9') {
+            v -= '0';
+        } else if ('A' <= v && v <= 'Z') {
+            v -= 'A' - 10;
+        } else if ('a' <= v && v <= 'z') {
+            v -= 'a' - 10;
+        } else {
+            break;
+        }
+        if (v >= base) {
+            break;
+        }
+        // accumulate: z = z * base + v
+        z->len = mpn_mul_dig_add_dig(z->dig, z->len, base, v);
+    }
+
+    return cur - str;
+}
+
+// true iff z represents zero (canonically an empty digit array)
+bool mpz_is_zero(const mpz_t *z) {
+    return (z->len == 0);
+}
+
+#if 0
+these functions are unused
+
+bool mpz_is_pos(const mpz_t *z) {
+ return z->len > 0 && z->neg == 0;
+}
+
+bool mpz_is_neg(const mpz_t *z) {
+ return z->len > 0 && z->neg != 0;
+}
+
+bool mpz_is_odd(const mpz_t *z) {
+ return z->len > 0 && (z->dig[0] & 1) != 0;
+}
+
+bool mpz_is_even(const mpz_t *z) {
+ return z->len == 0 || (z->dig[0] & 1) == 0;
+}
+#endif
+
+// three-way compare: returns <0, 0, >0 for z1 < z2, z1 == z2, z1 > z2
+int mpz_cmp(const mpz_t *z1, const mpz_t *z2) {
+    // to catch comparison of -0 with +0
+    if (z1->len == 0 && z2->len == 0) {
+        return 0;
+    }
+    // differing signs decide immediately (negative < positive)
+    int cmp = (int)z2->neg - (int)z1->neg;
+    if (cmp != 0) {
+        return cmp;
+    }
+    // same sign: compare magnitudes, inverting the result for negatives
+    cmp = mpn_cmp(z1->dig, z1->len, z2->dig, z2->len);
+    if (z1->neg != 0) {
+        cmp = -cmp;
+    }
+    return cmp;
+}
+
+#if 0
+// obsolete
+// compares mpz with an integer that fits within DIG_SIZE bits
+mp_int_t mpz_cmp_sml_int(const mpz_t *z, mp_int_t sml_int) {
+ mp_int_t cmp;
+ if (z->neg == 0) {
+ if (sml_int < 0) return 1;
+ if (sml_int == 0) {
+ if (z->len == 0) return 0;
+ return 1;
+ }
+ if (z->len == 0) return -1;
+ assert(sml_int < (1 << DIG_SIZE));
+ if (z->len != 1) return 1;
+ cmp = z->dig[0] - sml_int;
+ } else {
+ if (sml_int > 0) return -1;
+ if (sml_int == 0) {
+ if (z->len == 0) return 0;
+ return -1;
+ }
+ if (z->len == 0) return 1;
+ assert(sml_int > -(1 << DIG_SIZE));
+ if (z->len != 1) return -1;
+ cmp = -z->dig[0] - sml_int;
+ }
+ if (cmp < 0) return -1;
+ if (cmp > 0) return 1;
+ return 0;
+}
+#endif
+
+#if 0
+these functions are unused
+
+/* returns abs(z)
+*/
+mpz_t *mpz_abs(const mpz_t *z) {
+ mpz_t *z2 = mpz_clone(z);
+ z2->neg = 0;
+ return z2;
+}
+
+/* returns -z
+*/
+mpz_t *mpz_neg(const mpz_t *z) {
+ mpz_t *z2 = mpz_clone(z);
+ z2->neg = 1 - z2->neg;
+ return z2;
+}
+
+/* returns lhs + rhs
+ can have lhs, rhs the same
+*/
+mpz_t *mpz_add(const mpz_t *lhs, const mpz_t *rhs) {
+ mpz_t *z = mpz_zero();
+ mpz_add_inpl(z, lhs, rhs);
+ return z;
+}
+
+/* returns lhs - rhs
+ can have lhs, rhs the same
+*/
+mpz_t *mpz_sub(const mpz_t *lhs, const mpz_t *rhs) {
+ mpz_t *z = mpz_zero();
+ mpz_sub_inpl(z, lhs, rhs);
+ return z;
+}
+
+/* returns lhs * rhs
+ can have lhs, rhs the same
+*/
+mpz_t *mpz_mul(const mpz_t *lhs, const mpz_t *rhs) {
+ mpz_t *z = mpz_zero();
+ mpz_mul_inpl(z, lhs, rhs);
+ return z;
+}
+
+/* returns lhs ** rhs
+ can have lhs, rhs the same
+*/
+mpz_t *mpz_pow(const mpz_t *lhs, const mpz_t *rhs) {
+ mpz_t *z = mpz_zero();
+ mpz_pow_inpl(z, lhs, rhs);
+ return z;
+}
+
+/* computes new integers in quo and rem such that:
+ quo * rhs + rem = lhs
+ 0 <= rem < rhs
+ can have lhs, rhs the same
+*/
+void mpz_divmod(const mpz_t *lhs, const mpz_t *rhs, mpz_t **quo, mpz_t **rem) {
+ *quo = mpz_zero();
+ *rem = mpz_zero();
+ mpz_divmod_inpl(*quo, *rem, lhs, rhs);
+}
+#endif
+
+/* computes dest = abs(z)
+   can have dest, z the same
+*/
+void mpz_abs_inpl(mpz_t *dest, const mpz_t *z) {
+    if (dest != z) {
+        mpz_set(dest, z);
+    }
+    // clearing the sign bit is all that's needed for absolute value
+    dest->neg = 0;
+}
+
+/* computes dest = -z
+   can have dest, z the same
+*/
+void mpz_neg_inpl(mpz_t *dest, const mpz_t *z) {
+    if (dest != z) {
+        mpz_set(dest, z);
+    }
+    // toggle the sign bit (0 <-> 1); note this can produce -0 if z == 0
+    dest->neg = 1 - dest->neg;
+}
+
+/* computes dest = ~z (= -z - 1)
+   can have dest, z the same
+*/
+void mpz_not_inpl(mpz_t *dest, const mpz_t *z) {
+    if (dest != z) {
+        mpz_set(dest, z);
+    }
+    if (dest->len == 0) {
+        // ~0 == -1
+        mpz_need_dig(dest, 1);
+        dest->dig[0] = 1;
+        dest->len = 1;
+        dest->neg = 1;
+    } else if (dest->neg) {
+        // z < 0: ~z = -z - 1, so subtract 1 from the magnitude
+        dest->neg = 0;
+        mpz_dig_t k = 1;
+        dest->len = mpn_sub(dest->dig, dest->dig, dest->len, &k, 1);
+    } else {
+        // z > 0: ~z = -(z + 1), so add 1 to the magnitude and negate
+        mpz_need_dig(dest, dest->len + 1);
+        mpz_dig_t k = 1;
+        dest->len = mpn_add(dest->dig, dest->dig, dest->len, &k, 1);
+        dest->neg = 1;
+    }
+}
+
+/* computes dest = lhs << rhs
+   can have dest, lhs the same
+*/
+void mpz_shl_inpl(mpz_t *dest, const mpz_t *lhs, mp_uint_t rhs) {
+    if (lhs->len == 0 || rhs == 0) {
+        // shifting zero, or shifting by zero: just copy
+        mpz_set(dest, lhs);
+    } else {
+        // a shift of rhs bits can grow the number by ceil(rhs/DIG_SIZE) digits
+        mpz_need_dig(dest, lhs->len + (rhs + DIG_SIZE - 1) / DIG_SIZE);
+        dest->len = mpn_shl(dest->dig, lhs->dig, lhs->len, rhs);
+        dest->neg = lhs->neg;
+    }
+}
+
+/* computes dest = lhs >> rhs (arithmetic shift, rounding toward -infinity)
+   can have dest, lhs the same
+*/
+void mpz_shr_inpl(mpz_t *dest, const mpz_t *lhs, mp_uint_t rhs) {
+    if (lhs->len == 0 || rhs == 0) {
+        mpz_set(dest, lhs);
+    } else {
+        mpz_need_dig(dest, lhs->len);
+        dest->len = mpn_shr(dest->dig, lhs->dig, lhs->len, rhs);
+        dest->neg = lhs->neg;
+        if (dest->neg) {
+            // arithmetic shift right, rounding to negative infinity:
+            // if any bit shifted out was set, round the magnitude up by 1
+            mp_uint_t n_whole = rhs / DIG_SIZE;
+            mp_uint_t n_part = rhs % DIG_SIZE;
+            mpz_dig_t round_up = 0;
+            // check fully shifted-out digits of the original value
+            for (mp_uint_t i = 0; i < lhs->len && i < n_whole; i++) {
+                if (lhs->dig[i] != 0) {
+                    round_up = 1;
+                    break;
+                }
+            }
+            // check partially shifted-out bits of the boundary digit
+            if (n_whole < lhs->len && (lhs->dig[n_whole] & ((1 << n_part) - 1)) != 0) {
+                round_up = 1;
+            }
+            if (round_up) {
+                if (dest->len == 0) {
+                    // dest == 0, so need to add 1 by hand (answer will be -1)
+                    dest->dig[0] = 1;
+                    dest->len = 1;
+                } else {
+                    // dest > 0, so can use mpn_add to add 1
+                    dest->len = mpn_add(dest->dig, dest->dig, dest->len, &round_up, 1);
+                }
+            }
+        }
+    }
+}
+
+/* computes dest = lhs + rhs
+   can have dest, lhs, rhs the same
+*/
+void mpz_add_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
+    // ensure lhs has the larger magnitude so mpn_sub never goes negative
+    if (mpn_cmp(lhs->dig, lhs->len, rhs->dig, rhs->len) < 0) {
+        const mpz_t *temp = lhs;
+        lhs = rhs;
+        rhs = temp;
+    }
+
+    if (lhs->neg == rhs->neg) {
+        // same sign: add magnitudes (may carry into one extra digit)
+        mpz_need_dig(dest, lhs->len + 1);
+        dest->len = mpn_add(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len);
+    } else {
+        // opposite signs: subtract the smaller magnitude from the larger
+        mpz_need_dig(dest, lhs->len);
+        dest->len = mpn_sub(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len);
+    }
+
+    // result takes the sign of the larger-magnitude operand
+    dest->neg = lhs->neg;
+}
+
+/* computes dest = lhs - rhs
+   can have dest, lhs, rhs the same
+*/
+void mpz_sub_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
+    bool neg = false;
+
+    // ensure lhs has the larger magnitude; remember if we swapped,
+    // since swapping the operands of a subtraction negates the result
+    if (mpn_cmp(lhs->dig, lhs->len, rhs->dig, rhs->len) < 0) {
+        const mpz_t *temp = lhs;
+        lhs = rhs;
+        rhs = temp;
+        neg = true;
+    }
+
+    if (lhs->neg != rhs->neg) {
+        // opposite signs: magnitudes add (may carry into one extra digit)
+        mpz_need_dig(dest, lhs->len + 1);
+        dest->len = mpn_add(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len);
+    } else {
+        // same sign: magnitudes subtract
+        mpz_need_dig(dest, lhs->len);
+        dest->len = mpn_sub(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len);
+    }
+
+    if (neg) {
+        dest->neg = 1 - lhs->neg;
+    } else {
+        dest->neg = lhs->neg;
+    }
+}
+
+/* computes dest = lhs & rhs (two's-complement semantics for negatives)
+   can have dest, lhs, rhs the same
+*/
+void mpz_and_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
+    // make sure lhs has the most digits
+    if (lhs->len < rhs->len) {
+        const mpz_t *temp = lhs;
+        lhs = rhs;
+        rhs = temp;
+    }
+
+    #if MICROPY_OPT_MPZ_BITWISE
+
+    if ((0 == lhs->neg) && (0 == rhs->neg)) {
+        // both non-negative: plain digit-wise AND, result can't exceed rhs->len
+        mpz_need_dig(dest, lhs->len);
+        dest->len = mpn_and(dest->dig, lhs->dig, rhs->dig, rhs->len);
+        dest->neg = 0;
+    } else {
+        // at least one negative: mpn_and_neg handles two's-complement carries
+        mpz_need_dig(dest, lhs->len + 1);
+        dest->len = mpn_and_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
+                                lhs->neg == rhs->neg, 0 != lhs->neg, 0 != rhs->neg);
+        // result is negative only when both operands are negative
+        dest->neg = lhs->neg & rhs->neg;
+    }
+
+    #else
+
+    mpz_need_dig(dest, lhs->len + (lhs->neg || rhs->neg));
+    dest->len = mpn_and_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
+                            (lhs->neg == rhs->neg) ? lhs->neg : 0, lhs->neg, rhs->neg);
+    dest->neg = lhs->neg & rhs->neg;
+
+    #endif
+}
+
+/* computes dest = lhs | rhs (two's-complement semantics for negatives)
+   can have dest, lhs, rhs the same
+*/
+void mpz_or_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
+    // make sure lhs has the most digits
+    if (lhs->len < rhs->len) {
+        const mpz_t *temp = lhs;
+        lhs = rhs;
+        rhs = temp;
+    }
+
+    #if MICROPY_OPT_MPZ_BITWISE
+
+    if ((0 == lhs->neg) && (0 == rhs->neg)) {
+        // both non-negative: plain digit-wise OR
+        mpz_need_dig(dest, lhs->len);
+        dest->len = mpn_or(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len);
+        dest->neg = 0;
+    } else {
+        // at least one negative: result is always negative for OR
+        mpz_need_dig(dest, lhs->len + 1);
+        dest->len = mpn_or_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
+                               0 != lhs->neg, 0 != rhs->neg);
+        dest->neg = 1;
+    }
+
+    #else
+
+    mpz_need_dig(dest, lhs->len + (lhs->neg || rhs->neg));
+    dest->len = mpn_or_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
+                           (lhs->neg || rhs->neg), lhs->neg, rhs->neg);
+    dest->neg = lhs->neg | rhs->neg;
+
+    #endif
+}
+
+/* computes dest = lhs ^ rhs (two's-complement semantics for negatives)
+   can have dest, lhs, rhs the same
+*/
+void mpz_xor_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
+    // make sure lhs has the most digits
+    if (lhs->len < rhs->len) {
+        const mpz_t *temp = lhs;
+        lhs = rhs;
+        rhs = temp;
+    }
+
+    #if MICROPY_OPT_MPZ_BITWISE
+
+    if (lhs->neg == rhs->neg) {
+        // same sign: XOR of equal signs is non-negative
+        mpz_need_dig(dest, lhs->len);
+        if (lhs->neg == 0) {
+            dest->len = mpn_xor(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len);
+        } else {
+            dest->len = mpn_xor_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len, 0, 0, 0);
+        }
+        dest->neg = 0;
+    } else {
+        // opposite signs: XOR result is negative
+        mpz_need_dig(dest, lhs->len + 1);
+        dest->len = mpn_xor_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len, 1,
+                                0 == lhs->neg, 0 == rhs->neg);
+        dest->neg = 1;
+    }
+
+    #else
+
+    mpz_need_dig(dest, lhs->len + (lhs->neg || rhs->neg));
+    dest->len = mpn_xor_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
+                            (lhs->neg != rhs->neg), 0 == lhs->neg, 0 == rhs->neg);
+    dest->neg = lhs->neg ^ rhs->neg;
+
+    #endif
+}
+
+/* computes dest = lhs * rhs
+   can have dest, lhs, rhs the same
+*/
+void mpz_mul_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
+    if (lhs->len == 0 || rhs->len == 0) {
+        mpz_set_from_int(dest, 0);
+        return;
+    }
+
+    // mpn_mul can't work in place over its inputs, so if dest aliases an
+    // operand, multiply from a temporary clone instead
+    mpz_t *temp = NULL;
+    if (lhs == dest) {
+        lhs = temp = mpz_clone(lhs);
+        if (rhs == dest) {
+            rhs = lhs;
+        }
+    } else if (rhs == dest) {
+        rhs = temp = mpz_clone(rhs);
+    }
+
+    mpz_need_dig(dest, lhs->len + rhs->len); // min mem l+r-1, max mem l+r
+    // mpn_mul accumulates into dest, so it must start zeroed
+    memset(dest->dig, 0, dest->alloc * sizeof(mpz_dig_t));
+    dest->len = mpn_mul(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len);
+
+    if (lhs->neg == rhs->neg) {
+        dest->neg = 0;
+    } else {
+        dest->neg = 1;
+    }
+
+    // free(NULL) semantics: mpz_free is a no-op when no clone was made
+    mpz_free(temp);
+}
+
+/* computes dest = lhs ** rhs by binary exponentiation (square-and-multiply)
+   can have dest, lhs, rhs the same
+   note: a negative exponent yields 0 (fractional results truncate to zero)
+*/
+void mpz_pow_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
+    if (lhs->len == 0 || rhs->neg != 0) {
+        // 0 ** n == 0, and x ** -n truncates to 0
+        mpz_set_from_int(dest, 0);
+        return;
+    }
+
+    if (rhs->len == 0) {
+        // x ** 0 == 1
+        mpz_set_from_int(dest, 1);
+        return;
+    }
+
+    // work on clones so dest may alias either operand
+    mpz_t *x = mpz_clone(lhs);
+    mpz_t *n = mpz_clone(rhs);
+
+    mpz_set_from_int(dest, 1);
+
+    while (n->len > 0) {
+        // multiply in x for each set bit of the exponent
+        if ((n->dig[0] & 1) != 0) {
+            mpz_mul_inpl(dest, dest, x);
+        }
+        n->len = mpn_shr(n->dig, n->dig, n->len, 1);
+        if (n->len == 0) {
+            break;
+        }
+        mpz_mul_inpl(x, x, x);
+    }
+
+    mpz_free(x);
+    mpz_free(n);
+}
+
+#if 0
+these functions are unused
+
+/* computes dest = (lhs ** rhs) % mod
+ can have dest, lhs, rhs the same; mod can't be the same as dest
+*/
+void mpz_pow3_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs, const mpz_t *mod) {
+ if (lhs->len == 0 || rhs->neg != 0) {
+ mpz_set_from_int(dest, 0);
+ return;
+ }
+
+ if (rhs->len == 0) {
+ mpz_set_from_int(dest, 1);
+ return;
+ }
+
+ mpz_t *x = mpz_clone(lhs);
+ mpz_t *n = mpz_clone(rhs);
+ mpz_t quo; mpz_init_zero(&quo);
+
+ mpz_set_from_int(dest, 1);
+
+ while (n->len > 0) {
+ if ((n->dig[0] & 1) != 0) {
+ mpz_mul_inpl(dest, dest, x);
+ mpz_divmod_inpl(&quo, dest, dest, mod);
+ }
+ n->len = mpn_shr(n->dig, n->dig, n->len, 1);
+ if (n->len == 0) {
+ break;
+ }
+ mpz_mul_inpl(x, x, x);
+ mpz_divmod_inpl(&quo, x, x, mod);
+ }
+
+ mpz_deinit(&quo);
+ mpz_free(x);
+ mpz_free(n);
+}
+
+/* computes gcd(z1, z2)
+ based on Knuth's modified gcd algorithm (I think?)
+ gcd(z1, z2) >= 0
+ gcd(0, 0) = 0
+ gcd(z, 0) = abs(z)
+*/
+mpz_t *mpz_gcd(const mpz_t *z1, const mpz_t *z2) {
+ if (z1->len == 0) {
+ mpz_t *a = mpz_clone(z2);
+ a->neg = 0;
+ return a;
+ } else if (z2->len == 0) {
+ mpz_t *a = mpz_clone(z1);
+ a->neg = 0;
+ return a;
+ }
+
+ mpz_t *a = mpz_clone(z1);
+ mpz_t *b = mpz_clone(z2);
+ mpz_t c; mpz_init_zero(&c);
+ a->neg = 0;
+ b->neg = 0;
+
+ for (;;) {
+ if (mpz_cmp(a, b) < 0) {
+ if (a->len == 0) {
+ mpz_free(a);
+ mpz_deinit(&c);
+ return b;
+ }
+ mpz_t *t = a; a = b; b = t;
+ }
+ if (!(b->len >= 2 || (b->len == 1 && b->dig[0] > 1))) { // compute b > 0; could be mpz_cmp_small_int(b, 1) > 0
+ break;
+ }
+ mpz_set(&c, b);
+ do {
+ mpz_add_inpl(&c, &c, &c);
+ } while (mpz_cmp(&c, a) <= 0);
+ c.len = mpn_shr(c.dig, c.dig, c.len, 1);
+ mpz_sub_inpl(a, a, &c);
+ }
+
+ mpz_deinit(&c);
+
+ if (b->len == 1 && b->dig[0] == 1) { // compute b == 1; could be mpz_cmp_small_int(b, 1) == 0
+ mpz_free(a);
+ return b;
+ } else {
+ mpz_free(b);
+ return a;
+ }
+}
+
+/* computes lcm(z1, z2)
+ = abs(z1) / gcd(z1, z2) * abs(z2)
+ lcm(z1, z1) >= 0
+ lcm(0, 0) = 0
+ lcm(z, 0) = 0
+*/
+mpz_t *mpz_lcm(const mpz_t *z1, const mpz_t *z2) {
+ if (z1->len == 0 || z2->len == 0) {
+ return mpz_zero();
+ }
+
+ mpz_t *gcd = mpz_gcd(z1, z2);
+ mpz_t *quo = mpz_zero();
+ mpz_t *rem = mpz_zero();
+ mpz_divmod_inpl(quo, rem, z1, gcd);
+ mpz_mul_inpl(rem, quo, z2);
+ mpz_free(gcd);
+ mpz_free(quo);
+ rem->neg = 0;
+ return rem;
+}
+#endif
+
+/* computes new integers in quo and rem such that:
+       quo * rhs + rem = lhs
+       0 <= rem < rhs
+   can have lhs, rhs the same
+   NOTE(review): dest_rem inherits lhs's sign via mpz_set below and only
+   dest_quo's sign is adjusted here; the "0 <= rem" part of the contract
+   appears to rely on adjustment elsewhere — confirm with callers
+*/
+void mpz_divmod_inpl(mpz_t *dest_quo, mpz_t *dest_rem, const mpz_t *lhs, const mpz_t *rhs) {
+    if (rhs->len == 0) {
+        // division by zero: produce 0, 0 (caller is expected to raise)
+        mpz_set_from_int(dest_quo, 0);
+        mpz_set_from_int(dest_rem, 0);
+        return;
+    }
+
+    mpz_need_dig(dest_quo, lhs->len + 1); // +1 necessary?
+    memset(dest_quo->dig, 0, (lhs->len + 1) * sizeof(mpz_dig_t));
+    dest_quo->len = 0;
+    mpz_need_dig(dest_rem, lhs->len + 1); // +1 necessary?
+    // mpn_div works in place on the dividend, which becomes the remainder
+    mpz_set(dest_rem, lhs);
+    //rhs->dig[rhs->len] = 0;
+    mpn_div(dest_rem->dig, &dest_rem->len, rhs->dig, rhs->len, dest_quo->dig, &dest_quo->len);
+
+    if (lhs->neg != rhs->neg) {
+        dest_quo->neg = 1;
+    }
+}
+
+#if 0
+these functions are unused
+
+/* computes floor(lhs / rhs)
+ can have lhs, rhs the same
+*/
+mpz_t *mpz_div(const mpz_t *lhs, const mpz_t *rhs) {
+ mpz_t *quo = mpz_zero();
+ mpz_t rem; mpz_init_zero(&rem);
+ mpz_divmod_inpl(quo, &rem, lhs, rhs);
+ mpz_deinit(&rem);
+ return quo;
+}
+
+/* computes lhs % rhs ( >= 0)
+ can have lhs, rhs the same
+*/
+mpz_t *mpz_mod(const mpz_t *lhs, const mpz_t *rhs) {
+ mpz_t quo; mpz_init_zero(&quo);
+ mpz_t *rem = mpz_zero();
+ mpz_divmod_inpl(&quo, rem, lhs, rhs);
+ mpz_deinit(&quo);
+ return rem;
+}
+#endif
+
+// must return actual int value if it fits in mp_int_t
+// (otherwise the hash is the value truncated to mp_int_t width)
+mp_int_t mpz_hash(const mpz_t *z) {
+    mp_int_t val = 0;
+    mpz_dig_t *d = z->dig + z->len;
+
+    // fold digits in most-significant-first order
+    while (d-- > z->dig) {
+        val = (val << DIG_SIZE) | *d;
+    }
+
+    if (z->neg != 0) {
+        val = -val;
+    }
+
+    return val;
+}
+
+// store the value of i into *value if it fits in a signed mp_int_t;
+// returns false (leaving *value untouched) on overflow
+bool mpz_as_int_checked(const mpz_t *i, mp_int_t *value) {
+    mp_uint_t val = 0;
+    mpz_dig_t *d = i->dig + i->len;
+
+    while (d-- > i->dig) {
+        // bail out if the next shift would push bits past the sign bit
+        // NOTE(review): this check appears to also reject the most negative
+        // representable value — confirm that is acceptable to callers
+        if (val > (~(WORD_MSBIT_HIGH) >> DIG_SIZE)) {
+            // will overflow
+            return false;
+        }
+        val = (val << DIG_SIZE) | *d;
+    }
+
+    if (i->neg != 0) {
+        // negate in the unsigned domain, then store as signed
+        val = -val;
+    }
+
+    *value = val;
+    return true;
+}
+
+// store the value of i into *value if it is non-negative and fits in
+// mp_uint_t; returns false (leaving *value untouched) otherwise
+bool mpz_as_uint_checked(const mpz_t *i, mp_uint_t *value) {
+    if (i->neg != 0) {
+        // can't represent signed values
+        return false;
+    }
+
+    mp_uint_t val = 0;
+    mpz_dig_t *d = i->dig + i->len;
+
+    while (d-- > i->dig) {
+        // unlike the signed case the top bit may be used, hence DIG_SIZE - 1
+        if (val > (~(WORD_MSBIT_HIGH) >> (DIG_SIZE - 1))) {
+            // will overflow
+            return false;
+        }
+        val = (val << DIG_SIZE) | *d;
+    }
+
+    *value = val;
+    return true;
+}
+
+// serialise z to buf in the chosen endianness; negative values are emitted
+// in two's-complement form (the +carry below implements ~x + 1 per byte)
+// writes at most len bytes to buf (so buf should be zeroed before calling)
+void mpz_as_bytes(const mpz_t *z, bool big_endian, mp_uint_t len, byte *buf) {
+    byte *b = buf;
+    if (big_endian) {
+        // big-endian fills from the end of the buffer backwards
+        b += len;
+    }
+    mpz_dig_t *zdig = z->dig;
+    int bits = 0;
+    mpz_dbl_dig_t d = 0;
+    mpz_dbl_dig_t carry = 1;
+    for (mp_uint_t zlen = z->len; zlen > 0; --zlen) {
+        // accumulate DIG_SIZE bits at a time, draining 8 bits per output byte
+        bits += DIG_SIZE;
+        d = (d << DIG_SIZE) | *zdig++;
+        for (; bits >= 8; bits -= 8, d >>= 8) {
+            mpz_dig_t val = d;
+            if (z->neg) {
+                // two's complement: invert and propagate the +1 carry
+                val = (~val & 0xff) + carry;
+                carry = val >> 8;
+            }
+            if (big_endian) {
+                *--b = val;
+                if (b == buf) {
+                    return;
+                }
+            } else {
+                *b++ = val;
+                if (b == buf + len) {
+                    return;
+                }
+            }
+        }
+    }
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+// convert i to a float; precision is lost once the value exceeds the
+// float mantissa, and very large values may overflow to infinity
+mp_float_t mpz_as_float(const mpz_t *i) {
+    mp_float_t val = 0;
+    mpz_dig_t *d = i->dig + i->len;
+
+    // Horner's scheme over the digits, most significant first
+    while (d-- > i->dig) {
+        val = val * DIG_BASE + *d;
+    }
+
+    if (i->neg != 0) {
+        val = -val;
+    }
+
+    return val;
+}
+#endif
+
+// return an upper bound on the buffer size needed by mpz_as_str_inpl for
+// this value/base/prefix/comma combination; bases outside 2..32 give 0
+// (32 is the table limit of log_base2_floor)
+mp_uint_t mpz_as_str_size(const mpz_t *i, mp_uint_t base, const char *prefix, char comma) {
+    if (base < 2 || base > 32) {
+        return 0;
+    }
+
+    // overestimate: DIG_SIZE bits per digit / floor(log2(base)) chars per bit
+    mp_uint_t num_digits = i->len * DIG_SIZE / log_base2_floor[base] + 1;
+    mp_uint_t num_commas = comma ? num_digits / 3: 0;
+    mp_uint_t prefix_len = prefix ? strlen(prefix) : 0;
+
+    return num_digits + num_commas + prefix_len + 2; // +1 for sign, +1 for null byte
+}
+
+#if 0
+this function is unused
+char *mpz_as_str(const mpz_t *i, mp_uint_t base) {
+ char *s = m_new(char, mpz_as_str_size(i, base, NULL, '\0'));
+ mpz_as_str_inpl(i, base, NULL, 'a', '\0', s);
+ return s;
+}
+#endif
+
+// assumes enough space as calculated by mpz_as_str_size
+// returns length of string, not including null byte
+mp_uint_t mpz_as_str_inpl(const mpz_t *i, mp_uint_t base, const char *prefix, char base_char, char comma, char *str) {
+    // guard against a NULL output buffer before any write; the previous
+    // combined check dereferenced str[0] even when str == NULL
+    if (str == NULL) {
+        return 0;
+    }
+    if (base < 2 || base > 32) {
+        str[0] = 0;
+        return 0;
+    }
+
+    mp_uint_t ilen = i->len;
+
+    char *s = str;
+    if (ilen == 0) {
+        // value is zero: emit just the prefix (if any) and "0"
+        if (prefix) {
+            while (*prefix)
+                *s++ = *prefix++;
+        }
+        *s++ = '0';
+        *s = '\0';
+        return s - str;
+    }
+
+    // make a copy of mpz digits, so we can do the div/mod calculation
+    mpz_dig_t *dig = m_new(mpz_dig_t, ilen);
+    memcpy(dig, i->dig, ilen * sizeof(mpz_dig_t));
+
+    // convert, emitting digits least significant first
+    char *last_comma = str;
+    bool done;
+    do {
+        mpz_dig_t *d = dig + ilen;
+        mpz_dbl_dig_t a = 0;
+
+        // compute next remainder (long division of the digit array by base)
+        while (--d >= dig) {
+            a = (a << DIG_SIZE) | *d;
+            *d = a / base;
+            a %= base;
+        }
+
+        // convert to character
+        a += '0';
+        if (a > '9') {
+            a += base_char - '9' - 1;
+        }
+        *s++ = a;
+
+        // check if number is zero
+        done = true;
+        for (d = dig; d < dig + ilen; ++d) {
+            if (*d != 0) {
+                done = false;
+                break;
+            }
+        }
+        if (comma && (s - last_comma) == 3) {
+            *s++ = comma;
+            last_comma = s;
+        }
+    }
+    while (!done);
+
+    // free the copy of the digits array
+    m_del(mpz_dig_t, dig, ilen);
+
+    if (prefix) {
+        // copy the prefix in reverse; the whole string is reversed below
+        const char *p = &prefix[strlen(prefix)];
+        while (p > prefix) {
+            *s++ = *--p;
+        }
+    }
+    if (i->neg != 0) {
+        *s++ = '-';
+    }
+
+    // digits (and prefix/sign) were written backwards: reverse the string
+    for (char *u = str, *v = s - 1; u < v; ++u, --v) {
+        char temp = *u;
+        *u = *v;
+        *v = temp;
+    }
+
+    *s = '\0'; // null termination
+
+    return s - str;
+}
+
+#endif // MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_MPZ
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/mpz.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,140 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_MPZ_H__
+#define __MICROPY_INCLUDED_PY_MPZ_H__
+
+#include <stdint.h>
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+
+// This mpz module implements arbitrary precision integers.
+//
+// The storage for each digit is defined by mpz_dig_t. The actual number of
+// bits in mpz_dig_t that are used is defined by MPZ_DIG_SIZE. The machine must
+// also provide a type that is twice as wide as mpz_dig_t, in both signed and
+// unsigned versions.
+//
+// MPZ_DIG_SIZE can be between 4 and 8*sizeof(mpz_dig_t), but it makes most
+// sense to have it as large as possible. If MPZ_DIG_SIZE is not already
+// defined then it is auto-detected below, depending on the machine. The types
+// are then set based on the value of MPZ_DIG_SIZE (although they can be freely
+// changed so long as the constraints mentioned above are met).
+
+#ifndef MPZ_DIG_SIZE
+ #if defined(__x86_64__) || defined(_WIN64)
+ // 64-bit machine, using 32-bit storage for digits
+ #define MPZ_DIG_SIZE (32)
+ #else
+ // default: 32-bit machine, using 16-bit storage for digits
+ #define MPZ_DIG_SIZE (16)
+ #endif
+#endif
+
+#if MPZ_DIG_SIZE > 16
+typedef uint32_t mpz_dig_t;
+typedef uint64_t mpz_dbl_dig_t;
+typedef int64_t mpz_dbl_dig_signed_t;
+#elif MPZ_DIG_SIZE > 8
+typedef uint16_t mpz_dig_t;
+typedef uint32_t mpz_dbl_dig_t;
+typedef int32_t mpz_dbl_dig_signed_t;
+#elif MPZ_DIG_SIZE > 4
+typedef uint8_t mpz_dig_t;
+typedef uint16_t mpz_dbl_dig_t;
+typedef int16_t mpz_dbl_dig_signed_t;
+#else
+typedef uint8_t mpz_dig_t;
+typedef uint8_t mpz_dbl_dig_t;
+typedef int8_t mpz_dbl_dig_signed_t;
+#endif
+
+#ifdef _WIN64
+ #ifdef __MINGW32__
+ #define MPZ_LONG_1 1LL
+ #else
+ #define MPZ_LONG_1 1i64
+ #endif
+#else
+ #define MPZ_LONG_1 1L
+#endif
+
+// these define the maximum storage needed to hold an int or long long
+#define MPZ_NUM_DIG_FOR_INT ((sizeof(mp_int_t) * 8 + MPZ_DIG_SIZE - 1) / MPZ_DIG_SIZE)
+#define MPZ_NUM_DIG_FOR_LL ((sizeof(long long) * 8 + MPZ_DIG_SIZE - 1) / MPZ_DIG_SIZE)
+
+typedef struct _mpz_t {
+ mp_uint_t neg : 1;
+ mp_uint_t fixed_dig : 1;
+ mp_uint_t alloc : BITS_PER_WORD - 2;
+ mp_uint_t len;
+ mpz_dig_t *dig;
+} mpz_t;
+
+// convenience macro to declare an mpz with a digit array from the stack, initialised by an integer
+#define MPZ_CONST_INT(z, val) mpz_t z; mpz_dig_t z ## _digits[MPZ_NUM_DIG_FOR_INT]; mpz_init_fixed_from_int(&z, z_digits, MPZ_NUM_DIG_FOR_INT, val);
+
+void mpz_init_zero(mpz_t *z);
+void mpz_init_from_int(mpz_t *z, mp_int_t val);
+void mpz_init_fixed_from_int(mpz_t *z, mpz_dig_t *dig, mp_uint_t dig_alloc, mp_int_t val);
+void mpz_deinit(mpz_t *z);
+
+void mpz_set(mpz_t *dest, const mpz_t *src);
+void mpz_set_from_int(mpz_t *z, mp_int_t src);
+void mpz_set_from_ll(mpz_t *z, long long i, bool is_signed);
+#if MICROPY_PY_BUILTINS_FLOAT
+void mpz_set_from_float(mpz_t *z, mp_float_t src);
+#endif
+mp_uint_t mpz_set_from_str(mpz_t *z, const char *str, mp_uint_t len, bool neg, mp_uint_t base);
+
+bool mpz_is_zero(const mpz_t *z);
+int mpz_cmp(const mpz_t *lhs, const mpz_t *rhs);
+
+void mpz_abs_inpl(mpz_t *dest, const mpz_t *z);
+void mpz_neg_inpl(mpz_t *dest, const mpz_t *z);
+void mpz_not_inpl(mpz_t *dest, const mpz_t *z);
+void mpz_shl_inpl(mpz_t *dest, const mpz_t *lhs, mp_uint_t rhs);
+void mpz_shr_inpl(mpz_t *dest, const mpz_t *lhs, mp_uint_t rhs);
+void mpz_add_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
+void mpz_sub_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
+void mpz_mul_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
+void mpz_pow_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
+void mpz_and_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
+void mpz_or_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
+void mpz_xor_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
+void mpz_divmod_inpl(mpz_t *dest_quo, mpz_t *dest_rem, const mpz_t *lhs, const mpz_t *rhs);
+
+mp_int_t mpz_hash(const mpz_t *z);
+bool mpz_as_int_checked(const mpz_t *z, mp_int_t *value);
+bool mpz_as_uint_checked(const mpz_t *z, mp_uint_t *value);
+void mpz_as_bytes(const mpz_t *z, bool big_endian, mp_uint_t len, byte *buf);
+#if MICROPY_PY_BUILTINS_FLOAT
+mp_float_t mpz_as_float(const mpz_t *z);
+#endif
+mp_uint_t mpz_as_str_size(const mpz_t *i, mp_uint_t base, const char *prefix, char comma);
+mp_uint_t mpz_as_str_inpl(const mpz_t *z, mp_uint_t base, const char *prefix, char base_char, char comma, char *str);
+
+#endif // __MICROPY_INCLUDED_PY_MPZ_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/nativeglue.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,156 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+#include "py/emitglue.h"
+#include "py/bc.h"
+
+#if 0 // print debugging info
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#endif
+
+#if MICROPY_EMIT_NATIVE
+
+// convert a Micro Python object to a valid native value based on type
+// (type is one of the MP_NATIVE_TYPE_* constants, masked to the low nibble)
+mp_uint_t mp_convert_obj_to_native(mp_obj_t obj, mp_uint_t type) {
+    DEBUG_printf("mp_convert_obj_to_native(%p, " UINT_FMT ")\n", obj, type);
+    switch (type & 0xf) {
+        case MP_NATIVE_TYPE_OBJ: return (mp_uint_t)obj;
+        case MP_NATIVE_TYPE_BOOL:
+        case MP_NATIVE_TYPE_INT:
+        case MP_NATIVE_TYPE_UINT: return mp_obj_get_int_truncated(obj);
+        default: { // cast obj to a pointer
+            mp_buffer_info_t bufinfo;
+            if (mp_get_buffer(obj, &bufinfo, MP_BUFFER_RW)) {
+                // buffer-protocol objects pass the address of their data
+                return (mp_uint_t)bufinfo.buf;
+            } else {
+                // assume obj is an integer that represents an address
+                return mp_obj_get_int_truncated(obj);
+            }
+        }
+    }
+}
+
+#endif
+
+#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_THUMB
+
+// convert a native value to a Micro Python object based on type
+// (inverse of mp_convert_obj_to_native)
+mp_obj_t mp_convert_native_to_obj(mp_uint_t val, mp_uint_t type) {
+    DEBUG_printf("mp_convert_native_to_obj(" UINT_FMT ", " UINT_FMT ")\n", val, type);
+    switch (type & 0xf) {
+        case MP_NATIVE_TYPE_OBJ: return (mp_obj_t)val;
+        case MP_NATIVE_TYPE_BOOL: return mp_obj_new_bool(val);
+        case MP_NATIVE_TYPE_INT: return mp_obj_new_int(val);
+        case MP_NATIVE_TYPE_UINT: return mp_obj_new_int_from_uint(val);
+        default: // a pointer
+            // we return just the value of the pointer as an integer
+            return mp_obj_new_int_from_uint(val);
+    }
+}
+
+#endif
+
+#if MICROPY_EMIT_NATIVE
+
+// wrapper that accepts n_args and n_kw in one argument
+// (native emitter can only pass at most 3 arguments to a function)
+// packing: low byte = n_args, next byte = n_kw
+mp_obj_t mp_native_call_function_n_kw(mp_obj_t fun_in, mp_uint_t n_args_kw, const mp_obj_t *args) {
+    return mp_call_function_n_kw(fun_in, n_args_kw & 0xff, (n_args_kw >> 8) & 0xff, args);
+}
+
+// wrapper that makes raise obj and raises it
+// END_FINALLY opcode requires that we don't raise if o==None
+void mp_native_raise(mp_obj_t o) {
+    if (o != mp_const_none) {
+        // does not return: unwinds via the nlr (setjmp/longjmp) machinery
+        nlr_raise(mp_make_raise_obj(o));
+    }
+}
+
+// these must correspond to the respective enum in runtime0.h
+// (native code indexes this table by MP_F_* constants, so the entry
+// order here must match that enum exactly, including the #if'd entries)
+void *const mp_fun_table[MP_F_NUMBER_OF] = {
+    mp_convert_obj_to_native,
+    mp_convert_native_to_obj,
+    mp_load_name,
+    mp_load_global,
+    mp_load_build_class,
+    mp_load_attr,
+    mp_load_method,
+    mp_store_name,
+    mp_store_global,
+    mp_store_attr,
+    mp_obj_subscr,
+    mp_obj_is_true,
+    mp_unary_op,
+    mp_binary_op,
+    mp_obj_new_tuple,
+    mp_obj_new_list,
+    mp_obj_list_append,
+    mp_obj_new_dict,
+    mp_obj_dict_store,
+#if MICROPY_PY_BUILTINS_SET
+    mp_obj_new_set,
+    mp_obj_set_store,
+#endif
+    mp_make_function_from_raw_code,
+    mp_native_call_function_n_kw,
+    mp_call_method_n_kw,
+    mp_call_method_n_kw_var,
+    mp_getiter,
+    mp_iternext,
+    nlr_push,
+    nlr_pop,
+    mp_native_raise,
+    mp_import_name,
+    mp_import_from,
+    mp_import_all,
+#if MICROPY_PY_BUILTINS_SLICE
+    mp_obj_new_slice,
+#endif
+    mp_unpack_sequence,
+    mp_unpack_ex,
+    mp_delete_name,
+    mp_delete_global,
+    mp_obj_new_cell,
+    mp_make_closure_from_raw_code,
+    mp_setup_code_state,
+};
+
+/*
+void mp_f_vector(mp_fun_kind_t fun_kind) {
+ (mp_f_table[fun_kind])();
+}
+*/
+
+#endif // MICROPY_EMIT_NATIVE
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/nlr.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,115 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_NLR_H__
+#define __MICROPY_INCLUDED_PY_NLR_H__
+
+// non-local return
+// exception handling, basically a stack of setjmp/longjmp buffers
+
+#include <limits.h>
+#include <setjmp.h>
+#include <assert.h>
+
+#include "py/mpconfig.h"
+
+// One frame of the NLR (non-local return) stack; linked via 'prev'.
+typedef struct _nlr_buf_t nlr_buf_t;
+struct _nlr_buf_t {
+ // the entries here must all be machine word size
+ nlr_buf_t *prev; // next-outer buffer in the stack of active handlers
+ void *ret_val; // always a concrete object (an exception instance)
+#if !defined(MICROPY_NLR_SETJMP) || !MICROPY_NLR_SETJMP
+// register-save area for the hand-written NLR implementations; the size
+// is per-architecture
+#if defined(__i386__)
+ void *regs[6];
+#elif defined(__x86_64__)
+ #if defined(__CYGWIN__)
+ void *regs[12];
+ #else
+ void *regs[8];
+ #endif
+#elif defined(__thumb2__) || defined(__thumb__) || defined(__arm__)
+ void *regs[10];
+#elif defined(__xtensa__)
+ void *regs[10];
+#else
+ // unknown arch: force the portable setjmp-based implementation below
+ #define MICROPY_NLR_SETJMP (1)
+ //#warning "No native NLR support for this arch, using setjmp implementation"
+#endif
+#endif
+
+#if MICROPY_NLR_SETJMP
+ jmp_buf jmpbuf;
+#endif
+};
+
+#if MICROPY_NLR_SETJMP
+#include "py/mpstate.h"
+
+NORETURN void nlr_setjmp_jump(void *val);
+// nlr_push() must be defined as a macro, because "The stack context will be
+// invalidated if the function which called setjmp() returns."
+// Per setjmp semantics: evaluates to 0 when the buffer is first pushed, and
+// to a non-zero value when control returns here via nlr_jump().
+#define nlr_push(buf) ((buf)->prev = MP_STATE_VM(nlr_top), MP_STATE_VM(nlr_top) = (buf), setjmp((buf)->jmpbuf))
+#define nlr_pop() { MP_STATE_VM(nlr_top) = MP_STATE_VM(nlr_top)->prev; }
+#define nlr_jump(val) nlr_setjmp_jump(val)
+#else
+// non-setjmp implementations are provided elsewhere (presumably per-arch,
+// using the regs[] save area above); same 0/non-zero contract as setjmp
+unsigned int nlr_push(nlr_buf_t *);
+void nlr_pop(void);
+NORETURN void nlr_jump(void *val);
+#endif
+
+// This must be implemented by a port. It's called by nlr_jump
+// if no nlr buf has been pushed. It must not return, but rather
+// should bail out with a fatal error.
+void nlr_jump_fail(void *val);
+
+// use nlr_raise instead of nlr_jump so that debugging is easier
+#ifndef DEBUG
+#define nlr_raise(val) nlr_jump(MP_OBJ_TO_PTR(val))
+#else
+#include "mpstate.h"
+// debug version: assert the raised value is a real exception instance
+// before jumping
+#define nlr_raise(val) \
+ do { \
+ /*printf("nlr_raise: nlr_top=%p\n", MP_STATE_VM(nlr_top)); \
+ fflush(stdout);*/ \
+ void *_val = MP_OBJ_TO_PTR(val); \
+ assert(_val != NULL); \
+ assert(mp_obj_is_exception_instance(val)); \
+ nlr_jump(_val); \
+ } while (0)
+
+#if !MICROPY_NLR_SETJMP
+// debug wrapper: catch pushing the same buffer twice
+#define nlr_push(val) \
+ assert(MP_STATE_VM(nlr_top) != val),nlr_push(val)
+
+/*
+#define nlr_push(val) \
+ printf("nlr_push: before: nlr_top=%p, val=%p\n", MP_STATE_VM(nlr_top), val),assert(MP_STATE_VM(nlr_top) != val),nlr_push(val)
+#endif
+*/
+// NOTE(review): the '#endif' two lines up is inside the comment block; the
+// live '#if !MICROPY_NLR_SETJMP' above is actually closed by the '#endif'
+// directly below -- the conditionals do balance as written.
+#endif
+
+#endif
+
+#endif // __MICROPY_INCLUDED_PY_NLR_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/nlrsetjmp.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,38 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/nlr.h"
+
+#if MICROPY_NLR_SETJMP
+
+// Pop the innermost nlr buffer, stash the raised value in it, and return
+// control to the matching nlr_push()/setjmp() call site.
+void nlr_setjmp_jump(void *val) {
+    nlr_buf_t *top = MP_STATE_VM(nlr_top);
+    top->ret_val = val;
+    // unlink this buffer before jumping back
+    MP_STATE_VM(nlr_top) = top->prev;
+    longjmp(top->jmpbuf, 1);
+}
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/obj.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,508 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/obj.h"
+#include "py/objtype.h"
+#include "py/objint.h"
+#include "py/objstr.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+#include "py/stackctrl.h"
+#include "py/stream.h" // for mp_obj_print
+
+// Return the type of any object, including the non-concrete kinds (small
+// ints, interned strings and, when enabled, inline floats) whose type must
+// be deduced from the tag bits rather than read from a type field.
+mp_obj_type_t *mp_obj_get_type(mp_const_obj_t o_in) {
+    if (MP_OBJ_IS_SMALL_INT(o_in)) {
+        return (mp_obj_type_t*)&mp_type_int;
+    }
+    if (MP_OBJ_IS_QSTR(o_in)) {
+        return (mp_obj_type_t*)&mp_type_str;
+    }
+    #if MICROPY_PY_BUILTINS_FLOAT
+    if (mp_obj_is_float(o_in)) {
+        return (mp_obj_type_t*)&mp_type_float;
+    }
+    #endif
+    // concrete object: first member is the base with the type pointer
+    const mp_obj_base_t *base = MP_OBJ_TO_PTR(o_in);
+    return (mp_obj_type_t*)base->type;
+}
+
+// Return the object's type name as a C string (via its qstr name).
+const char *mp_obj_get_type_str(mp_const_obj_t o_in) {
+    const mp_obj_type_t *type = mp_obj_get_type(o_in);
+    return qstr_str(type->name);
+}
+
+// Print o_in to the given printer, using the object's own print method
+// when it has one.
+void mp_obj_print_helper(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    // There can be data structures nested too deep, or just recursive
+    MP_STACK_CHECK();
+#ifndef NDEBUG
+    if (o_in == MP_OBJ_NULL) {
+        mp_print_str(print, "(nil)");
+        return;
+    }
+#endif
+    mp_obj_type_t *type = mp_obj_get_type(o_in);
+    if (type->print == NULL) {
+        // no print method: fall back to just the type name
+        mp_printf(print, "<%q>", type->name);
+    } else {
+        type->print((mp_print_t*)print, o_in, kind);
+    }
+}
+
+// Print o_in to the default output stream: sys.stdout's printer when the
+// io module is enabled, otherwise the platform's basic print stream.
+void mp_obj_print(mp_obj_t o_in, mp_print_kind_t kind) {
+#if MICROPY_PY_IO
+ mp_obj_print_helper(&mp_sys_stdout_print, o_in, kind);
+#else
+ mp_obj_print_helper(&mp_plat_print, o_in, kind);
+#endif
+}
+
+// helper function to print an exception with traceback
+void mp_obj_print_exception(const mp_print_t *print, mp_obj_t exc) {
+ if (mp_obj_is_exception_instance(exc)) {
+ size_t n, *values;
+ mp_obj_exception_get_traceback(exc, &n, &values);
+ if (n > 0) {
+ // traceback entries are stored as triplets:
+ // (file qstr, source line, block-name qstr)
+ assert(n % 3 == 0);
+ mp_print_str(print, "Traceback (most recent call last):\n");
+ // walk the triplets from the end of the array back to the start
+ for (int i = n - 3; i >= 0; i -= 3) {
+#if MICROPY_ENABLE_SOURCE_LINE
+ mp_printf(print, " File \"%q\", line %d", values[i], (int)values[i + 1]);
+#else
+ mp_printf(print, " File \"%q\"", values[i]);
+#endif
+ // the block name can be NULL if it's unknown
+ qstr block = values[i + 2];
+ if (block == MP_QSTR_NULL) {
+ mp_print_str(print, "\n");
+ } else {
+ mp_printf(print, ", in %q\n", block);
+ }
+ }
+ }
+ }
+ // finally print the exception itself (repr form), e.g. "ValueError: ..."
+ mp_obj_print_helper(print, exc, PRINT_EXC);
+ mp_print_str(print, "\n");
+}
+
+// Determine the truthiness of an object, following Python semantics:
+// singletons first, then small-int value, then the type's bool handler,
+// then len() != 0, and finally "everything else is true".
+bool mp_obj_is_true(mp_obj_t arg) {
+    if (arg == mp_const_false || arg == mp_const_none) {
+        return false;
+    }
+    if (arg == mp_const_true) {
+        return true;
+    }
+    if (MP_OBJ_IS_SMALL_INT(arg)) {
+        return MP_OBJ_SMALL_INT_VALUE(arg) != 0;
+    }
+    // ask the type's unary-op handler for MP_UNARY_OP_BOOL
+    mp_obj_type_t *type = mp_obj_get_type(arg);
+    if (type->unary_op != NULL) {
+        mp_obj_t result = type->unary_op(MP_UNARY_OP_BOOL, arg);
+        if (result != MP_OBJ_NULL) {
+            return result == mp_const_true;
+        }
+    }
+    // obj with a length is truthy iff the length is non-zero
+    mp_obj_t len = mp_obj_len_maybe(arg);
+    if (len != MP_OBJ_NULL) {
+        return len != MP_OBJ_NEW_SMALL_INT(0);
+    }
+    // any other obj is true per Python semantics
+    return true;
+}
+
+// Return true if o_in can be called. Class instances route through
+// mp_obj_instance_call even when they have no __call__, so they need the
+// deeper instance check; for all other types a non-NULL call slot suffices.
+bool mp_obj_is_callable(mp_obj_t o_in) {
+    mp_call_fun_t call = mp_obj_get_type(o_in)->call;
+    if (call == mp_obj_instance_call) {
+        return mp_obj_instance_is_callable(o_in);
+    }
+    return call != NULL;
+}
+
+// Implements the '==' operator (and hence the inverse of '!=').
+//
+// From the Python language reference
+// (https://docs.python.org/3/reference/expressions.html#not-in):
+// "The objects need not have the same type. If both are numbers, they are
+// converted to a common type. Otherwise, the == and != operators always
+// consider objects of different types to be unequal."
+// So False==0 and True==1 are true expressions.
+//
+// Furthermore, per the v3.4.2 code for object.c: if rich comparison
+// returns NotImplemented, == and != are decided by comparing the object
+// pointer.
+bool mp_obj_equal(mp_obj_t o1, mp_obj_t o2) {
+    // identical objects are always equal
+    if (o1 == o2) {
+        return true;
+    }
+    // None equals only itself, which was handled above
+    if (o1 == mp_const_none || o2 == mp_const_none) {
+        return false;
+    }
+
+    // fast path for small ints
+    if (MP_OBJ_IS_SMALL_INT(o1)) {
+        if (MP_OBJ_IS_SMALL_INT(o2)) {
+            // both small ints and not identical, hence not equal
+            return false;
+        }
+        // make o2 the small int and fall through to the generic op
+        mp_obj_t tmp = o1;
+        o1 = o2;
+        o2 = tmp;
+    }
+
+    // fast path for strings
+    if (MP_OBJ_IS_STR(o1)) {
+        // a string can only equal another string
+        return MP_OBJ_IS_STR(o2) ? mp_obj_str_equal(o1, o2) : false;
+    }
+    if (MP_OBJ_IS_STR(o2)) {
+        // o1 is not a string (checked above), so the objects are not equal
+        return false;
+    }
+
+    // generic path: ask o1's type to perform MP_BINARY_OP_EQUAL
+    mp_obj_type_t *type = mp_obj_get_type(o1);
+    if (type->binary_op != NULL) {
+        mp_obj_t r = type->binary_op(MP_BINARY_OP_EQUAL, o1, o2);
+        if (r != MP_OBJ_NULL) {
+            return r == mp_const_true;
+        }
+    }
+
+    // equality not implemented, and objects are not the same object, so
+    // they are defined as not equal
+    return false;
+}
+
+// Implicitly convert arg to a machine int; raises TypeError otherwise.
+// Note that Python does NOT provide implicit conversion from float to int
+// in the core expression language (try some_list[1.0]), so floats are
+// rejected here too.
+mp_int_t mp_obj_get_int(mp_const_obj_t arg) {
+    if (arg == mp_const_false) {
+        return 0;
+    }
+    if (arg == mp_const_true) {
+        return 1;
+    }
+    if (MP_OBJ_IS_SMALL_INT(arg)) {
+        return MP_OBJ_SMALL_INT_VALUE(arg);
+    }
+    if (MP_OBJ_IS_TYPE(arg, &mp_type_int)) {
+        return mp_obj_int_get_checked(arg);
+    }
+    // not an integral type
+    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+            "can't convert to int"));
+    } else {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+            "can't convert %s to int", mp_obj_get_type_str(arg)));
+    }
+}
+
+// Convert arg to a machine int: integral objects use the truncating
+// accessor, everything else goes through mp_obj_get_int (which may raise).
+mp_int_t mp_obj_get_int_truncated(mp_const_obj_t arg) {
+    return MP_OBJ_IS_INT(arg)
+        ? mp_obj_int_get_truncated(arg)
+        : mp_obj_get_int(arg);
+}
+
+// returns false if arg is not of integral type
+// returns true and sets *value if it is of integral type
+// can throw OverflowError if arg is of integral type, but doesn't fit in a mp_int_t
+bool mp_obj_get_int_maybe(mp_const_obj_t arg, mp_int_t *value) {
+    mp_int_t result;
+    if (arg == mp_const_false) {
+        result = 0;
+    } else if (arg == mp_const_true) {
+        result = 1;
+    } else if (MP_OBJ_IS_SMALL_INT(arg)) {
+        result = MP_OBJ_SMALL_INT_VALUE(arg);
+    } else if (MP_OBJ_IS_TYPE(arg, &mp_type_int)) {
+        result = mp_obj_int_get_checked(arg);
+    } else {
+        // not an integral type; *value is left untouched
+        return false;
+    }
+    *value = result;
+    return true;
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+// Implicitly convert arg to a float: bool and int values promote;
+// anything else raises TypeError.
+mp_float_t mp_obj_get_float(mp_obj_t arg) {
+    if (arg == mp_const_false) {
+        return 0;
+    }
+    if (arg == mp_const_true) {
+        return 1;
+    }
+    if (MP_OBJ_IS_SMALL_INT(arg)) {
+        return MP_OBJ_SMALL_INT_VALUE(arg);
+    }
+    if (MP_OBJ_IS_TYPE(arg, &mp_type_int)) {
+        return mp_obj_int_as_float(arg);
+    }
+    if (mp_obj_is_float(arg)) {
+        return mp_obj_float_get(arg);
+    }
+    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+            "can't convert to float"));
+    } else {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+            "can't convert %s to float", mp_obj_get_type_str(arg)));
+    }
+}
+
+#if MICROPY_PY_BUILTINS_COMPLEX
+// Implicitly convert arg to a complex value, storing the parts in
+// *real/*imag; raises TypeError for non-numeric objects.
+void mp_obj_get_complex(mp_obj_t arg, mp_float_t *real, mp_float_t *imag) {
+    if (MP_OBJ_IS_TYPE(arg, &mp_type_complex)) {
+        mp_obj_complex_get(arg, real, imag);
+        return;
+    }
+    // every non-complex numeric type has a zero imaginary part
+    if (arg == mp_const_false) {
+        *real = 0;
+    } else if (arg == mp_const_true) {
+        *real = 1;
+    } else if (MP_OBJ_IS_SMALL_INT(arg)) {
+        *real = MP_OBJ_SMALL_INT_VALUE(arg);
+    } else if (MP_OBJ_IS_TYPE(arg, &mp_type_int)) {
+        *real = mp_obj_int_as_float(arg);
+    } else if (mp_obj_is_float(arg)) {
+        *real = mp_obj_float_get(arg);
+    } else if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+            "can't convert to complex"));
+    } else {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+            "can't convert %s to complex", mp_obj_get_type_str(arg)));
+    }
+    *imag = 0;
+}
+#endif
+#endif
+
+// Extract the items array and length from a tuple or list; raises
+// TypeError for any other object.
+void mp_obj_get_array(mp_obj_t o, mp_uint_t *len, mp_obj_t **items) {
+    if (MP_OBJ_IS_TYPE(o, &mp_type_tuple)) {
+        mp_obj_tuple_get(o, len, items);
+        return;
+    }
+    if (MP_OBJ_IS_TYPE(o, &mp_type_list)) {
+        mp_obj_list_get(o, len, items);
+        return;
+    }
+    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+            "expected tuple/list"));
+    } else {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+            "object '%s' is not a tuple or list", mp_obj_get_type_str(o)));
+    }
+}
+
+// Like mp_obj_get_array, but additionally requires the sequence to have
+// exactly 'len' items; raises ValueError on a length mismatch.
+void mp_obj_get_array_fixed_n(mp_obj_t o, mp_uint_t len, mp_obj_t **items) {
+    mp_uint_t actual_len;
+    mp_obj_get_array(o, &actual_len, items);
+    if (actual_len == len) {
+        return;
+    }
+    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+            "tuple/list has wrong length"));
+    } else {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError,
+            "requested length %d but object has length %d", (int)len, (int)actual_len));
+    }
+}
+
+// is_slice determines whether the index is a slice index
+// Converts 'index' to a C integer for a sequence of length 'len': negative
+// indices count from the end; slice indices are clamped into [0, len],
+// while plain indices outside [0, len) raise IndexError. 'type' supplies
+// only the qstr name used in error messages.
+mp_uint_t mp_get_index(const mp_obj_type_t *type, mp_uint_t len, mp_obj_t index, bool is_slice) {
+ mp_int_t i;
+ if (MP_OBJ_IS_SMALL_INT(index)) {
+ i = MP_OBJ_SMALL_INT_VALUE(index);
+ } else if (!mp_obj_get_int_maybe(index, &i)) {
+ // not an integral type: TypeError
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+ "indices must be integers"));
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+ "%q indices must be integers, not %s",
+ type->name, mp_obj_get_type_str(index)));
+ }
+ }
+
+ // negative indices count from the end of the sequence (must be
+ // adjusted before the clamp/bounds check below)
+ if (i < 0) {
+ i += len;
+ }
+ if (is_slice) {
+ // slices clamp out-of-range indices instead of raising
+ if (i < 0) {
+ i = 0;
+ } else if ((mp_uint_t)i > len) {
+ i = len;
+ }
+ } else {
+ if (i < 0 || (mp_uint_t)i >= len) {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_IndexError, "index out of range"));
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_IndexError,
+ "%q index out of range", type->name));
+ }
+ }
+ }
+ return i;
+}
+
+// Implements builtin id(): returns an integer identity for o_in, derived
+// from the object word itself.
+mp_obj_t mp_obj_id(mp_obj_t o_in) {
+ mp_int_t id = (mp_int_t)o_in;
+ if (!MP_OBJ_IS_OBJ(o_in)) {
+ // non-concrete object (small int/qstr/etc): box the raw word
+ return mp_obj_new_int(id);
+ } else if (id >= 0) {
+ // Many OSes and CPUs have affinity for putting "user" memories
+ // into low half of address space, and "system" into upper half.
+ // We're going to take advantage of that and return small int
+ // (signed) for such "user" addresses.
+ return MP_OBJ_NEW_SMALL_INT(id);
+ } else {
+ // If that didn't work, well, let's return long int, just as
+ // a (big) positve value, so it will never clash with the range
+ // of small int returned in previous case.
+ return mp_obj_new_int_from_uint((mp_uint_t)id);
+ }
+}
+
+// will raise a TypeError if object has no length
+mp_obj_t mp_obj_len(mp_obj_t o_in) {
+    mp_obj_t len = mp_obj_len_maybe(o_in);
+    if (len != MP_OBJ_NULL) {
+        return len;
+    }
+    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+            "object has no len"));
+    } else {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+            "object of type '%s' has no len()", mp_obj_get_type_str(o_in)));
+    }
+}
+
+// may return MP_OBJ_NULL
+// Returns the length of o_in as a small int, or MP_OBJ_NULL if the
+// object's type provides no way to obtain a length.
+mp_obj_t mp_obj_len_maybe(mp_obj_t o_in) {
+ if (
+#if !MICROPY_PY_BUILTINS_STR_UNICODE
+ // It's simple - unicode is slow, non-unicode is fast
+ MP_OBJ_IS_STR(o_in) ||
+#endif
+ MP_OBJ_IS_TYPE(o_in, &mp_type_bytes)) {
+ // fast path: read the stored length directly
+ GET_STR_LEN(o_in, l);
+ return MP_OBJ_NEW_SMALL_INT(l);
+ } else {
+ mp_obj_type_t *type = mp_obj_get_type(o_in);
+ if (type->unary_op != NULL) {
+ // delegate to the type's unary-op handler
+ return type->unary_op(MP_UNARY_OP_LEN, o_in);
+ } else {
+ return MP_OBJ_NULL;
+ }
+ }
+}
+
+// Perform a subscript operation on base[index]. 'value' selects the op:
+//   MP_OBJ_NULL     -> delete item
+//   MP_OBJ_SENTINEL -> load item (result is returned)
+//   anything else   -> store 'value' into the item
+// Raises TypeError if the object's type does not support the operation.
+mp_obj_t mp_obj_subscr(mp_obj_t base, mp_obj_t index, mp_obj_t value) {
+    mp_obj_type_t *type = mp_obj_get_type(base);
+    if (type->subscr != NULL) {
+        mp_obj_t ret = type->subscr(base, index, value);
+        if (ret != MP_OBJ_NULL) {
+            return ret;
+        }
+        // TODO: call base classes here?
+    }
+    if (value == MP_OBJ_NULL) {
+        if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+            nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+                "object does not support item deletion"));
+        } else {
+            nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+                "'%s' object does not support item deletion", mp_obj_get_type_str(base)));
+        }
+    } else if (value == MP_OBJ_SENTINEL) {
+        if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+            // fixed: use the plain (non-varg) constructor here -- the terse
+            // message has no format arguments, consistent with the other
+            // terse branches in this function
+            nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+                "object is not subscriptable"));
+        } else {
+            nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+                "'%s' object is not subscriptable", mp_obj_get_type_str(base)));
+        }
+    } else {
+        if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+            nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+                "object does not support item assignment"));
+        } else {
+            nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+                "'%s' object does not support item assignment", mp_obj_get_type_str(base)));
+        }
+    }
+}
+
+// Return input argument. Useful as .getiter for objects which are
+// their own iterators, etc.
+mp_obj_t mp_identity(mp_obj_t self) {
+ return self;
+}
+// builtin-function object wrapping mp_identity (takes exactly 1 argument)
+MP_DEFINE_CONST_FUN_OBJ_1(mp_identity_obj, mp_identity);
+
+// Query the buffer protocol on obj, filling *bufinfo on success.
+// Returns false if the type has no buffer protocol, or its get_buffer
+// hook reports failure (non-zero return).
+bool mp_get_buffer(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
+    mp_obj_type_t *type = mp_obj_get_type(obj);
+    if (type->buffer_p.get_buffer == NULL) {
+        return false;
+    }
+    return type->buffer_p.get_buffer(obj, bufinfo, flags) == 0;
+}
+
+// As mp_get_buffer, but raises TypeError instead of returning false.
+void mp_get_buffer_raise(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
+    if (mp_get_buffer(obj, bufinfo, flags)) {
+        return;
+    }
+    nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "object with buffer protocol required"));
+}
+
+// Default unary-op handler: supports only hashing (by the object word
+// itself); any other op returns MP_OBJ_NULL meaning "not supported".
+mp_obj_t mp_generic_unary_op(mp_uint_t op, mp_obj_t o_in) {
+    if (op == MP_UNARY_OP_HASH) {
+        return MP_OBJ_NEW_SMALL_INT((mp_uint_t)o_in);
+    }
+    return MP_OBJ_NULL; // op not supported
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/obj.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,820 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_OBJ_H__
+#define __MICROPY_INCLUDED_PY_OBJ_H__
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/qstr.h"
+#include "py/mpprint.h"
+
+// All Micro Python objects are at least this type
+// The bit-size must be at least pointer size
+
+#if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+typedef uint64_t mp_obj_t;
+typedef uint64_t mp_const_obj_t;
+#else
+typedef machine_ptr_t mp_obj_t;
+typedef machine_const_ptr_t mp_const_obj_t;
+#endif
+
+// This mp_obj_type_t struct is a concrete MicroPython object which holds info
+// about a type. See below for actual definition of the struct.
+typedef struct _mp_obj_type_t mp_obj_type_t;
+
+// Anything that wants to be a concrete MicroPython object must have mp_obj_base_t
+// as its first member (small ints, qstr objs and inline floats are not concrete).
+struct _mp_obj_base_t {
+ const mp_obj_type_t *type MICROPY_OBJ_BASE_ALIGNMENT;
+};
+typedef struct _mp_obj_base_t mp_obj_base_t;
+
+// These fake objects are used to indicate certain things in arguments or return
+// values, and should only be used when explicitly allowed.
+//
+// - MP_OBJ_NULL : used to indicate the absence of an object, or unsupported operation.
+// - MP_OBJ_STOP_ITERATION : used instead of throwing a StopIteration, for efficiency.
+// - MP_OBJ_SENTINEL : used for various internal purposes where one needs
+// an object which is unique from all other objects, including MP_OBJ_NULL.
+//
+// For debugging purposes they are all different. For non-debug mode, we alias
+// as many as we can to MP_OBJ_NULL because it's cheaper to load/compare 0.
+
+// NOTE: in NDEBUG builds MP_OBJ_STOP_ITERATION aliases MP_OBJ_NULL (both 0),
+// per the comment above; in debug builds all three are distinct.
+#ifdef NDEBUG
+#define MP_OBJ_NULL (MP_OBJ_FROM_PTR((void*)0))
+#define MP_OBJ_STOP_ITERATION (MP_OBJ_FROM_PTR((void*)0))
+#define MP_OBJ_SENTINEL (MP_OBJ_FROM_PTR((void*)4))
+#else
+#define MP_OBJ_NULL (MP_OBJ_FROM_PTR((void*)0))
+#define MP_OBJ_STOP_ITERATION (MP_OBJ_FROM_PTR((void*)4))
+#define MP_OBJ_SENTINEL (MP_OBJ_FROM_PTR((void*)8))
+#endif
+
+// These macros/inline functions operate on objects and depend on the
+// particular object representation. They are used to query, pack and
+// unpack small ints, qstrs and full object pointers.
+
+#if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_A
+
+// Representation A tag bits (per the tests below):
+// ...xxx1 -> small int, ...xx10 -> qstr, ...xx00 -> object pointer
+static inline bool MP_OBJ_IS_SMALL_INT(mp_const_obj_t o)
+ { return ((((mp_int_t)(o)) & 1) != 0); }
+#define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)(o)) >> 1)
+#define MP_OBJ_NEW_SMALL_INT(small_int) ((mp_obj_t)((((mp_int_t)(small_int)) << 1) | 1))
+
+static inline bool MP_OBJ_IS_QSTR(mp_const_obj_t o)
+ { return ((((mp_int_t)(o)) & 3) == 2); }
+#define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 2)
+#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 2) | 2))
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#define mp_const_float_e MP_ROM_PTR(&mp_const_float_e_obj)
+#define mp_const_float_pi MP_ROM_PTR(&mp_const_float_pi_obj)
+extern const struct _mp_obj_float_t mp_const_float_e_obj;
+extern const struct _mp_obj_float_t mp_const_float_pi_obj;
+
+// in this representation floats are boxed (concrete objects)
+#define mp_obj_is_float(o) MP_OBJ_IS_TYPE((o), &mp_type_float)
+mp_float_t mp_obj_float_get(mp_obj_t self_in);
+mp_obj_t mp_obj_new_float(mp_float_t value);
+#endif
+
+static inline bool MP_OBJ_IS_OBJ(mp_const_obj_t o)
+ { return ((((mp_int_t)(o)) & 3) == 0); }
+
+#elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_B
+
+// Representation B tag bits (per the tests below):
+// ...xx01 -> small int, ...xx11 -> qstr, ...xxx0 -> object pointer
+static inline bool MP_OBJ_IS_SMALL_INT(mp_const_obj_t o)
+ { return ((((mp_int_t)(o)) & 3) == 1); }
+#define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)(o)) >> 2)
+#define MP_OBJ_NEW_SMALL_INT(small_int) ((mp_obj_t)((((mp_int_t)(small_int)) << 2) | 1))
+
+static inline bool MP_OBJ_IS_QSTR(mp_const_obj_t o)
+ { return ((((mp_int_t)(o)) & 3) == 3); }
+#define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 2)
+#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 2) | 3))
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#define mp_const_float_e MP_ROM_PTR(&mp_const_float_e_obj)
+#define mp_const_float_pi MP_ROM_PTR(&mp_const_float_pi_obj)
+extern const struct _mp_obj_float_t mp_const_float_e_obj;
+extern const struct _mp_obj_float_t mp_const_float_pi_obj;
+
+// in this representation floats are boxed (concrete objects)
+#define mp_obj_is_float(o) MP_OBJ_IS_TYPE((o), &mp_type_float)
+mp_float_t mp_obj_float_get(mp_obj_t self_in);
+mp_obj_t mp_obj_new_float(mp_float_t value);
+#endif
+
+static inline bool MP_OBJ_IS_OBJ(mp_const_obj_t o)
+ { return ((((mp_int_t)(o)) & 1) == 0); }
+
+#elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C
+
+// Representation C (32-bit): floats are stored inline in the object word
+// itself (no boxing) via the +-0x80800000 bias below
+static inline bool MP_OBJ_IS_SMALL_INT(mp_const_obj_t o)
+ { return ((((mp_int_t)(o)) & 1) != 0); }
+#define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)(o)) >> 1)
+#define MP_OBJ_NEW_SMALL_INT(small_int) ((mp_obj_t)((((mp_int_t)(small_int)) << 1) | 1))
+
+#define mp_const_float_e MP_ROM_PTR((mp_obj_t)(((0x402df854 & ~3) | 2) + 0x80800000))
+#define mp_const_float_pi MP_ROM_PTR((mp_obj_t)(((0x40490fdb & ~3) | 2) + 0x80800000))
+
+static inline bool mp_obj_is_float(mp_const_obj_t o)
+ { return (((mp_uint_t)(o)) & 3) == 2 && (((mp_uint_t)(o)) & 0xff800007) != 0x00000006; }
+static inline mp_float_t mp_obj_float_get(mp_const_obj_t o) {
+ // undo the bias and clear the tag bits to recover the IEEE bit pattern
+ union {
+ mp_float_t f;
+ mp_uint_t u;
+ } num = {.u = ((mp_uint_t)o - 0x80800000) & ~3};
+ return num.f;
+}
+static inline mp_obj_t mp_obj_new_float(mp_float_t f) {
+ union {
+ mp_float_t f;
+ mp_uint_t u;
+ } num = {.f = f};
+ return (mp_obj_t)(((num.u & ~0x3) | 2) + 0x80800000);
+}
+
+static inline bool MP_OBJ_IS_QSTR(mp_const_obj_t o)
+ { return (((mp_uint_t)(o)) & 0xff800007) == 0x00000006; }
+#define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 3)
+#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 3) | 0x00000006))
+
+static inline bool MP_OBJ_IS_OBJ(mp_const_obj_t o)
+ { return ((((mp_int_t)(o)) & 3) == 0); }
+
+#elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+
+// Representation D (64-bit object word): the top 16 bits tag the kind
+// (see the masks below); floats are stored inline with a bias
+static inline bool MP_OBJ_IS_SMALL_INT(mp_const_obj_t o)
+ { return ((((mp_int_t)(o)) & 0xffff000000000000) == 0x0001000000000000); }
+#define MP_OBJ_SMALL_INT_VALUE(o) (((intptr_t)(o)) >> 1)
+#define MP_OBJ_NEW_SMALL_INT(small_int) ((mp_obj_t)(((uintptr_t)(small_int)) << 1) | 0x0001000000000001)
+
+static inline bool MP_OBJ_IS_QSTR(mp_const_obj_t o)
+ { return ((((mp_int_t)(o)) & 0xffff000000000000) == 0x0002000000000000); }
+#define MP_OBJ_QSTR_VALUE(o) ((((uint32_t)(o)) >> 1) & 0xffffffff)
+#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 1) | 0x0002000000000001))
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#define mp_const_float_e {((mp_obj_t)((uint64_t)0x4005bf0a8b125769 + 0x8004000000000000))}
+#define mp_const_float_pi {((mp_obj_t)((uint64_t)0x400921fb54442d18 + 0x8004000000000000))}
+
+static inline bool mp_obj_is_float(mp_const_obj_t o) {
+ return ((uint64_t)(o) & 0xfffc000000000000) != 0;
+}
+static inline mp_float_t mp_obj_float_get(mp_const_obj_t o) {
+ // undo the bias to recover the IEEE double bit pattern
+ union {
+ mp_float_t f;
+ uint64_t r;
+ } num = {.r = o - 0x8004000000000000};
+ return num.f;
+}
+static inline mp_obj_t mp_obj_new_float(mp_float_t f) {
+ union {
+ mp_float_t f;
+ uint64_t r;
+ } num = {.f = f};
+ return num.r + 0x8004000000000000;
+}
+#endif
+
+static inline bool MP_OBJ_IS_OBJ(mp_const_obj_t o)
+ { return ((((uint64_t)(o)) & 0xffff000000000000) == 0x0000000000000000); }
+#define MP_OBJ_TO_PTR(o) ((void*)(uintptr_t)(o))
+#define MP_OBJ_FROM_PTR(p) ((mp_obj_t)((uintptr_t)(p)))
+
+// rom object storage needs special handling to widen 32-bit pointer to 64-bits
+typedef union _mp_rom_obj_t { uint64_t u64; struct { const void *lo, *hi; } u32; } mp_rom_obj_t;
+#define MP_ROM_INT(i) {MP_OBJ_NEW_SMALL_INT(i)}
+#define MP_ROM_QSTR(q) {MP_OBJ_NEW_QSTR(q)}
+#if MP_ENDIANNESS_LITTLE
+#define MP_ROM_PTR(p) {.u32 = {.lo = (p), .hi = NULL}}
+#else
+#define MP_ROM_PTR(p) {.u32 = {.lo = NULL, .hi = (p)}}
+#endif
+
+#endif
+
+// Macros to convert between mp_obj_t and concrete object types.
+// These are identity operations in MicroPython, but ability to override
+// these operations are provided to experiment with other methods of
+// object representation and memory management.
+
+// Cast mp_obj_t to object pointer
+// (representation D defines its own versions above, hence the #ifndef)
+#ifndef MP_OBJ_TO_PTR
+#define MP_OBJ_TO_PTR(o) ((void*)o)
+#endif
+
+// Cast object pointer to mp_obj_t
+#ifndef MP_OBJ_FROM_PTR
+#define MP_OBJ_FROM_PTR(p) ((mp_obj_t)p)
+#endif
+
+// Macros to create objects that are stored in ROM.
+
+#ifndef MP_ROM_INT
+typedef mp_const_obj_t mp_rom_obj_t;
+#define MP_ROM_INT(i) MP_OBJ_NEW_SMALL_INT(i)
+#define MP_ROM_QSTR(q) MP_OBJ_NEW_QSTR(q)
+#define MP_ROM_PTR(p) (p)
+/* for testing
+typedef struct _mp_rom_obj_t { mp_const_obj_t o; } mp_rom_obj_t;
+#define MP_ROM_INT(i) {MP_OBJ_NEW_SMALL_INT(i)}
+#define MP_ROM_QSTR(q) {MP_OBJ_NEW_QSTR(q)}
+#define MP_ROM_PTR(p) {.o = p}
+*/
+#endif
+
+// The macros below are derived from the ones above and are used to
+// check for more specific object types.
+
+#define MP_OBJ_IS_TYPE(o, t) (MP_OBJ_IS_OBJ(o) && (((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type == (t))) // this does not work for checking int, str or fun; use below macros for that
+#define MP_OBJ_IS_INT(o) (MP_OBJ_IS_SMALL_INT(o) || MP_OBJ_IS_TYPE(o, &mp_type_int))
+#define MP_OBJ_IS_STR(o) (MP_OBJ_IS_QSTR(o) || MP_OBJ_IS_TYPE(o, &mp_type_str))
+// relies on the str and bytes types sharing mp_obj_str_binary_op
+#define MP_OBJ_IS_STR_OR_BYTES(o) (MP_OBJ_IS_QSTR(o) || (MP_OBJ_IS_OBJ(o) && ((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type->binary_op == mp_obj_str_binary_op))
+#define MP_OBJ_IS_FUN(o) (MP_OBJ_IS_OBJ(o) && (((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type->name == MP_QSTR_function))
+
+// Note: inline functions sometimes use much more code space than the
+// equivalent macros, depending on the compiler.
+//static inline bool MP_OBJ_IS_TYPE(mp_const_obj_t o, const mp_obj_type_t *t) { return (MP_OBJ_IS_OBJ(o) && (((mp_obj_base_t*)(o))->type == (t))); } // this does not work for checking a string, use below macro for that
+//static inline bool MP_OBJ_IS_INT(mp_const_obj_t o) { return (MP_OBJ_IS_SMALL_INT(o) || MP_OBJ_IS_TYPE(o, &mp_type_int)); } // returns true if o is a small int or long int
+// Need to forward declare these for the inline function to compile.
+extern const mp_obj_type_t mp_type_int;
+extern const mp_obj_type_t mp_type_bool;
+static inline bool mp_obj_is_integer(mp_const_obj_t o) { return MP_OBJ_IS_INT(o) || MP_OBJ_IS_TYPE(o, &mp_type_bool); } // returns true if o is bool, small int or long int
+//static inline bool MP_OBJ_IS_STR(mp_const_obj_t o) { return (MP_OBJ_IS_QSTR(o) || MP_OBJ_IS_TYPE(o, &mp_type_str)); }
+
+
// These macros are used to declare and define constant function objects
// You can put "static" in front of the definitions to make them local

#define MP_DECLARE_CONST_FUN_OBJ(obj_name) extern const mp_obj_fun_builtin_t obj_name

// Fixed-arity builtins: the _N suffix is the exact number of arguments,
// stored as both n_args_min and n_args_max.
#define MP_DEFINE_CONST_FUN_OBJ_0(obj_name, fun_name) \
    const mp_obj_fun_builtin_t obj_name = \
    {{&mp_type_fun_builtin}, false, 0, 0, .fun._0 = fun_name}
#define MP_DEFINE_CONST_FUN_OBJ_1(obj_name, fun_name) \
    const mp_obj_fun_builtin_t obj_name = \
    {{&mp_type_fun_builtin}, false, 1, 1, .fun._1 = fun_name}
#define MP_DEFINE_CONST_FUN_OBJ_2(obj_name, fun_name) \
    const mp_obj_fun_builtin_t obj_name = \
    {{&mp_type_fun_builtin}, false, 2, 2, .fun._2 = fun_name}
#define MP_DEFINE_CONST_FUN_OBJ_3(obj_name, fun_name) \
    const mp_obj_fun_builtin_t obj_name = \
    {{&mp_type_fun_builtin}, false, 3, 3, .fun._3 = fun_name}
// Variadic builtin: at least n_args_min positional args, no upper bound.
#define MP_DEFINE_CONST_FUN_OBJ_VAR(obj_name, n_args_min, fun_name) \
    const mp_obj_fun_builtin_t obj_name = \
    {{&mp_type_fun_builtin}, false, n_args_min, MP_OBJ_FUN_ARGS_MAX, .fun.var = fun_name}
// Variadic builtin with both a lower and an upper bound on the arity.
#define MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(obj_name, n_args_min, n_args_max, fun_name) \
    const mp_obj_fun_builtin_t obj_name = \
    {{&mp_type_fun_builtin}, false, n_args_min, n_args_max, .fun.var = fun_name}
// Keyword-capable builtin: is_kw is set, and fun receives the keyword
// arguments in an mp_map_t*.
#define MP_DEFINE_CONST_FUN_OBJ_KW(obj_name, n_args_min, fun_name) \
    const mp_obj_fun_builtin_t obj_name = \
    {{&mp_type_fun_builtin}, true, n_args_min, MP_OBJ_FUN_ARGS_MAX, .fun.kw = fun_name}
+
// These macros are used to define constant map/dict objects
// You can put "static" in front of the definition to make it local

// table_name must be a (const) array of mp_rom_map_elem_t.  The resulting
// map is completely full (used == alloc) and is marked fixed and ordered,
// so lookups scan it linearly and it can never be mutated.
#define MP_DEFINE_CONST_MAP(map_name, table_name) \
    const mp_map_t map_name = { \
        .all_keys_are_qstrs = 1, \
        .is_fixed = 1, \
        .is_ordered = 1, \
        .used = MP_ARRAY_SIZE(table_name), \
        .alloc = MP_ARRAY_SIZE(table_name), \
        .table = (mp_map_elem_t*)(mp_rom_map_elem_t*)table_name, \
    }

// Same as MP_DEFINE_CONST_MAP but wraps the map in a dict object so it can
// be used wherever an mp_obj_dict_t is expected (e.g. a type's locals_dict).
#define MP_DEFINE_CONST_DICT(dict_name, table_name) \
    const mp_obj_dict_t dict_name = { \
        .base = {&mp_type_dict}, \
        .map = { \
            .all_keys_are_qstrs = 1, \
            .is_fixed = 1, \
            .is_ordered = 1, \
            .used = MP_ARRAY_SIZE(table_name), \
            .alloc = MP_ARRAY_SIZE(table_name), \
            .table = (mp_map_elem_t*)(mp_rom_map_elem_t*)table_name, \
        }, \
    }
+
// These macros are used to declare and define constant staticmethod and classmethod objects
// You can put "static" in front of the definitions to make them local

#define MP_DECLARE_CONST_STATICMETHOD_OBJ(obj_name) extern const mp_rom_obj_static_class_method_t obj_name
#define MP_DECLARE_CONST_CLASSMETHOD_OBJ(obj_name) extern const mp_rom_obj_static_class_method_t obj_name

// fun_name should be a ROM function object (e.g. made with MP_ROM_PTR).
#define MP_DEFINE_CONST_STATICMETHOD_OBJ(obj_name, fun_name) const mp_rom_obj_static_class_method_t obj_name = {{&mp_type_staticmethod}, fun_name}
#define MP_DEFINE_CONST_CLASSMETHOD_OBJ(obj_name, fun_name) const mp_rom_obj_static_class_method_t obj_name = {{&mp_type_classmethod}, fun_name}
+
// Underlying map/hash table implementation (not dict object or map function)

// One key/value slot of a RAM map.
typedef struct _mp_map_elem_t {
    mp_obj_t key;
    mp_obj_t value;
} mp_map_elem_t;

// One key/value slot of a ROM (constant) map; same layout as mp_map_elem_t
// in the default object representation, hence the casts in
// MP_DEFINE_CONST_MAP/MP_DEFINE_CONST_DICT.
typedef struct _mp_rom_map_elem_t {
    mp_rom_obj_t key;
    mp_rom_obj_t value;
} mp_rom_map_elem_t;
+
// TODO maybe have a truncated mp_map_t for fixed tables, since alloc == used.
// Put alloc last in the structure, so the truncated version does not need it;
// this would save 1 ROM word for all ROM objects that have a locals_dict.
// Would also need a truncated dict structure.
+
// Hash-table storage used by dict objects and by the raw map API below.
typedef struct _mp_map_t {
    mp_uint_t all_keys_are_qstrs : 1; // set when every key is an interned string (qstr)
    mp_uint_t is_fixed : 1; // a fixed array that can't be modified; must also be ordered
    mp_uint_t is_ordered : 1; // an ordered array
    mp_uint_t used : (8 * sizeof(mp_uint_t) - 3); // number of filled slots (bitfield fills the rest of the word)
    mp_uint_t alloc; // number of slots allocated in table
    mp_map_elem_t *table;
} mp_map_t;
+
// Behaviour selector for mp_map_lookup and mp_set_lookup.
// mp_set_lookup requires these constants to have the values they do
typedef enum _mp_map_lookup_kind_t {
    MP_MAP_LOOKUP = 0, // just search for the key
    MP_MAP_LOOKUP_ADD_IF_NOT_FOUND = 1, // insert a slot when the key is absent
    MP_MAP_LOOKUP_REMOVE_IF_FOUND = 2, // remove the slot when the key is present
    MP_MAP_LOOKUP_ADD_IF_NOT_FOUND_OR_REMOVE_IF_FOUND = 3, // only valid for mp_set_lookup
} mp_map_lookup_kind_t;
+
+extern const mp_map_t mp_const_empty_map;
+
+static inline bool MP_MAP_SLOT_IS_FILLED(const mp_map_t *map, mp_uint_t pos) { return ((map)->table[pos].key != MP_OBJ_NULL && (map)->table[pos].key != MP_OBJ_SENTINEL); }
+
+void mp_map_init(mp_map_t *map, mp_uint_t n);
+void mp_map_init_fixed_table(mp_map_t *map, mp_uint_t n, const mp_obj_t *table);
+mp_map_t *mp_map_new(mp_uint_t n);
+void mp_map_deinit(mp_map_t *map);
+void mp_map_free(mp_map_t *map);
+mp_map_elem_t *mp_map_lookup(mp_map_t *map, mp_obj_t index, mp_map_lookup_kind_t lookup_kind);
+void mp_map_clear(mp_map_t *map);
+void mp_map_dump(mp_map_t *map);
+
+// Underlying set implementation (not set object)
+
+typedef struct _mp_set_t {
+ mp_uint_t alloc;
+ mp_uint_t used;
+ mp_obj_t *table;
+} mp_set_t;
+
+static inline bool MP_SET_SLOT_IS_FILLED(const mp_set_t *set, mp_uint_t pos) { return ((set)->table[pos] != MP_OBJ_NULL && (set)->table[pos] != MP_OBJ_SENTINEL); }
+
+void mp_set_init(mp_set_t *set, mp_uint_t n);
+mp_obj_t mp_set_lookup(mp_set_t *set, mp_obj_t index, mp_map_lookup_kind_t lookup_kind);
+mp_obj_t mp_set_remove_first(mp_set_t *set);
+void mp_set_clear(mp_set_t *set);
+
+// Type definitions for methods
+
+typedef mp_obj_t (*mp_fun_0_t)(void);
+typedef mp_obj_t (*mp_fun_1_t)(mp_obj_t);
+typedef mp_obj_t (*mp_fun_2_t)(mp_obj_t, mp_obj_t);
+typedef mp_obj_t (*mp_fun_3_t)(mp_obj_t, mp_obj_t, mp_obj_t);
+typedef mp_obj_t (*mp_fun_var_t)(size_t n, const mp_obj_t *);
+// mp_fun_kw_t takes mp_map_t* (and not const mp_map_t*) to ease passing
+// this arg to mp_map_lookup().
+typedef mp_obj_t (*mp_fun_kw_t)(size_t n, const mp_obj_t *, mp_map_t *);
+
// How an object should be rendered by a type's print function.
typedef enum {
    PRINT_STR = 0, // str()-style output
    PRINT_REPR = 1, // repr()-style output
    PRINT_EXC = 2, // Special format for printing exception in unhandled exception message
    PRINT_JSON = 3, // JSON-compatible output
    PRINT_RAW = 4, // Special format for printing bytes as an undecorated string
    PRINT_EXC_SUBCLASS = 0x80, // Internal flag for printing exception subclasses
} mp_print_kind_t;
+
+typedef void (*mp_print_fun_t)(const mp_print_t *print, mp_obj_t o, mp_print_kind_t kind);
+typedef mp_obj_t (*mp_make_new_fun_t)(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args);
+typedef mp_obj_t (*mp_call_fun_t)(mp_obj_t fun, size_t n_args, size_t n_kw, const mp_obj_t *args);
+typedef mp_obj_t (*mp_unary_op_fun_t)(mp_uint_t op, mp_obj_t);
+typedef mp_obj_t (*mp_binary_op_fun_t)(mp_uint_t op, mp_obj_t, mp_obj_t);
+typedef void (*mp_attr_fun_t)(mp_obj_t self_in, qstr attr, mp_obj_t *dest);
+typedef mp_obj_t (*mp_subscr_fun_t)(mp_obj_t self_in, mp_obj_t index, mp_obj_t value);
+
// Buffer protocol
// Describes a contiguous region of memory exposed by an object.
typedef struct _mp_buffer_info_t {
    // if we'd bother to support various versions of structure
    // (with different number of fields), we can distinguish
    // them with ver = sizeof(struct). Cons: overkill for *micro*?
    //int ver; // ?

    void *buf; // can be NULL if len == 0
    size_t len; // in bytes
    int typecode; // as per binary.h

    // Rationale: to load arbitrary-sized sprites directly to LCD
    // Cons: a bit adhoc usecase
    // int stride;
} mp_buffer_info_t;
// Access-direction flags passed to get_buffer implementations.
#define MP_BUFFER_READ (1)
#define MP_BUFFER_WRITE (2)
#define MP_BUFFER_RW (MP_BUFFER_READ | MP_BUFFER_WRITE)
// Protocol struct a type installs (in buffer_p) to expose its memory.
typedef struct _mp_buffer_p_t {
    // Fill in *bufinfo for obj; flags is a combination of MP_BUFFER_* above.
    mp_int_t (*get_buffer)(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags);
} mp_buffer_p_t;
bool mp_get_buffer(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags);
// As mp_get_buffer, but (as the name implies) raises on failure instead of
// returning an error indication.
void mp_get_buffer_raise(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags);
+
// Stream protocol
// Protocol struct a type installs (in stream_p) to behave as a stream.
typedef struct _mp_stream_p_t {
    // On error, functions should return MP_STREAM_ERROR and fill in *errcode (values
    // are implementation-dependent, but will be exposed to user, e.g. via exception).
    mp_uint_t (*read)(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode); // returns number of bytes read
    mp_uint_t (*write)(mp_obj_t obj, const void *buf, mp_uint_t size, int *errcode); // returns number of bytes written
    mp_uint_t (*ioctl)(mp_obj_t obj, mp_uint_t request, uintptr_t arg, int *errcode); // miscellaneous control operations
    mp_uint_t is_text : 1; // default is bytes, set this for text stream
} mp_stream_p_t;
+
// The layout of every type object.  Any slot may be left NULL when the type
// does not support the corresponding operation.  This layout is ABI: const
// type objects in ROM are initialised positionally against it.
struct _mp_obj_type_t {
    mp_obj_base_t base; // for a type object this points to mp_type_type
    qstr name; // interned name of the type, e.g. MP_QSTR_list
    mp_print_fun_t print; // renders an instance according to mp_print_kind_t
    mp_make_new_fun_t make_new; // to make an instance of the type

    mp_call_fun_t call; // called when an instance is called like a function
    mp_unary_op_fun_t unary_op; // can return MP_OBJ_NULL if op not supported
    mp_binary_op_fun_t binary_op; // can return MP_OBJ_NULL if op not supported

    // implements load, store and delete attribute
    //
    // dest[0] = MP_OBJ_NULL means load
    //  return: for fail, do nothing
    //          for attr, dest[0] = value
    //          for method, dest[0] = method, dest[1] = self
    //
    // dest[0,1] = {MP_OBJ_SENTINEL, MP_OBJ_NULL} means delete
    // dest[0,1] = {MP_OBJ_SENTINEL, object} means store
    //  return: for fail, do nothing
    //          for success set dest[0] = MP_OBJ_NULL
    mp_attr_fun_t attr;

    mp_subscr_fun_t subscr; // implements load, store, delete subscripting
    // value=MP_OBJ_NULL means delete, value=MP_OBJ_SENTINEL means load, else store
    // can return MP_OBJ_NULL if op not supported

    mp_fun_1_t getiter; // corresponds to __iter__ special method
    mp_fun_1_t iternext; // may return MP_OBJ_STOP_ITERATION as an optimisation instead of raising StopIteration() (with no args)

    mp_buffer_p_t buffer_p; // buffer protocol implementation (embedded, not a pointer)
    const mp_stream_p_t *stream_p; // stream protocol implementation, or NULL

    // these are for dynamically created types (classes)
    struct _mp_obj_tuple_t *bases_tuple; // tuple of base classes
    struct _mp_obj_dict_t *locals_dict; // the type's attribute/method namespace

    /*
    What we might need to add here:

    len             str tuple list map
    abs             float complex
    hash            bool int none str
    equal           int str

    unpack seq      list tuple
    */
};
+
+// Constant types, globally accessible
+extern const mp_obj_type_t mp_type_type;
+extern const mp_obj_type_t mp_type_object;
+extern const mp_obj_type_t mp_type_NoneType;
+extern const mp_obj_type_t mp_type_bool;
+extern const mp_obj_type_t mp_type_int;
+extern const mp_obj_type_t mp_type_str;
+extern const mp_obj_type_t mp_type_bytes;
+extern const mp_obj_type_t mp_type_bytearray;
+extern const mp_obj_type_t mp_type_memoryview;
+extern const mp_obj_type_t mp_type_float;
+extern const mp_obj_type_t mp_type_complex;
+extern const mp_obj_type_t mp_type_tuple;
+extern const mp_obj_type_t mp_type_list;
+extern const mp_obj_type_t mp_type_map; // map (the python builtin, not the dict implementation detail)
+extern const mp_obj_type_t mp_type_enumerate;
+extern const mp_obj_type_t mp_type_filter;
+extern const mp_obj_type_t mp_type_dict;
+extern const mp_obj_type_t mp_type_ordereddict;
+extern const mp_obj_type_t mp_type_range;
+extern const mp_obj_type_t mp_type_set;
+extern const mp_obj_type_t mp_type_frozenset;
+extern const mp_obj_type_t mp_type_slice;
+extern const mp_obj_type_t mp_type_zip;
+extern const mp_obj_type_t mp_type_array;
+extern const mp_obj_type_t mp_type_super;
+extern const mp_obj_type_t mp_type_gen_instance;
+extern const mp_obj_type_t mp_type_fun_builtin;
+extern const mp_obj_type_t mp_type_fun_bc;
+extern const mp_obj_type_t mp_type_module;
+extern const mp_obj_type_t mp_type_staticmethod;
+extern const mp_obj_type_t mp_type_classmethod;
+extern const mp_obj_type_t mp_type_property;
+extern const mp_obj_type_t mp_type_stringio;
+extern const mp_obj_type_t mp_type_bytesio;
+extern const mp_obj_type_t mp_type_reversed;
+extern const mp_obj_type_t mp_type_polymorph_iter;
+
+// Exceptions
+extern const mp_obj_type_t mp_type_BaseException;
+extern const mp_obj_type_t mp_type_ArithmeticError;
+extern const mp_obj_type_t mp_type_AssertionError;
+extern const mp_obj_type_t mp_type_AttributeError;
+extern const mp_obj_type_t mp_type_EOFError;
+extern const mp_obj_type_t mp_type_Exception;
+extern const mp_obj_type_t mp_type_GeneratorExit;
+extern const mp_obj_type_t mp_type_ImportError;
+extern const mp_obj_type_t mp_type_IndentationError;
+extern const mp_obj_type_t mp_type_IndexError;
+extern const mp_obj_type_t mp_type_KeyboardInterrupt;
+extern const mp_obj_type_t mp_type_KeyError;
+extern const mp_obj_type_t mp_type_LookupError;
+extern const mp_obj_type_t mp_type_MemoryError;
+extern const mp_obj_type_t mp_type_NameError;
+extern const mp_obj_type_t mp_type_NotImplementedError;
+extern const mp_obj_type_t mp_type_OSError;
+extern const mp_obj_type_t mp_type_TimeoutError;
+extern const mp_obj_type_t mp_type_OverflowError;
+extern const mp_obj_type_t mp_type_RuntimeError;
+extern const mp_obj_type_t mp_type_StopIteration;
+extern const mp_obj_type_t mp_type_SyntaxError;
+extern const mp_obj_type_t mp_type_SystemExit;
+extern const mp_obj_type_t mp_type_TypeError;
+extern const mp_obj_type_t mp_type_UnicodeError;
+extern const mp_obj_type_t mp_type_ValueError;
+extern const mp_obj_type_t mp_type_ViperTypeError;
+extern const mp_obj_type_t mp_type_ZeroDivisionError;
+
+// Constant objects, globally accessible
+// The macros are for convenience only
+#define mp_const_none (MP_OBJ_FROM_PTR(&mp_const_none_obj))
+#define mp_const_false (MP_OBJ_FROM_PTR(&mp_const_false_obj))
+#define mp_const_true (MP_OBJ_FROM_PTR(&mp_const_true_obj))
+#define mp_const_empty_bytes (MP_OBJ_FROM_PTR(&mp_const_empty_bytes_obj))
+#define mp_const_empty_tuple (MP_OBJ_FROM_PTR(&mp_const_empty_tuple_obj))
+extern const struct _mp_obj_none_t mp_const_none_obj;
+extern const struct _mp_obj_bool_t mp_const_false_obj;
+extern const struct _mp_obj_bool_t mp_const_true_obj;
+extern const struct _mp_obj_str_t mp_const_empty_bytes_obj;
+extern const struct _mp_obj_tuple_t mp_const_empty_tuple_obj;
+extern const struct _mp_obj_singleton_t mp_const_ellipsis_obj;
+extern const struct _mp_obj_singleton_t mp_const_notimplemented_obj;
+extern const struct _mp_obj_exception_t mp_const_MemoryError_obj;
+extern const struct _mp_obj_exception_t mp_const_GeneratorExit_obj;
+
+// General API for objects
+
+mp_obj_t mp_obj_new_type(qstr name, mp_obj_t bases_tuple, mp_obj_t locals_dict);
+mp_obj_t mp_obj_new_none(void);
+static inline mp_obj_t mp_obj_new_bool(mp_int_t x) { return x ? mp_const_true : mp_const_false; }
+mp_obj_t mp_obj_new_cell(mp_obj_t obj);
+mp_obj_t mp_obj_new_int(mp_int_t value);
+mp_obj_t mp_obj_new_int_from_uint(mp_uint_t value);
+mp_obj_t mp_obj_new_int_from_str_len(const char **str, mp_uint_t len, bool neg, mp_uint_t base);
+mp_obj_t mp_obj_new_int_from_ll(long long val); // this must return a multi-precision integer object (or raise an overflow exception)
+mp_obj_t mp_obj_new_int_from_ull(unsigned long long val); // this must return a multi-precision integer object (or raise an overflow exception)
+mp_obj_t mp_obj_new_str(const char* data, mp_uint_t len, bool make_qstr_if_not_already);
+mp_obj_t mp_obj_new_str_from_vstr(const mp_obj_type_t *type, vstr_t *vstr);
+mp_obj_t mp_obj_new_bytes(const byte* data, mp_uint_t len);
+mp_obj_t mp_obj_new_bytearray(mp_uint_t n, void *items);
+mp_obj_t mp_obj_new_bytearray_by_ref(mp_uint_t n, void *items);
+#if MICROPY_PY_BUILTINS_FLOAT
+mp_obj_t mp_obj_new_int_from_float(mp_float_t val);
+mp_obj_t mp_obj_new_complex(mp_float_t real, mp_float_t imag);
+#endif
+mp_obj_t mp_obj_new_exception(const mp_obj_type_t *exc_type);
+mp_obj_t mp_obj_new_exception_arg1(const mp_obj_type_t *exc_type, mp_obj_t arg);
+mp_obj_t mp_obj_new_exception_args(const mp_obj_type_t *exc_type, mp_uint_t n_args, const mp_obj_t *args);
+mp_obj_t mp_obj_new_exception_msg(const mp_obj_type_t *exc_type, const char *msg);
+mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char *fmt, ...); // counts args by number of % symbols in fmt, excluding %%; can only handle void* sizes (ie no float/double!)
+mp_obj_t mp_obj_new_fun_bc(mp_obj_t def_args, mp_obj_t def_kw_args, const byte *code, const mp_uint_t *const_table);
+mp_obj_t mp_obj_new_fun_native(mp_obj_t def_args_in, mp_obj_t def_kw_args, const void *fun_data, const mp_uint_t *const_table);
+mp_obj_t mp_obj_new_fun_viper(mp_uint_t n_args, void *fun_data, mp_uint_t type_sig);
+mp_obj_t mp_obj_new_fun_asm(mp_uint_t n_args, void *fun_data, mp_uint_t type_sig);
+mp_obj_t mp_obj_new_gen_wrap(mp_obj_t fun);
+mp_obj_t mp_obj_new_closure(mp_obj_t fun, mp_uint_t n_closed, const mp_obj_t *closed);
+mp_obj_t mp_obj_new_tuple(mp_uint_t n, const mp_obj_t *items);
+mp_obj_t mp_obj_new_list(mp_uint_t n, mp_obj_t *items);
+mp_obj_t mp_obj_new_dict(mp_uint_t n_args);
+mp_obj_t mp_obj_new_set(mp_uint_t n_args, mp_obj_t *items);
+mp_obj_t mp_obj_new_slice(mp_obj_t start, mp_obj_t stop, mp_obj_t step);
+mp_obj_t mp_obj_new_super(mp_obj_t type, mp_obj_t obj);
+mp_obj_t mp_obj_new_bound_meth(mp_obj_t meth, mp_obj_t self);
+mp_obj_t mp_obj_new_getitem_iter(mp_obj_t *args);
+mp_obj_t mp_obj_new_module(qstr module_name);
+mp_obj_t mp_obj_new_memoryview(byte typecode, mp_uint_t nitems, void *items);
+
+mp_obj_type_t *mp_obj_get_type(mp_const_obj_t o_in);
+const char *mp_obj_get_type_str(mp_const_obj_t o_in);
+bool mp_obj_is_subclass_fast(mp_const_obj_t object, mp_const_obj_t classinfo); // arguments should be type objects
+mp_obj_t mp_instance_cast_to_native_base(mp_const_obj_t self_in, mp_const_obj_t native_type);
+
+void mp_obj_print_helper(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind);
+void mp_obj_print(mp_obj_t o, mp_print_kind_t kind);
+void mp_obj_print_exception(const mp_print_t *print, mp_obj_t exc);
+
+bool mp_obj_is_true(mp_obj_t arg);
+bool mp_obj_is_callable(mp_obj_t o_in);
+bool mp_obj_equal(mp_obj_t o1, mp_obj_t o2);
+
+mp_int_t mp_obj_get_int(mp_const_obj_t arg);
+mp_int_t mp_obj_get_int_truncated(mp_const_obj_t arg);
+bool mp_obj_get_int_maybe(mp_const_obj_t arg, mp_int_t *value);
+#if MICROPY_PY_BUILTINS_FLOAT
+mp_float_t mp_obj_get_float(mp_obj_t self_in);
+void mp_obj_get_complex(mp_obj_t self_in, mp_float_t *real, mp_float_t *imag);
+#endif
+//qstr mp_obj_get_qstr(mp_obj_t arg);
+void mp_obj_get_array(mp_obj_t o, mp_uint_t *len, mp_obj_t **items);
+void mp_obj_get_array_fixed_n(mp_obj_t o, mp_uint_t len, mp_obj_t **items);
+mp_uint_t mp_get_index(const mp_obj_type_t *type, mp_uint_t len, mp_obj_t index, bool is_slice);
+mp_obj_t mp_obj_id(mp_obj_t o_in);
+mp_obj_t mp_obj_len(mp_obj_t o_in);
+mp_obj_t mp_obj_len_maybe(mp_obj_t o_in); // may return MP_OBJ_NULL
+mp_obj_t mp_obj_subscr(mp_obj_t base, mp_obj_t index, mp_obj_t val);
+mp_obj_t mp_generic_unary_op(mp_uint_t op, mp_obj_t o_in);
+
+// cell
+mp_obj_t mp_obj_cell_get(mp_obj_t self_in);
+void mp_obj_cell_set(mp_obj_t self_in, mp_obj_t obj);
+
+// int
+// For long int, returns value truncated to mp_int_t
+mp_int_t mp_obj_int_get_truncated(mp_const_obj_t self_in);
+// Will raise exception if value doesn't fit into mp_int_t
+mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in);
+#if MICROPY_PY_BUILTINS_FLOAT
+mp_float_t mp_obj_int_as_float(mp_obj_t self_in);
+#endif
+
+// exception
+#define mp_obj_is_native_exception_instance(o) (mp_obj_get_type(o)->make_new == mp_obj_exception_make_new)
+bool mp_obj_is_exception_type(mp_obj_t self_in);
+bool mp_obj_is_exception_instance(mp_obj_t self_in);
+bool mp_obj_exception_match(mp_obj_t exc, mp_const_obj_t exc_type);
+void mp_obj_exception_clear_traceback(mp_obj_t self_in);
+void mp_obj_exception_add_traceback(mp_obj_t self_in, qstr file, size_t line, qstr block);
+void mp_obj_exception_get_traceback(mp_obj_t self_in, size_t *n, size_t **values);
+mp_obj_t mp_obj_exception_get_value(mp_obj_t self_in);
+mp_obj_t mp_obj_exception_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args);
+mp_obj_t mp_alloc_emergency_exception_buf(mp_obj_t size_in);
+void mp_init_emergency_exception_buf(void);
+
+// str
+bool mp_obj_str_equal(mp_obj_t s1, mp_obj_t s2);
+qstr mp_obj_str_get_qstr(mp_obj_t self_in); // use this if you will anyway convert the string to a qstr
+const char *mp_obj_str_get_str(mp_obj_t self_in); // use this only if you need the string to be null terminated
+const char *mp_obj_str_get_data(mp_obj_t self_in, mp_uint_t *len);
+mp_obj_t mp_obj_str_intern(mp_obj_t str);
+void mp_str_print_quoted(const mp_print_t *print, const byte *str_data, mp_uint_t str_len, bool is_bytes);
+
+#if MICROPY_PY_BUILTINS_FLOAT
+// float
+mp_obj_t mp_obj_float_binary_op(mp_uint_t op, mp_float_t lhs_val, mp_obj_t rhs); // can return MP_OBJ_NULL if op not supported
+
+// complex
+void mp_obj_complex_get(mp_obj_t self_in, mp_float_t *real, mp_float_t *imag);
+mp_obj_t mp_obj_complex_binary_op(mp_uint_t op, mp_float_t lhs_real, mp_float_t lhs_imag, mp_obj_t rhs_in); // can return MP_OBJ_NULL if op not supported
+#else
+#define mp_obj_is_float(o) (false)
+#endif
+
+// tuple
+void mp_obj_tuple_get(mp_obj_t self_in, mp_uint_t *len, mp_obj_t **items);
+void mp_obj_tuple_del(mp_obj_t self_in);
+mp_int_t mp_obj_tuple_hash(mp_obj_t self_in);
+
+// list
+struct _mp_obj_list_t;
+void mp_obj_list_init(struct _mp_obj_list_t *o, mp_uint_t n);
+mp_obj_t mp_obj_list_append(mp_obj_t self_in, mp_obj_t arg);
+mp_obj_t mp_obj_list_remove(mp_obj_t self_in, mp_obj_t value);
+void mp_obj_list_get(mp_obj_t self_in, mp_uint_t *len, mp_obj_t **items);
+void mp_obj_list_set_len(mp_obj_t self_in, mp_uint_t len);
+void mp_obj_list_store(mp_obj_t self_in, mp_obj_t index, mp_obj_t value);
+mp_obj_t mp_obj_list_sort(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs);
+
// dict
// A dict object is just a header plus the embedded hash table.
typedef struct _mp_obj_dict_t {
    mp_obj_base_t base;
    mp_map_t map; // holds the dict's entries; embedded, not a pointer
} mp_obj_dict_t;
+void mp_obj_dict_init(mp_obj_dict_t *dict, mp_uint_t n_args);
+mp_uint_t mp_obj_dict_len(mp_obj_t self_in);
+mp_obj_t mp_obj_dict_get(mp_obj_t self_in, mp_obj_t index);
+mp_obj_t mp_obj_dict_store(mp_obj_t self_in, mp_obj_t key, mp_obj_t value);
+mp_obj_t mp_obj_dict_delete(mp_obj_t self_in, mp_obj_t key);
+mp_map_t *mp_obj_dict_get_map(mp_obj_t self_in);
+
+// set
+void mp_obj_set_store(mp_obj_t self_in, mp_obj_t item);
+
+// slice
+void mp_obj_slice_get(mp_obj_t self_in, mp_obj_t *start, mp_obj_t *stop, mp_obj_t *step);
+
// functions
#define MP_OBJ_FUN_ARGS_MAX (0xffff) // to set maximum value in n_args_max below
typedef struct _mp_obj_fun_builtin_t { // use this to make const objects that go in ROM
    mp_obj_base_t base;
    bool is_kw : 1; // set when the function accepts keyword arguments (fun.kw is active)
    mp_uint_t n_args_min : 15; // inclusive
    mp_uint_t n_args_max : 16; // inclusive
    union { // the C function pointer; the active member is chosen by the arity fields above
        mp_fun_0_t _0;
        mp_fun_1_t _1;
        mp_fun_2_t _2;
        mp_fun_3_t _3;
        mp_fun_var_t var;
        mp_fun_kw_t kw;
    } fun;
} mp_obj_fun_builtin_t;
+
+qstr mp_obj_fun_get_name(mp_const_obj_t fun);
+qstr mp_obj_code_get_name(const byte *code_info);
+
+mp_obj_t mp_identity(mp_obj_t self);
+MP_DECLARE_CONST_FUN_OBJ(mp_identity_obj);
+
// module
typedef struct _mp_obj_module_t {
    mp_obj_base_t base;
    qstr name; // interned name of the module
    mp_obj_dict_t *globals; // the module's global namespace dict
} mp_obj_module_t;
+mp_obj_dict_t *mp_obj_module_get_globals(mp_obj_t self_in);
+// check if given module object is a package
+bool mp_obj_is_package(mp_obj_t module);
+
// staticmethod and classmethod types; defined here so we can make const versions
// this structure is used for instances of both staticmethod and classmethod
typedef struct _mp_obj_static_class_method_t {
    mp_obj_base_t base;
    mp_obj_t fun; // the wrapped function object
} mp_obj_static_class_method_t;
// ROM variant of the above, for constant staticmethod/classmethod objects.
typedef struct _mp_rom_obj_static_class_method_t {
    mp_obj_base_t base;
    mp_rom_obj_t fun;
} mp_rom_obj_static_class_method_t;
+
+// property
+const mp_obj_t *mp_obj_property_get(mp_obj_t self_in);
+
// sequence helpers

// slice indexes resolved to particular sequence
typedef struct {
    mp_uint_t start;
    mp_uint_t stop;
    mp_int_t step; // signed: may be negative for reversed iteration
} mp_bound_slice_t;
+
+void mp_seq_multiply(const void *items, mp_uint_t item_sz, mp_uint_t len, mp_uint_t times, void *dest);
+#if MICROPY_PY_BUILTINS_SLICE
+bool mp_seq_get_fast_slice_indexes(mp_uint_t len, mp_obj_t slice, mp_bound_slice_t *indexes);
+#endif
// Copy len items of type item_t from src to dest (regions must not overlap).
// len is parenthesized in the expansion so expressions like `a + b` work.
#define mp_seq_copy(dest, src, len, item_t) memcpy(dest, src, (len) * sizeof(item_t))
// Concatenate: copy len1 items from src1, then len2 items from src2, into
// dest.  Wrapped in do/while(0) so the two memcpy calls behave as a single
// statement (safe in unbraced if/else bodies).
#define mp_seq_cat(dest, src1, len1, src2, len2, item_t) do { memcpy(dest, src1, (len1) * sizeof(item_t)); memcpy(dest + (len1), src2, (len2) * sizeof(item_t)); } while (0)
+bool mp_seq_cmp_bytes(mp_uint_t op, const byte *data1, mp_uint_t len1, const byte *data2, mp_uint_t len2);
+bool mp_seq_cmp_objs(mp_uint_t op, const mp_obj_t *items1, mp_uint_t len1, const mp_obj_t *items2, mp_uint_t len2);
+mp_obj_t mp_seq_index_obj(const mp_obj_t *items, mp_uint_t len, mp_uint_t n_args, const mp_obj_t *args);
+mp_obj_t mp_seq_count_obj(const mp_obj_t *items, mp_uint_t len, mp_obj_t value);
+mp_obj_t mp_seq_extract_slice(mp_uint_t len, const mp_obj_t *seq, mp_bound_slice_t *indexes);
+// Helper to clear stale pointers from allocated, but unused memory, to preclude GC problems
+#define mp_seq_clear(start, len, alloc_len, item_sz) memset((byte*)(start) + (len) * (item_sz), 0, ((alloc_len) - (len)) * (item_sz))
// Replace dest[beg:end] (dest holds dest_len items of size item_sz bytes)
// with slice (slice_len items), where slice_len <= end - beg so dest never
// grows; the tail from end onwards is shifted down to close the gap.
// do/while(0) turns the two-call body into a single statement so the macro
// is safe in unbraced if/else bodies; all arguments are parenthesized.
#define mp_seq_replace_slice_no_grow(dest, dest_len, beg, end, slice, slice_len, item_sz) \
    do { \
        memcpy(((char*)(dest)) + (beg) * (item_sz), slice, (slice_len) * (item_sz)); \
        memmove(((char*)(dest)) + ((beg) + (slice_len)) * (item_sz), ((char*)(dest)) + (end) * (item_sz), ((dest_len) - (end)) * (item_sz)); \
    } while (0)

// Replace a slice with a longer one, growing dest in place by len_adj items
// (the caller must have allocated the extra room): the tail from beg is
// shifted up first, then the new slice is copied in.  Note: end is kept for
// signature compatibility but is not used (slice_len and len_adj determine
// the move).
#define mp_seq_replace_slice_grow_inplace(dest, dest_len, beg, end, slice, slice_len, len_adj, item_sz) \
    do { \
        memmove(((char*)(dest)) + ((beg) + (len_adj)) * (item_sz), ((char*)(dest)) + (beg) * (item_sz), ((dest_len) - (beg)) * (item_sz)); \
        memcpy(((char*)(dest)) + (beg) * (item_sz), slice, (slice_len) * (item_sz)); \
    } while (0)
+
+#endif // __MICROPY_INCLUDED_PY_OBJ_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objarray.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,640 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+#include <stdint.h>
+
+#include "py/nlr.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+#include "py/binary.h"
+#include "py/objstr.h"
+
+#if MICROPY_PY_ARRAY || MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_BUILTINS_MEMORYVIEW
+
+// About memoryview object: We want to reuse as much code as possible from
+// array, and keep the memoryview object 4 words in size so it fits in 1 GC
+// block. Also, memoryview must keep a pointer to the base of the buffer so
+// that the buffer is not GC'd if the original parent object is no longer
+// around (we are assuming that all memoryview'able objects return a pointer
+// which points to the start of a GC chunk). Given the above constraints we
+// do the following:
+// - typecode high bit is set if the buffer is read-write (else read-only)
+// - free is the offset in elements to the first item in the memoryview
+// - len is the length in elements
+// - items points to the start of the original buffer
+// Note that we don't handle the case where the original buffer might change
+// size due to a resize of the original parent object.
+
+// make (& TYPECODE_MASK) a null operation if memoryview not enabled
+#if MICROPY_PY_BUILTINS_MEMORYVIEW
+#define TYPECODE_MASK (0x7f)
+#else
+#define TYPECODE_MASK (~(mp_uint_t)0)
+#endif
+
+// Backing object for array.array, bytearray and memoryview (see the block
+// comment above for how memoryview reuses these fields).
+typedef struct _mp_obj_array_t {
+    mp_obj_base_t base;
+    mp_uint_t typecode : 8; // element typecode; for memoryview the high bit marks a writable buffer
+    // free is number of unused elements after len used elements
+    // alloc size = len + free
+    // (for memoryview, free is reused as the element offset into items)
+    mp_uint_t free : (8 * sizeof(mp_uint_t) - 8);
+    mp_uint_t len; // in elements
+    void *items; // element storage; for memoryview, start of the original buffer
+} mp_obj_array_t;
+
+STATIC mp_obj_t array_iterator_new(mp_obj_t array_in);
+STATIC mp_obj_t array_append(mp_obj_t self_in, mp_obj_t arg);
+STATIC mp_obj_t array_extend(mp_obj_t self_in, mp_obj_t arg_in);
+STATIC mp_int_t array_get_buffer(mp_obj_t o_in, mp_buffer_info_t *bufinfo, mp_uint_t flags);
+
+/******************************************************************************/
+// array
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_ARRAY
+// Print an array/bytearray in its repr form: bytearray(b'...') or
+// array('T', [v0, v1, ...]) (no list part when empty, matching CPython).
+STATIC void array_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_array_t *o = MP_OBJ_TO_PTR(o_in);
+    if (o->typecode == BYTEARRAY_TYPECODE) {
+        mp_print_str(print, "bytearray(b");
+        mp_str_print_quoted(print, o->items, o->len, true);
+    } else {
+        mp_printf(print, "array('%c'", o->typecode);
+        if (o->len > 0) {
+            mp_print_str(print, ", [");
+            for (mp_uint_t i = 0; i < o->len; i++) {
+                if (i > 0) {
+                    mp_print_str(print, ", ");
+                }
+                mp_obj_print_helper(print, mp_binary_get_val_array(o->typecode, o->items, i), PRINT_REPR);
+            }
+            mp_print_str(print, "]");
+        }
+    }
+    mp_print_str(print, ")");
+}
+#endif
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_ARRAY
+// Allocate a new array/bytearray object with n (uninitialised) elements of
+// the given typecode. Raises ValueError if the typecode is unknown
+// (mp_binary_get_size returns 0).
+STATIC mp_obj_array_t *array_new(char typecode, mp_uint_t n) {
+    int typecode_size = mp_binary_get_size('@', typecode, NULL);
+    if (typecode_size == 0) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "bad typecode"));
+    }
+    mp_obj_array_t *o = m_new_obj(mp_obj_array_t);
+    // pick the concrete type depending on which features are compiled in
+    #if MICROPY_PY_BUILTINS_BYTEARRAY && MICROPY_PY_ARRAY
+    o->base.type = (typecode == BYTEARRAY_TYPECODE) ? &mp_type_bytearray : &mp_type_array;
+    #elif MICROPY_PY_BUILTINS_BYTEARRAY
+    o->base.type = &mp_type_bytearray;
+    #else
+    o->base.type = &mp_type_array;
+    #endif
+    o->typecode = typecode;
+    o->free = 0;
+    o->len = n;
+    o->items = m_new(byte, typecode_size * o->len);
+    return o;
+}
+#endif
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_ARRAY
+// Build an array/bytearray of the given typecode from an initializer object,
+// either by raw buffer copy (fast path) or by iterating it element by element.
+STATIC mp_obj_t array_construct(char typecode, mp_obj_t initializer) {
+    // bytearrays can be raw-initialised from anything with the buffer protocol
+    // other arrays can only be raw-initialised from bytes and bytearray objects
+    mp_buffer_info_t bufinfo;
+    if (((MICROPY_PY_BUILTINS_BYTEARRAY
+          && typecode == BYTEARRAY_TYPECODE)
+         || (MICROPY_PY_ARRAY
+             && (MP_OBJ_IS_TYPE(initializer, &mp_type_bytes)
+                 || (MICROPY_PY_BUILTINS_BYTEARRAY && MP_OBJ_IS_TYPE(initializer, &mp_type_bytearray)))))
+        && mp_get_buffer(initializer, &bufinfo, MP_BUFFER_READ)) {
+        // construct array from raw bytes
+        // we round-down the len to make it a multiple of sz (CPython raises error)
+        size_t sz = mp_binary_get_size('@', typecode, NULL);
+        mp_uint_t len = bufinfo.len / sz;
+        mp_obj_array_t *o = array_new(typecode, len);
+        memcpy(o->items, bufinfo.buf, len * sz);
+        return MP_OBJ_FROM_PTR(o);
+    }
+
+    mp_uint_t len;
+    // Try to create array of exact len if initializer len is known
+    mp_obj_t len_in = mp_obj_len_maybe(initializer);
+    if (len_in == MP_OBJ_NULL) {
+        len = 0;
+    } else {
+        len = MP_OBJ_SMALL_INT_VALUE(len_in);
+    }
+
+    mp_obj_array_t *array = array_new(typecode, len);
+
+    mp_obj_t iterable = mp_getiter(initializer);
+    mp_obj_t item;
+    mp_uint_t i = 0;
+    // NOTE(review): when len != 0 this assumes the iterator yields exactly
+    // len items; an iterator yielding more would write past the allocation.
+    while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+        if (len == 0) {
+            // length was unknown up-front: grow one element at a time
+            array_append(MP_OBJ_FROM_PTR(array), item);
+        } else {
+            mp_binary_set_val_array(typecode, array->items, i++, item);
+        }
+    }
+
+    return MP_OBJ_FROM_PTR(array);
+}
+#endif
+
+#if MICROPY_PY_ARRAY
+// array(typecode[, initializer]) constructor.
+STATIC mp_obj_t array_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+    mp_arg_check_num(n_args, n_kw, 1, 2, false);
+
+    // get typecode (only the first character of the string is used)
+    mp_uint_t l;
+    const char *typecode = mp_obj_str_get_data(args[0], &l);
+
+    if (n_args == 1) {
+        // 1 arg: make an empty array
+        return MP_OBJ_FROM_PTR(array_new(*typecode, 0));
+    } else {
+        // 2 args: construct the array from the given object
+        return array_construct(*typecode, args[1]);
+    }
+}
+#endif
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY
+// bytearray([source]) constructor: empty, zero-filled of given length, or
+// built from an initializer object.
+STATIC mp_obj_t bytearray_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+    mp_arg_check_num(n_args, n_kw, 0, 1, false);
+
+    if (n_args == 0) {
+        // no args: construct an empty bytearray
+        return MP_OBJ_FROM_PTR(array_new(BYTEARRAY_TYPECODE, 0));
+    } else if (MP_OBJ_IS_INT(args[0])) {
+        // 1 arg, an integer: construct a blank bytearray of that length
+        mp_uint_t len = mp_obj_get_int(args[0]);
+        mp_obj_array_t *o = array_new(BYTEARRAY_TYPECODE, len);
+        memset(o->items, 0, len); // element size is 1, so len bytes == len elements
+        return MP_OBJ_FROM_PTR(o);
+    } else {
+        // 1 arg: construct the bytearray from that
+        return array_construct(BYTEARRAY_TYPECODE, args[0]);
+    }
+}
+#endif
+
+#if MICROPY_PY_BUILTINS_MEMORYVIEW
+
+// Create a memoryview referencing (not copying) an existing buffer of
+// nitems elements; `free` starts at 0 (no offset into the buffer).
+mp_obj_t mp_obj_new_memoryview(byte typecode, mp_uint_t nitems, void *items) {
+    mp_obj_array_t *self = m_new_obj(mp_obj_array_t);
+    self->base.type = &mp_type_memoryview;
+    self->typecode = typecode;
+    self->free = 0;
+    self->len = nitems;
+    self->items = items;
+    return MP_OBJ_FROM_PTR(self);
+}
+
+// memoryview(obj) constructor: wraps obj's buffer without copying; marks the
+// view writable (typecode high bit) only if obj grants a read-write buffer.
+STATIC mp_obj_t memoryview_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+
+    // TODO possibly allow memoryview constructor to take start/stop so that one
+    // can do memoryview(b, 4, 8) instead of memoryview(b)[4:8] (uses less RAM)
+
+    mp_arg_check_num(n_args, n_kw, 1, 1, false);
+
+    mp_buffer_info_t bufinfo;
+    mp_get_buffer_raise(args[0], &bufinfo, MP_BUFFER_READ);
+
+    mp_obj_array_t *self = MP_OBJ_TO_PTR(mp_obj_new_memoryview(bufinfo.typecode,
+        bufinfo.len / mp_binary_get_size('@', bufinfo.typecode, NULL),
+        bufinfo.buf));
+
+    // test if the object can be written to
+    if (mp_get_buffer(args[0], &bufinfo, MP_BUFFER_RW)) {
+        self->typecode |= 0x80; // used to indicate writable buffer
+    }
+
+    return MP_OBJ_FROM_PTR(self);
+}
+#endif
+
+// Unary operators shared by array/bytearray/memoryview: bool() and len().
+STATIC mp_obj_t array_unary_op(mp_uint_t op, mp_obj_t o_in) {
+    mp_obj_array_t *o = MP_OBJ_TO_PTR(o_in);
+    switch (op) {
+        case MP_UNARY_OP_BOOL: return mp_obj_new_bool(o->len != 0);
+        case MP_UNARY_OP_LEN: return MP_OBJ_NEW_SMALL_INT(o->len);
+        default: return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// Binary operators shared by array/bytearray/memoryview:
+// + (concatenate), += (extend in place), `in` (containment), == (byte-wise).
+STATIC mp_obj_t array_binary_op(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    mp_obj_array_t *lhs = MP_OBJ_TO_PTR(lhs_in);
+    switch (op) {
+        case MP_BINARY_OP_ADD: {
+            // allow to add anything that has the buffer protocol (extension to CPython)
+            mp_buffer_info_t lhs_bufinfo;
+            mp_buffer_info_t rhs_bufinfo;
+            array_get_buffer(lhs_in, &lhs_bufinfo, MP_BUFFER_READ);
+            mp_get_buffer_raise(rhs_in, &rhs_bufinfo, MP_BUFFER_READ);
+
+            size_t sz = mp_binary_get_size('@', lhs_bufinfo.typecode, NULL);
+
+            // convert byte count to element count (in case rhs is not multiple of sz)
+            mp_uint_t rhs_len = rhs_bufinfo.len / sz;
+
+            // note: lhs->len is element count of lhs, lhs_bufinfo.len is byte count
+            mp_obj_array_t *res = array_new(lhs_bufinfo.typecode, lhs->len + rhs_len);
+            mp_seq_cat((byte*)res->items, lhs_bufinfo.buf, lhs_bufinfo.len, rhs_bufinfo.buf, rhs_len * sz, byte);
+            return MP_OBJ_FROM_PTR(res);
+        }
+
+        case MP_BINARY_OP_INPLACE_ADD: {
+            #if MICROPY_PY_BUILTINS_MEMORYVIEW
+            // memoryview has fixed length, so += cannot apply to it
+            if (lhs->base.type == &mp_type_memoryview) {
+                return MP_OBJ_NULL; // op not supported
+            }
+            #endif
+            array_extend(lhs_in, rhs_in);
+            return lhs_in;
+        }
+
+        case MP_BINARY_OP_IN: {
+            /* NOTE `a in b` is `b.__contains__(a)` */
+            mp_buffer_info_t lhs_bufinfo;
+            mp_buffer_info_t rhs_bufinfo;
+
+            // Can search string only in bytearray
+            if (mp_get_buffer(rhs_in, &rhs_bufinfo, MP_BUFFER_READ)) {
+                if (!MP_OBJ_IS_TYPE(lhs_in, &mp_type_bytearray)) {
+                    return mp_const_false;
+                }
+                array_get_buffer(lhs_in, &lhs_bufinfo, MP_BUFFER_READ);
+                return mp_obj_new_bool(
+                    find_subbytes(lhs_bufinfo.buf, lhs_bufinfo.len, rhs_bufinfo.buf, rhs_bufinfo.len, 1) != NULL);
+            }
+
+            // Otherwise, can only look for a scalar numeric value in an array
+            // (not yet implemented: raises NotImplementedError)
+            if (MP_OBJ_IS_INT(rhs_in) || mp_obj_is_float(rhs_in)) {
+                mp_not_implemented("");
+            }
+
+            return mp_const_false;
+        }
+
+        case MP_BINARY_OP_EQUAL: {
+            // compare raw bytes of both buffers; a non-buffer rhs is never equal
+            mp_buffer_info_t lhs_bufinfo;
+            mp_buffer_info_t rhs_bufinfo;
+            array_get_buffer(lhs_in, &lhs_bufinfo, MP_BUFFER_READ);
+            if (!mp_get_buffer(rhs_in, &rhs_bufinfo, MP_BUFFER_READ)) {
+                return mp_const_false;
+            }
+            return mp_obj_new_bool(mp_seq_cmp_bytes(op, lhs_bufinfo.buf, lhs_bufinfo.len, rhs_bufinfo.buf, rhs_bufinfo.len));
+        }
+
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+}
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_ARRAY
+// Append one element to an array/bytearray, growing the allocation in
+// batches of 8 elements when no free slots remain. Returns None.
+STATIC mp_obj_t array_append(mp_obj_t self_in, mp_obj_t arg) {
+    // self is not a memoryview, so we don't need to use (& TYPECODE_MASK)
+    assert((MICROPY_PY_BUILTINS_BYTEARRAY && MP_OBJ_IS_TYPE(self_in, &mp_type_bytearray))
+        || (MICROPY_PY_ARRAY && MP_OBJ_IS_TYPE(self_in, &mp_type_array)));
+    mp_obj_array_t *self = MP_OBJ_TO_PTR(self_in);
+
+    if (self->free == 0) {
+        size_t item_sz = mp_binary_get_size('@', self->typecode, NULL);
+        // TODO: alloc policy
+        self->free = 8;
+        self->items = m_renew(byte, self->items, item_sz * self->len, item_sz * (self->len + self->free));
+        // zero the new slots beyond the one about to be written, so the GC
+        // never sees stale pointers there
+        mp_seq_clear(self->items, self->len + 1, self->len + self->free, item_sz);
+    }
+    mp_binary_set_val_array(self->typecode, self->items, self->len, arg);
+    // only update length/free if set succeeded
+    self->len++;
+    self->free--;
+    return mp_const_none; // return None, as per CPython
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(array_append_obj, array_append);
+
+// Extend an array/bytearray with the contents of any buffer-protocol object
+// (extension to CPython). Returns None.
+STATIC mp_obj_t array_extend(mp_obj_t self_in, mp_obj_t arg_in) {
+    // self is not a memoryview, so we don't need to use (& TYPECODE_MASK)
+    assert((MICROPY_PY_BUILTINS_BYTEARRAY && MP_OBJ_IS_TYPE(self_in, &mp_type_bytearray))
+        || (MICROPY_PY_ARRAY && MP_OBJ_IS_TYPE(self_in, &mp_type_array)));
+    mp_obj_array_t *self = MP_OBJ_TO_PTR(self_in);
+
+    // allow to extend by anything that has the buffer protocol (extension to CPython)
+    mp_buffer_info_t arg_bufinfo;
+    mp_get_buffer_raise(arg_in, &arg_bufinfo, MP_BUFFER_READ);
+
+    size_t sz = mp_binary_get_size('@', self->typecode, NULL);
+
+    // convert byte count to element count (truncates any trailing partial element)
+    mp_uint_t len = arg_bufinfo.len / sz;
+
+    // make sure we have enough room to extend
+    // TODO: alloc policy; at the moment we go conservative
+    if (self->free < len) {
+        // grow to exactly the needed size, leaving no spare capacity
+        self->items = m_renew(byte, self->items, (self->len + self->free) * sz, (self->len + len) * sz);
+        self->free = 0;
+    } else {
+        self->free -= len;
+    }
+
+    // extend
+    mp_seq_copy((byte*)self->items + self->len * sz, arg_bufinfo.buf, len * sz, byte);
+    self->len += len;
+
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(array_extend_obj, array_extend);
+#endif
+
+// Subscript handler for array/bytearray/memoryview, covering load
+// (value == MP_OBJ_SENTINEL), store, slice read, and slice assignment.
+// Delete (value == MP_OBJ_NULL) is not implemented.
+STATIC mp_obj_t array_subscr(mp_obj_t self_in, mp_obj_t index_in, mp_obj_t value) {
+    if (value == MP_OBJ_NULL) {
+        // delete item
+        // TODO implement
+        // TODO: confirmed that both bytearray and array.array support
+        // slice deletion
+        return MP_OBJ_NULL; // op not supported
+    } else {
+        mp_obj_array_t *o = MP_OBJ_TO_PTR(self_in);
+        if (0) {
+#if MICROPY_PY_BUILTINS_SLICE
+        } else if (MP_OBJ_IS_TYPE(index_in, &mp_type_slice)) {
+            mp_bound_slice_t slice;
+            if (!mp_seq_get_fast_slice_indexes(o->len, index_in, &slice)) {
+                mp_not_implemented("only slices with step=1 (aka None) are supported");
+            }
+            if (value != MP_OBJ_SENTINEL) {
+                #if MICROPY_PY_ARRAY_SLICE_ASSIGN
+                // Assign a buffer-like value into the slice o[beg:end]
+                mp_uint_t src_len;
+                void *src_items;
+                size_t item_sz = mp_binary_get_size('@', o->typecode & TYPECODE_MASK, NULL);
+                if (MP_OBJ_IS_OBJ(value) && ((mp_obj_base_t*)MP_OBJ_TO_PTR(value))->type->subscr == array_subscr) {
+                    // value is array, bytearray or memoryview
+                    mp_obj_array_t *src_slice = MP_OBJ_TO_PTR(value);
+                    if (item_sz != mp_binary_get_size('@', src_slice->typecode & TYPECODE_MASK, NULL)) {
+                    compat_error:
+                        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "lhs and rhs should be compatible"));
+                    }
+                    src_len = src_slice->len;
+                    src_items = src_slice->items;
+                    #if MICROPY_PY_BUILTINS_MEMORYVIEW
+                    if (MP_OBJ_IS_TYPE(value, &mp_type_memoryview)) {
+                        // for a memoryview, free holds the element offset into items
+                        src_items = (uint8_t*)src_items + (src_slice->free * item_sz);
+                    }
+                    #endif
+                } else if (MP_OBJ_IS_TYPE(value, &mp_type_bytes)) {
+                    if (item_sz != 1) {
+                        goto compat_error;
+                    }
+                    mp_buffer_info_t bufinfo;
+                    mp_get_buffer_raise(value, &bufinfo, MP_BUFFER_READ);
+                    src_len = bufinfo.len;
+                    src_items = bufinfo.buf;
+                } else {
+                    mp_not_implemented("array/bytes required on right side");
+                }
+
+                // TODO: check src/dst compat
+                // len_adj is how many elements the destination grows (>0) or shrinks (<0)
+                mp_int_t len_adj = src_len - (slice.stop - slice.start);
+                uint8_t* dest_items = o->items;
+                #if MICROPY_PY_BUILTINS_MEMORYVIEW
+                if (o->base.type == &mp_type_memoryview) {
+                    // memoryview cannot be resized: only same-length assignment allowed
+                    if (len_adj != 0) {
+                        goto compat_error;
+                    }
+                    dest_items += o->free * item_sz;
+                }
+                #endif
+                if (len_adj > 0) {
+                    if (len_adj > o->free) {
+                        // TODO: alloc policy; at the moment we go conservative
+                        o->items = m_renew(byte, o->items, (o->len + o->free) * item_sz, (o->len + len_adj) * item_sz);
+                        o->free = 0;
+                        dest_items = o->items;
+                    }
+                    mp_seq_replace_slice_grow_inplace(dest_items, o->len,
+                        slice.start, slice.stop, src_items, src_len, len_adj, item_sz);
+                } else {
+                    mp_seq_replace_slice_no_grow(dest_items, o->len,
+                        slice.start, slice.stop, src_items, src_len, item_sz);
+                    // Clear "freed" elements at the end of list
+                    // TODO: This is actually only needed for typecode=='O'
+                    mp_seq_clear(dest_items, o->len + len_adj, o->len, item_sz);
+                    // TODO: alloc policy after shrinking
+                }
+                o->len += len_adj;
+                return mp_const_none;
+                #else
+                return MP_OBJ_NULL; // op not supported
+                #endif
+            }
+
+            // slice read: memoryview shares the buffer, array/bytearray copy it
+            mp_obj_array_t *res;
+            size_t sz = mp_binary_get_size('@', o->typecode & TYPECODE_MASK, NULL);
+            assert(sz > 0);
+            if (0) {
+                // dummy
+            #if MICROPY_PY_BUILTINS_MEMORYVIEW
+            } else if (o->base.type == &mp_type_memoryview) {
+                res = m_new_obj(mp_obj_array_t);
+                *res = *o;
+                res->free += slice.start; // advance the element offset
+                res->len = slice.stop - slice.start;
+            #endif
+            } else {
+                res = array_new(o->typecode, slice.stop - slice.start);
+                memcpy(res->items, (uint8_t*)o->items + slice.start * sz, (slice.stop - slice.start) * sz);
+            }
+            return MP_OBJ_FROM_PTR(res);
+#endif
+        } else {
+            // single-element load/store
+            mp_uint_t index = mp_get_index(o->base.type, o->len, index_in, false);
+            #if MICROPY_PY_BUILTINS_MEMORYVIEW
+            if (o->base.type == &mp_type_memoryview) {
+                index += o->free; // apply the view's element offset
+                if (value != MP_OBJ_SENTINEL && (o->typecode & 0x80) == 0) {
+                    // store to read-only memoryview
+                    return MP_OBJ_NULL;
+                }
+            }
+            #endif
+            if (value == MP_OBJ_SENTINEL) {
+                // load
+                return mp_binary_get_val_array(o->typecode & TYPECODE_MASK, o->items, index);
+            } else {
+                // store
+                mp_binary_set_val_array(o->typecode & TYPECODE_MASK, o->items, index, value);
+                return mp_const_none;
+            }
+        }
+    }
+}
+
+// Buffer-protocol implementation shared by array/bytearray/memoryview.
+// Returns 0 on success, 1 when a write buffer is requested on a read-only
+// memoryview.
+STATIC mp_int_t array_get_buffer(mp_obj_t o_in, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
+    mp_obj_array_t *o = MP_OBJ_TO_PTR(o_in);
+    size_t sz = mp_binary_get_size('@', o->typecode & TYPECODE_MASK, NULL);
+    bufinfo->buf = o->items;
+    bufinfo->len = o->len * sz;
+    bufinfo->typecode = o->typecode & TYPECODE_MASK;
+    #if MICROPY_PY_BUILTINS_MEMORYVIEW
+    if (o->base.type == &mp_type_memoryview) {
+        if ((o->typecode & 0x80) == 0 && (flags & MP_BUFFER_WRITE)) {
+            // read-only memoryview
+            return 1;
+        }
+        // apply the view's element offset (stored in free)
+        bufinfo->buf = (uint8_t*)bufinfo->buf + (mp_uint_t)o->free * sz;
+    }
+    #else
+    (void)flags;
+    #endif
+    return 0;
+}
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_ARRAY
+// Method table shared by array and bytearray (memoryview has no methods here).
+STATIC const mp_rom_map_elem_t array_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_append), MP_ROM_PTR(&array_append_obj) },
+    { MP_ROM_QSTR(MP_QSTR_extend), MP_ROM_PTR(&array_extend_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(array_locals_dict, array_locals_dict_table);
+#endif
+
+#if MICROPY_PY_ARRAY
+// Type object for array.array; all three types below share the same
+// iterator, operators, subscript and buffer-protocol handlers.
+const mp_obj_type_t mp_type_array = {
+    { &mp_type_type },
+    .name = MP_QSTR_array,
+    .print = array_print,
+    .make_new = array_make_new,
+    .getiter = array_iterator_new,
+    .unary_op = array_unary_op,
+    .binary_op = array_binary_op,
+    .subscr = array_subscr,
+    .buffer_p = { .get_buffer = array_get_buffer },
+    .locals_dict = (mp_obj_dict_t*)&array_locals_dict,
+};
+#endif
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY
+// Type object for bytearray.
+const mp_obj_type_t mp_type_bytearray = {
+    { &mp_type_type },
+    .name = MP_QSTR_bytearray,
+    .print = array_print,
+    .make_new = bytearray_make_new,
+    .getiter = array_iterator_new,
+    .unary_op = array_unary_op,
+    .binary_op = array_binary_op,
+    .subscr = array_subscr,
+    .buffer_p = { .get_buffer = array_get_buffer },
+    .locals_dict = (mp_obj_dict_t*)&array_locals_dict,
+};
+#endif
+
+#if MICROPY_PY_BUILTINS_MEMORYVIEW
+// Type object for memoryview; no print and no methods.
+const mp_obj_type_t mp_type_memoryview = {
+    { &mp_type_type },
+    .name = MP_QSTR_memoryview,
+    .make_new = memoryview_make_new,
+    .getiter = array_iterator_new,
+    .unary_op = array_unary_op,
+    .binary_op = array_binary_op,
+    .subscr = array_subscr,
+    .buffer_p = { .get_buffer = array_get_buffer },
+};
+#endif
+
+/* unused
+mp_uint_t mp_obj_array_len(mp_obj_t self_in) {
+ return ((mp_obj_array_t *)self_in)->len;
+}
+*/
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY
+// Create a bytearray containing a copy of n bytes from items.
+mp_obj_t mp_obj_new_bytearray(mp_uint_t n, void *items) {
+    mp_obj_array_t *o = array_new(BYTEARRAY_TYPECODE, n);
+    memcpy(o->items, items, n);
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Create bytearray which references specified memory area
+// (no copy: caller must keep items alive and correctly sized).
+mp_obj_t mp_obj_new_bytearray_by_ref(mp_uint_t n, void *items) {
+    mp_obj_array_t *o = m_new_obj(mp_obj_array_t);
+    o->base.type = &mp_type_bytearray;
+    o->typecode = BYTEARRAY_TYPECODE;
+    o->free = 0;
+    o->len = n;
+    o->items = items;
+    return MP_OBJ_FROM_PTR(o);
+}
+#endif
+
+/******************************************************************************/
+// array iterator
+
+// Iterator over array/bytearray/memoryview elements.
+typedef struct _mp_obj_array_it_t {
+    mp_obj_base_t base;
+    mp_obj_array_t *array; // object being iterated (kept alive for the GC)
+    mp_uint_t offset;      // element offset into items (nonzero only for memoryview)
+    mp_uint_t cur;         // next element index to yield
+} mp_obj_array_it_t;
+
+// Yield the next element, or MP_OBJ_STOP_ITERATION when exhausted.
+STATIC mp_obj_t array_it_iternext(mp_obj_t self_in) {
+    mp_obj_array_it_t *self = MP_OBJ_TO_PTR(self_in);
+    if (self->cur < self->array->len) {
+        return mp_binary_get_val_array(self->array->typecode & TYPECODE_MASK, self->array->items, self->offset + self->cur++);
+    } else {
+        return MP_OBJ_STOP_ITERATION;
+    }
+}
+
+STATIC const mp_obj_type_t array_it_type = {
+    { &mp_type_type },
+    .name = MP_QSTR_iterator,
+    .getiter = mp_identity, // iterators are their own iterator
+    .iternext = array_it_iternext,
+};
+
+// Create a new iterator positioned at the first element; for a memoryview
+// the view's element offset (stored in free) becomes the iterator offset.
+STATIC mp_obj_t array_iterator_new(mp_obj_t array_in) {
+    mp_obj_array_t *array = MP_OBJ_TO_PTR(array_in);
+    mp_obj_array_it_t *o = m_new0(mp_obj_array_it_t, 1);
+    o->base.type = &array_it_type;
+    o->array = array;
+    #if MICROPY_PY_BUILTINS_MEMORYVIEW
+    if (array->base.type == &mp_type_memoryview) {
+        o->offset = array->free;
+    }
+    #endif
+    return MP_OBJ_FROM_PTR(o);
+}
+
+#endif // MICROPY_PY_ARRAY || MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_BUILTINS_MEMORYVIEW
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objattrtuple.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,95 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/objtuple.h"
+
+#if MICROPY_PY_ATTRTUPLE || MICROPY_PY_COLLECTIONS
+
+// this helper function is used by collections.namedtuple
+#if !MICROPY_PY_COLLECTIONS
+STATIC
+#endif
+// Print a tuple as (name0=v0, name1=v1, ...) using the given field names;
+// fields must have at least o->len entries.
+void mp_obj_attrtuple_print_helper(const mp_print_t *print, const qstr *fields, mp_obj_tuple_t *o) {
+    mp_print_str(print, "(");
+    for (mp_uint_t i = 0; i < o->len; i++) {
+        if (i > 0) {
+            mp_print_str(print, ", ");
+        }
+        mp_printf(print, "%q=", fields[i]);
+        mp_obj_print_helper(print, o->items[i], PRINT_REPR);
+    }
+    mp_print_str(print, ")");
+}
+
+#endif
+
+#if MICROPY_PY_ATTRTUPLE
+
+// Print handler: the field-name array pointer is stashed one slot past the
+// visible items (at items[len], see mp_obj_new_attrtuple).
+STATIC void mp_obj_attrtuple_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_tuple_t *o = MP_OBJ_TO_PTR(o_in);
+    const qstr *fields = (const qstr*)MP_OBJ_TO_PTR(o->items[o->len]);
+    mp_obj_attrtuple_print_helper(print, fields, o);
+}
+
+// Attribute handler: only attribute load is supported (attrtuples are
+// immutable); stores leave dest untouched so the VM raises AttributeError.
+STATIC void mp_obj_attrtuple_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    if (dest[0] == MP_OBJ_NULL) {
+        // load attribute
+        mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
+        mp_uint_t len = self->len;
+        const qstr *fields = (const qstr*)MP_OBJ_TO_PTR(self->items[len]);
+        // linear scan of field names; dest stays MP_OBJ_NULL if not found
+        for (mp_uint_t i = 0; i < len; i++) {
+            if (fields[i] == attr) {
+                dest[0] = self->items[i];
+                return;
+            }
+        }
+    }
+}
+
+// Create an attrtuple of n items; the fields pointer is stored in a hidden
+// extra slot at items[n]. NOTE(review): fields appears to be expected to
+// outlive the tuple (typically a static/ROM table) — confirm at call sites.
+mp_obj_t mp_obj_new_attrtuple(const qstr *fields, mp_uint_t n, const mp_obj_t *items) {
+    mp_obj_tuple_t *o = m_new_obj_var(mp_obj_tuple_t, mp_obj_t, n + 1);
+    o->base.type = &mp_type_attrtuple;
+    o->len = n;
+    for (mp_uint_t i = 0; i < n; i++) {
+        o->items[i] = items[i];
+    }
+    o->items[n] = MP_OBJ_FROM_PTR(fields);
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Type object for attrtuple; reuses the plain tuple's operator handlers.
+const mp_obj_type_t mp_type_attrtuple = {
+    { &mp_type_type },
+    .name = MP_QSTR_tuple, // reuse tuple to save on a qstr
+    .print = mp_obj_attrtuple_print,
+    .unary_op = mp_obj_tuple_unary_op,
+    .binary_op = mp_obj_tuple_binary_op,
+    .attr = mp_obj_attrtuple_attr,
+    .subscr = mp_obj_tuple_subscr,
+    .getiter = mp_obj_tuple_getiter,
+};
+
+#endif // MICROPY_PY_ATTRTUPLE
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objbool.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,96 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "py/obj.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+
+// Boxed bool; only the two singletons mp_const_true/false_obj exist.
+typedef struct _mp_obj_bool_t {
+    mp_obj_base_t base;
+    bool value;
+} mp_obj_bool_t;
+
+// Print True/False, or lowercase true/false for JSON output when ujson
+// support is compiled in.
+STATIC void bool_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    mp_obj_bool_t *self = MP_OBJ_TO_PTR(self_in);
+    if (MICROPY_PY_UJSON && kind == PRINT_JSON) {
+        if (self->value) {
+            mp_print_str(print, "true");
+        } else {
+            mp_print_str(print, "false");
+        }
+    } else {
+        if (self->value) {
+            mp_print_str(print, "True");
+        } else {
+            mp_print_str(print, "False");
+        }
+    }
+}
+
+// bool([x]) constructor: returns one of the two singletons (False for no
+// args, otherwise x's truthiness).
+STATIC mp_obj_t bool_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+    mp_arg_check_num(n_args, n_kw, 0, 1, false);
+
+    switch (n_args) {
+        case 0:
+            return mp_const_false;
+        case 1:
+        default: // must be 0 or 1
+            if (mp_obj_is_true(args[0])) { return mp_const_true; } else { return mp_const_false; }
+    }
+}
+
+// Unary operators on bool; arithmetic ops treat the value as int 0/1.
+STATIC mp_obj_t bool_unary_op(mp_uint_t op, mp_obj_t o_in) {
+    mp_int_t value = ((mp_obj_bool_t*)MP_OBJ_TO_PTR(o_in))->value;
+    switch (op) {
+        case MP_UNARY_OP_BOOL: return o_in;
+        // needs to hash to the same value as if converting to an integer
+        case MP_UNARY_OP_HASH: return MP_OBJ_NEW_SMALL_INT(value);
+        case MP_UNARY_OP_POSITIVE: return MP_OBJ_NEW_SMALL_INT(value);
+        case MP_UNARY_OP_NEGATIVE: return MP_OBJ_NEW_SMALL_INT(-value);
+        case MP_UNARY_OP_INVERT: return MP_OBJ_NEW_SMALL_INT(~value);
+        default: return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// Binary operators: delegate by converting the bool to small int 0/1.
+STATIC mp_obj_t bool_binary_op(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    mp_obj_bool_t *self = MP_OBJ_TO_PTR(lhs_in);
+    return mp_binary_op(op, MP_OBJ_NEW_SMALL_INT(self->value), rhs_in);
+}
+
+// Type object for bool, plus the two ROM singleton instances.
+const mp_obj_type_t mp_type_bool = {
+    { &mp_type_type },
+    .name = MP_QSTR_bool,
+    .print = bool_print,
+    .make_new = bool_make_new,
+    .unary_op = bool_unary_op,
+    .binary_op = bool_binary_op,
+};
+
+const mp_obj_bool_t mp_const_false_obj = {{&mp_type_bool}, false};
+const mp_obj_bool_t mp_const_true_obj = {{&mp_type_bool}, true};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objboundmeth.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,107 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+
+#include "py/obj.h"
+#include "py/runtime.h"
+
+// A bound method: pairs a callable with the instance it is bound to.
+typedef struct _mp_obj_bound_meth_t {
+    mp_obj_base_t base;
+    mp_obj_t meth; // underlying function object
+    mp_obj_t self; // instance inserted as the first call argument
+} mp_obj_bound_meth_t;
+
+#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
+// Debug repr: <bound_method ADDR SELF.METH> (detailed error reporting only).
+STATIC void bound_meth_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_bound_meth_t *o = MP_OBJ_TO_PTR(o_in);
+    mp_printf(print, "<bound_method %p ", o);
+    mp_obj_print_helper(print, o->self, PRINT_REPR);
+    mp_print_str(print, ".");
+    mp_obj_print_helper(print, o->meth, PRINT_REPR);
+    mp_print_str(print, ">");
+}
+#endif
+
+// Call the bound method: builds a new argument array with self prepended,
+// then delegates to the underlying function.
+STATIC mp_obj_t bound_meth_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_obj_bound_meth_t *self = MP_OBJ_TO_PTR(self_in);
+
+    // need to insert self->self before all other args and then call self->meth
+
+    // kwargs occupy two slots each (name, value)
+    size_t n_total = n_args + 2 * n_kw;
+    mp_obj_t *args2 = NULL;
+    mp_obj_t *free_args2 = NULL;
+    if (n_total > 4) {
+        // try to use heap to allocate temporary args array
+        args2 = m_new_maybe(mp_obj_t, 1 + n_total);
+        free_args2 = args2;
+    }
+    if (args2 == NULL) {
+        // (fallback to) use stack to allocate temporary args array
+        args2 = alloca(sizeof(mp_obj_t) * (1 + n_total));
+    }
+    args2[0] = self->self;
+    memcpy(args2 + 1, args, n_total * sizeof(mp_obj_t));
+    mp_obj_t res = mp_call_function_n_kw(self->meth, n_args + 1, n_kw, &args2[0]);
+    if (free_args2 != NULL) {
+        m_del(mp_obj_t, free_args2, 1 + n_total);
+    }
+    return res;
+}
+
+#if MICROPY_PY_FUNCTION_ATTRS
+// Attribute handler: only supports loading __name__, which is forwarded to
+// the underlying function.
+STATIC void bound_meth_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    if (dest[0] != MP_OBJ_NULL) {
+        // not load attribute
+        return;
+    }
+    if (attr == MP_QSTR___name__) {
+        mp_obj_bound_meth_t *o = MP_OBJ_TO_PTR(self_in);
+        dest[0] = MP_OBJ_NEW_QSTR(mp_obj_fun_get_name(o->meth));
+    }
+}
+#endif
+
+// Type object for bound methods (file-local; created only via
+// mp_obj_new_bound_meth below).
+STATIC const mp_obj_type_t mp_type_bound_meth = {
+    { &mp_type_type },
+    .name = MP_QSTR_bound_method,
+#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
+    .print = bound_meth_print,
+#endif
+    .call = bound_meth_call,
+#if MICROPY_PY_FUNCTION_ATTRS
+    .attr = bound_meth_attr,
+#endif
+};
+
+// Create a bound-method object pairing meth with self.
+mp_obj_t mp_obj_new_bound_meth(mp_obj_t meth, mp_obj_t self) {
+    mp_obj_bound_meth_t *o = m_new_obj(mp_obj_bound_meth_t);
+    o->base.type = &mp_type_bound_meth;
+    o->meth = meth;
+    o->self = self;
+    return MP_OBJ_FROM_PTR(o);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objcell.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,71 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/obj.h"
+
+// A cell: a single boxed object slot, used to implement closed-over variables.
+typedef struct _mp_obj_cell_t {
+    mp_obj_base_t base;
+    mp_obj_t obj; // current value; MP_OBJ_NULL when the variable is unbound
+} mp_obj_cell_t;
+
+// Return the value currently held by the cell (may be MP_OBJ_NULL if unbound).
+mp_obj_t mp_obj_cell_get(mp_obj_t self_in) {
+    mp_obj_cell_t *cell = MP_OBJ_TO_PTR(self_in);
+    return cell->obj;
+}
+
+// Store a new value into the cell, replacing whatever was there before.
+void mp_obj_cell_set(mp_obj_t self_in, mp_obj_t obj) {
+    mp_obj_cell_t *cell = MP_OBJ_TO_PTR(self_in);
+    cell->obj = obj;
+}
+
+#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
+// Debug representation: "<cell <ptr> <repr-of-value>>", or "(nil)" for an
+// unbound cell.  Only compiled in with detailed error reporting.
+STATIC void cell_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_cell_t *o = MP_OBJ_TO_PTR(o_in);
+    mp_printf(print, "<cell %p ", o->obj);
+    if (o->obj == MP_OBJ_NULL) {
+        mp_print_str(print, "(nil)");
+    } else {
+        mp_obj_print_helper(print, o->obj, PRINT_REPR);
+    }
+    mp_print_str(print, ">");
+}
+#endif
+
+// Type object for cells; not user-visible, so it has an empty name.
+STATIC const mp_obj_type_t mp_type_cell = {
+    { &mp_type_type },
+    .name = MP_QSTR_, // cell representation is just value in < >
+#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
+    .print = cell_print,
+#endif
+};
+
+// Allocate a new cell initialised with the given value.
+mp_obj_t mp_obj_new_cell(mp_obj_t obj) {
+    mp_obj_cell_t *cell = m_new_obj(mp_obj_cell_t);
+    cell->base.type = &mp_type_cell;
+    cell->obj = obj;
+    return MP_OBJ_FROM_PTR(cell);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objclosure.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,97 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+
+#include "py/obj.h"
+#include "py/runtime.h"
+
+// A closure: a callable plus an inline array of closed-over objects that are
+// passed as leading arguments when the closure is called.
+typedef struct _mp_obj_closure_t {
+    mp_obj_base_t base;
+    mp_obj_t fun;        // the underlying function to call
+    mp_uint_t n_closed;  // number of entries in closed[]
+    mp_obj_t closed[];   // flexible array of closed-over objects
+} mp_obj_closure_t;
+
+// Call a closure: build a new argument array [closed vars..., caller args...]
+// and invoke the underlying function with it.  A small fixed stack buffer is
+// used when the total fits; otherwise a temporary heap array is allocated.
+STATIC mp_obj_t closure_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_obj_closure_t *self = MP_OBJ_TO_PTR(self_in);
+
+    // need to concatenate closed-over-vars and args
+
+    mp_uint_t n_total = self->n_closed + n_args + 2 * n_kw;
+    if (n_total <= 5) {
+        // use stack to allocate temporary args array
+        mp_obj_t args2[5];
+        memcpy(args2, self->closed, self->n_closed * sizeof(mp_obj_t));
+        memcpy(args2 + self->n_closed, args, (n_args + 2 * n_kw) * sizeof(mp_obj_t));
+        return mp_call_function_n_kw(self->fun, self->n_closed + n_args, n_kw, args2);
+    } else {
+        // use heap to allocate temporary args array
+        mp_obj_t *args2 = m_new(mp_obj_t, n_total);
+        memcpy(args2, self->closed, self->n_closed * sizeof(mp_obj_t));
+        memcpy(args2 + self->n_closed, args, (n_args + 2 * n_kw) * sizeof(mp_obj_t));
+        mp_obj_t res = mp_call_function_n_kw(self->fun, self->n_closed + n_args, n_kw, args2);
+        m_del(mp_obj_t, args2, n_total);
+        return res;
+    }
+}
+
+#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
+// Debug representation: function repr, address, count and reprs of the
+// closed-over values.  Only compiled in with detailed error reporting.
+STATIC void closure_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_closure_t *o = MP_OBJ_TO_PTR(o_in);
+    mp_print_str(print, "<closure ");
+    mp_obj_print_helper(print, o->fun, PRINT_REPR);
+    mp_printf(print, " at %p, n_closed=%u ", o, (int)o->n_closed);
+    for (mp_uint_t i = 0; i < o->n_closed; i++) {
+        // closed slots may be MP_OBJ_NULL before the variable is first assigned
+        if (o->closed[i] == MP_OBJ_NULL) {
+            mp_print_str(print, "(nil)");
+        } else {
+            mp_obj_print_helper(print, o->closed[i], PRINT_REPR);
+        }
+        mp_print_str(print, " ");
+    }
+    mp_print_str(print, ">");
+}
+#endif
+
+// Type object for closures; instances are created by mp_obj_new_closure.
+const mp_obj_type_t closure_type = {
+    { &mp_type_type },
+    .name = MP_QSTR_closure,
+#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
+    .print = closure_print,
+#endif
+    .call = closure_call,
+};
+
+// Create a closure over `fun`, copying the n_closed_over objects from
+// `closed` into the closure's inline array.
+mp_obj_t mp_obj_new_closure(mp_obj_t fun, mp_uint_t n_closed_over, const mp_obj_t *closed) {
+    mp_obj_closure_t *clo = m_new_obj_var(mp_obj_closure_t, mp_obj_t, n_closed_over);
+    clo->base.type = &closure_type;
+    clo->n_closed = n_closed_over;
+    clo->fun = fun;
+    memcpy(clo->closed, closed, n_closed_over * sizeof(mp_obj_t));
+    return MP_OBJ_FROM_PTR(clo);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objcomplex.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,251 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/obj.h"
+#include "py/parsenum.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_COMPLEX
+
+#include <math.h>
+#include "py/formatfloat.h"
+
+// A complex number stored as separate real and imaginary float parts.
+typedef struct _mp_obj_complex_t {
+    mp_obj_base_t base;
+    mp_float_t real;
+    mp_float_t imag;
+} mp_obj_complex_t;
+
+// Print a complex number in Python style: "Nj" for a pure-imaginary value,
+// "(R+Ij)" otherwise.  Precision depends on the configured float width.
+STATIC void complex_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_complex_t *o = MP_OBJ_TO_PTR(o_in);
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+    char buf[16];
+    const int precision = 7;
+#else
+    char buf[32];
+    const int precision = 16;
+#endif
+    if (o->real == 0) {
+        // pure imaginary: printed without parentheses, e.g. "2j"
+        mp_format_float(o->imag, buf, sizeof(buf), 'g', precision, '\0');
+        mp_printf(print, "%sj", buf);
+    } else {
+        mp_format_float(o->real, buf, sizeof(buf), 'g', precision, '\0');
+        mp_printf(print, "(%s", buf);
+        // explicit '+' so negative/NaN imag still reads as R+Ij / R-Ij / R+nanj
+        if (o->imag >= 0 || isnan(o->imag)) {
+            mp_print_str(print, "+");
+        }
+        mp_format_float(o->imag, buf, sizeof(buf), 'g', precision, '\0');
+        mp_printf(print, "%sj)", buf);
+    }
+}
+
+// Constructor: complex(), complex(str), complex(number), complex(re[, im]).
+// With two args either of which is complex, the result follows
+// complex(a, b) == a + b*1j.
+STATIC mp_obj_t complex_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+    mp_arg_check_num(n_args, n_kw, 0, 2, false);
+
+    switch (n_args) {
+        case 0:
+            return mp_obj_new_complex(0, 0);
+
+        case 1:
+            if (MP_OBJ_IS_STR(args[0])) {
+                // a string, parse it
+                mp_uint_t l;
+                const char *s = mp_obj_str_get_data(args[0], &l);
+                return mp_parse_num_decimal(s, l, true, true, NULL);
+            } else if (MP_OBJ_IS_TYPE(args[0], &mp_type_complex)) {
+                // a complex, just return it
+                return args[0];
+            } else {
+                // something else, try to cast it to a complex
+                return mp_obj_new_complex(mp_obj_get_float(args[0]), 0);
+            }
+
+        case 2:
+        default: {
+            mp_float_t real, imag;
+            if (MP_OBJ_IS_TYPE(args[0], &mp_type_complex)) {
+                mp_obj_complex_get(args[0], &real, &imag);
+            } else {
+                real = mp_obj_get_float(args[0]);
+                imag = 0;
+            }
+            if (MP_OBJ_IS_TYPE(args[1], &mp_type_complex)) {
+                // a + (re2+im2*j)*1j == (re_a - im2) + (im_a + re2)*j
+                mp_float_t real2, imag2;
+                mp_obj_complex_get(args[1], &real2, &imag2);
+                real -= imag2;
+                imag += real2;
+            } else {
+                imag += mp_obj_get_float(args[1]);
+            }
+            return mp_obj_new_complex(real, imag);
+        }
+    }
+}
+
+// Unary operations on complex: bool, +z, -z.  Anything else is unsupported.
+STATIC mp_obj_t complex_unary_op(mp_uint_t op, mp_obj_t o_in) {
+    mp_obj_complex_t *z = MP_OBJ_TO_PTR(o_in);
+    if (op == MP_UNARY_OP_BOOL) {
+        return mp_obj_new_bool(z->real != 0 || z->imag != 0);
+    }
+    if (op == MP_UNARY_OP_POSITIVE) {
+        return o_in;
+    }
+    if (op == MP_UNARY_OP_NEGATIVE) {
+        return mp_obj_new_complex(-z->real, -z->imag);
+    }
+    return MP_OBJ_NULL; // op not supported
+}
+
+// Binary-op slot: unpack the lhs complex and defer to the shared helper.
+STATIC mp_obj_t complex_binary_op(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    mp_obj_complex_t *z = MP_OBJ_TO_PTR(lhs_in);
+    return mp_obj_complex_binary_op(op, z->real, z->imag, rhs_in);
+}
+
+// Load-only attribute access exposing .real and .imag as floats.
+STATIC void complex_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    if (dest[0] != MP_OBJ_NULL) {
+        return; // stores/deletes are not handled
+    }
+    mp_obj_complex_t *z = MP_OBJ_TO_PTR(self_in);
+    if (attr == MP_QSTR_real) {
+        dest[0] = mp_obj_new_float(z->real);
+    } else if (attr == MP_QSTR_imag) {
+        dest[0] = mp_obj_new_float(z->imag);
+    }
+}
+
+// The Python `complex` type.
+const mp_obj_type_t mp_type_complex = {
+    { &mp_type_type },
+    .name = MP_QSTR_complex,
+    .print = complex_print,
+    .make_new = complex_make_new,
+    .unary_op = complex_unary_op,
+    .binary_op = complex_binary_op,
+    .attr = complex_attr,
+};
+
+// Allocate a new complex object with the given real and imaginary parts.
+mp_obj_t mp_obj_new_complex(mp_float_t real, mp_float_t imag) {
+    mp_obj_complex_t *z = m_new_obj(mp_obj_complex_t);
+    z->base.type = &mp_type_complex;
+    z->imag = imag;
+    z->real = real;
+    return MP_OBJ_FROM_PTR(z);
+}
+
+// Extract real and imaginary parts; self_in must already be a complex.
+void mp_obj_complex_get(mp_obj_t self_in, mp_float_t *real, mp_float_t *imag) {
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_complex));
+    mp_obj_complex_t *z = MP_OBJ_TO_PTR(self_in);
+    *imag = z->imag;
+    *real = z->real;
+}
+
+// Compute (lhs_real + lhs_imag*j) <op> rhs_in and return a new complex
+// object.  rhs_in may be any type convertible to complex.  Returns
+// MP_OBJ_NULL for unsupported ops; raises TypeError for floor division and
+// ZeroDivisionError for division by zero / invalid zero powers.
+mp_obj_t mp_obj_complex_binary_op(mp_uint_t op, mp_float_t lhs_real, mp_float_t lhs_imag, mp_obj_t rhs_in) {
+    mp_float_t rhs_real, rhs_imag;
+    mp_obj_get_complex(rhs_in, &rhs_real, &rhs_imag); // can be any type, this function will convert to float (if possible)
+    switch (op) {
+        case MP_BINARY_OP_ADD:
+        case MP_BINARY_OP_INPLACE_ADD:
+            lhs_real += rhs_real;
+            lhs_imag += rhs_imag;
+            break;
+        case MP_BINARY_OP_SUBTRACT:
+        case MP_BINARY_OP_INPLACE_SUBTRACT:
+            lhs_real -= rhs_real;
+            lhs_imag -= rhs_imag;
+            break;
+        case MP_BINARY_OP_MULTIPLY:
+        case MP_BINARY_OP_INPLACE_MULTIPLY: {
+            // (a+bj)(c+dj) = (ac-bd) + (ad+bc)j; also entered from the
+            // true-divide case below after rhs is replaced by its reciprocal
+            mp_float_t real;
+            multiply:
+            real = lhs_real * rhs_real - lhs_imag * rhs_imag;
+            lhs_imag = lhs_real * rhs_imag + lhs_imag * rhs_real;
+            lhs_real = real;
+            break;
+        }
+        case MP_BINARY_OP_FLOOR_DIVIDE:
+        case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE:
+            nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, "can't do truncated division of a complex number"));
+
+        case MP_BINARY_OP_TRUE_DIVIDE:
+        case MP_BINARY_OP_INPLACE_TRUE_DIVIDE:
+            if (rhs_imag == 0) {
+                // division by a real number
+                if (rhs_real == 0) {
+                    nlr_raise(mp_obj_new_exception_msg(&mp_type_ZeroDivisionError, "complex division by zero"));
+                }
+                lhs_real /= rhs_real;
+                lhs_imag /= rhs_real;
+            } else if (rhs_real == 0) {
+                // division by a pure-imaginary number
+                mp_float_t real = lhs_imag / rhs_imag;
+                lhs_imag = -lhs_real / rhs_imag;
+                lhs_real = real;
+            } else {
+                // general case: multiply lhs by conj(rhs)/|rhs|^2
+                mp_float_t rhs_len_sq = rhs_real*rhs_real + rhs_imag*rhs_imag;
+                rhs_real /= rhs_len_sq;
+                rhs_imag /= -rhs_len_sq;
+                goto multiply;
+            }
+            break;
+
+        case MP_BINARY_OP_POWER:
+        case MP_BINARY_OP_INPLACE_POWER: {
+            // z1**z2 = exp(z2*ln(z1))
+            //        = exp(z2*(ln(|z1|)+i*arg(z1)))
+            //        = exp( (x2*ln1 - y2*arg1) + i*(y2*ln1 + x2*arg1) )
+            //        = exp(x3 + i*y3)
+            //        = exp(x3)*(cos(y3) + i*sin(y3))
+            mp_float_t abs1 = MICROPY_FLOAT_C_FUN(sqrt)(lhs_real*lhs_real + lhs_imag*lhs_imag);
+            if (abs1 == 0) {
+                if (rhs_imag == 0 && rhs_real >= 0) {
+                    // 0**0 == 1 and 0**positive-real == 0 (lhs_imag is
+                    // already 0 here since |lhs| == 0).  The old code set
+                    // lhs_real = 1 unconditionally, so e.g. 0j**2 wrongly
+                    // evaluated to (1+0j).
+                    lhs_real = (rhs_real == 0);
+                } else {
+                    // negative-real or complex exponent of zero: undefined,
+                    // matching CPython's ZeroDivisionError
+                    nlr_raise(mp_obj_new_exception_msg(&mp_type_ZeroDivisionError, "0.0 to a complex power"));
+                }
+            } else {
+                mp_float_t ln1 = MICROPY_FLOAT_C_FUN(log)(abs1);
+                mp_float_t arg1 = MICROPY_FLOAT_C_FUN(atan2)(lhs_imag, lhs_real);
+                mp_float_t x3 = rhs_real * ln1 - rhs_imag * arg1;
+                mp_float_t y3 = rhs_imag * ln1 + rhs_real * arg1;
+                mp_float_t exp_x3 = MICROPY_FLOAT_C_FUN(exp)(x3);
+                lhs_real = exp_x3 * MICROPY_FLOAT_C_FUN(cos)(y3);
+                lhs_imag = exp_x3 * MICROPY_FLOAT_C_FUN(sin)(y3);
+            }
+            break;
+        }
+
+        case MP_BINARY_OP_EQUAL: return mp_obj_new_bool(lhs_real == rhs_real && lhs_imag == rhs_imag);
+
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+    return mp_obj_new_complex(lhs_real, lhs_imag);
+}
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objdict.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,624 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/obj.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/objtype.h"
+
+// True for dict and any subtype sharing dict's make_new (e.g. OrderedDict).
+#define MP_OBJ_IS_DICT_TYPE(o) (MP_OBJ_IS_OBJ(o) && ((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type->make_new == dict_make_new)
+
+STATIC mp_obj_t dict_update(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs);
+
+// This is a helper function to iterate through a dictionary. The state of
+// the iteration is held in *cur and should be initialised with zero for the
+// first call. Will return NULL when no more elements are available.
+STATIC mp_map_elem_t *dict_iter_next(mp_obj_dict_t *dict, mp_uint_t *cur) {
+    mp_uint_t max = dict->map.alloc;
+    mp_map_t *map = &dict->map;
+
+    // linear scan over the hash table, skipping empty/deleted slots
+    for (mp_uint_t i = *cur; i < max; i++) {
+        if (MP_MAP_SLOT_IS_FILLED(map, i)) {
+            *cur = i + 1;
+            return &(map->table[i]);
+        }
+    }
+
+    return NULL;
+}
+
+// Print a dict as "{k: v, ...}"; dict subtypes (e.g. OrderedDict) are
+// wrapped as "TypeName({...})".  JSON printing is honoured when enabled.
+STATIC void dict_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
+    bool first = true;
+    if (!(MICROPY_PY_UJSON && kind == PRINT_JSON)) {
+        kind = PRINT_REPR;
+    }
+    if (MICROPY_PY_COLLECTIONS_ORDEREDDICT && self->base.type != &mp_type_dict) {
+        mp_printf(print, "%q(", self->base.type->name);
+    }
+    mp_print_str(print, "{");
+    mp_uint_t cur = 0;
+    mp_map_elem_t *next = NULL;
+    while ((next = dict_iter_next(self, &cur)) != NULL) {
+        if (!first) {
+            mp_print_str(print, ", ");
+        }
+        first = false;
+        mp_obj_print_helper(print, next->key, kind);
+        mp_print_str(print, ": ");
+        mp_obj_print_helper(print, next->value, kind);
+    }
+    mp_print_str(print, "}");
+    if (MICROPY_PY_COLLECTIONS_ORDEREDDICT && self->base.type != &mp_type_dict) {
+        mp_print_str(print, ")");
+    }
+}
+
+// Constructor for dict and its subtypes: builds an empty dict, then defers
+// to dict_update() to absorb the optional positional mapping/iterable and
+// any keyword arguments.
+STATIC mp_obj_t dict_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_obj_t dict_out = mp_obj_new_dict(0);
+    mp_obj_dict_t *dict = MP_OBJ_TO_PTR(dict_out);
+    dict->base.type = type;
+    #if MICROPY_PY_COLLECTIONS_ORDEREDDICT
+    if (type == &mp_type_ordereddict) {
+        dict->map.is_ordered = 1;
+    }
+    #endif
+    if (n_args > 0 || n_kw > 0) {
+        mp_obj_t args2[2] = {dict_out, args[0]}; // args[0] is always valid, even if it's not a positional arg
+        mp_map_t kwargs;
+        mp_map_init_fixed_table(&kwargs, n_kw, args + n_args);
+        dict_update(n_args + 1, args2, &kwargs); // dict_update will check that n_args + 1 == 1 or 2
+    }
+    return dict_out;
+}
+
+// Unary operations on dicts: truthiness and len().
+STATIC mp_obj_t dict_unary_op(mp_uint_t op, mp_obj_t self_in) {
+    mp_obj_dict_t *d = MP_OBJ_TO_PTR(self_in);
+    if (op == MP_UNARY_OP_BOOL) {
+        return mp_obj_new_bool(d->map.used != 0);
+    }
+    if (op == MP_UNARY_OP_LEN) {
+        return MP_OBJ_NEW_SMALL_INT(d->map.used);
+    }
+    return MP_OBJ_NULL; // op not supported
+}
+
+// Binary operations on dicts: `key in d` and `d == other`.
+// Also reached from dict_view_binary_op for `in` on a keys view.
+STATIC mp_obj_t dict_binary_op(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    mp_obj_dict_t *o = MP_OBJ_TO_PTR(lhs_in);
+    switch (op) {
+        case MP_BINARY_OP_IN: {
+            mp_map_elem_t *elem = mp_map_lookup(&o->map, rhs_in, MP_MAP_LOOKUP);
+            return mp_obj_new_bool(elem != NULL);
+        }
+        case MP_BINARY_OP_EQUAL: {
+            #if MICROPY_PY_COLLECTIONS_ORDEREDDICT
+            // ordered-vs-ordered comparison (order-sensitive) not implemented
+            if (MP_UNLIKELY(MP_OBJ_IS_TYPE(lhs_in, &mp_type_ordereddict) && MP_OBJ_IS_TYPE(rhs_in, &mp_type_ordereddict))) {
+                //TODO: implement
+                return MP_OBJ_NULL;
+            } else
+            #endif
+            if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_dict)) {
+                mp_obj_dict_t *rhs = MP_OBJ_TO_PTR(rhs_in);
+                // equal size plus every lhs pair present in rhs => equal dicts
+                if (o->map.used != rhs->map.used) {
+                    return mp_const_false;
+                }
+
+                mp_uint_t cur = 0;
+                mp_map_elem_t *next = NULL;
+                while ((next = dict_iter_next(o, &cur)) != NULL) {
+                    mp_map_elem_t *elem = mp_map_lookup(&rhs->map, next->key, MP_MAP_LOOKUP);
+                    if (elem == NULL || !mp_obj_equal(next->value, elem->value)) {
+                        return mp_const_false;
+                    }
+                }
+                return mp_const_true;
+            } else {
+                // dict is not equal to instance of any other type
+                return mp_const_false;
+            }
+        }
+        default:
+            // op not supported
+            return MP_OBJ_NULL;
+    }
+}
+
+// TODO: Make sure this is inlined in dict_subscr() below.
+// Look up `index` in the dict; raises KeyError when the key is absent.
+mp_obj_t mp_obj_dict_get(mp_obj_t self_in, mp_obj_t index) {
+    mp_obj_dict_t *d = MP_OBJ_TO_PTR(self_in);
+    mp_map_elem_t *elem = mp_map_lookup(&d->map, index, MP_MAP_LOOKUP);
+    if (elem == NULL) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_KeyError, "<value>"));
+    }
+    return elem->value;
+}
+
+// Subscript slot implementing d[key].  `value` encodes the operation:
+// MP_OBJ_SENTINEL -> load, MP_OBJ_NULL -> delete, anything else -> store.
+STATIC mp_obj_t dict_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+    if (value == MP_OBJ_SENTINEL) {
+        // load: raise KeyError when the key is absent
+        mp_obj_dict_t *d = MP_OBJ_TO_PTR(self_in);
+        mp_map_elem_t *elem = mp_map_lookup(&d->map, index, MP_MAP_LOOKUP);
+        if (elem == NULL) {
+            nlr_raise(mp_obj_new_exception_msg(&mp_type_KeyError, "<value>"));
+        }
+        return elem->value;
+    }
+    if (value == MP_OBJ_NULL) {
+        // delete
+        mp_obj_dict_delete(self_in, index);
+    } else {
+        // store
+        mp_obj_dict_store(self_in, index, value);
+    }
+    return mp_const_none;
+}
+
+/******************************************************************************/
+/* dict iterator */
+
+// Iterator over a dict's keys; `cur` is the slot index for dict_iter_next.
+typedef struct _mp_obj_dict_it_t {
+    mp_obj_base_t base;
+    mp_fun_1_t iternext; // per-instance iternext used by mp_type_polymorph_iter
+    mp_obj_t dict;
+    mp_uint_t cur;
+} mp_obj_dict_it_t;
+
+// Advance the dict iterator, yielding the next key or the stop sentinel.
+STATIC mp_obj_t dict_it_iternext(mp_obj_t self_in) {
+    mp_obj_dict_it_t *it = MP_OBJ_TO_PTR(self_in);
+    mp_map_elem_t *elem = dict_iter_next(MP_OBJ_TO_PTR(it->dict), &it->cur);
+    return (elem == NULL) ? MP_OBJ_STOP_ITERATION : elem->key;
+}
+
+// getiter slot: iterating a dict yields its keys.
+STATIC mp_obj_t dict_getiter(mp_obj_t self_in) {
+    mp_obj_dict_it_t *it = m_new_obj(mp_obj_dict_it_t);
+    it->base.type = &mp_type_polymorph_iter;
+    it->iternext = dict_it_iternext;
+    it->cur = 0;
+    it->dict = self_in;
+    return MP_OBJ_FROM_PTR(it);
+}
+
+/******************************************************************************/
+/* dict methods */
+
+// d.clear() -- remove every entry, keeping the dict object itself alive.
+STATIC mp_obj_t dict_clear(mp_obj_t self_in) {
+    assert(MP_OBJ_IS_DICT_TYPE(self_in));
+    mp_obj_dict_t *d = MP_OBJ_TO_PTR(self_in);
+    mp_map_clear(&d->map);
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_clear_obj, dict_clear);
+
+// d.copy() -- shallow copy: a fresh map table is allocated and the element
+// array is memcpy'd, so keys/values are shared but structure is independent.
+STATIC mp_obj_t dict_copy(mp_obj_t self_in) {
+    assert(MP_OBJ_IS_DICT_TYPE(self_in));
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_obj_t other_out = mp_obj_new_dict(self->map.alloc);
+    mp_obj_dict_t *other = MP_OBJ_TO_PTR(other_out);
+    other->base.type = self->base.type;
+    other->map.used = self->map.used;
+    other->map.all_keys_are_qstrs = self->map.all_keys_are_qstrs;
+    // copy is always a heap-allocated (non-fixed) table, even if self's was fixed
+    other->map.is_fixed = 0;
+    other->map.is_ordered = self->map.is_ordered;
+    memcpy(other->map.table, self->map.table, self->map.alloc * sizeof(mp_map_elem_t));
+    return other_out;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_copy_obj, dict_copy);
+
+// this is a classmethod
+// dict.fromkeys(iterable[, value]) -- new dict with keys from the iterable,
+// all mapped to `value` (default None).
+STATIC mp_obj_t dict_fromkeys(size_t n_args, const mp_obj_t *args) {
+    assert(2 <= n_args && n_args <= 3);
+    mp_obj_t iter = mp_getiter(args[1]);
+    // NOTE(review): the length hint is queried on the iterator, not on
+    // args[1] itself; for most iterators this yields MP_OBJ_NULL so the
+    // pre-sizing below rarely triggers -- confirm whether args[1] was intended
+    mp_obj_t len = mp_obj_len_maybe(iter);
+    mp_obj_t value = mp_const_none;
+    mp_obj_t next = MP_OBJ_NULL;
+    mp_obj_t self_out;
+
+    if (n_args > 2) {
+        value = args[2];
+    }
+
+    if (len == MP_OBJ_NULL) {
+        /* object's type doesn't have a __len__ slot */
+        self_out = mp_obj_new_dict(0);
+    } else {
+        self_out = mp_obj_new_dict(MP_OBJ_SMALL_INT_VALUE(len));
+    }
+
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_out);
+    while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+        mp_map_lookup(&self->map, next, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = value;
+    }
+
+    return self_out;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(dict_fromkeys_fun_obj, 2, 3, dict_fromkeys);
+STATIC MP_DEFINE_CONST_CLASSMETHOD_OBJ(dict_fromkeys_obj, MP_ROM_PTR(&dict_fromkeys_fun_obj));
+
+// Shared engine for get/pop/setdefault, selected by lookup_kind:
+//   MP_MAP_LOOKUP                  -> get: return value or deflt/None
+//   MP_MAP_LOOKUP_REMOVE_IF_FOUND  -> pop: remove; KeyError if absent and no deflt
+//   MP_MAP_LOOKUP_ADD_IF_NOT_FOUND -> setdefault: insert deflt/None when absent
+STATIC mp_obj_t dict_get_helper(mp_map_t *self, mp_obj_t key, mp_obj_t deflt, mp_map_lookup_kind_t lookup_kind) {
+    mp_map_elem_t *elem = mp_map_lookup(self, key, lookup_kind);
+    mp_obj_t value;
+    if (elem == NULL || elem->value == MP_OBJ_NULL) {
+        // key was not present (for ADD_IF_NOT_FOUND a fresh empty slot exists)
+        if (deflt == MP_OBJ_NULL) {
+            if (lookup_kind == MP_MAP_LOOKUP_REMOVE_IF_FOUND) {
+                nlr_raise(mp_obj_new_exception_msg(&mp_type_KeyError, "<value>"));
+            } else {
+                value = mp_const_none;
+            }
+        } else {
+            value = deflt;
+        }
+        if (lookup_kind == MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
+            // setdefault: store the default into the newly-added slot
+            elem->value = value;
+        }
+    } else {
+        value = elem->value;
+        if (lookup_kind == MP_MAP_LOOKUP_REMOVE_IF_FOUND) {
+            elem->value = MP_OBJ_NULL; // so that GC can collect the deleted value
+        }
+    }
+    return value;
+}
+
+// d.get(key[, default]) -- plain lookup; never mutates the dict.
+STATIC mp_obj_t dict_get(size_t n_args, const mp_obj_t *args) {
+    assert(2 <= n_args && n_args <= 3);
+    assert(MP_OBJ_IS_DICT_TYPE(args[0]));
+    mp_obj_dict_t *d = MP_OBJ_TO_PTR(args[0]);
+    mp_obj_t deflt = (n_args == 3) ? args[2] : MP_OBJ_NULL;
+    return dict_get_helper(&d->map, args[1], deflt, MP_MAP_LOOKUP);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(dict_get_obj, 2, 3, dict_get);
+
+// d.pop(key[, default]) -- remove and return; KeyError if absent and no default.
+STATIC mp_obj_t dict_pop(size_t n_args, const mp_obj_t *args) {
+    assert(2 <= n_args && n_args <= 3);
+    assert(MP_OBJ_IS_DICT_TYPE(args[0]));
+    mp_obj_dict_t *d = MP_OBJ_TO_PTR(args[0]);
+    mp_obj_t deflt = (n_args == 3) ? args[2] : MP_OBJ_NULL;
+    return dict_get_helper(&d->map, args[1], deflt, MP_MAP_LOOKUP_REMOVE_IF_FOUND);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(dict_pop_obj, 2, 3, dict_pop);
+
+
+// d.setdefault(key[, default]) -- insert default (None if omitted) when the
+// key is absent, then return the stored value.
+STATIC mp_obj_t dict_setdefault(size_t n_args, const mp_obj_t *args) {
+    assert(2 <= n_args && n_args <= 3);
+    assert(MP_OBJ_IS_DICT_TYPE(args[0]));
+    mp_obj_dict_t *d = MP_OBJ_TO_PTR(args[0]);
+    mp_obj_t deflt = (n_args == 3) ? args[2] : MP_OBJ_NULL;
+    return dict_get_helper(&d->map, args[1], deflt, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(dict_setdefault_obj, 2, 3, dict_setdefault);
+
+
+// d.popitem() -- remove and return a (key, value) tuple for the first filled
+// slot in table order; KeyError when the dict is empty.
+STATIC mp_obj_t dict_popitem(mp_obj_t self_in) {
+    assert(MP_OBJ_IS_DICT_TYPE(self_in));
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_uint_t cur = 0;
+    mp_map_elem_t *next = dict_iter_next(self, &cur);
+    if (next == NULL) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_KeyError, "popitem(): dictionary is empty"));
+    }
+    self->map.used--;
+    mp_obj_t items[] = {next->key, next->value};
+    next->key = MP_OBJ_SENTINEL; // must mark key as sentinel to indicate that it was deleted
+    next->value = MP_OBJ_NULL;
+    mp_obj_t tuple = mp_obj_new_tuple(2, items);
+
+    return tuple;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_popitem_obj, dict_popitem);
+
+// d.update([other], **kwargs) -- merge another dict, an iterable of
+// key/value pairs, and/or keyword args into d.  Also used by dict_make_new.
+STATIC mp_obj_t dict_update(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
+    assert(MP_OBJ_IS_DICT_TYPE(args[0]));
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(args[0]);
+
+    mp_arg_check_num(n_args, kwargs->used, 1, 2, true);
+
+    if (n_args == 2) {
+        // given a positional argument
+
+        if (MP_OBJ_IS_DICT_TYPE(args[1])) {
+            // update from other dictionary (make sure other is not self)
+            if (args[1] != args[0]) {
+                mp_uint_t cur = 0;
+                mp_map_elem_t *elem = NULL;
+                while ((elem = dict_iter_next((mp_obj_dict_t*)MP_OBJ_TO_PTR(args[1]), &cur)) != NULL) {
+                    mp_map_lookup(&self->map, elem->key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = elem->value;
+                }
+            }
+        } else {
+            // update from a generic iterable of pairs
+            mp_obj_t iter = mp_getiter(args[1]);
+            mp_obj_t next = MP_OBJ_NULL;
+            while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+                // each element must itself be an iterable of exactly 2 items
+                mp_obj_t inneriter = mp_getiter(next);
+                mp_obj_t key = mp_iternext(inneriter);
+                mp_obj_t value = mp_iternext(inneriter);
+                mp_obj_t stop = mp_iternext(inneriter);
+                if (key == MP_OBJ_STOP_ITERATION
+                    || value == MP_OBJ_STOP_ITERATION
+                    || stop != MP_OBJ_STOP_ITERATION) {
+                    nlr_raise(mp_obj_new_exception_msg(
+                        &mp_type_ValueError,
+                        "dictionary update sequence has the wrong length"));
+                } else {
+                    mp_map_lookup(&self->map, key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = value;
+                }
+            }
+        }
+    }
+
+    // update the dict with any keyword args
+    for (mp_uint_t i = 0; i < kwargs->alloc; i++) {
+        if (MP_MAP_SLOT_IS_FILLED(kwargs, i)) {
+            mp_map_lookup(&self->map, kwargs->table[i].key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = kwargs->table[i].value;
+        }
+    }
+
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_KW(dict_update_obj, 1, dict_update);
+
+
+/******************************************************************************/
+/* dict views */
+
+STATIC const mp_obj_type_t dict_view_type;
+STATIC const mp_obj_type_t dict_view_it_type;
+
+// Which facet of the dict a view/iterator exposes.
+typedef enum _mp_dict_view_kind_t {
+    MP_DICT_VIEW_ITEMS,
+    MP_DICT_VIEW_KEYS,
+    MP_DICT_VIEW_VALUES,
+} mp_dict_view_kind_t;
+
+// Display names indexed by mp_dict_view_kind_t.  Fully const: the entries
+// point at string literals, which must never be written through (writing to
+// a string literal is undefined behaviour), and a const table can be placed
+// in ROM on embedded targets.
+STATIC const char *const mp_dict_view_names[] = {"dict_items", "dict_keys", "dict_values"};
+
+// Iterator over one facet of a dict.
+typedef struct _mp_obj_dict_view_it_t {
+    mp_obj_base_t base;
+    mp_dict_view_kind_t kind;
+    mp_obj_t dict;
+    mp_uint_t cur; // slot index for dict_iter_next
+} mp_obj_dict_view_it_t;
+
+// A live view (items/keys/values) onto a dict.
+typedef struct _mp_obj_dict_view_t {
+    mp_obj_base_t base;
+    mp_obj_t dict;
+    mp_dict_view_kind_t kind;
+} mp_obj_dict_view_t;
+
+// Advance a view iterator, yielding the next item/key/value per self->kind.
+STATIC mp_obj_t dict_view_it_iternext(mp_obj_t self_in) {
+    assert(MP_OBJ_IS_TYPE(self_in, &dict_view_it_type));
+    mp_obj_dict_view_it_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_map_elem_t *next = dict_iter_next(MP_OBJ_TO_PTR(self->dict), &self->cur);
+
+    if (next == NULL) {
+        return MP_OBJ_STOP_ITERATION;
+    } else {
+        switch (self->kind) {
+            case MP_DICT_VIEW_ITEMS:
+            default: {
+                // items view yields (key, value) tuples
+                mp_obj_t items[] = {next->key, next->value};
+                return mp_obj_new_tuple(2, items);
+            }
+            case MP_DICT_VIEW_KEYS:
+                return next->key;
+            case MP_DICT_VIEW_VALUES:
+                return next->value;
+        }
+    }
+}
+
+// Type object for dict-view iterators; iter(it) returns the iterator itself.
+STATIC const mp_obj_type_t dict_view_it_type = {
+    { &mp_type_type },
+    .name = MP_QSTR_iterator,
+    .getiter = mp_identity,
+    .iternext = dict_view_it_iternext,
+};
+
+// getiter slot for views: make a fresh iterator over the view's facet.
+STATIC mp_obj_t dict_view_getiter(mp_obj_t view_in) {
+    assert(MP_OBJ_IS_TYPE(view_in, &dict_view_type));
+    mp_obj_dict_view_t *view = MP_OBJ_TO_PTR(view_in);
+    mp_obj_dict_view_it_t *it = m_new_obj(mp_obj_dict_view_it_t);
+    it->base.type = &dict_view_it_type;
+    it->cur = 0;
+    it->dict = view->dict;
+    it->kind = view->kind;
+    return MP_OBJ_FROM_PTR(it);
+}
+
+// Print a view as e.g. "dict_keys([k1, k2])" by iterating it.
+STATIC void dict_view_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    (void)kind;
+    assert(MP_OBJ_IS_TYPE(self_in, &dict_view_type));
+    mp_obj_dict_view_t *self = MP_OBJ_TO_PTR(self_in);
+    bool first = true;
+    mp_print_str(print, mp_dict_view_names[self->kind]);
+    mp_print_str(print, "([");
+    mp_obj_t self_iter = dict_view_getiter(self_in);
+    mp_obj_t next = MP_OBJ_NULL;
+    while ((next = dict_view_it_iternext(self_iter)) != MP_OBJ_STOP_ITERATION) {
+        if (!first) {
+            mp_print_str(print, ", ");
+        }
+        first = false;
+        mp_obj_print_helper(print, next, PRINT_REPR);
+    }
+    mp_print_str(print, "])");
+}
+
+// Binary ops on a view: only `in` on a keys view is supported (until sets
+// and dicts are refactored); answered by the underlying dict's `in`.
+STATIC mp_obj_t dict_view_binary_op(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    mp_obj_dict_view_t *view = MP_OBJ_TO_PTR(lhs_in);
+    if (view->kind == MP_DICT_VIEW_KEYS && op == MP_BINARY_OP_IN) {
+        return dict_binary_op(op, view->dict, rhs_in);
+    }
+    return MP_OBJ_NULL; // op not supported
+}
+
+// Type object shared by all three view kinds (items/keys/values).
+STATIC const mp_obj_type_t dict_view_type = {
+    { &mp_type_type },
+    .name = MP_QSTR_dict_view,
+    .print = dict_view_print,
+    .binary_op = dict_view_binary_op,
+    .getiter = dict_view_getiter,
+};
+
+// Allocate a new view of the given kind onto `dict`.
+STATIC mp_obj_t mp_obj_new_dict_view(mp_obj_t dict, mp_dict_view_kind_t kind) {
+    mp_obj_dict_view_t *view = m_new_obj(mp_obj_dict_view_t);
+    view->base.type = &dict_view_type;
+    view->kind = kind;
+    view->dict = dict;
+    return MP_OBJ_FROM_PTR(view);
+}
+
+// Common helper for the items()/keys()/values() methods below.
+STATIC mp_obj_t dict_view(mp_obj_t self_in, mp_dict_view_kind_t kind) {
+    assert(MP_OBJ_IS_DICT_TYPE(self_in));
+    return mp_obj_new_dict_view(self_in, kind);
+}
+
+// d.items() -- view of (key, value) pairs.
+STATIC mp_obj_t dict_items(mp_obj_t self_in) {
+    return dict_view(self_in, MP_DICT_VIEW_ITEMS);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_items_obj, dict_items);
+
+// d.keys() -- view of keys.
+STATIC mp_obj_t dict_keys(mp_obj_t self_in) {
+    return dict_view(self_in, MP_DICT_VIEW_KEYS);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_keys_obj, dict_keys);
+
+// d.values() -- view of values.
+STATIC mp_obj_t dict_values(mp_obj_t self_in) {
+    return dict_view(self_in, MP_DICT_VIEW_VALUES);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_values_obj, dict_values);
+
+/******************************************************************************/
+/* dict constructors & public C API */
+
+// Method table for dict (and OrderedDict, which shares it).
+STATIC const mp_rom_map_elem_t dict_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_clear), MP_ROM_PTR(&dict_clear_obj) },
+    { MP_ROM_QSTR(MP_QSTR_copy), MP_ROM_PTR(&dict_copy_obj) },
+    { MP_ROM_QSTR(MP_QSTR_fromkeys), MP_ROM_PTR(&dict_fromkeys_obj) },
+    { MP_ROM_QSTR(MP_QSTR_get), MP_ROM_PTR(&dict_get_obj) },
+    { MP_ROM_QSTR(MP_QSTR_items), MP_ROM_PTR(&dict_items_obj) },
+    { MP_ROM_QSTR(MP_QSTR_keys), MP_ROM_PTR(&dict_keys_obj) },
+    { MP_ROM_QSTR(MP_QSTR_pop), MP_ROM_PTR(&dict_pop_obj) },
+    { MP_ROM_QSTR(MP_QSTR_popitem), MP_ROM_PTR(&dict_popitem_obj) },
+    { MP_ROM_QSTR(MP_QSTR_setdefault), MP_ROM_PTR(&dict_setdefault_obj) },
+    { MP_ROM_QSTR(MP_QSTR_update), MP_ROM_PTR(&dict_update_obj) },
+    { MP_ROM_QSTR(MP_QSTR_values), MP_ROM_PTR(&dict_values_obj) },
+    { MP_ROM_QSTR(MP_QSTR___getitem__), MP_ROM_PTR(&mp_op_getitem_obj) },
+    { MP_ROM_QSTR(MP_QSTR___setitem__), MP_ROM_PTR(&mp_op_setitem_obj) },
+    { MP_ROM_QSTR(MP_QSTR___delitem__), MP_ROM_PTR(&mp_op_delitem_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(dict_locals_dict, dict_locals_dict_table);
+
+// The Python `dict` type.
+const mp_obj_type_t mp_type_dict = {
+    { &mp_type_type },
+    .name = MP_QSTR_dict,
+    .print = dict_print,
+    .make_new = dict_make_new,
+    .unary_op = dict_unary_op,
+    .binary_op = dict_binary_op,
+    .subscr = dict_subscr,
+    .getiter = dict_getiter,
+    .locals_dict = (mp_obj_dict_t*)&dict_locals_dict,
+};
+
+#if MICROPY_PY_COLLECTIONS_ORDEREDDICT
+// OrderedDict subclasses dict (via bases_tuple) and reuses all of dict's
+// slots; ordering comes from the map's is_ordered flag set in dict_make_new.
+STATIC const mp_rom_obj_tuple_t ordereddict_base_tuple = {{&mp_type_tuple}, 1, {MP_ROM_PTR(&mp_type_dict)}};
+
+const mp_obj_type_t mp_type_ordereddict = {
+    { &mp_type_type },
+    .name = MP_QSTR_OrderedDict,
+    .print = dict_print,
+    .make_new = dict_make_new,
+    .unary_op = dict_unary_op,
+    .binary_op = dict_binary_op,
+    .subscr = dict_subscr,
+    .getiter = dict_getiter,
+    .bases_tuple = (mp_obj_tuple_t*)(mp_rom_obj_tuple_t*)&ordereddict_base_tuple,
+    .locals_dict = (mp_obj_dict_t*)&dict_locals_dict,
+};
+#endif
+
+// Initialise an already-allocated dict struct with capacity for n_args entries.
+void mp_obj_dict_init(mp_obj_dict_t *dict, mp_uint_t n_args) {
+    dict->base.type = &mp_type_dict;
+    mp_map_init(&dict->map, n_args);
+}
+
+// Allocate and initialise a new dict with capacity for n_args entries.
+mp_obj_t mp_obj_new_dict(mp_uint_t n_args) {
+    mp_obj_dict_t *d = m_new_obj(mp_obj_dict_t);
+    mp_obj_dict_init(d, n_args);
+    return MP_OBJ_FROM_PTR(d);
+}
+
+// Number of entries currently stored in the dict.
+mp_uint_t mp_obj_dict_len(mp_obj_t self_in) {
+    mp_obj_dict_t *d = MP_OBJ_TO_PTR(self_in);
+    return d->map.used;
+}
+
+// Store key -> value, inserting or overwriting; returns the dict itself.
+mp_obj_t mp_obj_dict_store(mp_obj_t self_in, mp_obj_t key, mp_obj_t value) {
+    assert(MP_OBJ_IS_DICT_TYPE(self_in));
+    mp_obj_dict_t *d = MP_OBJ_TO_PTR(self_in);
+    mp_map_lookup(&d->map, key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = value;
+    return self_in;
+}
+
+// Delete a key, raising KeyError when absent; returns the dict itself.
+mp_obj_t mp_obj_dict_delete(mp_obj_t self_in, mp_obj_t key) {
+    assert(MP_OBJ_IS_DICT_TYPE(self_in));
+    mp_obj_dict_t *d = MP_OBJ_TO_PTR(self_in);
+    dict_get_helper(&d->map, key, MP_OBJ_NULL, MP_MAP_LOOKUP_REMOVE_IF_FOUND);
+    return self_in;
+}
+
+// Expose the dict's underlying map (for direct C-level manipulation).
+mp_map_t *mp_obj_dict_get_map(mp_obj_t self_in) {
+    assert(MP_OBJ_IS_DICT_TYPE(self_in));
+    mp_obj_dict_t *d = MP_OBJ_TO_PTR(self_in);
+    return &d->map;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objenumerate.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,92 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_ENUMERATE
+
+// State for the enumerate() iterator object.
+typedef struct _mp_obj_enumerate_t {
+    mp_obj_base_t base;
+    mp_obj_t iter; // underlying iterator being enumerated
+    mp_int_t cur;  // index to pair with the next item yielded
+} mp_obj_enumerate_t;
+
+STATIC mp_obj_t enumerate_iternext(mp_obj_t self_in);
+
+// Construct enumerate(iterable, start=0).
+// With MICROPY_CPYTHON_COMPAT, keyword arguments are accepted via the
+// mp_arg machinery; otherwise only positional args are handled (and extra
+// keyword args are silently ignored).
+STATIC mp_obj_t enumerate_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+#if MICROPY_CPYTHON_COMPAT
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_iterable, MP_ARG_REQUIRED | MP_ARG_OBJ, {.u_obj = MP_OBJ_NULL} },
+        { MP_QSTR_start, MP_ARG_INT, {.u_int = 0} },
+    };
+
+    // parse args
+    // NOTE: this struct is cast to an mp_arg_val_t array below, so its
+    // members must stay in the same order as allowed_args.
+    struct {
+        mp_arg_val_t iterable, start;
+    } arg_vals;
+    mp_arg_parse_all_kw_array(n_args, n_kw, args,
+        MP_ARRAY_SIZE(allowed_args), allowed_args, (mp_arg_val_t*)&arg_vals);
+
+    // create enumerate object
+    mp_obj_enumerate_t *o = m_new_obj(mp_obj_enumerate_t);
+    o->base.type = type;
+    o->iter = mp_getiter(arg_vals.iterable.u_obj);
+    o->cur = arg_vals.start.u_int;
+#else
+    (void)n_kw;
+    mp_obj_enumerate_t *o = m_new_obj(mp_obj_enumerate_t);
+    o->base.type = type;
+    o->iter = mp_getiter(args[0]);
+    o->cur = n_args > 1 ? mp_obj_get_int(args[1]) : 0;
+#endif
+
+    return MP_OBJ_FROM_PTR(o);
+}
+
+const mp_obj_type_t mp_type_enumerate = {
+    { &mp_type_type },
+    .name = MP_QSTR_enumerate,
+    .make_new = enumerate_make_new,
+    .iternext = enumerate_iternext,
+    .getiter = mp_identity, // enumerate is its own iterator
+};
+
+// Yield the next (index, item) tuple, or propagate end-of-iteration.
+STATIC mp_obj_t enumerate_iternext(mp_obj_t self_in) {
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_enumerate));
+    mp_obj_enumerate_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_obj_t next = mp_iternext(self->iter);
+    if (next == MP_OBJ_STOP_ITERATION) {
+        return MP_OBJ_STOP_ITERATION;
+    } else {
+        // pair the running index with the item, then advance the index
+        mp_obj_t items[] = {MP_OBJ_NEW_SMALL_INT(self->cur++), next};
+        return mp_obj_new_tuple(2, items);
+    }
+}
+
+#endif // MICROPY_PY_BUILTINS_ENUMERATE
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objexcept.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,472 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <stdarg.h>
+#include <assert.h>
+#include <stdio.h>
+
+#include "py/mpstate.h"
+#include "py/objlist.h"
+#include "py/objstr.h"
+#include "py/objtuple.h"
+#include "py/objtype.h"
+#include "py/runtime.h"
+#include "py/gc.h"
+
+// Instance of MemoryError exception - needed by mp_malloc_fail
+const mp_obj_exception_t mp_const_MemoryError_obj = {{&mp_type_MemoryError}, 0, 0, NULL, (mp_obj_tuple_t*)&mp_const_empty_tuple_obj};
+
+// Optionally allocated buffer for storing the first argument of an exception
+// allocated when the heap is locked.
+#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+// Case 1: buffer size is fixed at compile time (buffer declared statically
+// elsewhere — TODO confirm declaration site, likely mpstate).
+# if MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE > 0
+#define mp_emergency_exception_buf_size MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE
+
+void mp_init_emergency_exception_buf(void) {
+    // Nothing to do since the buffer was declared statically. We put this
+    // definition here so that the calling code can call this function
+    // regardless of how its configured (makes the calling code a bit cleaner).
+}
+
+#else
+// Case 2: buffer is sized at run time via mp_alloc_emergency_exception_buf().
+#define mp_emergency_exception_buf_size MP_STATE_VM(mp_emergency_exception_buf_size)
+
+void mp_init_emergency_exception_buf(void) {
+    mp_emergency_exception_buf_size = 0;
+    MP_STATE_VM(mp_emergency_exception_buf) = NULL;
+}
+
+// Replace the emergency buffer with a new heap allocation of `size_in` bytes
+// (size 0 disables it); the previous buffer is freed. Returns None.
+mp_obj_t mp_alloc_emergency_exception_buf(mp_obj_t size_in) {
+    mp_int_t size = mp_obj_get_int(size_in);
+    void *buf = NULL;
+    if (size > 0) {
+        buf = m_new(byte, size);
+    }
+
+    int old_size = mp_emergency_exception_buf_size;
+    void *old_buf = MP_STATE_VM(mp_emergency_exception_buf);
+
+    // Update the 2 variables atomically so that an interrupt can't occur
+    // between the assignments.
+    mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+    mp_emergency_exception_buf_size = size;
+    MP_STATE_VM(mp_emergency_exception_buf) = buf;
+    MICROPY_END_ATOMIC_SECTION(atomic_state);
+
+    if (old_buf != NULL) {
+        m_del(byte, old_buf, old_size);
+    }
+    return mp_const_none;
+}
+#endif
+#endif // MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+
+// Instance of GeneratorExit exception - needed by generator.close()
+// This would belong to objgenerator.c, but to keep mp_obj_exception_t
+// definition module-private so far, have it here.
+const mp_obj_exception_t mp_const_GeneratorExit_obj = {{&mp_type_GeneratorExit}, 0, 0, NULL, (mp_obj_tuple_t*)&mp_const_empty_tuple_obj};
+
+// Print an exception instance. `kind` may carry the PRINT_EXC_SUBCLASS flag,
+// which suppresses printing the type name (the subclass printer already did).
+STATIC void mp_obj_exception_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    mp_obj_exception_t *o = MP_OBJ_TO_PTR(o_in);
+    mp_print_kind_t k = kind & ~PRINT_EXC_SUBCLASS;
+    bool is_subclass = kind & PRINT_EXC_SUBCLASS;
+    if (!is_subclass && (k == PRINT_REPR || k == PRINT_EXC)) {
+        mp_print_str(print, qstr_str(o->base.type->name));
+    }
+
+    if (k == PRINT_EXC) {
+        mp_print_str(print, ": ");
+    }
+
+    if (k == PRINT_STR || k == PRINT_EXC) {
+        if (o->args == NULL || o->args->len == 0) {
+            // no arguments: nothing more to print
+            mp_print_str(print, "");
+            return;
+        } else if (o->args->len == 1) {
+            // single argument prints bare (no surrounding parentheses)
+            mp_obj_print_helper(print, o->args->items[0], PRINT_STR);
+            return;
+        }
+    }
+    // fall through: print the full args tuple (repr-style)
+    mp_obj_tuple_print(print, MP_OBJ_FROM_PTR(o->args), kind);
+}
+
+// Instantiate an exception type with the given positional args.
+// If the heap is exhausted, falls back to the statically-allocated emergency
+// exception object; in that case the args cannot be stored and are dropped.
+mp_obj_t mp_obj_exception_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_arg_check_num(n_args, n_kw, 0, MP_OBJ_FUN_ARGS_MAX, false);
+    mp_obj_exception_t *o = m_new_obj_var_maybe(mp_obj_exception_t, mp_obj_t, 0);
+    if (o == NULL) {
+        // Couldn't allocate heap memory; use local data instead.
+        o = &MP_STATE_VM(mp_emergency_exception_obj);
+        // We can't store any args.
+        o->args = (mp_obj_tuple_t*)&mp_const_empty_tuple_obj;
+    } else {
+        o->args = MP_OBJ_TO_PTR(mp_obj_new_tuple(n_args, args));
+    }
+    o->base.type = type;
+    o->traceback_data = NULL;
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Return the exception's "value": args[0] when present, otherwise None.
+mp_obj_t mp_obj_exception_get_value(mp_obj_t self_in) {
+    mp_obj_exception_t *exc = MP_OBJ_TO_PTR(self_in);
+    return (exc->args->len != 0) ? exc->args->items[0] : mp_const_none;
+}
+
+// Attribute handler: supports loading .args on any exception, and .value on
+// StopIteration (which aliases args[0]). Store/delete is not supported.
+STATIC void exception_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    if (dest[0] != MP_OBJ_NULL) {
+        // not load attribute
+        return;
+    }
+    mp_obj_exception_t *self = MP_OBJ_TO_PTR(self_in);
+    if (attr == MP_QSTR_args) {
+        dest[0] = MP_OBJ_FROM_PTR(self->args);
+    } else if (self->base.type == &mp_type_StopIteration && attr == MP_QSTR_value) {
+        dest[0] = mp_obj_exception_get_value(self_in);
+    }
+}
+
+// BaseException.__init__(*args): replace self.args with a new tuple of args.
+STATIC mp_obj_t exc___init__(size_t n_args, const mp_obj_t *args) {
+    mp_obj_exception_t *self = MP_OBJ_TO_PTR(args[0]);
+    mp_obj_t argst = mp_obj_new_tuple(n_args - 1, args + 1);
+    self->args = MP_OBJ_TO_PTR(argst);
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(exc___init___obj, 1, MP_OBJ_FUN_ARGS_MAX, exc___init__);
+
+// locals dict shared by BaseException (and, via inheritance, its subclasses)
+STATIC const mp_rom_map_elem_t exc_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___init__), MP_ROM_PTR(&exc___init___obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(exc_locals_dict, exc_locals_dict_table);
+
+// Root of the exception hierarchy; all other exception types inherit from it
+// (directly or indirectly) via the bases_tuple mechanism below.
+const mp_obj_type_t mp_type_BaseException = {
+    { &mp_type_type },
+    .name = MP_QSTR_BaseException,
+    .print = mp_obj_exception_print,
+    .make_new = mp_obj_exception_make_new,
+    .attr = exception_attr,
+    .locals_dict = (mp_obj_dict_t*)&exc_locals_dict,
+};
+
+// Defines the ROM single-element bases tuple for base_name; must appear
+// before any MP_DEFINE_EXCEPTION(..., base_name) that references it.
+#define MP_DEFINE_EXCEPTION_BASE(base_name) \
+STATIC const mp_rom_obj_tuple_t mp_type_ ## base_name ## _base_tuple = {{&mp_type_tuple}, 1, {MP_ROM_PTR(&mp_type_ ## base_name)}};\
+
+// Defines an exception type inheriting from base_name; shares the generic
+// print/make_new/attr handlers of BaseException.
+#define MP_DEFINE_EXCEPTION(exc_name, base_name) \
+const mp_obj_type_t mp_type_ ## exc_name = { \
+    { &mp_type_type }, \
+    .name = MP_QSTR_ ## exc_name, \
+    .print = mp_obj_exception_print, \
+    .make_new = mp_obj_exception_make_new, \
+    .attr = exception_attr, \
+    .bases_tuple = (mp_obj_tuple_t*)(mp_rom_obj_tuple_t*)&mp_type_ ## base_name ## _base_tuple, \
+};
+
+// List of all exceptions, arranged as in the table at:
+// http://docs.python.org/3/library/exceptions.html
+// Indentation mirrors the inheritance tree; each MP_DEFINE_EXCEPTION_BASE(x)
+// must precede the MP_DEFINE_EXCEPTION(y, x) lines that inherit from x.
+MP_DEFINE_EXCEPTION_BASE(BaseException)
+MP_DEFINE_EXCEPTION(SystemExit, BaseException)
+MP_DEFINE_EXCEPTION(KeyboardInterrupt, BaseException)
+MP_DEFINE_EXCEPTION(GeneratorExit, BaseException)
+MP_DEFINE_EXCEPTION(Exception, BaseException)
+  MP_DEFINE_EXCEPTION_BASE(Exception)
+  MP_DEFINE_EXCEPTION(StopIteration, Exception)
+  MP_DEFINE_EXCEPTION(ArithmeticError, Exception)
+    MP_DEFINE_EXCEPTION_BASE(ArithmeticError)
+    //MP_DEFINE_EXCEPTION(FloatingPointError, ArithmeticError)
+    MP_DEFINE_EXCEPTION(OverflowError, ArithmeticError)
+    MP_DEFINE_EXCEPTION(ZeroDivisionError, ArithmeticError)
+  MP_DEFINE_EXCEPTION(AssertionError, Exception)
+  MP_DEFINE_EXCEPTION(AttributeError, Exception)
+  //MP_DEFINE_EXCEPTION(BufferError, Exception)
+  //MP_DEFINE_EXCEPTION(EnvironmentError, Exception) use OSError instead
+  MP_DEFINE_EXCEPTION(EOFError, Exception)
+  MP_DEFINE_EXCEPTION(ImportError, Exception)
+  //MP_DEFINE_EXCEPTION(IOError, Exception) use OSError instead
+  MP_DEFINE_EXCEPTION(LookupError, Exception)
+    MP_DEFINE_EXCEPTION_BASE(LookupError)
+    MP_DEFINE_EXCEPTION(IndexError, LookupError)
+    MP_DEFINE_EXCEPTION(KeyError, LookupError)
+  MP_DEFINE_EXCEPTION(MemoryError, Exception)
+  MP_DEFINE_EXCEPTION(NameError, Exception)
+  /*
+    MP_DEFINE_EXCEPTION_BASE(NameError)
+    MP_DEFINE_EXCEPTION(UnboundLocalError, NameError)
+  */
+  MP_DEFINE_EXCEPTION(OSError, Exception)
+#if MICROPY_PY_BUILTINS_TIMEOUTERROR
+    MP_DEFINE_EXCEPTION_BASE(OSError)
+    MP_DEFINE_EXCEPTION(TimeoutError, OSError)
+#endif
+  /*
+    MP_DEFINE_EXCEPTION(BlockingIOError, OSError)
+    MP_DEFINE_EXCEPTION(ChildProcessError, OSError)
+    MP_DEFINE_EXCEPTION(ConnectionError, OSError)
+      MP_DEFINE_EXCEPTION(BrokenPipeError, ConnectionError)
+      MP_DEFINE_EXCEPTION(ConnectionAbortedError, ConnectionError)
+      MP_DEFINE_EXCEPTION(ConnectionRefusedError, ConnectionError)
+      MP_DEFINE_EXCEPTION(ConnectionResetError, ConnectionError)
+    MP_DEFINE_EXCEPTION(InterruptedError, OSError)
+    MP_DEFINE_EXCEPTION(IsADirectoryError, OSError)
+    MP_DEFINE_EXCEPTION(NotADirectoryError, OSError)
+    MP_DEFINE_EXCEPTION(PermissionError, OSError)
+    MP_DEFINE_EXCEPTION(ProcessLookupError, OSError)
+    MP_DEFINE_EXCEPTION(FileExistsError, OSError)
+    MP_DEFINE_EXCEPTION(FileNotFoundError, OSError)
+  MP_DEFINE_EXCEPTION(ReferenceError, Exception)
+  */
+  MP_DEFINE_EXCEPTION(RuntimeError, Exception)
+    MP_DEFINE_EXCEPTION_BASE(RuntimeError)
+    MP_DEFINE_EXCEPTION(NotImplementedError, RuntimeError)
+  MP_DEFINE_EXCEPTION(SyntaxError, Exception)
+    MP_DEFINE_EXCEPTION_BASE(SyntaxError)
+    MP_DEFINE_EXCEPTION(IndentationError, SyntaxError)
+  /*
+      MP_DEFINE_EXCEPTION_BASE(IndentationError)
+      MP_DEFINE_EXCEPTION(TabError, IndentationError)
+      */
+  //MP_DEFINE_EXCEPTION(SystemError, Exception)
+  MP_DEFINE_EXCEPTION(TypeError, Exception)
+#if MICROPY_EMIT_NATIVE
+    MP_DEFINE_EXCEPTION_BASE(TypeError)
+    MP_DEFINE_EXCEPTION(ViperTypeError, TypeError)
+#endif
+  MP_DEFINE_EXCEPTION(ValueError, Exception)
+#if MICROPY_PY_BUILTINS_STR_UNICODE
+    MP_DEFINE_EXCEPTION_BASE(ValueError)
+    MP_DEFINE_EXCEPTION(UnicodeError, ValueError)
+    //TODO: Implement more UnicodeError subclasses which take arguments
+#endif
+  /*
+  MP_DEFINE_EXCEPTION(Warning, Exception)
+    MP_DEFINE_EXCEPTION_BASE(Warning)
+    MP_DEFINE_EXCEPTION(DeprecationWarning, Warning)
+    MP_DEFINE_EXCEPTION(PendingDeprecationWarning, Warning)
+    MP_DEFINE_EXCEPTION(RuntimeWarning, Warning)
+    MP_DEFINE_EXCEPTION(SyntaxWarning, Warning)
+    MP_DEFINE_EXCEPTION(UserWarning, Warning)
+    MP_DEFINE_EXCEPTION(FutureWarning, Warning)
+    MP_DEFINE_EXCEPTION(ImportWarning, Warning)
+    MP_DEFINE_EXCEPTION(UnicodeWarning, Warning)
+    MP_DEFINE_EXCEPTION(BytesWarning, Warning)
+    MP_DEFINE_EXCEPTION(ResourceWarning, Warning)
+  */
+
+// Create an exception instance with no arguments.
+mp_obj_t mp_obj_new_exception(const mp_obj_type_t *exc_type) {
+    return mp_obj_new_exception_args(exc_type, 0, NULL);
+}
+
+// "Optimized" version for common(?) case of having 1 exception arg
+mp_obj_t mp_obj_new_exception_arg1(const mp_obj_type_t *exc_type, mp_obj_t arg) {
+    return mp_obj_new_exception_args(exc_type, 1, &arg);
+}
+
+// Create an exception instance with the given positional arguments.
+mp_obj_t mp_obj_new_exception_args(const mp_obj_type_t *exc_type, mp_uint_t n_args, const mp_obj_t *args) {
+    assert(exc_type->make_new == mp_obj_exception_make_new);
+    return exc_type->make_new(exc_type, n_args, 0, args);
+}
+
+// Create an exception with a fixed message. NOTE: msg is forwarded as a
+// printf-style format string, so a literal '%' in msg would make the varg
+// version attempt formatting with no arguments supplied.
+mp_obj_t mp_obj_new_exception_msg(const mp_obj_type_t *exc_type, const char *msg) {
+    return mp_obj_new_exception_msg_varg(exc_type, msg);
+}
+
+// Create an exception with a formatted message. `fmt` is a printf-style
+// format string; when it contains no '%' it is used directly, avoiding the
+// vstr allocation. If the heap is exhausted, falls back to the static
+// emergency exception object and formats into the user-provided emergency
+// buffer (when configured and large enough).
+mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char *fmt, ...) {
+    // check that the given type is an exception type
+    assert(exc_type->make_new == mp_obj_exception_make_new);
+
+    // make exception object
+    mp_obj_exception_t *o = m_new_obj_var_maybe(mp_obj_exception_t, mp_obj_t, 0);
+    if (o == NULL) {
+        // Couldn't allocate heap memory; use local data instead.
+        // Unfortunately, we won't be able to format the string...
+        o = &MP_STATE_VM(mp_emergency_exception_obj);
+        o->base.type = exc_type;
+        o->traceback_data = NULL;
+        o->args = (mp_obj_tuple_t*)&mp_const_empty_tuple_obj;
+
+#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+        // If the user has provided a buffer, then we try to create a tuple
+        // of length 1, which has a string object and the string data.
+
+        if (mp_emergency_exception_buf_size > (sizeof(mp_obj_tuple_t) + sizeof(mp_obj_str_t) + sizeof(mp_obj_t))) {
+            // lay out [tuple header | str header | string data] in the buffer
+            mp_obj_tuple_t *tuple = (mp_obj_tuple_t *)MP_STATE_VM(mp_emergency_exception_buf);
+            mp_obj_str_t *str = (mp_obj_str_t *)&tuple->items[1];
+
+            tuple->base.type = &mp_type_tuple;
+            tuple->len = 1;
+            tuple->items[0] = MP_OBJ_FROM_PTR(str);
+
+            byte *str_data = (byte *)&str[1];
+            uint max_len = MP_STATE_VM(mp_emergency_exception_buf) + mp_emergency_exception_buf_size
+                         - str_data;
+
+            vstr_t vstr;
+            vstr_init_fixed_buf(&vstr, max_len, (char *)str_data);
+
+            va_list ap;
+            va_start(ap, fmt);
+            vstr_vprintf(&vstr, fmt, ap);
+            va_end(ap);
+
+            str->base.type = &mp_type_str;
+            // BUGFIX: set len before computing the hash. Previously the hash
+            // was computed from str->len while it was still uninitialised
+            // (str points into a raw byte buffer), yielding a bogus hash.
+            str->len = vstr.len;
+            str->hash = qstr_compute_hash(str_data, str->len);
+            str->data = str_data;
+
+            o->args = tuple;
+
+            // Round the used portion up to pointer alignment and give any
+            // remaining buffer space to traceback storage.
+            uint offset = &str_data[str->len] - MP_STATE_VM(mp_emergency_exception_buf);
+            offset += sizeof(void *) - 1;
+            offset &= ~(sizeof(void *) - 1);
+
+            if ((mp_emergency_exception_buf_size - offset) > (sizeof(o->traceback_data[0]) * 3)) {
+                // We have room to store some traceback.
+                o->traceback_data = (size_t*)((byte *)MP_STATE_VM(mp_emergency_exception_buf) + offset);
+                o->traceback_alloc = (MP_STATE_VM(mp_emergency_exception_buf) + mp_emergency_exception_buf_size - (byte *)o->traceback_data) / sizeof(o->traceback_data[0]);
+                o->traceback_len = 0;
+            }
+        }
+#endif // MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+    } else {
+        o->base.type = exc_type;
+        o->traceback_data = NULL;
+        o->args = MP_OBJ_TO_PTR(mp_obj_new_tuple(1, NULL));
+
+        if (fmt == NULL) {
+            // no message
+            assert(0);
+        } else {
+            if (strchr(fmt, '%') == NULL) {
+                // no formatting substitutions, avoid allocating vstr.
+                o->args->items[0] = mp_obj_new_str(fmt, strlen(fmt), false);
+            } else {
+                // render exception message and store as .args[0]
+                va_list ap;
+                vstr_t vstr;
+                vstr_init(&vstr, 16);
+                va_start(ap, fmt);
+                vstr_vprintf(&vstr, fmt, ap);
+                va_end(ap);
+                o->args->items[0] = mp_obj_new_str_from_vstr(&mp_type_str, &vstr);
+            }
+        }
+    }
+
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// return true if the given object is an exception type
+bool mp_obj_is_exception_type(mp_obj_t self_in) {
+    if (MP_OBJ_IS_TYPE(self_in, &mp_type_type)) {
+        // optimisation when self_in is a builtin exception
+        mp_obj_type_t *self = MP_OBJ_TO_PTR(self_in);
+        if (self->make_new == mp_obj_exception_make_new) {
+            return true;
+        }
+    }
+    // slow path: user-defined subclasses of BaseException
+    return mp_obj_is_subclass_fast(self_in, MP_OBJ_FROM_PTR(&mp_type_BaseException));
+}
+
+// return true if the given object is an instance of an exception type
+bool mp_obj_is_exception_instance(mp_obj_t self_in) {
+    return mp_obj_is_exception_type(MP_OBJ_FROM_PTR(mp_obj_get_type(self_in)));
+}
+
+// Return true if exception (type or instance) is a subclass of given
+// exception type. Assumes exc_type is a subclass of BaseException, as
+// defined by mp_obj_is_exception_type(exc_type).
+bool mp_obj_exception_match(mp_obj_t exc, mp_const_obj_t exc_type) {
+    // if exc is an instance of an exception, then extract and use its type
+    if (mp_obj_is_exception_instance(exc)) {
+        exc = MP_OBJ_FROM_PTR(mp_obj_get_type(exc));
+    }
+    return mp_obj_is_subclass_fast(exc, exc_type);
+}
+
+// traceback handling functions
+
+// Bind `self` to the native mp_obj_exception_t underlying self_in: either
+// self_in itself, or (for user subclass instances) its native subobj[0].
+#define GET_NATIVE_EXCEPTION(self, self_in) \
+    /* make sure self_in is an exception instance */ \
+    assert(mp_obj_is_exception_instance(self_in)); \
+    mp_obj_exception_t *self; \
+    if (mp_obj_is_native_exception_instance(self_in)) { \
+        self = MP_OBJ_TO_PTR(self_in); \
+    } else { \
+        self = MP_OBJ_TO_PTR(((mp_obj_instance_t*)MP_OBJ_TO_PTR(self_in))->subobj[0]); \
+    }
+
+// Discard any stored traceback. Does not free the old storage (GC will).
+void mp_obj_exception_clear_traceback(mp_obj_t self_in) {
+    GET_NATIVE_EXCEPTION(self, self_in);
+    // just set the traceback to the null object
+    // we don't want to call any memory management functions here
+    self->traceback_data = NULL;
+}
+
+// Append one traceback entry, stored as a flat (file qstr, line, block qstr)
+// triple in traceback_data.
+void mp_obj_exception_add_traceback(mp_obj_t self_in, qstr file, size_t line, qstr block) {
+    GET_NATIVE_EXCEPTION(self, self_in);
+
+    // append this traceback info to traceback data
+    // if memory allocation fails (eg because gc is locked), just return
+
+    if (self->traceback_data == NULL) {
+        self->traceback_data = m_new_maybe(size_t, 3);
+        if (self->traceback_data == NULL) {
+            return;
+        }
+        self->traceback_alloc = 3;
+        self->traceback_len = 0;
+    } else if (self->traceback_len + 3 > self->traceback_alloc) {
+        // be conservative with growing traceback data
+        size_t *tb_data = m_renew_maybe(size_t, self->traceback_data, self->traceback_alloc, self->traceback_alloc + 3, true);
+        if (tb_data == NULL) {
+            return;
+        }
+        self->traceback_data = tb_data;
+        self->traceback_alloc += 3;
+    }
+
+    size_t *tb_data = &self->traceback_data[self->traceback_len];
+    self->traceback_len += 3;
+    tb_data[0] = file;
+    tb_data[1] = line;
+    tb_data[2] = block;
+}
+
+// Return the raw traceback array: *n is the element count (3 per entry),
+// *values points at the (file, line, block) triples; both zeroed if none.
+void mp_obj_exception_get_traceback(mp_obj_t self_in, size_t *n, size_t **values) {
+    GET_NATIVE_EXCEPTION(self, self_in);
+
+    if (self->traceback_data == NULL) {
+        *n = 0;
+        *values = NULL;
+    } else {
+        *n = self->traceback_len;
+        *values = self->traceback_data;
+    }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objexcept.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,40 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_OBJEXCEPT_H__
+#define __MICROPY_INCLUDED_PY_OBJEXCEPT_H__
+
+#include "py/obj.h"
+#include "py/objtuple.h"
+
+// Concrete representation of a native exception instance.
+typedef struct _mp_obj_exception_t {
+    mp_obj_base_t base;
+    // alloc/len for traceback_data, packed into one machine word; entries
+    // are stored as (file qstr, line, block qstr) triples
+    mp_uint_t traceback_alloc : (BITS_PER_WORD / 2);
+    mp_uint_t traceback_len : (BITS_PER_WORD / 2);
+    size_t *traceback_data;
+    mp_obj_tuple_t *args; // tuple of the constructor arguments
+} mp_obj_exception_t;
+
+#endif // __MICROPY_INCLUDED_PY_OBJEXCEPT_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objfilter.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,72 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_FILTER
+
+// State for the filter(fun, iterable) iterator object.
+typedef struct _mp_obj_filter_t {
+    mp_obj_base_t base;
+    mp_obj_t fun;  // predicate; mp_const_none means "test item truthiness"
+    mp_obj_t iter; // underlying iterator
+} mp_obj_filter_t;
+
+// Construct filter(fun, iterable): exactly 2 positional args, no keywords.
+STATIC mp_obj_t filter_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_arg_check_num(n_args, n_kw, 2, 2, false);
+    mp_obj_filter_t *o = m_new_obj(mp_obj_filter_t);
+    o->base.type = type;
+    o->fun = args[0];
+    o->iter = mp_getiter(args[1]);
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Advance to and return the next item for which the predicate is true
+// (or which is itself truthy, when fun is None).
+STATIC mp_obj_t filter_iternext(mp_obj_t self_in) {
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_filter));
+    mp_obj_filter_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_obj_t next;
+    while ((next = mp_iternext(self->iter)) != MP_OBJ_STOP_ITERATION) {
+        mp_obj_t val;
+        if (self->fun != mp_const_none) {
+            val = mp_call_function_n_kw(self->fun, 1, 0, &next);
+        } else {
+            val = next;
+        }
+        if (mp_obj_is_true(val)) {
+            // note: the original item is returned, not the predicate result
+            return next;
+        }
+    }
+    return MP_OBJ_STOP_ITERATION;
+}
+
+const mp_obj_type_t mp_type_filter = {
+    { &mp_type_type },
+    .name = MP_QSTR_filter,
+    .make_new = filter_make_new,
+    .getiter = mp_identity, // filter is its own iterator
+    .iternext = filter_iternext,
+};
+
+#endif // MICROPY_PY_BUILTINS_FILTER
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objfloat.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,247 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/parsenum.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+
+#include <math.h>
+#include "py/formatfloat.h"
+
+// Boxed float representation, used unless the object encoding is REPR C or D
+// (in those representations the float value is presumably packed into the
+// object word itself — confirm in mpconfig/obj headers).
+#if MICROPY_OBJ_REPR != MICROPY_OBJ_REPR_C && MICROPY_OBJ_REPR != MICROPY_OBJ_REPR_D
+
+typedef struct _mp_obj_float_t {
+    mp_obj_base_t base;
+    mp_float_t value;
+} mp_obj_float_t;
+
+// ROM constants for math.e and math.pi
+const mp_obj_float_t mp_const_float_e_obj = {{&mp_type_float}, M_E};
+const mp_obj_float_t mp_const_float_pi_obj = {{&mp_type_float}, M_PI};
+
+#endif
+
+// Print a float with %g formatting: 7 significant digits for single
+// precision, 16 for double.
+STATIC void float_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_float_t o_val = mp_obj_float_get(o_in);
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+    char buf[16];
+    const int precision = 7;
+#else
+    char buf[32];
+    const int precision = 16;
+#endif
+    mp_format_float(o_val, buf, sizeof(buf), 'g', precision, '\0');
+    mp_print_str(print, buf);
+    if (strchr(buf, '.') == NULL && strchr(buf, 'e') == NULL && strchr(buf, 'n') == NULL) {
+        // Python floats always have decimal point (unless inf or nan)
+        // (the 'n' test matches both "nan" and "inf")
+        mp_print_str(print, ".0");
+    }
+}
+
+// float() constructor: no args -> 0.0; a string is parsed as a decimal;
+// a float passes through; anything else is coerced via mp_obj_get_float.
+STATIC mp_obj_t float_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+    mp_arg_check_num(n_args, n_kw, 0, 1, false);
+
+    switch (n_args) {
+        case 0:
+            return mp_obj_new_float(0);
+
+        case 1:
+        default:
+            if (MP_OBJ_IS_STR(args[0])) {
+                // a string, parse it
+                mp_uint_t l;
+                const char *s = mp_obj_str_get_data(args[0], &l);
+                return mp_parse_num_decimal(s, l, false, false, NULL);
+            } else if (mp_obj_is_float(args[0])) {
+                // a float, just return it
+                return args[0];
+            } else {
+                // something else, try to cast it to a float
+                return mp_obj_new_float(mp_obj_get_float(args[0]));
+            }
+    }
+}
+
+// Unary operators on floats: bool(x), +x, -x; others unsupported.
+STATIC mp_obj_t float_unary_op(mp_uint_t op, mp_obj_t o_in) {
+    mp_float_t val = mp_obj_float_get(o_in);
+    switch (op) {
+        case MP_UNARY_OP_BOOL: return mp_obj_new_bool(val != 0);
+        case MP_UNARY_OP_POSITIVE: return o_in;
+        case MP_UNARY_OP_NEGATIVE: return mp_obj_new_float(-val);
+        default: return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// Binary operators on floats; promotes to complex arithmetic when the RHS
+// is a complex object (float lhs becomes lhs + 0j).
+STATIC mp_obj_t float_binary_op(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    mp_float_t lhs_val = mp_obj_float_get(lhs_in);
+#if MICROPY_PY_BUILTINS_COMPLEX
+    if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_complex)) {
+        return mp_obj_complex_binary_op(op, lhs_val, 0, rhs_in);
+    } else
+#endif
+    {
+        return mp_obj_float_binary_op(op, lhs_val, rhs_in);
+    }
+}
+
+const mp_obj_type_t mp_type_float = {
+    { &mp_type_type },
+    .name = MP_QSTR_float,
+    .print = float_print,
+    .make_new = float_make_new,
+    .unary_op = float_unary_op,
+    .binary_op = float_binary_op,
+};
+
+// Boxed-float accessors; only needed when floats are heap objects
+// (object representations other than C/D).
+#if MICROPY_OBJ_REPR != MICROPY_OBJ_REPR_C && MICROPY_OBJ_REPR != MICROPY_OBJ_REPR_D
+
+// Allocate a new boxed float holding `value`.
+mp_obj_t mp_obj_new_float(mp_float_t value) {
+    mp_obj_float_t *o = m_new(mp_obj_float_t, 1);
+    o->base.type = &mp_type_float;
+    o->value = value;
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Extract the C value from a float object.
+mp_float_t mp_obj_float_get(mp_obj_t self_in) {
+    assert(mp_obj_is_float(self_in));
+    mp_obj_float_t *self = MP_OBJ_TO_PTR(self_in);
+    return self->value;
+}
+
+#endif
+
+// Compute floor-division and modulo in place: on return *x holds x//y and
+// *y holds x%y.
+STATIC void mp_obj_float_divmod(mp_float_t *x, mp_float_t *y) {
+    // logic here follows that of CPython
+    // https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations
+    // x == (x//y)*y + (x%y)
+    // divmod(x, y) == (x//y, x%y)
+    mp_float_t mod = MICROPY_FLOAT_C_FUN(fmod)(*x, *y);
+    mp_float_t div = (*x - mod) / *y;
+
+    // Python specs require that mod has same sign as second operand
+    if (mod == 0.0) {
+        mod = MICROPY_FLOAT_C_FUN(copysign)(0.0, *y);
+    } else {
+        if ((mod < 0.0) != (*y < 0.0)) {
+            mod += *y;
+            div -= 1.0;
+        }
+    }
+
+    mp_float_t floordiv;
+    if (div == 0.0) {
+        // if division is zero, take the correct sign of zero
+        floordiv = MICROPY_FLOAT_C_FUN(copysign)(0.0, *x / *y);
+    } else {
+        // Python specs require that x == (x//y)*y + (x%y)
+        floordiv = MICROPY_FLOAT_C_FUN(floor)(div);
+        // div should be integral; compensate for floating-point rounding
+        // in the (x - mod) / y computation above
+        if (div - floordiv > 0.5) {
+            floordiv += 1.0;
+        }
+    }
+
+    // return results
+    *x = floordiv;
+    *y = mod;
+}
+
+// Float binary operations with lhs already extracted as a C float; the RHS
+// is coerced to float. Raises ZeroDivisionError for //, /, %, divmod with a
+// zero RHS; returns MP_OBJ_NULL for unsupported ops.
+mp_obj_t mp_obj_float_binary_op(mp_uint_t op, mp_float_t lhs_val, mp_obj_t rhs_in) {
+    mp_float_t rhs_val = mp_obj_get_float(rhs_in); // can be any type, this function will convert to float (if possible)
+    switch (op) {
+        case MP_BINARY_OP_ADD:
+        case MP_BINARY_OP_INPLACE_ADD: lhs_val += rhs_val; break;
+        case MP_BINARY_OP_SUBTRACT:
+        case MP_BINARY_OP_INPLACE_SUBTRACT: lhs_val -= rhs_val; break;
+        case MP_BINARY_OP_MULTIPLY:
+        case MP_BINARY_OP_INPLACE_MULTIPLY: lhs_val *= rhs_val; break;
+        case MP_BINARY_OP_FLOOR_DIVIDE:
+        case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE:
+            if (rhs_val == 0) {
+                zero_division_error:
+                nlr_raise(mp_obj_new_exception_msg(&mp_type_ZeroDivisionError, "division by zero"));
+            }
+            // Python specs require that x == (x//y)*y + (x%y) so we must
+            // call divmod to compute the correct floor division, which
+            // returns the floor divide in lhs_val.
+            mp_obj_float_divmod(&lhs_val, &rhs_val);
+            break;
+        case MP_BINARY_OP_TRUE_DIVIDE:
+        case MP_BINARY_OP_INPLACE_TRUE_DIVIDE:
+            if (rhs_val == 0) {
+                goto zero_division_error;
+            }
+            lhs_val /= rhs_val;
+            break;
+        case MP_BINARY_OP_MODULO:
+        case MP_BINARY_OP_INPLACE_MODULO:
+            if (rhs_val == 0) {
+                goto zero_division_error;
+            }
+            lhs_val = MICROPY_FLOAT_C_FUN(fmod)(lhs_val, rhs_val);
+            // Python specs require that mod has same sign as second operand
+            if (lhs_val == 0.0) {
+                lhs_val = MICROPY_FLOAT_C_FUN(copysign)(0.0, rhs_val);
+            } else {
+                if ((lhs_val < 0.0) != (rhs_val < 0.0)) {
+                    lhs_val += rhs_val;
+                }
+            }
+            break;
+        case MP_BINARY_OP_POWER:
+        case MP_BINARY_OP_INPLACE_POWER: lhs_val = MICROPY_FLOAT_C_FUN(pow)(lhs_val, rhs_val); break;
+        case MP_BINARY_OP_DIVMOD: {
+            if (rhs_val == 0) {
+                goto zero_division_error;
+            }
+            mp_obj_float_divmod(&lhs_val, &rhs_val);
+            // after divmod: lhs_val = quotient, rhs_val = remainder
+            mp_obj_t tuple[2] = {
+                mp_obj_new_float(lhs_val),
+                mp_obj_new_float(rhs_val),
+            };
+            return mp_obj_new_tuple(2, tuple);
+        }
+        case MP_BINARY_OP_LESS: return mp_obj_new_bool(lhs_val < rhs_val);
+        case MP_BINARY_OP_MORE: return mp_obj_new_bool(lhs_val > rhs_val);
+        case MP_BINARY_OP_EQUAL: return mp_obj_new_bool(lhs_val == rhs_val);
+        case MP_BINARY_OP_LESS_EQUAL: return mp_obj_new_bool(lhs_val <= rhs_val);
+        case MP_BINARY_OP_MORE_EQUAL: return mp_obj_new_bool(lhs_val >= rhs_val);
+
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+    return mp_obj_new_float(lhs_val);
+}
+
+#endif // MICROPY_PY_BUILTINS_FLOAT
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objfun.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,560 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/objtuple.h"
+#include "py/objfun.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+#include "py/bc.h"
+#include "py/stackctrl.h"
+
+#if 0 // print debugging info
+#define DEBUG_PRINT (1)
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+// Note: the "name" entry in mp_obj_type_t for a function type must be
+// MP_QSTR_function because it is used to determine if an object is of generic
+// function type.
+
+/******************************************************************************/
+/* builtin functions */
+
+// mp_obj_fun_builtin_t defined in obj.h
+
+// Call handler for built-in (C-implemented) functions.  After validating the
+// argument count, dispatches to the stored C function pointer using the
+// calling convention selected by the function's declared arity:
+// fixed 0..3 positional args, variable args, or variable args + keywords.
+STATIC mp_obj_t fun_builtin_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_fun_builtin));
+    mp_obj_fun_builtin_t *self = MP_OBJ_TO_PTR(self_in);
+
+    // check number of arguments
+    mp_arg_check_num(n_args, n_kw, self->n_args_min, self->n_args_max, self->is_kw);
+
+    if (self->is_kw) {
+        // function allows keywords
+
+        // we create a map directly from the given args array; the keyword
+        // (key, value) pairs are stored immediately after the positional args
+        mp_map_t kw_args;
+        mp_map_init_fixed_table(&kw_args, n_kw, args + n_args);
+
+        return self->fun.kw(n_args, args, &kw_args);
+
+    } else if (self->n_args_min <= 3 && self->n_args_min == self->n_args_max) {
+        // function requires a fixed number of arguments
+
+        // dispatch function call
+        switch (self->n_args_min) {
+            case 0:
+                return self->fun._0();
+
+            case 1:
+                return self->fun._1(args[0]);
+
+            case 2:
+                return self->fun._2(args[0], args[1]);
+
+            case 3:
+            default:
+                return self->fun._3(args[0], args[1], args[2]);
+        }
+
+    } else {
+        // function takes a variable number of arguments, but no keywords
+
+        return self->fun.var(n_args, args);
+    }
+}
+
+// Type object for built-in functions (name must be MP_QSTR_function; see
+// the note at the top of this file).
+const mp_obj_type_t mp_type_fun_builtin = {
+    { &mp_type_type },
+    .name = MP_QSTR_function,
+    .call = fun_builtin_call,
+    .unary_op = mp_generic_unary_op,
+};
+
+/******************************************************************************/
+/* byte code functions */
+
+// Extract the function name (as a qstr) from a bytecode code_info block.
+qstr mp_obj_code_get_name(const byte *code_info) {
+    mp_decode_uint(&code_info); // skip code_info_size entry
+    #if MICROPY_PERSISTENT_CODE
+    // name is stored as a fixed 16-bit little-endian qstr value
+    return code_info[0] | (code_info[1] << 8);
+    #else
+    // name is stored as a variable-length encoded uint
+    return mp_decode_uint(&code_info);
+    #endif
+}
+
+#if MICROPY_EMIT_NATIVE
+STATIC const mp_obj_type_t mp_type_fun_native;
+#endif
+
+// Return the name of a function object as a qstr.  For bytecode functions
+// the name lives in the code_info block, which is reached by skipping the
+// bytecode prelude fields; native functions have no stored name, so the
+// empty qstr is returned for them.
+qstr mp_obj_fun_get_name(mp_const_obj_t fun_in) {
+    const mp_obj_fun_bc_t *fun = MP_OBJ_TO_PTR(fun_in);
+    #if MICROPY_EMIT_NATIVE
+    if (fun->base.type == &mp_type_fun_native) {
+        // TODO native functions don't have name stored
+        return MP_QSTR_;
+    }
+    #endif
+
+    // skip over the prelude to reach the code_info block
+    const byte *bc = fun->bytecode;
+    mp_decode_uint(&bc); // skip n_state
+    mp_decode_uint(&bc); // skip n_exc_stack
+    bc++; // skip scope_params
+    bc++; // skip n_pos_args
+    bc++; // skip n_kwonly_args
+    bc++; // skip n_def_pos_args
+    return mp_obj_code_get_name(bc);
+}
+
+#if MICROPY_CPYTHON_COMPAT
+// Print a bytecode function in CPython-compatible form:
+// <function NAME at 0xADDR>
+STATIC void fun_bc_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_fun_bc_t *o = MP_OBJ_TO_PTR(o_in);
+    mp_printf(print, "<function %q at 0x%p>", mp_obj_fun_get_name(o_in), o);
+}
+#endif
+
+#if DEBUG_PRINT
+// Debug helper: print an array of object pointers on a single line,
+// prefixed by the array's own address.
+STATIC void dump_args(const mp_obj_t *a, mp_uint_t sz) {
+    DEBUG_printf("%p: ", a);
+    for (mp_uint_t i = 0; i < sz; i++) {
+        DEBUG_printf("%p ", a[i]);
+    }
+    DEBUG_printf("\n");
+}
+#else
+#define dump_args(...) (void)0
+#endif
+
+// With this macro you can tune the maximum number of function state bytes
+// that will be allocated on the stack. Any function that needs more
+// than this will try to use the heap, with fallback to stack allocation.
+#define VM_MAX_STATE_ON_STACK (11 * sizeof(mp_uint_t))
+
+// Set this to enable a simple stack overflow check.
+#define VM_DETECT_STACK_OVERFLOW (0)
+
+#if MICROPY_STACKLESS
+// Allocate and initialise a heap code state for a bytecode function call
+// (used by the stackless VM).  Returns NULL if the allocation fails.  On
+// success the globals context has been switched to the function's globals;
+// the caller is responsible for restoring code_state->old_globals once
+// execution of the function finishes.
+mp_code_state *mp_obj_fun_bc_prepare_codestate(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    MP_STACK_CHECK();
+    mp_obj_fun_bc_t *self = MP_OBJ_TO_PTR(self_in);
+
+    // get start of bytecode
+    const byte *ip = self->bytecode;
+
+    // bytecode prelude: state size and exception stack size
+    size_t n_state = mp_decode_uint(&ip);
+    size_t n_exc_stack = mp_decode_uint(&ip);
+
+    // allocate state for locals and stack
+    size_t state_size = n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t);
+    mp_code_state *code_state;
+    code_state = m_new_obj_var_maybe(mp_code_state, byte, state_size);
+    if (!code_state) {
+        return NULL;
+    }
+
+    code_state->ip = (byte*)(ip - self->bytecode); // offset to after n_state/n_exc_stack
+    code_state->n_state = n_state;
+    mp_setup_code_state(code_state, self, n_args, n_kw, args);
+
+    // execute the byte code with the correct globals context
+    code_state->old_globals = mp_globals_get();
+    mp_globals_set(self->globals);
+
+    return code_state;
+}
+#endif
+
+// Call handler for bytecode functions: allocates the VM state (on the C
+// stack via alloca when it fits in VM_MAX_STATE_ON_STACK, otherwise on the
+// heap), sets up the code state, executes the bytecode with the function's
+// globals in effect, then returns the result or re-raises an uncaught
+// exception.
+STATIC mp_obj_t fun_bc_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    MP_STACK_CHECK();
+
+    DEBUG_printf("Input n_args: " UINT_FMT ", n_kw: " UINT_FMT "\n", n_args, n_kw);
+    DEBUG_printf("Input pos args: ");
+    dump_args(args, n_args);
+    DEBUG_printf("Input kw args: ");
+    dump_args(args + n_args, n_kw * 2);
+    mp_obj_fun_bc_t *self = MP_OBJ_TO_PTR(self_in);
+    DEBUG_printf("Func n_def_args: %d\n", self->n_def_args);
+
+    // get start of bytecode
+    const byte *ip = self->bytecode;
+
+    // bytecode prelude: state size and exception stack size
+    mp_uint_t n_state = mp_decode_uint(&ip);
+    mp_uint_t n_exc_stack = mp_decode_uint(&ip);
+
+#if VM_DETECT_STACK_OVERFLOW
+    // reserve one extra sentinel slot, checked after execution below
+    n_state += 1;
+#endif
+
+    // allocate state for locals and stack
+    mp_uint_t state_size = n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t);
+    mp_code_state *code_state = NULL;
+    if (state_size > VM_MAX_STATE_ON_STACK) {
+        // large state: try the heap first (may return NULL on OOM)
+        code_state = m_new_obj_var_maybe(mp_code_state, byte, state_size);
+    }
+    if (code_state == NULL) {
+        // small state, or heap allocation failed: use the C stack
+        code_state = alloca(sizeof(mp_code_state) + state_size);
+        state_size = 0; // indicate that we allocated using alloca
+    }
+
+    code_state->ip = (byte*)(ip - self->bytecode); // offset to after n_state/n_exc_stack
+    code_state->n_state = n_state;
+    mp_setup_code_state(code_state, self, n_args, n_kw, args);
+
+    // execute the byte code with the correct globals context
+    code_state->old_globals = mp_globals_get();
+    mp_globals_set(self->globals);
+    mp_vm_return_kind_t vm_return_kind = mp_execute_bytecode(code_state, MP_OBJ_NULL);
+    mp_globals_set(code_state->old_globals);
+
+#if VM_DETECT_STACK_OVERFLOW
+    if (vm_return_kind == MP_VM_RETURN_NORMAL) {
+        if (code_state->sp < code_state->state) {
+            printf("VM stack underflow: " INT_FMT "\n", code_state->sp - code_state->state);
+            assert(0);
+        }
+    }
+    // We can't check the case when an exception is returned in state[n_state - 1]
+    // and there are no arguments, because in this case our detection slot may have
+    // been overwritten by the returned exception (which is allowed).
+    if (!(vm_return_kind == MP_VM_RETURN_EXCEPTION && self->n_pos_args + self->n_kwonly_args == 0)) {
+        // Just check to see that we have at least 1 null object left in the state.
+        bool overflow = true;
+        for (mp_uint_t i = 0; i < n_state - self->n_pos_args - self->n_kwonly_args; i++) {
+            if (code_state->state[i] == MP_OBJ_NULL) {
+                overflow = false;
+                break;
+            }
+        }
+        if (overflow) {
+            printf("VM stack overflow state=%p n_state+1=" UINT_FMT "\n", code_state->state, n_state);
+            assert(0);
+        }
+    }
+#endif
+
+    mp_obj_t result;
+    switch (vm_return_kind) {
+        case MP_VM_RETURN_NORMAL:
+            // return value is in *sp
+            result = *code_state->sp;
+            break;
+
+        case MP_VM_RETURN_EXCEPTION:
+            // return value is in state[n_state - 1]
+            result = code_state->state[n_state - 1];
+            break;
+
+        case MP_VM_RETURN_YIELD: // byte-code shouldn't yield
+        default:
+            assert(0);
+            result = mp_const_none;
+            vm_return_kind = MP_VM_RETURN_NORMAL;
+            break;
+    }
+
+    // free the state if it was allocated on the heap
+    // (state_size was zeroed above when alloca was used)
+    if (state_size != 0) {
+        m_del_var(mp_code_state, byte, state_size, code_state);
+    }
+
+    if (vm_return_kind == MP_VM_RETURN_NORMAL) {
+        return result;
+    } else { // MP_VM_RETURN_EXCEPTION
+        nlr_raise(result);
+    }
+}
+
+#if MICROPY_PY_FUNCTION_ATTRS
+// Attribute handler for bytecode functions; only loading __name__ is
+// supported (dest[0] == MP_OBJ_NULL indicates a load operation).
+STATIC void fun_bc_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    if (dest[0] == MP_OBJ_NULL && attr == MP_QSTR___name__) {
+        dest[0] = MP_OBJ_NEW_QSTR(mp_obj_fun_get_name(self_in));
+    }
+}
+#endif
+
+// Type object for bytecode functions.
+const mp_obj_type_t mp_type_fun_bc = {
+    { &mp_type_type },
+    .name = MP_QSTR_function,
+#if MICROPY_CPYTHON_COMPAT
+    .print = fun_bc_print,
+#endif
+    .call = fun_bc_call,
+    .unary_op = mp_generic_unary_op,
+#if MICROPY_PY_FUNCTION_ATTRS
+    .attr = fun_bc_attr,
+#endif
+};
+
+// Create a new bytecode function object.  def_args_in is an optional tuple
+// of default positional-argument values (or MP_OBJ_NULL), def_kw_args an
+// optional dict of default keyword-argument values (or MP_OBJ_NULL); both
+// are copied/stored into the variable-length extra_args tail (see the
+// layout comment on mp_obj_fun_bc_t in objfun.h).
+mp_obj_t mp_obj_new_fun_bc(mp_obj_t def_args_in, mp_obj_t def_kw_args, const byte *code, const mp_uint_t *const_table) {
+    mp_uint_t n_def_args = 0;
+    mp_uint_t n_extra_args = 0;
+    mp_obj_tuple_t *def_args = MP_OBJ_TO_PTR(def_args_in);
+    if (def_args_in != MP_OBJ_NULL) {
+        assert(MP_OBJ_IS_TYPE(def_args_in, &mp_type_tuple));
+        n_def_args = def_args->len;
+        n_extra_args = def_args->len;
+    }
+    if (def_kw_args != MP_OBJ_NULL) {
+        n_extra_args += 1; // one extra slot for the kw-defaults dict
+    }
+    mp_obj_fun_bc_t *o = m_new_obj_var(mp_obj_fun_bc_t, mp_obj_t, n_extra_args);
+    o->base.type = &mp_type_fun_bc;
+    o->globals = mp_globals_get(); // capture the defining globals context
+    o->bytecode = code;
+    o->const_table = const_table;
+    if (def_args != NULL) {
+        memcpy(o->extra_args, def_args->items, n_def_args * sizeof(mp_obj_t));
+    }
+    if (def_kw_args != MP_OBJ_NULL) {
+        o->extra_args[n_def_args] = def_kw_args;
+    }
+    return MP_OBJ_FROM_PTR(o);
+}
+
+/******************************************************************************/
+/* native functions */
+
+#if MICROPY_EMIT_NATIVE
+
+// Call handler for native-emitted functions: the machine-code entry point is
+// stored in the bytecode field, so make it callable and jump straight to it.
+STATIC mp_obj_t fun_native_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    MP_STACK_CHECK();
+    mp_obj_fun_bc_t *fun_obj = self_in;
+    mp_call_fun_t entry = MICROPY_MAKE_POINTER_CALLABLE((void*)fun_obj->bytecode);
+    return entry(self_in, n_args, n_kw, args);
+}
+
+// Type object for native-emitted functions.
+STATIC const mp_obj_type_t mp_type_fun_native = {
+    { &mp_type_type },
+    .name = MP_QSTR_function,
+    .call = fun_native_call,
+    .unary_op = mp_generic_unary_op,
+};
+
+// Create a native-code function object.  Reuses the bytecode function
+// layout, storing the machine-code pointer where the bytecode would go.
+mp_obj_t mp_obj_new_fun_native(mp_obj_t def_args_in, mp_obj_t def_kw_args, const void *fun_data, const mp_uint_t *const_table) {
+    mp_obj_fun_bc_t *fn = mp_obj_new_fun_bc(def_args_in, def_kw_args, (const byte*)fun_data, const_table);
+    fn->base.type = &mp_type_fun_native; // retype from fun_bc to fun_native
+    return fn;
+}
+
+#endif // MICROPY_EMIT_NATIVE
+
+/******************************************************************************/
+/* viper functions */
+
+#if MICROPY_EMIT_NATIVE
+
+// A viper (typed native) function object.
+typedef struct _mp_obj_fun_viper_t {
+    mp_obj_base_t base;
+    mp_uint_t n_args;   // number of arguments the function accepts
+    void *fun_data; // GC must be able to trace this pointer
+    mp_uint_t type_sig; // packed return/argument type signature
+} mp_obj_fun_viper_t;
+
+// Raw call signatures for viper functions of 0..4 machine-word arguments.
+typedef mp_uint_t (*viper_fun_0_t)(void);
+typedef mp_uint_t (*viper_fun_1_t)(mp_uint_t);
+typedef mp_uint_t (*viper_fun_2_t)(mp_uint_t, mp_uint_t);
+typedef mp_uint_t (*viper_fun_3_t)(mp_uint_t, mp_uint_t, mp_uint_t);
+typedef mp_uint_t (*viper_fun_4_t)(mp_uint_t, mp_uint_t, mp_uint_t, mp_uint_t);
+
+// Call handler for viper functions: converts each Python argument to its
+// declared native machine type, calls the machine code, and converts the
+// native result back to an object.  The per-argument shifts suggest each
+// argument's type occupies 4 bits of type_sig, with the low bits holding
+// the return type -- confirm against mp_convert_obj_to_native.
+STATIC mp_obj_t fun_viper_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_obj_fun_viper_t *self = self_in;
+
+    mp_arg_check_num(n_args, n_kw, self->n_args, self->n_args, false);
+
+    void *fun = MICROPY_MAKE_POINTER_CALLABLE(self->fun_data);
+
+    mp_uint_t ret;
+    if (n_args == 0) {
+        ret = ((viper_fun_0_t)fun)();
+    } else if (n_args == 1) {
+        ret = ((viper_fun_1_t)fun)(mp_convert_obj_to_native(args[0], self->type_sig >> 4));
+    } else if (n_args == 2) {
+        ret = ((viper_fun_2_t)fun)(mp_convert_obj_to_native(args[0], self->type_sig >> 4), mp_convert_obj_to_native(args[1], self->type_sig >> 8));
+    } else if (n_args == 3) {
+        ret = ((viper_fun_3_t)fun)(mp_convert_obj_to_native(args[0], self->type_sig >> 4), mp_convert_obj_to_native(args[1], self->type_sig >> 8), mp_convert_obj_to_native(args[2], self->type_sig >> 12));
+    } else if (n_args == 4) {
+        ret = ((viper_fun_4_t)fun)(
+            mp_convert_obj_to_native(args[0], self->type_sig >> 4),
+            mp_convert_obj_to_native(args[1], self->type_sig >> 8),
+            mp_convert_obj_to_native(args[2], self->type_sig >> 12),
+            mp_convert_obj_to_native(args[3], self->type_sig >> 16)
+        );
+    } else {
+        // TODO 5 or more arguments not supported for viper call
+        assert(0);
+        ret = 0;
+    }
+
+    return mp_convert_native_to_obj(ret, self->type_sig);
+}
+
+// Type object for viper functions.
+STATIC const mp_obj_type_t mp_type_fun_viper = {
+    { &mp_type_type },
+    .name = MP_QSTR_function,
+    .call = fun_viper_call,
+    .unary_op = mp_generic_unary_op,
+};
+
+// Create a viper (typed native) function object from its machine code and
+// packed type signature.
+mp_obj_t mp_obj_new_fun_viper(mp_uint_t n_args, void *fun_data, mp_uint_t type_sig) {
+    mp_obj_fun_viper_t *fn = m_new_obj(mp_obj_fun_viper_t);
+    fn->base.type = &mp_type_fun_viper;
+    fn->n_args = n_args;
+    fn->fun_data = fun_data;
+    fn->type_sig = type_sig;
+    return fn;
+}
+
+#endif // MICROPY_EMIT_NATIVE
+
+/******************************************************************************/
+/* inline assembler functions */
+
+#if MICROPY_EMIT_INLINE_THUMB
+
+// An inline-assembler function object.
+typedef struct _mp_obj_fun_asm_t {
+    mp_obj_base_t base;
+    mp_uint_t n_args;   // number of arguments the function accepts
+    void *fun_data; // GC must be able to trace this pointer
+    mp_uint_t type_sig; // return type signature
+} mp_obj_fun_asm_t;
+
+// Raw call signatures for asm functions of 0..4 machine-word arguments.
+typedef mp_uint_t (*inline_asm_fun_0_t)(void);
+typedef mp_uint_t (*inline_asm_fun_1_t)(mp_uint_t);
+typedef mp_uint_t (*inline_asm_fun_2_t)(mp_uint_t, mp_uint_t);
+typedef mp_uint_t (*inline_asm_fun_3_t)(mp_uint_t, mp_uint_t, mp_uint_t);
+typedef mp_uint_t (*inline_asm_fun_4_t)(mp_uint_t, mp_uint_t, mp_uint_t, mp_uint_t);
+
+// convert a Micro Python object to a sensible machine-word value for inline
+// asm: small ints/bools/None become their integer value, strings and
+// tuples/lists/buffers become a pointer to their data, anything else is
+// passed as the object pointer itself
+STATIC mp_uint_t convert_obj_for_inline_asm(mp_obj_t obj) {
+    // TODO for byte_array, pass pointer to the array
+    if (MP_OBJ_IS_SMALL_INT(obj)) {
+        return MP_OBJ_SMALL_INT_VALUE(obj);
+    } else if (obj == mp_const_none) {
+        return 0;
+    } else if (obj == mp_const_false) {
+        return 0;
+    } else if (obj == mp_const_true) {
+        return 1;
+    } else if (MP_OBJ_IS_TYPE(obj, &mp_type_int)) {
+        return mp_obj_int_get_truncated(obj);
+    } else if (MP_OBJ_IS_STR(obj)) {
+        // pointer to the string (it's probably constant though!)
+        mp_uint_t l;
+        return (mp_uint_t)mp_obj_str_get_data(obj, &l);
+    } else {
+        mp_obj_type_t *type = mp_obj_get_type(obj);
+        // dummy branch so the following cases can be chained under
+        // optional #if blocks without special-casing the first one
+        if (0) {
+#if MICROPY_PY_BUILTINS_FLOAT
+        } else if (type == &mp_type_float) {
+            // convert float to int (could also pass in float registers)
+            return (mp_int_t)mp_obj_float_get(obj);
+#endif
+        } else if (type == &mp_type_tuple) {
+            // pointer to start of tuple (could pass length, but then could use len(x) for that)
+            mp_uint_t len;
+            mp_obj_t *items;
+            mp_obj_tuple_get(obj, &len, &items);
+            return (mp_uint_t)items;
+        } else if (type == &mp_type_list) {
+            // pointer to start of list (could pass length, but then could use len(x) for that)
+            mp_uint_t len;
+            mp_obj_t *items;
+            mp_obj_list_get(obj, &len, &items);
+            return (mp_uint_t)items;
+        } else {
+            mp_buffer_info_t bufinfo;
+            if (mp_get_buffer(obj, &bufinfo, MP_BUFFER_WRITE)) {
+                // supports the buffer protocol, return a pointer to the data
+                return (mp_uint_t)bufinfo.buf;
+            } else {
+                // just pass along a pointer to the object
+                return (mp_uint_t)obj;
+            }
+        }
+    }
+}
+
+// Call handler for inline-assembler functions: converts each argument with
+// convert_obj_for_inline_asm, calls the machine code, and converts the
+// native result back to an object using the stored type signature.
+STATIC mp_obj_t fun_asm_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_obj_fun_asm_t *self = self_in;
+
+    mp_arg_check_num(n_args, n_kw, self->n_args, self->n_args, false);
+
+    void *fun = MICROPY_MAKE_POINTER_CALLABLE(self->fun_data);
+
+    mp_uint_t ret;
+    if (n_args == 0) {
+        ret = ((inline_asm_fun_0_t)fun)();
+    } else if (n_args == 1) {
+        ret = ((inline_asm_fun_1_t)fun)(convert_obj_for_inline_asm(args[0]));
+    } else if (n_args == 2) {
+        ret = ((inline_asm_fun_2_t)fun)(convert_obj_for_inline_asm(args[0]), convert_obj_for_inline_asm(args[1]));
+    } else if (n_args == 3) {
+        ret = ((inline_asm_fun_3_t)fun)(convert_obj_for_inline_asm(args[0]), convert_obj_for_inline_asm(args[1]), convert_obj_for_inline_asm(args[2]));
+    } else {
+        // compiler allows at most 4 arguments
+        assert(n_args == 4);
+        ret = ((inline_asm_fun_4_t)fun)(
+            convert_obj_for_inline_asm(args[0]),
+            convert_obj_for_inline_asm(args[1]),
+            convert_obj_for_inline_asm(args[2]),
+            convert_obj_for_inline_asm(args[3])
+        );
+    }
+
+    return mp_convert_native_to_obj(ret, self->type_sig);
+}
+
+// Type object for inline-assembler functions.
+STATIC const mp_obj_type_t mp_type_fun_asm = {
+    { &mp_type_type },
+    .name = MP_QSTR_function,
+    .call = fun_asm_call,
+    .unary_op = mp_generic_unary_op,
+};
+
+// Create an inline-assembler function object from its machine code and
+// return-type signature.
+mp_obj_t mp_obj_new_fun_asm(mp_uint_t n_args, void *fun_data, mp_uint_t type_sig) {
+    mp_obj_fun_asm_t *fn = m_new_obj(mp_obj_fun_asm_t);
+    fn->base.type = &mp_type_fun_asm;
+    fn->n_args = n_args;
+    fn->fun_data = fun_data;
+    fn->type_sig = type_sig;
+    return fn;
+}
+
+#endif // MICROPY_EMIT_INLINE_THUMB
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objfun.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,44 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_OBJFUN_H__
+#define __MICROPY_INCLUDED_PY_OBJFUN_H__
+
+#include "py/obj.h"
+
+// A function defined in Python and compiled to bytecode.
+typedef struct _mp_obj_fun_bc_t {
+    mp_obj_base_t base;
+    mp_obj_dict_t *globals; // the context within which this function was defined
+    const byte *bytecode; // bytecode for the function
+    const mp_uint_t *const_table; // constant table
+    // the following extra_args array is allocated space to take (in order):
+    // - values of positional default args (if any)
+    // - a single slot for default kw args dict (if it has them)
+    // - a single slot for var args tuple (if it takes them)
+    // - a single slot for kw args dict (if it takes them)
+    mp_obj_t extra_args[];
+} mp_obj_fun_bc_t;
+
+#endif // __MICROPY_INCLUDED_PY_OBJFUN_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objgenerator.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,245 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/obj.h"
+#include "py/runtime.h"
+#include "py/bc.h"
+#include "py/objgenerator.h"
+#include "py/objfun.h"
+
+/******************************************************************************/
+/* generator wrapper */
+
+// A generator function: wraps the underlying bytecode function so that
+// calling it creates generator instances instead of executing it.
+typedef struct _mp_obj_gen_wrap_t {
+    mp_obj_base_t base;
+    mp_obj_t *fun; // the wrapped mp_obj_fun_bc_t
+} mp_obj_gen_wrap_t;
+
+// A generator instance: the captured globals plus the suspended VM code
+// state (the locals/stack and exception stack are allocated inline,
+// immediately after code_state).
+typedef struct _mp_obj_gen_instance_t {
+    mp_obj_base_t base;
+    mp_obj_dict_t *globals;
+    mp_code_state code_state;
+} mp_obj_gen_instance_t;
+
+// Calling a generator function creates a fresh generator instance with its
+// own VM code state, ready to be resumed; no user code runs yet.
+STATIC mp_obj_t gen_wrap_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_obj_gen_wrap_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_obj_fun_bc_t *self_fun = (mp_obj_fun_bc_t*)self->fun;
+    assert(self_fun->base.type == &mp_type_fun_bc);
+
+    // get start of bytecode
+    const byte *ip = self_fun->bytecode;
+
+    // bytecode prelude: get state size and exception stack size
+    mp_uint_t n_state = mp_decode_uint(&ip);
+    mp_uint_t n_exc_stack = mp_decode_uint(&ip);
+
+    // allocate the generator object, with room for local stack and exception stack
+    mp_obj_gen_instance_t *o = m_new_obj_var(mp_obj_gen_instance_t, byte,
+        n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t));
+    o->base.type = &mp_type_gen_instance;
+
+    o->globals = self_fun->globals;
+    o->code_state.n_state = n_state;
+    o->code_state.ip = (byte*)(ip - self_fun->bytecode); // offset to prelude
+    mp_setup_code_state(&o->code_state, self_fun, n_args, n_kw, args);
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Type object for generator functions.
+const mp_obj_type_t mp_type_gen_wrap = {
+    { &mp_type_type },
+    .name = MP_QSTR_generator,
+    .call = gen_wrap_call,
+};
+
+// Wrap a bytecode function object so that calling it produces generator
+// instances instead of running the body directly.
+mp_obj_t mp_obj_new_gen_wrap(mp_obj_t fun) {
+    mp_obj_gen_wrap_t *wrap = m_new_obj(mp_obj_gen_wrap_t);
+    wrap->base.type = &mp_type_gen_wrap;
+    wrap->fun = MP_OBJ_TO_PTR(fun);
+    return MP_OBJ_FROM_PTR(wrap);
+}
+
+/******************************************************************************/
+/* generator instance */
+
+// Print a generator instance as: <generator object 'name' at 0xADDR>
+STATIC void gen_instance_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_gen_instance_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_printf(print, "<generator object '%q' at %p>", mp_obj_code_get_name(self->code_state.code_info), self);
+}
+
+// Resume a generator, optionally sending in a value and/or throwing an
+// exception into it.  On return *ret_val holds the yielded value, the
+// returned/raised object, or the MP_OBJ_STOP_ITERATION sentinel.  A code
+// state with ip == 0 marks the generator as finished.
+mp_vm_return_kind_t mp_obj_gen_resume(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t throw_value, mp_obj_t *ret_val) {
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_gen_instance));
+    mp_obj_gen_instance_t *self = MP_OBJ_TO_PTR(self_in);
+    if (self->code_state.ip == 0) {
+        // already finished: signal StopIteration via the sentinel
+        *ret_val = MP_OBJ_STOP_ITERATION;
+        return MP_VM_RETURN_NORMAL;
+    }
+    if (self->code_state.sp == self->code_state.state - 1) {
+        // generator has not been started yet, so it can't receive a value
+        if (send_value != mp_const_none) {
+            nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "can't send non-None value to a just-started generator"));
+        }
+    } else {
+        // the sent value becomes the result of the paused yield expression
+        *self->code_state.sp = send_value;
+    }
+    // run the generator body in its own globals context
+    mp_obj_dict_t *old_globals = mp_globals_get();
+    mp_globals_set(self->globals);
+    mp_vm_return_kind_t ret_kind = mp_execute_bytecode(&self->code_state, throw_value);
+    mp_globals_set(old_globals);
+
+    switch (ret_kind) {
+        case MP_VM_RETURN_NORMAL:
+        default:
+            // Explicitly mark generator as completed. If we don't do this,
+            // subsequent next() may re-execute statements after last yield
+            // again and again, leading to side effects.
+            // TODO: check how return with value behaves under such conditions
+            // in CPython.
+            self->code_state.ip = 0;
+            *ret_val = *self->code_state.sp;
+            break;
+
+        case MP_VM_RETURN_YIELD:
+            *ret_val = *self->code_state.sp;
+            if (*ret_val == MP_OBJ_STOP_ITERATION) {
+                self->code_state.ip = 0;
+            }
+            break;
+
+        case MP_VM_RETURN_EXCEPTION:
+            // uncaught exception: the exception object is stored in the
+            // last state slot by the VM
+            self->code_state.ip = 0;
+            *ret_val = self->code_state.state[self->code_state.n_state - 1];
+            break;
+    }
+
+    return ret_kind;
+}
+
+// Resume a generator and map the VM result onto the iterator protocol:
+// returns the yielded value, returns the MP_OBJ_STOP_ITERATION sentinel on
+// plain completion, or raises the appropriate exception.
+STATIC mp_obj_t gen_resume_and_raise(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t throw_value) {
+    mp_obj_t ret;
+    switch (mp_obj_gen_resume(self_in, send_value, throw_value, &ret)) {
+        case MP_VM_RETURN_NORMAL:
+        default:
+            // Optimize return w/o value in case generator is used in for loop
+            if (ret == mp_const_none || ret == MP_OBJ_STOP_ITERATION) {
+                return MP_OBJ_STOP_ITERATION;
+            } else {
+                nlr_raise(mp_obj_new_exception_args(&mp_type_StopIteration, 1, &ret));
+            }
+
+        case MP_VM_RETURN_YIELD:
+            // a yield after GeneratorExit was thrown in is an error
+            if (throw_value != MP_OBJ_NULL && mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(throw_value)), MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) {
+                nlr_raise(mp_obj_new_exception_msg(&mp_type_RuntimeError, "generator ignored GeneratorExit"));
+            }
+            return ret;
+
+        case MP_VM_RETURN_EXCEPTION:
+            // TODO: Optimization of returning MP_OBJ_STOP_ITERATION is really part
+            // of mp_iternext() protocol, but this function is called by other methods
+            // too, which may not handled MP_OBJ_STOP_ITERATION.
+            if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(ret)), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
+                mp_obj_t val = mp_obj_exception_get_value(ret);
+                if (val == mp_const_none) {
+                    return MP_OBJ_STOP_ITERATION;
+                }
+            }
+            nlr_raise(ret);
+    }
+}
+
+// __next__: resume the generator with None sent in; may return the
+// MP_OBJ_STOP_ITERATION sentinel, which the iternext protocol permits.
+STATIC mp_obj_t gen_instance_iternext(mp_obj_t self_in) {
+    return gen_resume_and_raise(self_in, mp_const_none, MP_OBJ_NULL);
+}
+
+// generator.send(value): resume with the given value; unlike iternext, an
+// exhausted generator must raise a real StopIteration, not the sentinel.
+STATIC mp_obj_t gen_instance_send(mp_obj_t self_in, mp_obj_t send_value) {
+    mp_obj_t result = gen_resume_and_raise(self_in, send_value, MP_OBJ_NULL);
+    if (result != MP_OBJ_STOP_ITERATION) {
+        return result;
+    }
+    nlr_raise(mp_obj_new_exception(&mp_type_StopIteration));
+}
+
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(gen_instance_send_obj, gen_instance_send);
+
+STATIC mp_obj_t gen_instance_close(mp_obj_t self_in);
+// generator.throw(exc) / throw(type, value[, tb]): resume the generator
+// with the given exception raised at the point of the paused yield.
+STATIC mp_obj_t gen_instance_throw(size_t n_args, const mp_obj_t *args) {
+    // with 2 args the exception is args[1]; with 3 or 4 args the
+    // CPython-style (type, value[, tb]) form is used and args[2] is taken
+    mp_obj_t exc = (n_args == 2) ? args[1] : args[2];
+    exc = mp_make_raise_obj(exc);
+
+    mp_obj_t ret = gen_resume_and_raise(args[0], mp_const_none, exc);
+    if (ret == MP_OBJ_STOP_ITERATION) {
+        nlr_raise(mp_obj_new_exception(&mp_type_StopIteration));
+    } else {
+        return ret;
+    }
+}
+
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(gen_instance_throw_obj, 2, 4, gen_instance_throw);
+
+// generator.close(): throw GeneratorExit into the generator.  A clean exit
+// (return, or GeneratorExit/StopIteration propagating out) yields None;
+// yielding instead is an error; any other exception is re-raised.
+STATIC mp_obj_t gen_instance_close(mp_obj_t self_in) {
+    mp_obj_t ret;
+    switch (mp_obj_gen_resume(self_in, mp_const_none, MP_OBJ_FROM_PTR(&mp_const_GeneratorExit_obj), &ret)) {
+        case MP_VM_RETURN_YIELD:
+            nlr_raise(mp_obj_new_exception_msg(&mp_type_RuntimeError, "generator ignored GeneratorExit"));
+
+        // Swallow StopIteration & GeneratorExit (== successful close), and re-raise any other
+        case MP_VM_RETURN_EXCEPTION:
+            // ret should always be an instance of an exception class
+            if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(ret)), MP_OBJ_FROM_PTR(&mp_type_GeneratorExit)) ||
+                mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(ret)), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
+                return mp_const_none;
+            }
+            nlr_raise(ret);
+
+        default:
+            // The only choice left is MP_VM_RETURN_NORMAL which is successful close
+            return mp_const_none;
+    }
+}
+
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(gen_instance_close_obj, gen_instance_close);
+
+// Method table for generator instances: close, send, throw.
+STATIC const mp_rom_map_elem_t gen_instance_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_close), MP_ROM_PTR(&gen_instance_close_obj) },
+    { MP_ROM_QSTR(MP_QSTR_send), MP_ROM_PTR(&gen_instance_send_obj) },
+    { MP_ROM_QSTR(MP_QSTR_throw), MP_ROM_PTR(&gen_instance_throw_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(gen_instance_locals_dict, gen_instance_locals_dict_table);
+
+// Type object for generator instances; a generator is its own iterator.
+const mp_obj_type_t mp_type_gen_instance = {
+    { &mp_type_type },
+    .name = MP_QSTR_generator,
+    .print = gen_instance_print,
+    .getiter = mp_identity,
+    .iternext = gen_instance_iternext,
+    .locals_dict = (mp_obj_dict_t*)&gen_instance_locals_dict,
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/objgenerator.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,34 @@ +/* + * This file is part of the Micro Python project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013, 2014 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef __MICROPY_INCLUDED_PY_OBJGENERATOR_H__ +#define __MICROPY_INCLUDED_PY_OBJGENERATOR_H__ + +#include "py/obj.h" +#include "py/runtime.h" + +mp_vm_return_kind_t mp_obj_gen_resume(mp_obj_t self_in, mp_obj_t send_val, mp_obj_t throw_val, mp_obj_t *ret_val); + +#endif // __MICROPY_INCLUDED_PY_OBJGENERATOR_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objgetitemiter.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,76 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "py/nlr.h"
+#include "py/runtime.h"
+
+// this is a wrapper object that turns something that has a __getitem__ method into an iterator
+
+typedef struct _mp_obj_getitem_iter_t {
+ mp_obj_base_t base;
+ mp_obj_t args[3];
+} mp_obj_getitem_iter_t;
+
+// iternext slot: call the wrapped __getitem__ with the current index
+// (args[0]=method, args[1]=self, args[2]=index, as set up by
+// mp_obj_new_getitem_iter below), advance the index, and translate an
+// IndexError/StopIteration from the callee into iterator exhaustion.
+STATIC mp_obj_t it_iternext(mp_obj_t self_in) {
+ mp_obj_getitem_iter_t *self = MP_OBJ_TO_PTR(self_in);
+ nlr_buf_t nlr;
+ if (nlr_push(&nlr) == 0) {
+ // try to get next item
+ mp_obj_t value = mp_call_method_n_kw(1, 0, self->args);
+ self->args[2] = MP_OBJ_NEW_SMALL_INT(MP_OBJ_SMALL_INT_VALUE(self->args[2]) + 1);
+ nlr_pop();
+ return value;
+ } else {
+ // an exception was raised; nlr.ret_val is the exception object
+ mp_obj_type_t *t = (mp_obj_type_t*)((mp_obj_base_t*)nlr.ret_val)->type;
+ if (t == &mp_type_StopIteration || t == &mp_type_IndexError) {
+ // return MP_OBJ_STOP_ITERATION instead of raising
+ return MP_OBJ_STOP_ITERATION;
+ } else {
+ // re-raise exception
+ nlr_jump(nlr.ret_val);
+ }
+ }
+}
+
+// Iterator type: identity __iter__ plus the index-advancing iternext above.
+STATIC const mp_obj_type_t it_type = {
+ { &mp_type_type },
+ .name = MP_QSTR_iterator,
+ .getiter = mp_identity,
+ .iternext = it_iternext,
+};
+
+// Wrap a (method, self) pair as returned by mp_load_method_maybe in an
+// iterator object whose __getitem__ index starts at 0.
+mp_obj_t mp_obj_new_getitem_iter(mp_obj_t *args) {
+    mp_obj_getitem_iter_t *iter = m_new_obj(mp_obj_getitem_iter_t);
+    iter->base.type = &it_type;
+    for (size_t i = 0; i < 2; ++i) {
+        iter->args[i] = args[i];
+    }
+    iter->args[2] = MP_OBJ_NEW_SMALL_INT(0); // current index
+    return MP_OBJ_FROM_PTR(iter);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objint.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,447 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+#include "py/nlr.h"
+#include "py/parsenum.h"
+#include "py/smallint.h"
+#include "py/objint.h"
+#include "py/objstr.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+#include "py/binary.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#include <math.h>
+#endif
+
+// This dispatcher function is expected to be independent of the implementation of long int
+// int() constructor:
+//   0 args -> 0
+//   1 arg  -> pass ints through, parse str/bytes (base auto-detected via 0),
+//             truncate floats, otherwise generic int conversion (eg bool)
+//   2 args -> parse string in the given base
+STATIC mp_obj_t mp_obj_int_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ (void)type_in;
+ mp_arg_check_num(n_args, n_kw, 0, 2, false);
+
+ switch (n_args) {
+ case 0:
+ return MP_OBJ_NEW_SMALL_INT(0);
+
+ case 1:
+ if (MP_OBJ_IS_INT(args[0])) {
+ // already an int (small or long), just return it
+ return args[0];
+ } else if (MP_OBJ_IS_STR_OR_BYTES(args[0])) {
+ // a string, parse it
+ mp_uint_t l;
+ const char *s = mp_obj_str_get_data(args[0], &l);
+ return mp_parse_num_integer(s, l, 0, NULL);
+#if MICROPY_PY_BUILTINS_FLOAT
+ } else if (mp_obj_is_float(args[0])) {
+ return mp_obj_new_int_from_float(mp_obj_float_get(args[0]));
+#endif
+ } else {
+ // try to convert to small int (eg from bool)
+ return MP_OBJ_NEW_SMALL_INT(mp_obj_get_int(args[0]));
+ }
+
+ case 2:
+ default: {
+ // should be a string, parse it
+ // TODO proper error checking of argument types
+ mp_uint_t l;
+ const char *s = mp_obj_str_get_data(args[0], &l);
+ return mp_parse_num_integer(s, l, mp_obj_get_int(args[1]), NULL);
+ }
+ }
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+// Classify a float by whether its integer value fits a small int, fits the
+// long-int implementation, or overflows. Works directly on the IEEE bit
+// pattern: only the exponent (and sign) field is examined, never the value.
+mp_fp_as_int_class_t mp_classify_fp_as_int(mp_float_t val) {
+ union {
+ mp_float_t f;
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+ uint32_t i;
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+ uint32_t i[2];
+#endif
+ } u = {val};
+
+ uint32_t e;
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+ e = u.i;
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+ // word containing the sign and exponent fields (depends on endianness)
+ e = u.i[MP_ENDIANNESS_LITTLE];
+#endif
+#define MP_FLOAT_SIGN_SHIFT_I32 ((MP_FLOAT_FRAC_BITS + MP_FLOAT_EXP_BITS) % 32)
+#define MP_FLOAT_EXP_SHIFT_I32 (MP_FLOAT_FRAC_BITS % 32)
+
+ if (e & (1 << MP_FLOAT_SIGN_SHIFT_I32)) {
+ // negative value: adjust e so the threshold comparisons below also
+ // account for the asymmetric negative range — NOTE(review): confirm
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+ e |= u.i[MP_ENDIANNESS_BIG] != 0;
+#endif
+ e += ((1 << MP_FLOAT_EXP_BITS) - 1) << MP_FLOAT_EXP_SHIFT_I32;
+ } else {
+ // positive: mask off the fraction bits sharing this word
+ e &= ~((1 << MP_FLOAT_EXP_SHIFT_I32) - 1);
+ }
+ // 8 * sizeof(uintptr_t) counts the number of bits for a small int
+ // TODO provide a way to configure this properly
+ if (e <= ((8 * sizeof(uintptr_t) + MP_FLOAT_EXP_BIAS - 3) << MP_FLOAT_EXP_SHIFT_I32)) {
+ return MP_FP_CLASS_FIT_SMALLINT;
+ }
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG
+ if (e <= (((sizeof(long long) * BITS_PER_BYTE) + MP_FLOAT_EXP_BIAS - 2) << MP_FLOAT_EXP_SHIFT_I32)) {
+ return MP_FP_CLASS_FIT_LONGINT;
+ }
+#endif
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_MPZ
+ // arbitrary-precision mpz can hold any finite integer value
+ return MP_FP_CLASS_FIT_LONGINT;
+#else
+ return MP_FP_CLASS_OVERFLOW;
+#endif
+}
+#undef MP_FLOAT_SIGN_SHIFT_I32
+#undef MP_FLOAT_EXP_SHIFT_I32
+#endif
+
+// Print slot for int: format in base 10. str() and repr() are identical,
+// so 'kind' is ignored.
+void mp_obj_int_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    (void)kind;
+    // Format into a small on-stack buffer; mp_obj_int_formatted replaces it
+    // with a heap buffer if it is too small, which must then be freed here.
+    char stack_buf[sizeof(mp_int_t) * 4];
+    char *fmt_buf = stack_buf;
+    mp_uint_t fmt_buf_size = sizeof(stack_buf);
+    mp_uint_t unused_fmt_size;
+
+    char *s = mp_obj_int_formatted(&fmt_buf, &fmt_buf_size, &unused_fmt_size, self_in, 10, NULL, '\0', '\0');
+    mp_print_str(print, s);
+
+    if (fmt_buf != stack_buf) {
+        m_del(char, fmt_buf, fmt_buf_size);
+    }
+}
+
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG
+typedef mp_longint_impl_t fmt_int_t;
+#else
+typedef mp_int_t fmt_int_t;
+#endif
+
+// log_base2_floor[b] == floor(log2(b)); used below to bound the number of
+// digits needed to print a fmt_int_t in base b (valid for b in 1..32;
+// entry 0 is unused padding).
+STATIC const uint8_t log_base2_floor[] = {
+ 0,
+ 0, 1, 1, 2,
+ 2, 2, 2, 3,
+ 3, 3, 3, 3,
+ 3, 3, 3, 4,
+ 4, 4, 4, 4,
+ 4, 4, 4, 4,
+ 4, 4, 4, 4,
+ 4, 4, 4, 5
+};
+
+// Upper bound on the buffer size needed to format any fmt_int_t in 'base',
+// including optional prefix, comma separators, a sign and the terminating
+// NUL. Returns 0 for an unsupported base.
+STATIC uint int_as_str_size_formatted(uint base, const char *prefix, char comma) {
+    if (base < 2 || base > 32) {
+        return 0;
+    }
+
+    uint digits = sizeof(fmt_int_t) * 8 / log_base2_floor[base] + 1;
+    uint commas = 0;
+    if (comma) {
+        commas = digits / 3;
+    }
+    uint prefix_len = 0;
+    if (prefix) {
+        prefix_len = strlen(prefix);
+    }
+    // +1 for the sign, +1 for the NUL byte
+    return digits + commas + prefix_len + 2;
+}
+
+// This routine expects you to pass in a buffer and size (in *buf and *buf_size).
+// If, for some reason, this buffer is too small, then it will allocate a
+// buffer and return the allocated buffer and size in *buf and *buf_size. It
+// is the callers responsibility to free this allocated buffer.
+//
+// The resulting formatted string will be returned from this function and the
+// formatted size will be in *fmt_size.
+char *mp_obj_int_formatted(char **buf, mp_uint_t *buf_size, mp_uint_t *fmt_size, mp_const_obj_t self_in,
+ int base, const char *prefix, char base_char, char comma) {
+ fmt_int_t num;
+ if (MP_OBJ_IS_SMALL_INT(self_in)) {
+ // A small int; get the integer value to format.
+ num = mp_obj_get_int(self_in);
+#if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
+ } else if (MP_OBJ_IS_TYPE(self_in, &mp_type_int)) {
+ // Not a small int.
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG
+ const mp_obj_int_t *self = self_in;
+ // Get the value to format; mp_obj_get_int truncates to mp_int_t.
+ num = self->val;
+#else
+ // Delegate to the implementation for the long int.
+ return mp_obj_int_formatted_impl(buf, buf_size, fmt_size, self_in, base, prefix, base_char, comma);
+#endif
+#endif
+ } else {
+ // Not an int.
+ **buf = '\0';
+ *fmt_size = 0;
+ return *buf;
+ }
+
+ char sign = '\0';
+ if (num < 0) {
+ // NOTE(review): negating the most-negative fmt_int_t value overflows
+ // (UB in C) — TODO confirm callers never pass that value here
+ num = -num;
+ sign = '-';
+ }
+
+ // grow the caller's buffer if it can't hold the worst-case formatted size;
+ // the caller is responsible for freeing the replacement (see comment above)
+ uint needed_size = int_as_str_size_formatted(base, prefix, comma);
+ if (needed_size > *buf_size) {
+ *buf = m_new(char, needed_size);
+ *buf_size = needed_size;
+ }
+ char *str = *buf;
+
+ // digits are generated least-significant first, so the buffer is filled
+ // backwards from the end; 'b' is the cursor, the result starts at 'b'
+ char *b = str + needed_size;
+ *(--b) = '\0';
+ char *last_comma = b;
+
+ if (num == 0) {
+ *(--b) = '0';
+ } else {
+ do {
+ int c = num % base;
+ num /= base;
+ if (c >= 10) {
+ // digits above 9 use base_char ('a' or 'A') onwards
+ c += base_char - 10;
+ } else {
+ c += '0';
+ }
+ *(--b) = c;
+ if (comma && num != 0 && b > str && (last_comma - b) == 3) {
+ *(--b) = comma;
+ last_comma = b;
+ }
+ }
+ while (b > str && num != 0);
+ }
+ if (prefix) {
+ // copy the prefix (eg "0x") in front of the digits, if it fits
+ size_t prefix_len = strlen(prefix);
+ char *p = b - prefix_len;
+ if (p > str) {
+ b = p;
+ while (*prefix) {
+ *p++ = *prefix++;
+ }
+ }
+ }
+ if (sign && b > str) {
+ *(--b) = sign;
+ }
+ *fmt_size = *buf + needed_size - b - 1;
+
+ return b;
+}
+
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_NONE
+
+// Return -1, 0 or +1 according to the sign of the int.
+int mp_obj_int_sign(mp_obj_t self_in) {
+    mp_int_t v = mp_obj_get_int(self_in);
+    // classic branch-free sign idiom
+    return (v > 0) - (v < 0);
+}
+
+// This must handle int and bool types, and must raise a
+// TypeError if the argument is not integral
+mp_obj_t mp_obj_int_abs(mp_obj_t self_in) {
+    // mp_obj_get_int raises TypeError for non-integral arguments
+    mp_int_t v = mp_obj_get_int(self_in);
+    return MP_OBJ_NEW_SMALL_INT(v < 0 ? -v : v);
+}
+
+// This is called for operations on SMALL_INT that are not handled by mp_unary_op
+mp_obj_t mp_obj_int_unary_op(mp_uint_t op, mp_obj_t o_in) {
+    // silence unused-parameter warnings, matching the (void) idiom used
+    // elsewhere in this file (eg mp_obj_int_make_new)
+    (void)op;
+    (void)o_in;
+    return MP_OBJ_NULL; // op not supported
+}
+
+// This is called for operations on SMALL_INT that are not handled by mp_binary_op
+// Without a long-int implementation there are no boxed ints, so only the
+// generic extra cases (bool operands, sequence repetition) remain.
+mp_obj_t mp_obj_int_binary_op(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+ return mp_obj_int_binary_op_extra_cases(op, lhs_in, rhs_in);
+}
+
+// This is called only with strings whose value doesn't fit in SMALL_INT
+mp_obj_t mp_obj_new_int_from_str_len(const char **str, mp_uint_t len, bool neg, mp_uint_t base) {
+    // no long-int support in this build: all parameters are unused
+    (void)str;
+    (void)len;
+    (void)neg;
+    (void)base;
+    nlr_raise(mp_obj_new_exception_msg(&mp_type_OverflowError, "long int not supported in this build"));
+    return mp_const_none; // unreachable; keeps the compiler happy
+}
+
+// This is called when an integer larger than a SMALL_INT is needed (although val might still fit in a SMALL_INT)
+mp_obj_t mp_obj_new_int_from_ll(long long val) {
+    (void)val; // no long-int support in this build
+    nlr_raise(mp_obj_new_exception_msg(&mp_type_OverflowError, "small int overflow"));
+    return mp_const_none; // unreachable; keeps the compiler happy
+}
+
+// This is called when an integer larger than a SMALL_INT is needed (although val might still fit in a SMALL_INT)
+mp_obj_t mp_obj_new_int_from_ull(unsigned long long val) {
+    (void)val; // no long-int support in this build
+    nlr_raise(mp_obj_new_exception_msg(&mp_type_OverflowError, "small int overflow"));
+    return mp_const_none; // unreachable; keeps the compiler happy
+}
+
+// Create an int from an unsigned value. Small ints are signed, so the
+// value must fit entirely within the positive small-int range.
+mp_obj_t mp_obj_new_int_from_uint(mp_uint_t value) {
+    if ((value & ~MP_SMALL_INT_POSITIVE_MASK) != 0) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_OverflowError, "small int overflow"));
+    }
+    return MP_OBJ_NEW_SMALL_INT(value);
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+// Convert a float to an int, raising OverflowError for +/-inf and
+// ValueError for NaN or values that don't fit a small int.
+mp_obj_t mp_obj_new_int_from_float(mp_float_t val) {
+    int cl = fpclassify(val);
+    if (cl == FP_INFINITE) {
+        // use the plain (non-varargs) raiser: these messages have no format
+        // arguments, so going through the varg formatter is wasted work
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_OverflowError, "can't convert inf to int"));
+    } else if (cl == FP_NAN) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "can't convert NaN to int"));
+    } else {
+        mp_fp_as_int_class_t icl = mp_classify_fp_as_int(val);
+        if (icl == MP_FP_CLASS_FIT_SMALLINT) {
+            return MP_OBJ_NEW_SMALL_INT((mp_int_t)val);
+        } else {
+            nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "float too big"));
+        }
+    }
+}
+#endif
+
+// Create an int object; without a long-int implementation anything beyond
+// the small-int range raises OverflowError.
+mp_obj_t mp_obj_new_int(mp_int_t value) {
+    if (!MP_SMALL_INT_FITS(value)) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_OverflowError, "small int overflow"));
+    }
+    return MP_OBJ_NEW_SMALL_INT(value);
+}
+
+// In this build every int is a small int, so truncated and checked
+// extraction are the same operation.
+mp_int_t mp_obj_int_get_truncated(mp_const_obj_t self_in) {
+ return MP_OBJ_SMALL_INT_VALUE(self_in);
+}
+
+mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in) {
+ return MP_OBJ_SMALL_INT_VALUE(self_in);
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+// Implicit int -> float conversion (may lose precision for wide values).
+mp_float_t mp_obj_int_as_float(mp_obj_t self_in) {
+ return MP_OBJ_SMALL_INT_VALUE(self_in);
+}
+#endif
+
+#endif // MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_NONE
+
+// This dispatcher function is expected to be independent of the implementation of long int
+// It handles the extra cases for integer-like arithmetic
+mp_obj_t mp_obj_int_binary_op_extra_cases(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+ if (rhs_in == mp_const_false) {
+ // false acts as 0
+ return mp_binary_op(op, lhs_in, MP_OBJ_NEW_SMALL_INT(0));
+ } else if (rhs_in == mp_const_true) {
+ // true acts as 1
+ return mp_binary_op(op, lhs_in, MP_OBJ_NEW_SMALL_INT(1));
+ } else if (op == MP_BINARY_OP_MULTIPLY) {
+ if (MP_OBJ_IS_STR(rhs_in) || MP_OBJ_IS_TYPE(rhs_in, &mp_type_bytes) || MP_OBJ_IS_TYPE(rhs_in, &mp_type_tuple) || MP_OBJ_IS_TYPE(rhs_in, &mp_type_list)) {
+ // multiply is commutative for these types, so delegate to them
+ return mp_binary_op(op, rhs_in, lhs_in);
+ }
+ }
+ return MP_OBJ_NULL; // op not supported
+}
+
+// this is a classmethod
+// int.from_bytes(bytes): interpret the buffer as an unsigned little-endian
+// integer (byteorder/signed parameters not yet supported).
+STATIC mp_obj_t int_from_bytes(size_t n_args, const mp_obj_t *args) {
+    // TODO: Support long ints
+    // TODO: Support byteorder param (assumes 'little' at the moment)
+    // TODO: Support signed param (assumes signed=False at the moment)
+    (void)n_args;
+
+    // get the buffer info
+    mp_buffer_info_t bufinfo;
+    mp_get_buffer_raise(args[1], &bufinfo, MP_BUFFER_READ);
+
+    // accumulate most-significant byte first, walking backwards by index:
+    // the previous pointer-based loop decremented its cursor to one element
+    // before the start of the buffer, which is undefined behaviour in C
+    const byte *data = (const byte*)bufinfo.buf;
+    mp_uint_t value = 0;
+    for (size_t i = bufinfo.len; i > 0; i--) {
+        value = (value << 8) | data[i - 1];
+    }
+
+    return mp_obj_new_int_from_uint(value);
+}
+
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(int_from_bytes_fun_obj, 2, 3, int_from_bytes);
+STATIC MP_DEFINE_CONST_CLASSMETHOD_OBJ(int_from_bytes_obj, MP_ROM_PTR(&int_from_bytes_fun_obj));
+
+// int.to_bytes(length, ...): serialize the int into 'length' bytes,
+// little-endian, unsigned (byteorder/signed params not yet supported).
+STATIC mp_obj_t int_to_bytes(size_t n_args, const mp_obj_t *args) {
+ // TODO: Support byteorder param (assumes 'little')
+ // TODO: Support signed param (assumes signed=False)
+ (void)n_args;
+
+ // NOTE(review): assumes args[1] is a small int — no type/range check here
+ mp_uint_t len = MP_OBJ_SMALL_INT_VALUE(args[1]);
+
+ vstr_t vstr;
+ vstr_init_len(&vstr, len);
+ byte *data = (byte*)vstr.buf;
+ memset(data, 0, len);
+
+ #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
+ if (!MP_OBJ_IS_SMALL_INT(args[0])) {
+ mp_obj_int_to_bytes_impl(args[0], false, len, data);
+ } else
+ #endif
+ {
+ mp_int_t val = MP_OBJ_SMALL_INT_VALUE(args[0]);
+ // write at most sizeof(val) bytes; the rest stay zero from the memset
+ mp_binary_set_int(MIN((size_t)len, sizeof(val)), false, data, val);
+ }
+
+ return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(int_to_bytes_obj, 2, 4, int_to_bytes);
+
+// Method table for int: the two bytes-conversion methods exposed to Python.
+STATIC const mp_rom_map_elem_t int_locals_dict_table[] = {
+ { MP_ROM_QSTR(MP_QSTR_from_bytes), MP_ROM_PTR(&int_from_bytes_obj) },
+ { MP_ROM_QSTR(MP_QSTR_to_bytes), MP_ROM_PTR(&int_to_bytes_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(int_locals_dict, int_locals_dict_table);
+
+// Type object for 'int', wiring the constructor, printing and the
+// unary/binary operator dispatchers defined above.
+const mp_obj_type_t mp_type_int = {
+ { &mp_type_type },
+ .name = MP_QSTR_int,
+ .print = mp_obj_int_print,
+ .make_new = mp_obj_int_make_new,
+ .unary_op = mp_obj_int_unary_op,
+ .binary_op = mp_obj_int_binary_op,
+ .locals_dict = (mp_obj_dict_t*)&int_locals_dict,
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objint.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,66 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_OBJINT_H__
+#define __MICROPY_INCLUDED_PY_OBJINT_H__
+
+#include "py/mpz.h"
+#include "py/obj.h"
+
+// Boxed (non-small) integer object. The payload depends on which long-int
+// implementation is compiled in; with MICROPY_LONGINT_IMPL_NONE the struct
+// carries no value field.
+typedef struct _mp_obj_int_t {
+ mp_obj_base_t base;
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG
+ mp_longint_impl_t val;
+#elif MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_MPZ
+ mpz_t mpz;
+#endif
+} mp_obj_int_t;
+
+extern const mp_obj_int_t mp_maxsize_obj;
+
+#if MICROPY_PY_BUILTINS_FLOAT
+typedef enum {
+ MP_FP_CLASS_FIT_SMALLINT,
+ MP_FP_CLASS_FIT_LONGINT,
+ MP_FP_CLASS_OVERFLOW
+} mp_fp_as_int_class_t;
+
+mp_fp_as_int_class_t mp_classify_fp_as_int(mp_float_t val);
+#endif // MICROPY_PY_BUILTINS_FLOAT
+
+void mp_obj_int_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind);
+char *mp_obj_int_formatted(char **buf, mp_uint_t *buf_size, mp_uint_t *fmt_size, mp_const_obj_t self_in,
+ int base, const char *prefix, char base_char, char comma);
+char *mp_obj_int_formatted_impl(char **buf, mp_uint_t *buf_size, mp_uint_t *fmt_size, mp_const_obj_t self_in,
+ int base, const char *prefix, char base_char, char comma);
+mp_int_t mp_obj_int_hash(mp_obj_t self_in);
+void mp_obj_int_to_bytes_impl(mp_obj_t self_in, bool big_endian, mp_uint_t len, byte *buf);
+int mp_obj_int_sign(mp_obj_t self_in);
+mp_obj_t mp_obj_int_abs(mp_obj_t self_in);
+mp_obj_t mp_obj_int_unary_op(mp_uint_t op, mp_obj_t o_in);
+mp_obj_t mp_obj_int_binary_op(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in);
+mp_obj_t mp_obj_int_binary_op_extra_cases(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in);
+
+#endif // __MICROPY_INCLUDED_PY_OBJINT_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objint_longlong.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,308 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "py/nlr.h"
+#include "py/smallint.h"
+#include "py/objint.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#include <math.h>
+#endif
+
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG
+
+// Python3 no longer has "l" suffix for long ints. We allow to use it
+// for debugging purpose though.
+#ifdef DEBUG
+#define SUFFIX "l"
+#else
+#define SUFFIX ""
+#endif
+
+#if MICROPY_PY_SYS_MAXSIZE
+// Export value for sys.maxsize
+const mp_obj_int_t mp_maxsize_obj = {{&mp_type_int}, MP_SSIZE_MAX};
+#endif
+
+// Serialize the long-long value of self_in into exactly 'len' bytes of buf
+// in the requested byte order; high-order bytes beyond 'len' are truncated.
+void mp_obj_int_to_bytes_impl(mp_obj_t self_in, bool big_endian, mp_uint_t len, byte *buf) {
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_int));
+    long long v = ((mp_obj_int_t*)self_in)->val;
+    if (big_endian) {
+        // fill from the end of the buffer towards the front
+        for (byte *b = buf + len; b != buf;) {
+            *--b = (byte)v;
+            v >>= 8;
+        }
+    } else {
+        for (mp_uint_t i = 0; i < len; i++) {
+            buf[i] = (byte)v;
+            v >>= 8;
+        }
+    }
+}
+
+// Return -1, 0 or +1 according to the sign of the int (small or boxed).
+int mp_obj_int_sign(mp_obj_t self_in) {
+    mp_longint_impl_t v;
+    if (MP_OBJ_IS_SMALL_INT(self_in)) {
+        v = MP_OBJ_SMALL_INT_VALUE(self_in);
+    } else {
+        v = ((mp_obj_int_t*)self_in)->val;
+    }
+    // classic branch-free sign idiom
+    return (v > 0) - (v < 0);
+}
+
+// This must handle int and bool types, and must raise a
+// TypeError if the argument is not integral
+mp_obj_t mp_obj_int_abs(mp_obj_t self_in) {
+ if (MP_OBJ_IS_TYPE(self_in, &mp_type_int)) {
+ mp_obj_int_t *self = self_in;
+ // allocate a fresh boxed int, then negate it in place if needed
+ self = mp_obj_new_int_from_ll(self->val);
+ if (self->val < 0) {
+ // TODO could overflow long long
+ self->val = -self->val;
+ }
+ return self;
+ } else {
+ // mp_obj_get_int raises TypeError for non-integral arguments
+ mp_int_t val = mp_obj_get_int(self_in);
+ if (val == MP_SMALL_INT_MIN) {
+ // -MP_SMALL_INT_MIN doesn't fit in a small int, so box it
+ return mp_obj_new_int_from_ll(-val);
+ } else {
+ if (val < 0) {
+ val = -val;
+ }
+ return MP_OBJ_NEW_SMALL_INT(val);
+ }
+ }
+}
+
+// Unary ops on boxed long-long ints (small ints are handled by mp_unary_op).
+mp_obj_t mp_obj_int_unary_op(mp_uint_t op, mp_obj_t o_in) {
+ mp_obj_int_t *o = o_in;
+ switch (op) {
+ case MP_UNARY_OP_BOOL: return mp_obj_new_bool(o->val != 0);
+
+ // truncate value to fit in mp_int_t, which gives the same hash as
+ // small int if the value fits without truncation
+ case MP_UNARY_OP_HASH: return MP_OBJ_NEW_SMALL_INT((mp_int_t)o->val);
+
+ case MP_UNARY_OP_POSITIVE: return o_in;
+ // NOTE(review): negating LLONG_MIN overflows (UB) — TODO confirm acceptable
+ case MP_UNARY_OP_NEGATIVE: return mp_obj_new_int_from_ll(-o->val);
+ case MP_UNARY_OP_INVERT: return mp_obj_new_int_from_ll(~o->val);
+ default: return MP_OBJ_NULL; // op not supported
+ }
+}
+
+// Binary ops where at least one operand is a boxed long-long int. Both
+// operands are widened to long long; a non-int RHS falls back to the
+// generic extra-cases handler (bool operands, sequence repetition, ...).
+mp_obj_t mp_obj_int_binary_op(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    long long lhs_val;
+    long long rhs_val;
+
+    if (MP_OBJ_IS_SMALL_INT(lhs_in)) {
+        lhs_val = MP_OBJ_SMALL_INT_VALUE(lhs_in);
+    } else if (MP_OBJ_IS_TYPE(lhs_in, &mp_type_int)) {
+        lhs_val = ((mp_obj_int_t*)lhs_in)->val;
+    } else {
+        return MP_OBJ_NULL; // op not supported
+    }
+
+    if (MP_OBJ_IS_SMALL_INT(rhs_in)) {
+        rhs_val = MP_OBJ_SMALL_INT_VALUE(rhs_in);
+    } else if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_int)) {
+        rhs_val = ((mp_obj_int_t*)rhs_in)->val;
+    } else {
+        // delegate to generic function to check for extra cases
+        return mp_obj_int_binary_op_extra_cases(op, lhs_in, rhs_in);
+    }
+
+    switch (op) {
+        case MP_BINARY_OP_ADD:
+        case MP_BINARY_OP_INPLACE_ADD:
+            return mp_obj_new_int_from_ll(lhs_val + rhs_val);
+        case MP_BINARY_OP_SUBTRACT:
+        case MP_BINARY_OP_INPLACE_SUBTRACT:
+            return mp_obj_new_int_from_ll(lhs_val - rhs_val);
+        case MP_BINARY_OP_MULTIPLY:
+        case MP_BINARY_OP_INPLACE_MULTIPLY:
+            return mp_obj_new_int_from_ll(lhs_val * rhs_val);
+        case MP_BINARY_OP_FLOOR_DIVIDE:
+        case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE:
+            // guard against C undefined behaviour; raise like Python does
+            if (rhs_val == 0) {
+                goto zero_division;
+            }
+            // NOTE(review): C division truncates towards zero while Python
+            // floor-divides towards -inf; results differ for mixed signs — TODO
+            return mp_obj_new_int_from_ll(lhs_val / rhs_val);
+        case MP_BINARY_OP_MODULO:
+        case MP_BINARY_OP_INPLACE_MODULO:
+            if (rhs_val == 0) {
+                goto zero_division;
+            }
+            return mp_obj_new_int_from_ll(lhs_val % rhs_val);
+
+        case MP_BINARY_OP_AND:
+        case MP_BINARY_OP_INPLACE_AND:
+            return mp_obj_new_int_from_ll(lhs_val & rhs_val);
+        case MP_BINARY_OP_OR:
+        case MP_BINARY_OP_INPLACE_OR:
+            return mp_obj_new_int_from_ll(lhs_val | rhs_val);
+        case MP_BINARY_OP_XOR:
+        case MP_BINARY_OP_INPLACE_XOR:
+            return mp_obj_new_int_from_ll(lhs_val ^ rhs_val);
+
+        case MP_BINARY_OP_LSHIFT:
+        case MP_BINARY_OP_INPLACE_LSHIFT:
+            return mp_obj_new_int_from_ll(lhs_val << (int)rhs_val);
+        case MP_BINARY_OP_RSHIFT:
+        case MP_BINARY_OP_INPLACE_RSHIFT:
+            return mp_obj_new_int_from_ll(lhs_val >> (int)rhs_val);
+
+        case MP_BINARY_OP_POWER:
+        case MP_BINARY_OP_INPLACE_POWER: {
+            // binary exponentiation; a negative exponent yields 1 (unchanged
+            // from the original behaviour)
+            long long ans = 1;
+            while (rhs_val > 0) {
+                if (rhs_val & 1) {
+                    ans *= lhs_val;
+                }
+                if (rhs_val == 1) {
+                    break;
+                }
+                rhs_val /= 2;
+                lhs_val *= lhs_val;
+            }
+            return mp_obj_new_int_from_ll(ans);
+        }
+
+        case MP_BINARY_OP_LESS:
+            return mp_obj_new_bool(lhs_val < rhs_val);
+        case MP_BINARY_OP_MORE:
+            return mp_obj_new_bool(lhs_val > rhs_val);
+        case MP_BINARY_OP_LESS_EQUAL:
+            return mp_obj_new_bool(lhs_val <= rhs_val);
+        case MP_BINARY_OP_MORE_EQUAL:
+            return mp_obj_new_bool(lhs_val >= rhs_val);
+        case MP_BINARY_OP_EQUAL:
+            return mp_obj_new_bool(lhs_val == rhs_val);
+
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+
+zero_division:
+    nlr_raise(mp_obj_new_exception_msg(&mp_type_ZeroDivisionError, "divide by zero"));
+}
+
+// Create an int object, preferring the tagged small-int representation
+// when the value fits.
+mp_obj_t mp_obj_new_int(mp_int_t value) {
+    return MP_SMALL_INT_FITS(value) ? MP_OBJ_NEW_SMALL_INT(value)
+                                    : mp_obj_new_int_from_ll(value);
+}
+
+// Create an int from an unsigned value. Small ints are signed, so box the
+// value unless it fits entirely in the positive small-int range.
+mp_obj_t mp_obj_new_int_from_uint(mp_uint_t value) {
+    if ((value & ~MP_SMALL_INT_POSITIVE_MASK) != 0) {
+        return mp_obj_new_int_from_ll(value);
+    }
+    return MP_OBJ_NEW_SMALL_INT(value);
+}
+
+// Box a long long into a new int object. No small-int optimization here;
+// callers wanting that use mp_obj_new_int.
+mp_obj_t mp_obj_new_int_from_ll(long long val) {
+ mp_obj_int_t *o = m_new_obj(mp_obj_int_t);
+ o->base.type = &mp_type_int;
+ o->val = val;
+ return o;
+}
+
+// Box an unsigned long long. Values with the top bit set cannot be stored
+// in the signed long long payload, so they raise OverflowError.
+mp_obj_t mp_obj_new_int_from_ull(unsigned long long val) {
+    if (val >> (sizeof(unsigned long long) * 8 - 1) != 0) {
+        // plain (non-varargs) raiser: the message has no format arguments
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_OverflowError, "ulonglong too large"));
+    }
+    mp_obj_int_t *o = m_new_obj(mp_obj_int_t);
+    o->base.type = &mp_type_int;
+    o->val = val;
+    return o;
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+// Convert a float to an int object, choosing a small int or a boxed long
+// long by range; raises OverflowError for +/-inf and ValueError for NaN or
+// out-of-range values.
+mp_obj_t mp_obj_new_int_from_float(mp_float_t val) {
+    int cl = fpclassify(val);
+    if (cl == FP_INFINITE) {
+        // use the plain (non-varargs) raiser: these messages have no format
+        // arguments, so going through the varg formatter is wasted work
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_OverflowError, "can't convert inf to int"));
+    } else if (cl == FP_NAN) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "can't convert NaN to int"));
+    } else {
+        mp_fp_as_int_class_t icl = mp_classify_fp_as_int(val);
+        if (icl == MP_FP_CLASS_FIT_SMALLINT) {
+            return MP_OBJ_NEW_SMALL_INT((mp_int_t)val);
+        } else if (icl == MP_FP_CLASS_FIT_LONGINT) {
+            return mp_obj_new_int_from_ll((long long)val);
+        } else {
+            nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "float too big"));
+        }
+    }
+}
+#endif
+
+// Parse a long-int literal. The input is null-terminated and the caller has
+// already consumed any sign, passing it in 'neg' — so the sign must be
+// applied here.
+mp_obj_t mp_obj_new_int_from_str_len(const char **str, mp_uint_t len, bool neg, mp_uint_t base) {
+    // TODO this does not honor the given length of the string, but it all cases it should anyway be null terminated
+    // TODO check overflow
+    (void)len;
+    mp_obj_int_t *o = m_new_obj(mp_obj_int_t);
+    o->base.type = &mp_type_int;
+    char *endptr;
+    o->val = strtoll(*str, &endptr, base);
+    if (neg) {
+        // bug fix: 'neg' was previously ignored, so negative long-int
+        // literals parsed as positive
+        o->val = -o->val;
+    }
+    *str = endptr;
+    return o;
+}
+
+// Get the value as mp_int_t, silently truncating a boxed long long that
+// doesn't fit.
+mp_int_t mp_obj_int_get_truncated(mp_const_obj_t self_in) {
+    if (!MP_OBJ_IS_SMALL_INT(self_in)) {
+        return ((const mp_obj_int_t*)self_in)->val;
+    }
+    return MP_OBJ_SMALL_INT_VALUE(self_in);
+}
+
+mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in) {
+ // TODO: Check overflow
+ // NOTE(review): currently truncates silently when the value doesn't fit
+ // in mp_int_t instead of raising OverflowError
+ return mp_obj_int_get_truncated(self_in);
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+// Implicit int -> float conversion; may lose precision for values wider
+// than the float mantissa.
+mp_float_t mp_obj_int_as_float(mp_obj_t self_in) {
+    if (!MP_OBJ_IS_SMALL_INT(self_in)) {
+        return ((mp_obj_int_t*)self_in)->val;
+    }
+    return MP_OBJ_SMALL_INT_VALUE(self_in);
+}
+#endif
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objint_mpz.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,431 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/parsenumbase.h"
+#include "py/smallint.h"
+#include "py/objint.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#include <math.h>
+#endif
+
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_MPZ
+
+#if MICROPY_PY_SYS_MAXSIZE
+// Export value for sys.maxsize
+// MP_SSIZE_MAX is encoded at compile time as a little-endian array of mpz
+// digits (MPZ_DIG_SIZE bits each).  The nested #if chain emits only as many
+// digits as needed (up to 4), keeping the count in NUM_DIG.
+#define DIG_MASK ((MPZ_LONG_1 << MPZ_DIG_SIZE) - 1)
+STATIC const mpz_dig_t maxsize_dig[] = {
+    #define NUM_DIG 1
+    (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 0) & DIG_MASK,
+    #if (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 0) > DIG_MASK
+    #undef NUM_DIG
+    #define NUM_DIG 2
+    (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 1) & DIG_MASK,
+    #if (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 1) > DIG_MASK
+    #undef NUM_DIG
+    #define NUM_DIG 3
+    (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 2) & DIG_MASK,
+    #if (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 2) > DIG_MASK
+    #undef NUM_DIG
+    #define NUM_DIG 4
+    (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 3) & DIG_MASK,
+    #if (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 3) > DIG_MASK
+    #error cannot encode MP_SSIZE_MAX as mpz
+    #endif
+    #endif
+    #endif
+    #endif
+};
+// Constant int object wrapping the digits above.  fixed_dig = 1 presumably
+// marks the digit buffer as not reallocatable — TODO confirm against mpz.h.
+const mp_obj_int_t mp_maxsize_obj = {
+    {&mp_type_int},
+    {.fixed_dig = 1, .len = NUM_DIG, .alloc = NUM_DIG, .dig = (mpz_dig_t*)maxsize_dig}
+};
+#undef DIG_MASK
+#undef NUM_DIG
+#endif
+
+STATIC mp_obj_int_t *mp_obj_int_new_mpz(void) {
+    // Allocate a fresh int object whose mpz is initialised to zero.
+    mp_obj_int_t *num = m_new_obj(mp_obj_int_t);
+    num->base.type = &mp_type_int;
+    mpz_init_zero(&num->mpz);
+    return num;
+}
+
+// This routine expects you to pass in a buffer and size (in *buf and buf_size).
+// If, for some reason, this buffer is too small, then it will allocate a
+// buffer and return the allocated buffer and size in *buf and *buf_size. It
+// is the callers responsibility to free this allocated buffer.
+//
+// The resulting formatted string will be returned from this function and the
+// formatted size will be in *fmt_size.
+//
+// This particular routine should only be called for the mpz representation of the int.
+char *mp_obj_int_formatted_impl(char **buf, mp_uint_t *buf_size, mp_uint_t *fmt_size, mp_const_obj_t self_in,
+                                int base, const char *prefix, char base_char, char comma) {
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_int));
+    const mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in);
+
+    // Replace the caller's buffer with a bigger allocation if it cannot
+    // hold the formatted number (caller frees, per the comment above).
+    mp_uint_t required = mpz_as_str_size(&self->mpz, base, prefix, comma);
+    if (required > *buf_size) {
+        *buf = m_new(char, required);
+        *buf_size = required;
+    }
+
+    // Format in place and report the resulting length via *fmt_size.
+    char *result = *buf;
+    *fmt_size = mpz_as_str_inpl(&self->mpz, base, prefix, base_char, comma, result);
+    return result;
+}
+
+void mp_obj_int_to_bytes_impl(mp_obj_t self_in, bool big_endian, mp_uint_t len, byte *buf) {
+    // Serialise the int into exactly `len` bytes of `buf` with the
+    // requested endianness; delegates to the mpz layer.
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_int));
+    mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in);
+    mpz_as_bytes(&self->mpz, big_endian, len, buf);
+}
+
+int mp_obj_int_sign(mp_obj_t self_in) {
+    // Return -1, 0 or 1 according to the sign of the int object.
+    if (MP_OBJ_IS_SMALL_INT(self_in)) {
+        mp_int_t v = MP_OBJ_SMALL_INT_VALUE(self_in);
+        return (v > 0) - (v < 0);
+    }
+    mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in);
+    if (self->mpz.len == 0) {
+        // Zero is represented by an mpz with no digits.
+        return 0;
+    }
+    return self->mpz.neg == 0 ? 1 : -1;
+}
+
+// This must handle int and bool types, and must raise a
+// TypeError if the argument is not integral
+mp_obj_t mp_obj_int_abs(mp_obj_t self_in) {
+    if (MP_OBJ_IS_TYPE(self_in, &mp_type_int)) {
+        // Arbitrary-precision case: build a fresh mpz holding |self|.
+        mp_obj_int_t *src = MP_OBJ_TO_PTR(self_in);
+        mp_obj_int_t *result = mp_obj_int_new_mpz();
+        mpz_abs_inpl(&result->mpz, &src->mpz);
+        return MP_OBJ_FROM_PTR(result);
+    }
+    // Small-int / bool case (mp_obj_get_int handles the TypeError).
+    mp_int_t val = mp_obj_get_int(self_in);
+    if (val == MP_SMALL_INT_MIN) {
+        // -MP_SMALL_INT_MIN does not fit back into a small int.
+        return mp_obj_new_int_from_ll(-val);
+    }
+    return MP_OBJ_NEW_SMALL_INT(val < 0 ? -val : val);
+}
+
+mp_obj_t mp_obj_int_unary_op(mp_uint_t op, mp_obj_t o_in) {
+    // Unary operations on an mpz-backed int.
+    mp_obj_int_t *o = MP_OBJ_TO_PTR(o_in);
+    switch (op) {
+        case MP_UNARY_OP_BOOL:
+            return mp_obj_new_bool(!mpz_is_zero(&o->mpz));
+        case MP_UNARY_OP_HASH:
+            return MP_OBJ_NEW_SMALL_INT(mpz_hash(&o->mpz));
+        case MP_UNARY_OP_POSITIVE:
+            // +x is the identity on ints; reuse the same object.
+            return o_in;
+        case MP_UNARY_OP_NEGATIVE: {
+            mp_obj_int_t *neg = mp_obj_int_new_mpz();
+            mpz_neg_inpl(&neg->mpz, &o->mpz);
+            return MP_OBJ_FROM_PTR(neg);
+        }
+        case MP_UNARY_OP_INVERT: {
+            mp_obj_int_t *inv = mp_obj_int_new_mpz();
+            mpz_not_inpl(&inv->mpz, &o->mpz);
+            return MP_OBJ_FROM_PTR(inv);
+        }
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// Binary operations for mpz-backed ints.  Either operand may be a small int
+// (converted to a stack-based fixed mpz below); a float or complex RHS is
+// delegated to the float/complex implementations.
+mp_obj_t mp_obj_int_binary_op(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    const mpz_t *zlhs;
+    const mpz_t *zrhs;
+    // Temporary mpz for a small-int operand; its digit storage is this
+    // stack array, so no heap allocation is needed for the conversion.
+    mpz_t z_int;
+    mpz_dig_t z_int_dig[MPZ_NUM_DIG_FOR_INT];
+
+    // lhs could be a small int (eg small-int + mpz)
+    if (MP_OBJ_IS_SMALL_INT(lhs_in)) {
+        mpz_init_fixed_from_int(&z_int, z_int_dig, MPZ_NUM_DIG_FOR_INT, MP_OBJ_SMALL_INT_VALUE(lhs_in));
+        zlhs = &z_int;
+    } else if (MP_OBJ_IS_TYPE(lhs_in, &mp_type_int)) {
+        zlhs = &((mp_obj_int_t*)MP_OBJ_TO_PTR(lhs_in))->mpz;
+    } else {
+        // unsupported type
+        return MP_OBJ_NULL;
+    }
+
+    // if rhs is small int, then lhs was not (otherwise mp_binary_op handles it)
+    // (so reusing z_int for the rhs here cannot clobber the lhs)
+    if (MP_OBJ_IS_SMALL_INT(rhs_in)) {
+        mpz_init_fixed_from_int(&z_int, z_int_dig, MPZ_NUM_DIG_FOR_INT, MP_OBJ_SMALL_INT_VALUE(rhs_in));
+        zrhs = &z_int;
+    } else if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_int)) {
+        zrhs = &((mp_obj_int_t*)MP_OBJ_TO_PTR(rhs_in))->mpz;
+#if MICROPY_PY_BUILTINS_FLOAT
+    } else if (mp_obj_is_float(rhs_in)) {
+        return mp_obj_float_binary_op(op, mpz_as_float(zlhs), rhs_in);
+#if MICROPY_PY_BUILTINS_COMPLEX
+    } else if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_complex)) {
+        return mp_obj_complex_binary_op(op, mpz_as_float(zlhs), 0, rhs_in);
+#endif
+#endif
+    } else {
+        // delegate to generic function to check for extra cases
+        return mp_obj_int_binary_op_extra_cases(op, lhs_in, rhs_in);
+    }
+
+    // Dispatch on op.  The dummy "if (0)" keeps the chain uniform so the
+    // true-divide branch can be compiled out cleanly when floats are off.
+    if (0) {
+#if MICROPY_PY_BUILTINS_FLOAT
+    } else if (op == MP_BINARY_OP_TRUE_DIVIDE || op == MP_BINARY_OP_INPLACE_TRUE_DIVIDE) {
+        if (mpz_is_zero(zrhs)) {
+            goto zero_division_error;
+        }
+        mp_float_t flhs = mpz_as_float(zlhs);
+        mp_float_t frhs = mpz_as_float(zrhs);
+        return mp_obj_new_float(flhs / frhs);
+#endif
+
+    } else if (op <= MP_BINARY_OP_INPLACE_POWER) {
+        // Arithmetic/bitwise ops: all produce a new mpz int in `res`.
+        mp_obj_int_t *res = mp_obj_int_new_mpz();
+
+        switch (op) {
+            case MP_BINARY_OP_ADD:
+            case MP_BINARY_OP_INPLACE_ADD:
+                mpz_add_inpl(&res->mpz, zlhs, zrhs);
+                break;
+            case MP_BINARY_OP_SUBTRACT:
+            case MP_BINARY_OP_INPLACE_SUBTRACT:
+                mpz_sub_inpl(&res->mpz, zlhs, zrhs);
+                break;
+            case MP_BINARY_OP_MULTIPLY:
+            case MP_BINARY_OP_INPLACE_MULTIPLY:
+                mpz_mul_inpl(&res->mpz, zlhs, zrhs);
+                break;
+            case MP_BINARY_OP_FLOOR_DIVIDE:
+            case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE: {
+                if (mpz_is_zero(zrhs)) {
+                    // Shared divide-by-zero exit; also reached by goto from
+                    // the true-divide and modulo/divmod branches.
+                    zero_division_error:
+                    nlr_raise(mp_obj_new_exception_msg(&mp_type_ZeroDivisionError,
+                        "division by zero"));
+                }
+                mpz_t rem; mpz_init_zero(&rem);
+                mpz_divmod_inpl(&res->mpz, &rem, zlhs, zrhs);
+                // Truncating division rounds toward zero; when the signs
+                // differ and there is a remainder, subtract one to round
+                // toward -inf (Python floor semantics).
+                if (zlhs->neg != zrhs->neg) {
+                    if (!mpz_is_zero(&rem)) {
+                        mpz_t mpzone; mpz_init_from_int(&mpzone, -1);
+                        mpz_add_inpl(&res->mpz, &res->mpz, &mpzone);
+                    }
+                }
+                mpz_deinit(&rem);
+                break;
+            }
+            case MP_BINARY_OP_MODULO:
+            case MP_BINARY_OP_INPLACE_MODULO: {
+                if (mpz_is_zero(zrhs)) {
+                    goto zero_division_error;
+                }
+                mpz_t quo; mpz_init_zero(&quo);
+                mpz_divmod_inpl(&quo, &res->mpz, zlhs, zrhs);
+                mpz_deinit(&quo);
+                // Check signs and do Python style modulo
+                // (result takes the sign of the divisor)
+                if (zlhs->neg != zrhs->neg) {
+                    mpz_add_inpl(&res->mpz, &res->mpz, zrhs);
+                }
+                break;
+            }
+
+            case MP_BINARY_OP_AND:
+            case MP_BINARY_OP_INPLACE_AND:
+                mpz_and_inpl(&res->mpz, zlhs, zrhs);
+                break;
+            case MP_BINARY_OP_OR:
+            case MP_BINARY_OP_INPLACE_OR:
+                mpz_or_inpl(&res->mpz, zlhs, zrhs);
+                break;
+            case MP_BINARY_OP_XOR:
+            case MP_BINARY_OP_INPLACE_XOR:
+                mpz_xor_inpl(&res->mpz, zlhs, zrhs);
+                break;
+
+            case MP_BINARY_OP_LSHIFT:
+            case MP_BINARY_OP_INPLACE_LSHIFT:
+            case MP_BINARY_OP_RSHIFT:
+            case MP_BINARY_OP_INPLACE_RSHIFT: {
+                // The shift count must fit a machine word;
+                // mp_obj_int_get_checked raises OverflowError otherwise.
+                mp_int_t irhs = mp_obj_int_get_checked(rhs_in);
+                if (irhs < 0) {
+                    nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "negative shift count"));
+                }
+                if (op == MP_BINARY_OP_LSHIFT || op == MP_BINARY_OP_INPLACE_LSHIFT) {
+                    mpz_shl_inpl(&res->mpz, zlhs, irhs);
+                } else {
+                    mpz_shr_inpl(&res->mpz, zlhs, irhs);
+                }
+                break;
+            }
+
+            case MP_BINARY_OP_POWER:
+            case MP_BINARY_OP_INPLACE_POWER:
+                mpz_pow_inpl(&res->mpz, zlhs, zrhs);
+                break;
+
+            case MP_BINARY_OP_DIVMOD: {
+                // Returns the tuple (quotient, remainder), with the same
+                // sign adjustment as the modulo case above.
+                if (mpz_is_zero(zrhs)) {
+                    goto zero_division_error;
+                }
+                mp_obj_int_t *quo = mp_obj_int_new_mpz();
+                mpz_divmod_inpl(&quo->mpz, &res->mpz, zlhs, zrhs);
+                // Check signs and do Python style modulo
+                if (zlhs->neg != zrhs->neg) {
+                    mpz_add_inpl(&res->mpz, &res->mpz, zrhs);
+                }
+                mp_obj_t tuple[2] = {MP_OBJ_FROM_PTR(quo), MP_OBJ_FROM_PTR(res)};
+                return mp_obj_new_tuple(2, tuple);
+            }
+
+            default:
+                return MP_OBJ_NULL; // op not supported
+        }
+
+        return MP_OBJ_FROM_PTR(res);
+
+    } else {
+        // Remaining ops are comparisons, all derived from one 3-way compare.
+        int cmp = mpz_cmp(zlhs, zrhs);
+        switch (op) {
+            case MP_BINARY_OP_LESS:
+                return mp_obj_new_bool(cmp < 0);
+            case MP_BINARY_OP_MORE:
+                return mp_obj_new_bool(cmp > 0);
+            case MP_BINARY_OP_LESS_EQUAL:
+                return mp_obj_new_bool(cmp <= 0);
+            case MP_BINARY_OP_MORE_EQUAL:
+                return mp_obj_new_bool(cmp >= 0);
+            case MP_BINARY_OP_EQUAL:
+                return mp_obj_new_bool(cmp == 0);
+
+            default:
+                return MP_OBJ_NULL; // op not supported
+        }
+    }
+}
+
+mp_obj_t mp_obj_new_int(mp_int_t value) {
+    // Prefer the tagged small-int representation when the value fits.
+    return MP_SMALL_INT_FITS(value)
+        ? MP_OBJ_NEW_SMALL_INT(value)
+        : mp_obj_new_int_from_ll(value);
+}
+
+mp_obj_t mp_obj_new_int_from_ll(long long val) {
+    // Always boxes as an mpz (even values that would fit a small int).
+    mp_obj_int_t *num = mp_obj_int_new_mpz();
+    mpz_set_from_ll(&num->mpz, val, true);
+    return MP_OBJ_FROM_PTR(num);
+}
+
+mp_obj_t mp_obj_new_int_from_ull(unsigned long long val) {
+    // is_signed=false tells the mpz layer to treat the value as unsigned.
+    mp_obj_int_t *o = mp_obj_int_new_mpz();
+    mpz_set_from_ll(&o->mpz, val, false);
+    return MP_OBJ_FROM_PTR(o);
+}
+
+mp_obj_t mp_obj_new_int_from_uint(mp_uint_t value) {
+    // SMALL_INT accepts only signed numbers, so make sure the input
+    // value fits completely in the small-int positive range.
+    if ((value & ~MP_SMALL_INT_POSITIVE_MASK) != 0) {
+        return mp_obj_new_int_from_ull(value);
+    }
+    return MP_OBJ_NEW_SMALL_INT(value);
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+mp_obj_t mp_obj_new_int_from_float(mp_float_t val) {
+    // Convert a float to an int object: OverflowError for +/-inf,
+    // ValueError for NaN, otherwise truncate toward zero.
+    int cl = fpclassify(val);
+    if (cl == FP_INFINITE) {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_OverflowError, "can't convert inf to int"));
+    } else if (cl == FP_NAN) {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError, "can't convert NaN to int"));
+    } else {
+        mp_fp_as_int_class_t icl = mp_classify_fp_as_int(val);
+        if (icl == MP_FP_CLASS_FIT_SMALLINT) {
+            return MP_OBJ_NEW_SMALL_INT((mp_int_t)val);
+        } else {
+            // Doesn't fit a small int: build an arbitrary-precision mpz
+            // (no "float too big" case here, unlike the long-long backend).
+            mp_obj_int_t *o = mp_obj_int_new_mpz();
+            mpz_set_from_float(&o->mpz, val);
+            return MP_OBJ_FROM_PTR(o);
+        }
+    }
+}
+#endif
+
+mp_obj_t mp_obj_new_int_from_str_len(const char **str, mp_uint_t len, bool neg, mp_uint_t base) {
+    // Parse up to `len` characters of *str in the given base, negating if
+    // `neg` is set, and advance *str past what was consumed.
+    mp_obj_int_t *num = mp_obj_int_new_mpz();
+    mp_uint_t used = mpz_set_from_str(&num->mpz, *str, len, neg, base);
+    *str += used;
+    return MP_OBJ_FROM_PTR(num);
+}
+
+mp_int_t mp_obj_int_get_truncated(mp_const_obj_t self_in) {
+    // Extract the value as a machine word, truncating big mpz values.
+    if (MP_OBJ_IS_SMALL_INT(self_in)) {
+        return MP_OBJ_SMALL_INT_VALUE(self_in);
+    } else {
+        const mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in);
+        // hash returns actual int value if it fits in mp_int_t
+        // (so mpz_hash doubles as a cheap truncating conversion here)
+        return mpz_hash(&self->mpz);
+    }
+}
+
+mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in) {
+    // Like mp_obj_int_get_truncated, but raises OverflowError when the
+    // value cannot be represented in a machine word.
+    if (MP_OBJ_IS_SMALL_INT(self_in)) {
+        return MP_OBJ_SMALL_INT_VALUE(self_in);
+    }
+    const mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_int_t value;
+    if (!mpz_as_int_checked(&self->mpz, &value)) {
+        // overflow
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_OverflowError, "overflow converting long int to machine word"));
+    }
+    return value;
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+mp_float_t mp_obj_int_as_float(mp_obj_t self_in) {
+    // Convert a small int or mpz int to a float.
+    if (MP_OBJ_IS_SMALL_INT(self_in)) {
+        return MP_OBJ_SMALL_INT_VALUE(self_in);
+    }
+    mp_obj_int_t *obj = MP_OBJ_TO_PTR(self_in);
+    return mpz_as_float(&obj->mpz);
+}
+#endif
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objlist.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,528 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/objlist.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+#include "py/stackctrl.h"
+
+STATIC mp_obj_t mp_obj_new_list_iterator(mp_obj_t list, mp_uint_t cur);
+STATIC mp_obj_list_t *list_new(mp_uint_t n);
+STATIC mp_obj_t list_extend(mp_obj_t self_in, mp_obj_t arg_in);
+STATIC mp_obj_t list_pop(size_t n_args, const mp_obj_t *args);
+
+// TODO: Move to mpconfig.h
+#define LIST_MIN_ALLOC 4
+
+/******************************************************************************/
+/* list */
+
+STATIC void list_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    // Print as "[a, b, c]"; anything other than JSON output uses repr.
+    mp_obj_list_t *list = MP_OBJ_TO_PTR(o_in);
+    if (!(MICROPY_PY_UJSON && kind == PRINT_JSON)) {
+        kind = PRINT_REPR;
+    }
+    mp_print_str(print, "[");
+    for (mp_uint_t idx = 0; idx < list->len; idx++) {
+        if (idx != 0) {
+            mp_print_str(print, ", ");
+        }
+        mp_obj_print_helper(print, list->items[idx], kind);
+    }
+    mp_print_str(print, "]");
+}
+
+STATIC mp_obj_t list_extend_from_iter(mp_obj_t list, mp_obj_t iterable) {
+    // Append every item produced by `iterable` onto `list`; returns `list`.
+    mp_obj_t it = mp_getiter(iterable);
+    for (mp_obj_t item = mp_iternext(it); item != MP_OBJ_STOP_ITERATION; item = mp_iternext(it)) {
+        mp_obj_list_append(list, item);
+    }
+    return list;
+}
+
+STATIC mp_obj_t list_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    // Constructor: list() -> new empty list, list(iterable) -> its items.
+    (void)type_in;
+    mp_arg_check_num(n_args, n_kw, 0, 1, false);
+
+    if (n_args == 0) {
+        // return a new, empty list
+        return mp_obj_new_list(0, NULL);
+    }
+    // make list from iterable
+    // TODO: optimize list/tuple
+    mp_obj_t list = mp_obj_new_list(0, NULL);
+    return list_extend_from_iter(list, args[0]);
+}
+
+// Don't pass MP_BINARY_OP_NOT_EQUAL here
+STATIC bool list_cmp_helper(mp_uint_t op, mp_obj_t self_in, mp_obj_t another_in) {
+    // Relational comparison of two lists; a non-list RHS compares false.
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_list));
+    if (!MP_OBJ_IS_TYPE(another_in, &mp_type_list)) {
+        return false;
+    }
+    mp_obj_list_t *lhs = MP_OBJ_TO_PTR(self_in);
+    mp_obj_list_t *rhs = MP_OBJ_TO_PTR(another_in);
+    return mp_seq_cmp_objs(op, lhs->items, lhs->len, rhs->items, rhs->len);
+}
+
+STATIC mp_obj_t list_unary_op(mp_uint_t op, mp_obj_t self_in) {
+    mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in);
+    switch (op) {
+        case MP_UNARY_OP_BOOL:
+            // A list is truthy when it is non-empty.
+            return mp_obj_new_bool(self->len != 0);
+        case MP_UNARY_OP_LEN:
+            return MP_OBJ_NEW_SMALL_INT(self->len);
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// Binary operations where the LHS is a list: concatenation, repetition,
+// in-place extend, and the ordered comparisons.
+STATIC mp_obj_t list_binary_op(mp_uint_t op, mp_obj_t lhs, mp_obj_t rhs) {
+    mp_obj_list_t *o = MP_OBJ_TO_PTR(lhs);
+    switch (op) {
+        case MP_BINARY_OP_ADD: {
+            // list + list: allocate a new list and concatenate.
+            if (!MP_OBJ_IS_TYPE(rhs, &mp_type_list)) {
+                return MP_OBJ_NULL; // op not supported
+            }
+            mp_obj_list_t *p = MP_OBJ_TO_PTR(rhs);
+            mp_obj_list_t *s = list_new(o->len + p->len);
+            mp_seq_cat(s->items, o->items, o->len, p->items, p->len, mp_obj_t);
+            return MP_OBJ_FROM_PTR(s);
+        }
+        case MP_BINARY_OP_INPLACE_ADD: {
+            // list += iterable mutates in place and returns the same object.
+            list_extend(lhs, rhs);
+            return lhs;
+        }
+        case MP_BINARY_OP_MULTIPLY: {
+            // list * n: n <= 0 yields an empty list, as in CPython.
+            mp_int_t n;
+            if (!mp_obj_get_int_maybe(rhs, &n)) {
+                return MP_OBJ_NULL; // op not supported
+            }
+            if (n < 0) {
+                n = 0;
+            }
+            // NOTE(review): o->len * n is unchecked and could overflow for
+            // huge n — verify whether callers/allocator bound this.
+            mp_obj_list_t *s = list_new(o->len * n);
+            mp_seq_multiply(o->items, sizeof(*o->items), o->len, n, s->items);
+            return MP_OBJ_FROM_PTR(s);
+        }
+        case MP_BINARY_OP_EQUAL:
+        case MP_BINARY_OP_LESS:
+        case MP_BINARY_OP_LESS_EQUAL:
+        case MP_BINARY_OP_MORE:
+        case MP_BINARY_OP_MORE_EQUAL:
+            return mp_obj_new_bool(list_cmp_helper(op, lhs, rhs));
+
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// Subscript handler covering all three access modes, selected by `value`:
+// MP_OBJ_NULL = delete, MP_OBJ_SENTINEL = load, anything else = store.
+STATIC mp_obj_t list_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+    if (value == MP_OBJ_NULL) {
+        // delete
+#if MICROPY_PY_BUILTINS_SLICE
+        if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
+            // del l[a:b] — only step-1 ("fast") slices are supported here.
+            mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in);
+            mp_bound_slice_t slice;
+            if (!mp_seq_get_fast_slice_indexes(self->len, index, &slice)) {
+                assert(0);
+            }
+
+            // Deleting can only shrink the list, so len_adj is <= 0.
+            mp_int_t len_adj = slice.start - slice.stop;
+            //printf("Len adj: %d\n", len_adj);
+            assert(len_adj <= 0);
+            mp_seq_replace_slice_no_grow(self->items, self->len, slice.start, slice.stop, self->items/*NULL*/, 0, sizeof(*self->items));
+            // Clear "freed" elements at the end of list
+            mp_seq_clear(self->items, self->len + len_adj, self->len, sizeof(*self->items));
+            self->len += len_adj;
+            return mp_const_none;
+        }
+#endif
+        // del l[i] is implemented as pop(i), discarding the result.
+        mp_obj_t args[2] = {self_in, index};
+        list_pop(2, args);
+        return mp_const_none;
+    } else if (value == MP_OBJ_SENTINEL) {
+        // load
+        mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in);
+#if MICROPY_PY_BUILTINS_SLICE
+        if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
+            mp_bound_slice_t slice;
+            if (!mp_seq_get_fast_slice_indexes(self->len, index, &slice)) {
+                // Non-unit step: fall back to the generic slice extractor.
+                return mp_seq_extract_slice(self->len, self->items, &slice);
+            }
+            // Unit step: copy the range directly into a new list.
+            mp_obj_list_t *res = list_new(slice.stop - slice.start);
+            mp_seq_copy(res->items, self->items + slice.start, res->len, mp_obj_t);
+            return MP_OBJ_FROM_PTR(res);
+        }
+#endif
+        mp_uint_t index_val = mp_get_index(self->base.type, self->len, index, false);
+        return self->items[index_val];
+    } else {
+#if MICROPY_PY_BUILTINS_SLICE
+        if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
+            // l[a:b] = value — value must itself be a list here.
+            mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in);
+            assert(MP_OBJ_IS_TYPE(value, &mp_type_list));
+            mp_obj_list_t *slice = MP_OBJ_TO_PTR(value);
+            mp_bound_slice_t slice_out;
+            if (!mp_seq_get_fast_slice_indexes(self->len, index, &slice_out)) {
+                assert(0);
+            }
+            // len_adj > 0 means the list grows, <= 0 means it shrinks.
+            mp_int_t len_adj = slice->len - (slice_out.stop - slice_out.start);
+            //printf("Len adj: %d\n", len_adj);
+            if (len_adj > 0) {
+                if (self->len + len_adj > self->alloc) {
+                    // TODO: Might optimize memory copies here by checking if block can
+                    // be grown inplace or not
+                    self->items = m_renew(mp_obj_t, self->items, self->alloc, self->len + len_adj);
+                    self->alloc = self->len + len_adj;
+                }
+                mp_seq_replace_slice_grow_inplace(self->items, self->len,
+                    slice_out.start, slice_out.stop, slice->items, slice->len, len_adj, sizeof(*self->items));
+            } else {
+                mp_seq_replace_slice_no_grow(self->items, self->len,
+                    slice_out.start, slice_out.stop, slice->items, slice->len, sizeof(*self->items));
+                // Clear "freed" elements at the end of list
+                mp_seq_clear(self->items, self->len + len_adj, self->len, sizeof(*self->items));
+                // TODO: apply allocation policy re: alloc_size
+            }
+            self->len += len_adj;
+            return mp_const_none;
+        }
+#endif
+        mp_obj_list_store(self_in, index, value);
+        return mp_const_none;
+    }
+}
+
+STATIC mp_obj_t list_getiter(mp_obj_t o_in) {
+    // Return a new iterator positioned at the start of the list.
+    return mp_obj_new_list_iterator(o_in, 0);
+}
+
+mp_obj_t mp_obj_list_append(mp_obj_t self_in, mp_obj_t arg) {
+    // Append one item, doubling the allocation when full.
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_list));
+    mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in);
+    if (self->len >= self->alloc) {
+        self->items = m_renew(mp_obj_t, self->items, self->alloc, self->alloc * 2);
+        self->alloc *= 2;
+        // Null out the new slots beyond len+1; slot `len` itself is
+        // written immediately below.
+        mp_seq_clear(self->items, self->len + 1, self->alloc, sizeof(*self->items));
+    }
+    self->items[self->len++] = arg;
+    return mp_const_none; // return None, as per CPython
+}
+
+STATIC mp_obj_t list_extend(mp_obj_t self_in, mp_obj_t arg_in) {
+    // list.extend(): fast memcpy path for a list argument, generic
+    // iteration for everything else.
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_list));
+    if (!MP_OBJ_IS_TYPE(arg_in, &mp_type_list)) {
+        list_extend_from_iter(self_in, arg_in);
+        return mp_const_none; // return None, as per CPython
+    }
+
+    mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_obj_list_t *other = MP_OBJ_TO_PTR(arg_in);
+
+    if (self->len + other->len > self->alloc) {
+        // TODO: use alloc policy for "4"
+        self->items = m_renew(mp_obj_t, self->items, self->alloc, self->len + other->len + 4);
+        self->alloc = self->len + other->len + 4;
+        mp_seq_clear(self->items, self->len + other->len, self->alloc, sizeof(*self->items));
+    }
+
+    memcpy(self->items + self->len, other->items, sizeof(mp_obj_t) * other->len);
+    self->len += other->len;
+    return mp_const_none; // return None, as per CPython
+}
+
+STATIC mp_obj_t list_pop(size_t n_args, const mp_obj_t *args) {
+    // list.pop([index]): remove and return the item at index (default -1).
+    assert(1 <= n_args && n_args <= 2);
+    assert(MP_OBJ_IS_TYPE(args[0], &mp_type_list));
+    mp_obj_list_t *self = MP_OBJ_TO_PTR(args[0]);
+    if (self->len == 0) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_IndexError, "pop from empty list"));
+    }
+    mp_uint_t index = mp_get_index(self->base.type, self->len, n_args == 1 ? MP_OBJ_NEW_SMALL_INT(-1) : args[1], false);
+    mp_obj_t ret = self->items[index];
+    self->len -= 1;
+    // Close the gap left by the removed item.
+    memmove(self->items + index, self->items + index + 1, (self->len - index) * sizeof(mp_obj_t));
+    // Clear stale pointer from slot which just got freed to prevent GC issues
+    self->items[self->len] = MP_OBJ_NULL;
+    // Shrink the allocation when the list is less than half full, but
+    // never below LIST_MIN_ALLOC.
+    if (self->alloc > LIST_MIN_ALLOC && self->alloc > 2 * self->len) {
+        self->items = m_renew(mp_obj_t, self->items, self->alloc, self->alloc/2);
+        self->alloc /= 2;
+    }
+    return ret;
+}
+
+// In-place quicksort over the inclusive range [head, tail].  `key_fn`
+// (or MP_OBJ_NULL for identity) maps items to sort keys; the result of each
+// MP_BINARY_OP_LESS is compared against `binop_less_result`, which is
+// mp_const_true for ascending order and mp_const_false for descending
+// (see mp_obj_list_sort below).  Uses Hoare partitioning with the last
+// element's key as pivot.
+STATIC void mp_quicksort(mp_obj_t *head, mp_obj_t *tail, mp_obj_t key_fn, mp_obj_t binop_less_result) {
+    MP_STACK_CHECK();
+    while (head < tail) {
+        mp_obj_t *h = head - 1;
+        mp_obj_t *t = tail;
+        mp_obj_t v = key_fn == MP_OBJ_NULL ? tail[0] : mp_call_function_1(key_fn, tail[0]); // get pivot using key_fn
+        for (;;) {
+            // Scan inward from both ends until a misplaced pair is found.
+            do ++h; while (h < t && mp_binary_op(MP_BINARY_OP_LESS, key_fn == MP_OBJ_NULL ? h[0] : mp_call_function_1(key_fn, h[0]), v) == binop_less_result);
+            do --t; while (h < t && mp_binary_op(MP_BINARY_OP_LESS, v, key_fn == MP_OBJ_NULL ? t[0] : mp_call_function_1(key_fn, t[0])) == binop_less_result);
+            if (h >= t) break;
+            mp_obj_t x = h[0];
+            h[0] = t[0];
+            t[0] = x;
+        }
+        // Move the pivot element into its final position.
+        mp_obj_t x = h[0];
+        h[0] = tail[0];
+        tail[0] = x;
+        // do the smaller recursive call first, to keep stack within O(log(N))
+        if (t - head < tail - h - 1) {
+            mp_quicksort(head, t, key_fn, binop_less_result);
+            head = h + 1;
+        } else {
+            mp_quicksort(h + 1, tail, key_fn, binop_less_result);
+            tail = t;
+        }
+    }
+}
+
+// TODO Python defines sort to be stable but ours is not
+// list.sort(*, key=None, reverse=False)
+mp_obj_t mp_obj_list_sort(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_key, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_rom_obj = MP_ROM_PTR(&mp_const_none_obj)} },
+        { MP_QSTR_reverse, MP_ARG_KW_ONLY | MP_ARG_BOOL, {.u_bool = false} },
+    };
+
+    // parse args
+    // NOTE: the struct's field order must match allowed_args[] — it is
+    // treated as an mp_arg_val_t array by the cast below.
+    struct {
+        mp_arg_val_t key, reverse;
+    } args;
+    mp_arg_parse_all(n_args - 1, pos_args + 1, kw_args,
+        MP_ARRAY_SIZE(allowed_args), allowed_args, (mp_arg_val_t*)&args);
+
+    assert(MP_OBJ_IS_TYPE(pos_args[0], &mp_type_list));
+    mp_obj_list_t *self = MP_OBJ_TO_PTR(pos_args[0]);
+
+    if (self->len > 1) {
+        // Sort items[0 .. len-1] inclusive; reverse flips which result of
+        // the "less" comparison mp_quicksort treats as in-order.
+        mp_quicksort(self->items, self->items + self->len - 1,
+            args.key.u_obj == mp_const_none ? MP_OBJ_NULL : args.key.u_obj,
+            args.reverse.u_bool ? mp_const_false : mp_const_true);
+    }
+
+    return mp_const_none;
+}
+
+STATIC mp_obj_t list_clear(mp_obj_t self_in) {
+    // list.clear(): drop all items and shrink storage to the minimum.
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_list));
+    mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in);
+    self->len = 0;
+    self->items = m_renew(mp_obj_t, self->items, self->alloc, LIST_MIN_ALLOC);
+    self->alloc = LIST_MIN_ALLOC;
+    // Null out every slot so no stale object pointers remain.
+    mp_seq_clear(self->items, 0, self->alloc, sizeof(*self->items));
+    return mp_const_none;
+}
+
+STATIC mp_obj_t list_copy(mp_obj_t self_in) {
+    // list.copy(): shallow copy — item object pointers are shared.
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_list));
+    mp_obj_list_t *src = MP_OBJ_TO_PTR(self_in);
+    return mp_obj_new_list(src->len, src->items);
+}
+
+STATIC mp_obj_t list_count(mp_obj_t self_in, mp_obj_t value) {
+    // list.count(value): delegate to the generic sequence helper.
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_list));
+    mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in);
+    return mp_seq_count_obj(self->items, self->len, value);
+}
+
+STATIC mp_obj_t list_index(size_t n_args, const mp_obj_t *args) {
+    // list.index(value[, start[, stop]]): delegate to the generic
+    // sequence search helper.
+    assert(2 <= n_args && n_args <= 4);
+    assert(MP_OBJ_IS_TYPE(args[0], &mp_type_list));
+    mp_obj_list_t *self = MP_OBJ_TO_PTR(args[0]);
+    return mp_seq_index_obj(self->items, self->len, n_args, args);
+}
+
+STATIC mp_obj_t list_insert(mp_obj_t self_in, mp_obj_t idx, mp_obj_t obj) {
+    // list.insert(idx, obj): CPython-style clamping — out-of-range indices
+    // insert at the nearest end rather than raising.
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_list));
+    mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in);
+    // insert has its own strange index logic
+    // NOTE(review): idx is read with MP_OBJ_SMALL_INT_VALUE without checking
+    // it is a small int — a big-int index would be misread; verify callers.
+    mp_int_t index = MP_OBJ_SMALL_INT_VALUE(idx);
+    if (index < 0) {
+        index += self->len;
+    }
+    if (index < 0) {
+        index = 0;
+    }
+    if ((mp_uint_t)index > self->len) {
+        index = self->len;
+    }
+
+    // Grow by one (appending a placeholder), then shift the tail right.
+    mp_obj_list_append(self_in, mp_const_none);
+
+    for (mp_int_t i = self->len-1; i > index; i--) {
+        self->items[i] = self->items[i-1];
+    }
+    self->items[index] = obj;
+
+    return mp_const_none;
+}
+
+mp_obj_t mp_obj_list_remove(mp_obj_t self_in, mp_obj_t value) {
+    // list.remove(value): locate the first occurrence via list_index,
+    // then pop that position.
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_list));
+    mp_obj_t args[] = {self_in, value};
+    args[1] = list_index(2, args);
+    list_pop(2, args);
+
+    return mp_const_none;
+}
+
+STATIC mp_obj_t list_reverse(mp_obj_t self_in) {
+    // list.reverse(): reverse in place by swapping the ends inward.
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_list));
+    mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in);
+
+    for (mp_int_t lo = 0, hi = (mp_int_t)self->len - 1; lo < hi; lo++, hi--) {
+        mp_obj_t tmp = self->items[lo];
+        self->items[lo] = self->items[hi];
+        self->items[hi] = tmp;
+    }
+
+    return mp_const_none;
+}
+
+// Function objects wrapping the C implementations for the method table.
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(list_append_obj, mp_obj_list_append);
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(list_extend_obj, list_extend);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(list_clear_obj, list_clear);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(list_copy_obj, list_copy);
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(list_count_obj, list_count);
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(list_index_obj, 2, 4, list_index);
+STATIC MP_DEFINE_CONST_FUN_OBJ_3(list_insert_obj, list_insert);
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(list_pop_obj, 1, 2, list_pop);
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(list_remove_obj, mp_obj_list_remove);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(list_reverse_obj, list_reverse);
+STATIC MP_DEFINE_CONST_FUN_OBJ_KW(list_sort_obj, 1, mp_obj_list_sort);
+
+// Method table for the list type (its locals dict).
+STATIC const mp_rom_map_elem_t list_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_append), MP_ROM_PTR(&list_append_obj) },
+    { MP_ROM_QSTR(MP_QSTR_clear), MP_ROM_PTR(&list_clear_obj) },
+    { MP_ROM_QSTR(MP_QSTR_copy), MP_ROM_PTR(&list_copy_obj) },
+    { MP_ROM_QSTR(MP_QSTR_count), MP_ROM_PTR(&list_count_obj) },
+    { MP_ROM_QSTR(MP_QSTR_extend), MP_ROM_PTR(&list_extend_obj) },
+    { MP_ROM_QSTR(MP_QSTR_index), MP_ROM_PTR(&list_index_obj) },
+    { MP_ROM_QSTR(MP_QSTR_insert), MP_ROM_PTR(&list_insert_obj) },
+    { MP_ROM_QSTR(MP_QSTR_pop), MP_ROM_PTR(&list_pop_obj) },
+    { MP_ROM_QSTR(MP_QSTR_remove), MP_ROM_PTR(&list_remove_obj) },
+    { MP_ROM_QSTR(MP_QSTR_reverse), MP_ROM_PTR(&list_reverse_obj) },
+    { MP_ROM_QSTR(MP_QSTR_sort), MP_ROM_PTR(&list_sort_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(list_locals_dict, list_locals_dict_table);
+
+// The list type object, wiring the protocol hooks to the handlers above.
+const mp_obj_type_t mp_type_list = {
+    { &mp_type_type },
+    .name = MP_QSTR_list,
+    .print = list_print,
+    .make_new = list_make_new,
+    .unary_op = list_unary_op,
+    .binary_op = list_binary_op,
+    .subscr = list_subscr,
+    .getiter = list_getiter,
+    .locals_dict = (mp_obj_dict_t*)&list_locals_dict,
+};
+
+void mp_obj_list_init(mp_obj_list_t *o, mp_uint_t n) {
+    // Initialise an already-allocated list object with n item slots.
+    // Allocation is at least LIST_MIN_ALLOC; slots n..alloc are nulled,
+    // while slots 0..n-1 are left for the caller to fill.
+    o->base.type = &mp_type_list;
+    o->alloc = n < LIST_MIN_ALLOC ? LIST_MIN_ALLOC : n;
+    o->len = n;
+    o->items = m_new(mp_obj_t, o->alloc);
+    mp_seq_clear(o->items, n, o->alloc, sizeof(*o->items));
+}
+
+STATIC mp_obj_list_t *list_new(mp_uint_t n) {
+    // Allocate and initialise a new list object with n item slots
+    // (the first n slots are uninitialised — see mp_obj_list_init).
+    mp_obj_list_t *o = m_new_obj(mp_obj_list_t);
+    mp_obj_list_init(o, n);
+    return o;
+}
+
+mp_obj_t mp_obj_new_list(mp_uint_t n, mp_obj_t *items) {
+    // Create a list of length n.  When `items` is non-NULL, copy them in;
+    // otherwise the first n slots are uninitialised and the caller must
+    // fill them.
+    mp_obj_list_t *o = list_new(n);
+    if (items != NULL) {
+        memcpy(o->items, items, n * sizeof(mp_obj_t));
+    }
+    return MP_OBJ_FROM_PTR(o);
+}
+
+void mp_obj_list_get(mp_obj_t self_in, mp_uint_t *len, mp_obj_t **items) {
+    // Expose the list's length and a direct pointer into its item storage
+    // (no copy is made).
+    mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in);
+    *len = self->len;
+    *items = self->items;
+}
+
+void mp_obj_list_set_len(mp_obj_t self_in, mp_uint_t len) {
+    // Directly overwrite the logical length without touching the storage.
+    // trust that the caller knows what it's doing
+    // TODO realloc if len got much smaller than alloc
+    mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in);
+    self->len = len;
+}
+
+void mp_obj_list_store(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+    // Store `value` at `index`; the index is normalised and bounds-checked
+    // by mp_get_index.
+    mp_obj_list_t *list = MP_OBJ_TO_PTR(self_in);
+    mp_uint_t pos = mp_get_index(list->base.type, list->len, index, false);
+    list->items[pos] = value;
+}
+
+/******************************************************************************/
+/* list iterator */
+
+// Iterator object over a list; works with the polymorphic-iterator type.
+typedef struct _mp_obj_list_it_t {
+    mp_obj_base_t base;
+    mp_fun_1_t iternext; // called by the iterator protocol to get next item
+    mp_obj_t list;       // the list being iterated
+    mp_uint_t cur;       // index of the next item to yield
+} mp_obj_list_it_t;
+
+STATIC mp_obj_t list_it_iternext(mp_obj_t self_in) {
+    // Yield the next item, or MP_OBJ_STOP_ITERATION when exhausted.
+    mp_obj_list_it_t *it = MP_OBJ_TO_PTR(self_in);
+    mp_obj_list_t *list = MP_OBJ_TO_PTR(it->list);
+    if (it->cur >= list->len) {
+        return MP_OBJ_STOP_ITERATION;
+    }
+    return list->items[it->cur++];
+}
+
+// STATIC added to match the forward declaration near the top of the file
+// (the function already had internal linkage because of that declaration;
+// this makes the specifiers consistent and silences compiler pedantry).
+STATIC mp_obj_t mp_obj_new_list_iterator(mp_obj_t list, mp_uint_t cur) {
+    // Allocate a polymorphic iterator over `list`, starting at index `cur`.
+    mp_obj_list_it_t *o = m_new_obj(mp_obj_list_it_t);
+    o->base.type = &mp_type_polymorph_iter;
+    o->iternext = list_it_iternext;
+    o->list = list;
+    o->cur = cur;
+    return MP_OBJ_FROM_PTR(o);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objlist.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,38 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_OBJLIST_H__
+#define __MICROPY_INCLUDED_PY_OBJLIST_H__
+
+#include "py/obj.h"
+
+// In-memory representation of a Python list: a growable array of objects.
+typedef struct _mp_obj_list_t {
+    mp_obj_base_t base;
+    mp_uint_t alloc;    // number of slots allocated in items
+    mp_uint_t len;      // number of slots currently in use (<= alloc)
+    mp_obj_t *items;    // element storage
+} mp_obj_list_t;
+
+#endif // __MICROPY_INCLUDED_PY_OBJLIST_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objmap.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,73 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/runtime.h"
+
+// Instance of the built-in map() object: a function plus one or more
+// iterators, evaluated lazily by map_iternext.
+typedef struct _mp_obj_map_t {
+    mp_obj_base_t base;
+    mp_uint_t n_iters;  // number of iterables passed to map()
+    mp_obj_t fun;       // function applied to each batch of items
+    mp_obj_t iters[];   // flexible array of the underlying iterators
+} mp_obj_map_t;
+
+// map(fun, iter1, ...): requires at least two arguments and no keywords;
+// stores the function and an iterator for each remaining argument.
+STATIC mp_obj_t map_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_arg_check_num(n_args, n_kw, 2, MP_OBJ_FUN_ARGS_MAX, false);
+    // variable-size allocation: n_args - 1 trailing iterator slots
+    mp_obj_map_t *o = m_new_obj_var(mp_obj_map_t, mp_obj_t, n_args - 1);
+    o->base.type = type;
+    o->n_iters = n_args - 1;
+    o->fun = args[0];
+    for (mp_uint_t i = 0; i < n_args - 1; i++) {
+        o->iters[i] = mp_getiter(args[i + 1]);
+    }
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Advance every underlying iterator by one item and apply the stored
+// function to the collected values; stop as soon as any iterator is
+// exhausted (mirrors CPython map semantics visible here).
+STATIC mp_obj_t map_iternext(mp_obj_t self_in) {
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_map));
+    mp_obj_map_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_obj_t *nextses = m_new(mp_obj_t, self->n_iters);
+
+    for (mp_uint_t i = 0; i < self->n_iters; i++) {
+        mp_obj_t next = mp_iternext(self->iters[i]);
+        if (next == MP_OBJ_STOP_ITERATION) {
+            // early exhaustion: free the temporary args eagerly
+            m_del(mp_obj_t, nextses, self->n_iters);
+            return MP_OBJ_STOP_ITERATION;
+        }
+        nextses[i] = next;
+    }
+    // NOTE(review): nextses is not freed on this path -- presumably it is
+    // reclaimed by the GC backing m_new; confirm against py/misc.h.
+    return mp_call_function_n_kw(self->fun, self->n_iters, 0, nextses);
+}
+
+// Type object for the built-in 'map'; map objects are their own iterators
+// (getiter = mp_identity) and iteration is driven by map_iternext.
+const mp_obj_type_t mp_type_map = {
+    { &mp_type_type },
+    .name = MP_QSTR_map,
+    .make_new = map_make_new,
+    .getiter = mp_identity,
+    .iternext = map_iternext,
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objmodule.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,246 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/mpstate.h"
+#include "py/nlr.h"
+#include "py/objmodule.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+
+// repr() of a module: "<module 'name'>", or "<module 'name' from 'file'>"
+// when __file__ is stored in the module's globals.
+STATIC void module_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_module_t *self = MP_OBJ_TO_PTR(self_in);
+
+#if MICROPY_PY___FILE__
+    // If we store __file__ to imported modules then try to lookup this
+    // symbol to give more information about the module.
+    mp_map_elem_t *elem = mp_map_lookup(&self->globals->map, MP_OBJ_NEW_QSTR(MP_QSTR___file__), MP_MAP_LOOKUP);
+    if (elem != NULL) {
+        mp_printf(print, "<module '%q' from '%s'>", self->name, mp_obj_str_get_str(elem->value));
+        return;
+    }
+#endif
+
+    mp_printf(print, "<module '%q'>", self->name);
+}
+
+// Attribute access on a module: dest[0]==MP_OBJ_NULL means load; otherwise
+// dest[1]==MP_OBJ_NULL means delete, else store dest[1].  Loads resolve
+// through the module's globals dict.  Stores into a fixed (ROM) globals map
+// are silently ignored, except for the special builtins-override case.
+STATIC void module_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    mp_obj_module_t *self = MP_OBJ_TO_PTR(self_in);
+    if (dest[0] == MP_OBJ_NULL) {
+        // load attribute
+        mp_map_elem_t *elem = mp_map_lookup(&self->globals->map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP);
+        if (elem != NULL) {
+            dest[0] = elem->value;
+        }
+    } else {
+        // delete/store attribute
+        mp_obj_dict_t *dict = self->globals;
+        if (dict->map.is_fixed) {
+            #if MICROPY_CAN_OVERRIDE_BUILTINS
+            if (dict == &mp_module_builtins_globals) {
+                // writes to the 'builtins' module are redirected into a
+                // lazily-created RAM override dict
+                if (MP_STATE_VM(mp_module_builtins_override_dict) == NULL) {
+                    MP_STATE_VM(mp_module_builtins_override_dict) = MP_OBJ_TO_PTR(mp_obj_new_dict(1));
+                }
+                dict = MP_STATE_VM(mp_module_builtins_override_dict);
+            } else
+            #endif
+            {
+                // can't delete or store to fixed map
+                return;
+            }
+        }
+        if (dest[1] == MP_OBJ_NULL) {
+            // delete attribute
+            mp_obj_dict_delete(MP_OBJ_FROM_PTR(dict), MP_OBJ_NEW_QSTR(attr));
+        } else {
+            // store attribute
+            // TODO CPython allows STORE_ATTR to a module, but is this the correct implementation?
+            mp_obj_dict_store(MP_OBJ_FROM_PTR(dict), MP_OBJ_NEW_QSTR(attr), dest[1]);
+        }
+        dest[0] = MP_OBJ_NULL; // indicate success
+    }
+}
+
+// Type object for modules; attribute access goes through module_attr above.
+const mp_obj_type_t mp_type_module = {
+    { &mp_type_type },
+    .name = MP_QSTR_module,
+    .print = module_print,
+    .attr = module_attr,
+};
+
+// Create (or return the already-loaded) module named module_name, register
+// it in the VM's loaded-modules dict, and seed its globals with __name__.
+mp_obj_t mp_obj_new_module(qstr module_name) {
+    mp_map_t *mp_loaded_modules_map = &MP_STATE_VM(mp_loaded_modules_dict).map;
+    mp_map_elem_t *el = mp_map_lookup(mp_loaded_modules_map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+    // We could error out if module already exists, but let C extensions
+    // add new members to existing modules.
+    if (el->value != MP_OBJ_NULL) {
+        return el->value;
+    }
+
+    // create new module object
+    mp_obj_module_t *o = m_new_obj(mp_obj_module_t);
+    o->base.type = &mp_type_module;
+    o->name = module_name;
+    o->globals = MP_OBJ_TO_PTR(mp_obj_new_dict(MICROPY_MODULE_DICT_SIZE));
+
+    // store __name__ entry in the module
+    mp_obj_dict_store(MP_OBJ_FROM_PTR(o->globals), MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(module_name));
+
+    // store the new module into the slot in the global dict holding all modules
+    el->value = MP_OBJ_FROM_PTR(o);
+
+    // return the new module
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Return the globals dict of a module object (asserts the type in debug builds).
+mp_obj_dict_t *mp_obj_module_get_globals(mp_obj_t self_in) {
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_module));
+    mp_obj_module_t *self = MP_OBJ_TO_PTR(self_in);
+    return self->globals;
+}
+
+/******************************************************************************/
+// Global module table and related functions
+
+// Table of all built-in modules, keyed by module name.  Optional entries are
+// compiled in only when the corresponding MICROPY_PY_* option is enabled;
+// ports can append their own via MICROPY_PORT_BUILTIN_MODULES.
+STATIC const mp_rom_map_elem_t mp_builtin_module_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___main__), MP_ROM_PTR(&mp_module___main__) },
+    { MP_ROM_QSTR(MP_QSTR_builtins), MP_ROM_PTR(&mp_module_builtins) },
+    { MP_ROM_QSTR(MP_QSTR_micropython), MP_ROM_PTR(&mp_module_micropython) },
+
+#if MICROPY_PY_ARRAY
+    { MP_ROM_QSTR(MP_QSTR_array), MP_ROM_PTR(&mp_module_array) },
+#endif
+#if MICROPY_PY_IO
+    { MP_ROM_QSTR(MP_QSTR__io), MP_ROM_PTR(&mp_module_io) },
+#endif
+#if MICROPY_PY_COLLECTIONS
+    { MP_ROM_QSTR(MP_QSTR__collections), MP_ROM_PTR(&mp_module_collections) },
+#endif
+#if MICROPY_PY_STRUCT
+    { MP_ROM_QSTR(MP_QSTR_ustruct), MP_ROM_PTR(&mp_module_ustruct) },
+#endif
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#if MICROPY_PY_MATH
+    { MP_ROM_QSTR(MP_QSTR_math), MP_ROM_PTR(&mp_module_math) },
+#endif
+#if MICROPY_PY_BUILTINS_COMPLEX && MICROPY_PY_CMATH
+    { MP_ROM_QSTR(MP_QSTR_cmath), MP_ROM_PTR(&mp_module_cmath) },
+#endif
+#endif
+#if MICROPY_PY_SYS
+    { MP_ROM_QSTR(MP_QSTR_sys), MP_ROM_PTR(&mp_module_sys) },
+#endif
+#if MICROPY_PY_GC && MICROPY_ENABLE_GC
+    { MP_ROM_QSTR(MP_QSTR_gc), MP_ROM_PTR(&mp_module_gc) },
+#endif
+
+    // extmod modules
+
+#if MICROPY_PY_UCTYPES
+    { MP_ROM_QSTR(MP_QSTR_uctypes), MP_ROM_PTR(&mp_module_uctypes) },
+#endif
+#if MICROPY_PY_UZLIB
+    { MP_ROM_QSTR(MP_QSTR_uzlib), MP_ROM_PTR(&mp_module_uzlib) },
+#endif
+#if MICROPY_PY_UJSON
+    { MP_ROM_QSTR(MP_QSTR_ujson), MP_ROM_PTR(&mp_module_ujson) },
+#endif
+#if MICROPY_PY_URE
+    { MP_ROM_QSTR(MP_QSTR_ure), MP_ROM_PTR(&mp_module_ure) },
+#endif
+#if MICROPY_PY_UHEAPQ
+    { MP_ROM_QSTR(MP_QSTR_uheapq), MP_ROM_PTR(&mp_module_uheapq) },
+#endif
+#if MICROPY_PY_UHASHLIB
+    { MP_ROM_QSTR(MP_QSTR_uhashlib), MP_ROM_PTR(&mp_module_uhashlib) },
+#endif
+#if MICROPY_PY_UBINASCII
+    { MP_ROM_QSTR(MP_QSTR_ubinascii), MP_ROM_PTR(&mp_module_ubinascii) },
+#endif
+#if MICROPY_PY_URANDOM
+    { MP_ROM_QSTR(MP_QSTR_urandom), MP_ROM_PTR(&mp_module_urandom) },
+#endif
+#if MICROPY_PY_USSL
+    { MP_ROM_QSTR(MP_QSTR_ussl), MP_ROM_PTR(&mp_module_ussl) },
+#endif
+#if MICROPY_PY_LWIP
+    { MP_ROM_QSTR(MP_QSTR_lwip), MP_ROM_PTR(&mp_module_lwip) },
+#endif
+#if MICROPY_PY_WEBSOCKET
+    { MP_ROM_QSTR(MP_QSTR_websocket), MP_ROM_PTR(&mp_module_websocket) },
+#endif
+
+    // extra builtin modules as defined by a port
+    MICROPY_PORT_BUILTIN_MODULES
+};
+
+// ROM map wrapping the table for qstr-keyed lookups in mp_module_get.
+STATIC MP_DEFINE_CONST_MAP(mp_builtin_module_map, mp_builtin_module_table);
+
+// Initialise the VM's loaded-modules dict (small initial capacity of 3).
+void mp_module_init(void) {
+    mp_obj_dict_init(&MP_STATE_VM(mp_loaded_modules_dict), 3);
+}
+
+// Tear down module state; currently a no-op (see commented-out deinit below).
+void mp_module_deinit(void) {
+    //mp_map_deinit(&MP_STATE_VM(mp_loaded_modules_map));
+}
+
+// Look up a module by name: first in the loaded-modules dict, then in the
+// builtin module table (optionally running its __init__ once and caching it).
+// returns MP_OBJ_NULL if not found
+mp_obj_t mp_module_get(qstr module_name) {
+    mp_map_t *mp_loaded_modules_map = &MP_STATE_VM(mp_loaded_modules_dict).map;
+    // lookup module
+    mp_map_elem_t *el = mp_map_lookup(mp_loaded_modules_map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP);
+
+    if (el == NULL) {
+        // module not found, look for builtin module names
+        el = mp_map_lookup((mp_map_t*)&mp_builtin_module_map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP);
+        if (el == NULL) {
+            return MP_OBJ_NULL;
+        }
+
+        if (MICROPY_MODULE_BUILTIN_INIT) {
+            // look for __init__ and call it if it exists
+            mp_obj_t dest[2];
+            mp_load_method_maybe(el->value, MP_QSTR___init__, dest);
+            if (dest[0] != MP_OBJ_NULL) {
+                mp_call_method_n_kw(0, 0, dest);
+                // register module so __init__ is not called again
+                mp_module_register(module_name, el->value);
+            }
+        }
+    }
+
+    // module found, return it
+    return el->value;
+}
+
+// Insert (or overwrite) an entry in the loaded-modules dict under 'qst'.
+void mp_module_register(qstr qst, mp_obj_t module) {
+    mp_map_t *mp_loaded_modules_map = &MP_STATE_VM(mp_loaded_modules_dict).map;
+    mp_map_lookup(mp_loaded_modules_map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = module;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objmodule.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,36 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_OBJMODULE_H__
+#define __MICROPY_INCLUDED_PY_OBJMODULE_H__
+
+#include "py/obj.h"
+
+void mp_module_init(void);
+void mp_module_deinit(void);
+mp_obj_t mp_module_get(qstr module_name);
+void mp_module_register(qstr qstr, mp_obj_t module);
+
+#endif // __MICROPY_INCLUDED_PY_OBJMODULE_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objnamedtuple.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,176 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+
+#include "py/nlr.h"
+#include "py/objtuple.h"
+#include "py/runtime.h"
+#include "py/objstr.h"
+
+#if MICROPY_PY_COLLECTIONS
+
+// Type object for a class created by namedtuple(); the field names follow
+// the standard type structure as a flexible array.
+typedef struct _mp_obj_namedtuple_type_t {
+    mp_obj_type_t base;
+    mp_uint_t n_fields;
+    qstr fields[];      // n_fields field names, in declaration order
+} mp_obj_namedtuple_type_t;
+
+// A namedtuple instance is just a tuple whose base.type is the namedtuple type.
+typedef struct _mp_obj_namedtuple_t {
+    mp_obj_tuple_t tuple;
+} mp_obj_namedtuple_t;
+
+// Return the index of the field called 'name', or -1 if there is no such
+// field.  Declared to return a signed int: the previous mp_uint_t return
+// type produced a huge unsigned value for the -1 sentinel and relied on an
+// implementation-defined unsigned->int conversion at the call sites (which
+// all store the result in an int) to recover -1.
+STATIC int namedtuple_find_field(const mp_obj_namedtuple_type_t *type, qstr name) {
+    for (mp_uint_t i = 0; i < type->n_fields; i++) {
+        if (type->fields[i] == name) {
+            return i;
+        }
+    }
+    return -1;
+}
+
+// repr() of a namedtuple: the type name followed by the attr-tuple helper's
+// "(field=value, ...)" rendering of the fields.
+STATIC void namedtuple_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_namedtuple_t *o = MP_OBJ_TO_PTR(o_in);
+    mp_printf(print, "%q", o->tuple.base.type->name);
+    const qstr *fields = ((mp_obj_namedtuple_type_t*)o->tuple.base.type)->fields;
+    mp_obj_attrtuple_print_helper(print, fields, &o->tuple);
+}
+
+// Attribute access on a namedtuple instance: loads resolve a field name to
+// its tuple slot; stores/deletes always raise AttributeError (immutable).
+STATIC void namedtuple_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    if (dest[0] == MP_OBJ_NULL) {
+        // load attribute
+        mp_obj_namedtuple_t *self = MP_OBJ_TO_PTR(self_in);
+        int id = namedtuple_find_field((mp_obj_namedtuple_type_t*)self->tuple.base.type, attr);
+        if (id == -1) {
+            // unknown field: leave dest untouched so generic lookup continues
+            return;
+        }
+        dest[0] = self->tuple.items[id];
+    } else {
+        // delete/store attribute
+        // provide more detailed error message than we'd get by just returning
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_AttributeError, "can't set attribute"));
+    }
+}
+
+// Construct a namedtuple instance.  Positional args fill fields in order;
+// keyword args (laid out as name/value pairs in args[n_args..]) fill the
+// remaining fields by name.  Raises TypeError on arity mismatch, unknown
+// keyword, or duplicate assignment.
+STATIC mp_obj_t namedtuple_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    const mp_obj_namedtuple_type_t *type = (const mp_obj_namedtuple_type_t*)type_in;
+    size_t num_fields = type->n_fields;
+    if (n_args + n_kw != num_fields) {
+        if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+            mp_arg_error_terse_mismatch();
+        } else if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NORMAL) {
+            nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+                "function takes %d positional arguments but %d were given",
+                num_fields, n_args + n_kw));
+        } else if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED) {
+            nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+                "%q() takes %d positional arguments but %d were given",
+                type->base.name, num_fields, n_args + n_kw));
+        }
+    }
+
+    mp_obj_t *arg_objects;
+    if (n_args == num_fields) {
+        // all fields given positionally: use the caller's array directly
+        arg_objects = (mp_obj_t*)args;
+    } else {
+        // mix of positional and keyword args: assemble the full field array
+        // on the stack (size is bounded by the number of declared fields)
+        size_t alloc_size = sizeof(mp_obj_t) * num_fields;
+        arg_objects = alloca(alloc_size);
+        memset(arg_objects, 0, alloc_size);
+
+        for (mp_uint_t i = 0; i < n_args; i++) {
+            arg_objects[i] = args[i];
+        }
+
+        // keyword args are stored as (name, value) pairs after the positionals
+        for (mp_uint_t i = n_args; i < n_args + 2 * n_kw; i += 2) {
+            qstr kw = mp_obj_str_get_qstr(args[i]);
+            int id = namedtuple_find_field(type, kw);
+            if (id == -1) {
+                if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+                    mp_arg_error_terse_mismatch();
+                } else {
+                    nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+                        "unexpected keyword argument '%q'", kw));
+                }
+            }
+            // MP_OBJ_NULL marks a still-unfilled slot (from the memset above)
+            if (arg_objects[id] != MP_OBJ_NULL) {
+                if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+                    mp_arg_error_terse_mismatch();
+                } else {
+                    nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+                        "function got multiple values for argument '%q'", kw));
+                }
+            }
+            arg_objects[id] = args[i + 1];
+        }
+    }
+
+    // build the tuple then retag it with the namedtuple type
+    mp_obj_tuple_t *tuple = MP_OBJ_TO_PTR(mp_obj_new_tuple(num_fields, arg_objects));
+    tuple->base.type = type_in;
+    return MP_OBJ_FROM_PTR(tuple);
+}
+
+// Base-class tuple shared by every namedtuple type: just (tuple,).
+STATIC const mp_rom_obj_tuple_t namedtuple_base_tuple = {{&mp_type_tuple}, 1, {MP_ROM_PTR(&mp_type_tuple)}};
+
+// Build a new namedtuple type object: fill in the type slots (delegating
+// tuple behavior to the mp_obj_tuple_* helpers) and intern the field names.
+STATIC mp_obj_t mp_obj_new_namedtuple_type(qstr name, mp_uint_t n_fields, mp_obj_t *fields) {
+    mp_obj_namedtuple_type_t *o = m_new_obj_var(mp_obj_namedtuple_type_t, qstr, n_fields);
+    // zero the base type struct so unset slots are NULL
+    memset(&o->base, 0, sizeof(o->base));
+    o->base.base.type = &mp_type_type;
+    o->base.name = name;
+    o->base.print = namedtuple_print;
+    o->base.make_new = namedtuple_make_new;
+    o->base.unary_op = mp_obj_tuple_unary_op;
+    o->base.binary_op = mp_obj_tuple_binary_op;
+    o->base.attr = namedtuple_attr;
+    o->base.subscr = mp_obj_tuple_subscr;
+    o->base.getiter = mp_obj_tuple_getiter;
+    o->base.bases_tuple = (mp_obj_tuple_t*)(mp_rom_obj_tuple_t*)&namedtuple_base_tuple;
+    o->n_fields = n_fields;
+    for (mp_uint_t i = 0; i < n_fields; i++) {
+        o->fields[i] = mp_obj_str_get_qstr(fields[i]);
+    }
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Python-level namedtuple(name, fields) entry point.  With CPYTHON_COMPAT a
+// space-separated string of field names is split into a list first; a list
+// of field names is then required.
+STATIC mp_obj_t new_namedtuple_type(mp_obj_t name_in, mp_obj_t fields_in) {
+    qstr name = mp_obj_str_get_qstr(name_in);
+    mp_uint_t n_fields;
+    mp_obj_t *fields;
+    #if MICROPY_CPYTHON_COMPAT
+    if (MP_OBJ_IS_STR(fields_in)) {
+        fields_in = mp_obj_str_split(1, &fields_in);
+    }
+    #endif
+    if (!MP_OBJ_IS_TYPE(fields_in, &mp_type_list)) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "list required"));
+    }
+    mp_obj_list_get(fields_in, &n_fields, &fields);
+    return mp_obj_new_namedtuple_type(name, n_fields, fields);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_namedtuple_obj, new_namedtuple_type);
+
+#endif // MICROPY_PY_COLLECTIONS
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objnone.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,62 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "py/nlr.h"
+#include "py/obj.h"
+#include "py/runtime0.h"
+
+// The None singleton carries no data beyond its type pointer.
+typedef struct _mp_obj_none_t {
+    mp_obj_base_t base;
+} mp_obj_none_t;
+
+// Print None as "None", or "null" when rendering JSON output.
+STATIC void none_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    (void)self_in;
+    if (MICROPY_PY_UJSON && kind == PRINT_JSON) {
+        mp_print_str(print, "null");
+    } else {
+        mp_print_str(print, "None");
+    }
+}
+
+// Unary operators for None: always falsy; hashed by its address (it is a
+// singleton, so the address is a stable hash).  Note: the previous
+// '(void)o_in;' unused-parameter suppression was misleading, since o_in is
+// actually used in the MP_UNARY_OP_HASH case -- it has been removed.
+STATIC mp_obj_t none_unary_op(mp_uint_t op, mp_obj_t o_in) {
+    switch (op) {
+        case MP_UNARY_OP_BOOL: return mp_const_false;
+        case MP_UNARY_OP_HASH: return MP_OBJ_NEW_SMALL_INT((mp_uint_t)o_in);
+        default: return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// Type object for NoneType and the single shared None instance.
+const mp_obj_type_t mp_type_NoneType = {
+    { &mp_type_type },
+    .name = MP_QSTR_NoneType,
+    .print = none_print,
+    .unary_op = none_unary_op,
+};
+
+const mp_obj_none_t mp_const_none_obj = {{&mp_type_NoneType}};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objobject.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,82 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "py/objtype.h"
+#include "py/runtime.h"
+
+// A bare 'object' instance holds nothing but its type pointer.
+typedef struct _mp_obj_object_t {
+    mp_obj_base_t base;
+} mp_obj_object_t;
+
+// object(): accepts no arguments and returns a fresh empty instance.
+STATIC mp_obj_t object_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)args;
+    mp_arg_check_num(n_args, n_kw, 0, 0, false);
+    mp_obj_object_t *o = m_new_obj(mp_obj_object_t);
+    o->base.type = type;
+    return MP_OBJ_FROM_PTR(o);
+}
+
+#if MICROPY_CPYTHON_COMPAT
+// object.__init__: a no-op, provided for CPython compatibility.
+STATIC mp_obj_t object___init__(mp_obj_t self) {
+    (void)self;
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(object___init___obj, object___init__);
+
+// object.__new__(cls): allocate an instance of a user-defined class without
+// calling its __init__ (MP_OBJ_SENTINEL tells instance_make_new to skip it).
+STATIC mp_obj_t object___new__(mp_obj_t cls) {
+    if (!MP_OBJ_IS_TYPE(cls, &mp_type_type) || !mp_obj_is_instance_type((mp_obj_type_t*)MP_OBJ_TO_PTR(cls))) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+            "__new__ arg must be a user-type"));
+    }
+    mp_obj_t o = MP_OBJ_SENTINEL;
+    mp_obj_t res = mp_obj_instance_make_new(MP_OBJ_TO_PTR(cls), 1, 0, &o);
+    return res;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(object___new___fun_obj, object___new__);
+STATIC MP_DEFINE_CONST_STATICMETHOD_OBJ(object___new___obj, MP_ROM_PTR(&object___new___fun_obj));
+
+// Method table for the base 'object' type.  This table is only compiled
+// inside the enclosing '#if MICROPY_CPYTHON_COMPAT' region, so the previous
+// per-entry MICROPY_CPYTHON_COMPAT guards were always true and have been
+// removed as redundant.
+STATIC const mp_rom_map_elem_t object_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___init__), MP_ROM_PTR(&object___init___obj) },
+    { MP_ROM_QSTR(MP_QSTR___new__), MP_ROM_PTR(&object___new___obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(object_locals_dict, object_locals_dict_table);
+#endif
+
+// Type object for the base class 'object'; its locals dict (with __init__
+// and __new__) exists only when CPython compatibility is enabled.
+const mp_obj_type_t mp_type_object = {
+    { &mp_type_type },
+    .name = MP_QSTR_object,
+    .make_new = object_make_new,
+    #if MICROPY_CPYTHON_COMPAT
+    .locals_dict = (mp_obj_dict_t*)&object_locals_dict,
+    #endif
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objpolyiter.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,54 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "py/nlr.h"
+#include "py/runtime.h"
+
+// This is a universal iterator type which calls the "iternext" method stored
+// in a particular object instance. (So, each instance of this type can have
+// its own iteration behavior.) Having this type saves having to define type
+// objects for various internal iterator objects.
+
+// Any instance should have these 2 fields at the beginning
+// Any instance should have these 2 fields at the beginning
+typedef struct _mp_obj_polymorph_iter_t {
+    mp_obj_base_t base;
+    mp_fun_1_t iternext;    // per-instance iteration function
+} mp_obj_polymorph_iter_t;
+
+// Generic iternext: forwards to the function pointer stored in the instance.
+STATIC mp_obj_t polymorph_it_iternext(mp_obj_t self_in) {
+    mp_obj_polymorph_iter_t *self = MP_OBJ_TO_PTR(self_in);
+    // Redirect call to object instance's iternext method
+    return self->iternext(self_in);
+}
+
+// Shared type object for all polymorphic iterators; instances are their own
+// iterators (getiter = mp_identity).
+const mp_obj_type_t mp_type_polymorph_iter = {
+    { &mp_type_type },
+    .name = MP_QSTR_iterator,
+    .getiter = mp_identity,
+    .iternext = polymorph_it_iternext,
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objproperty.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,108 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_PROPERTY
+
+// A property bundles its accessor functions into one proxy array.
+typedef struct _mp_obj_property_t {
+    mp_obj_base_t base;
+    mp_obj_t proxy[3]; // getter, setter, deleter
+} mp_obj_property_t;
+
+// property(fget=None, fset=None, fdel=None, doc=None): the first three args
+// are positional-only (empty qstr names); doc is accepted but discarded.
+STATIC mp_obj_t property_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    enum { ARG_fget, ARG_fset, ARG_fdel, ARG_doc };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_, MP_ARG_OBJ, {.u_rom_obj = MP_ROM_PTR(&mp_const_none_obj)} },
+        { MP_QSTR_, MP_ARG_OBJ, {.u_rom_obj = MP_ROM_PTR(&mp_const_none_obj)} },
+        { MP_QSTR_, MP_ARG_OBJ, {.u_rom_obj = MP_ROM_PTR(&mp_const_none_obj)} },
+        { MP_QSTR_doc, MP_ARG_OBJ, {.u_rom_obj = MP_ROM_PTR(&mp_const_none_obj)} },
+    };
+    mp_arg_val_t vals[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all_kw_array(n_args, n_kw, args, MP_ARRAY_SIZE(allowed_args), allowed_args, vals);
+
+    mp_obj_property_t *o = m_new_obj(mp_obj_property_t);
+    o->base.type = type;
+    o->proxy[0] = vals[ARG_fget].u_obj;
+    o->proxy[1] = vals[ARG_fset].u_obj;
+    o->proxy[2] = vals[ARG_fdel].u_obj;
+    // vals[ARG_doc] is silently discarded
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// property.getter(f): return a copy of the property with its getter replaced.
+STATIC mp_obj_t property_getter(mp_obj_t self_in, mp_obj_t getter) {
+    mp_obj_property_t *p2 = m_new_obj(mp_obj_property_t);
+    *p2 = *(mp_obj_property_t*)MP_OBJ_TO_PTR(self_in);
+    p2->proxy[0] = getter;
+    return MP_OBJ_FROM_PTR(p2);
+}
+
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(property_getter_obj, property_getter);
+
+// property.setter(f): return a copy of the property with its setter replaced.
+STATIC mp_obj_t property_setter(mp_obj_t self_in, mp_obj_t setter) {
+    mp_obj_property_t *p2 = m_new_obj(mp_obj_property_t);
+    *p2 = *(mp_obj_property_t*)MP_OBJ_TO_PTR(self_in);
+    p2->proxy[1] = setter;
+    return MP_OBJ_FROM_PTR(p2);
+}
+
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(property_setter_obj, property_setter);
+
+// property.deleter(f): return a copy of the property with its deleter replaced.
+STATIC mp_obj_t property_deleter(mp_obj_t self_in, mp_obj_t deleter) {
+    mp_obj_property_t *p2 = m_new_obj(mp_obj_property_t);
+    *p2 = *(mp_obj_property_t*)MP_OBJ_TO_PTR(self_in);
+    p2->proxy[2] = deleter;
+    return MP_OBJ_FROM_PTR(p2);
+}
+
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(property_deleter_obj, property_deleter);
+
+// Methods exposed on property objects.
+STATIC const mp_rom_map_elem_t property_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_getter), MP_ROM_PTR(&property_getter_obj) },
+    { MP_ROM_QSTR(MP_QSTR_setter), MP_ROM_PTR(&property_setter_obj) },
+    { MP_ROM_QSTR(MP_QSTR_deleter), MP_ROM_PTR(&property_deleter_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(property_locals_dict, property_locals_dict_table);
+
+// Type object for the built-in 'property'.
+const mp_obj_type_t mp_type_property = {
+    { &mp_type_type },
+    .name = MP_QSTR_property,
+    .make_new = property_make_new,
+    .locals_dict = (mp_obj_dict_t*)&property_locals_dict,
+};
+
+// Return the property's internal [getter, setter, deleter] array for use by
+// the attribute-access machinery.
+const mp_obj_t *mp_obj_property_get(mp_obj_t self_in) {
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_property));
+    mp_obj_property_t *self = MP_OBJ_TO_PTR(self_in);
+    return self->proxy;
+}
+
+#endif // MICROPY_PY_BUILTINS_PROPERTY
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objrange.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,198 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "py/nlr.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+
+/******************************************************************************/
+/* range iterator */
+
// State of an in-progress iteration over a range.
typedef struct _mp_obj_range_it_t {
    mp_obj_base_t base;
    // TODO make these values generic objects or something
    mp_int_t cur;  // next value to yield
    mp_int_t stop; // exclusive bound
    mp_int_t step; // increment; its sign selects the stop comparison (see range_it_iternext)
} mp_obj_range_it_t;
+
+STATIC mp_obj_t range_it_iternext(mp_obj_t o_in) {
+ mp_obj_range_it_t *o = MP_OBJ_TO_PTR(o_in);
+ if ((o->step > 0 && o->cur < o->stop) || (o->step < 0 && o->cur > o->stop)) {
+ mp_obj_t o_out = MP_OBJ_NEW_SMALL_INT(o->cur);
+ o->cur += o->step;
+ return o_out;
+ } else {
+ return MP_OBJ_STOP_ITERATION;
+ }
+}
+
// Type object for the range iterator; identity getiter means the iterator
// can itself be passed where an iterable is expected.
STATIC const mp_obj_type_t range_it_type = {
    { &mp_type_type },
    .name = MP_QSTR_iterator,
    .getiter = mp_identity,
    .iternext = range_it_iternext,
};
+
+STATIC mp_obj_t mp_obj_new_range_iterator(mp_int_t cur, mp_int_t stop, mp_int_t step) {
+ mp_obj_range_it_t *o = m_new_obj(mp_obj_range_it_t);
+ o->base.type = &range_it_type;
+ o->cur = cur;
+ o->stop = stop;
+ o->step = step;
+ return MP_OBJ_FROM_PTR(o);
+}
+
+/******************************************************************************/
+/* range */
+
// A range object: immutable start/stop/step triple (small-int range only).
typedef struct _mp_obj_range_t {
    mp_obj_base_t base;
    // TODO make these values generic objects or something
    mp_int_t start;
    mp_int_t stop; // exclusive
    mp_int_t step;
} mp_obj_range_t;
+
+STATIC void range_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_range_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_printf(print, "range(" INT_FMT ", " INT_FMT "", self->start, self->stop);
+ if (self->step == 1) {
+ mp_print_str(print, ")");
+ } else {
+ mp_printf(print, ", " INT_FMT ")", self->step);
+ }
+}
+
+STATIC mp_obj_t range_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ mp_arg_check_num(n_args, n_kw, 1, 3, false);
+
+ mp_obj_range_t *o = m_new_obj(mp_obj_range_t);
+ o->base.type = type;
+ o->start = 0;
+ o->step = 1;
+
+ if (n_args == 1) {
+ o->stop = mp_obj_get_int(args[0]);
+ } else {
+ o->start = mp_obj_get_int(args[0]);
+ o->stop = mp_obj_get_int(args[1]);
+ if (n_args == 3) {
+ // TODO check step is non-zero
+ o->step = mp_obj_get_int(args[2]);
+ }
+ }
+
+ return MP_OBJ_FROM_PTR(o);
+}
+
// Number of elements the range yields: ceil((stop - start) / step) clamped
// at zero, computed with pure integer arithmetic that is correct for both
// positive and negative step.  The +step then -/+1 adjustment turns the
// truncating division into a ceiling division in the step's direction.
// NOTE(review): step==0 divides by zero here — confirm the constructor
// rejects a zero step before this can be reached.
STATIC mp_int_t range_len(mp_obj_range_t *self) {
    // When computing length, need to take into account step!=1 and step<0.
    mp_int_t len = self->stop - self->start + self->step;
    if (self->step > 0) {
        len -= 1;
    } else {
        len += 1;
    }
    len = len / self->step;
    if (len < 0) {
        len = 0;
    }
    return len;
}
+
+STATIC mp_obj_t range_unary_op(mp_uint_t op, mp_obj_t self_in) {
+ mp_obj_range_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_int_t len = range_len(self);
+ switch (op) {
+ case MP_UNARY_OP_BOOL: return mp_obj_new_bool(len > 0);
+ case MP_UNARY_OP_LEN: return MP_OBJ_NEW_SMALL_INT(len);
+ default: return MP_OBJ_NULL; // op not supported
+ }
+}
+
// Subscript a range (load only; store/delete return "not supported").
// An integer index yields the corresponding element; with slice support
// enabled, a slice yields a new range whose start/stop/step are composed
// from the slice indices and self's own step.
STATIC mp_obj_t range_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
    if (value == MP_OBJ_SENTINEL) {
        // load
        mp_obj_range_t *self = MP_OBJ_TO_PTR(self_in);
        mp_int_t len = range_len(self);
#if MICROPY_PY_BUILTINS_SLICE
        if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
            mp_bound_slice_t slice;
            mp_seq_get_fast_slice_indexes(len, index, &slice);
            // compose: element i of the result is self->start + (slice.start + i*slice.step)*self->step
            mp_obj_range_t *o = m_new_obj(mp_obj_range_t);
            o->base.type = &mp_type_range;
            o->start = self->start + slice.start * self->step;
            o->stop = self->start + slice.stop * self->step;
            o->step = slice.step * self->step;
            return MP_OBJ_FROM_PTR(o);
        }
#endif
        // mp_get_index bounds-checks and raises IndexError when out of range
        uint index_val = mp_get_index(self->base.type, len, index, false);
        return MP_OBJ_NEW_SMALL_INT(self->start + index_val * self->step);
    } else {
        return MP_OBJ_NULL; // op not supported
    }
}
+
+STATIC mp_obj_t range_getiter(mp_obj_t o_in) {
+ mp_obj_range_t *o = MP_OBJ_TO_PTR(o_in);
+ return mp_obj_new_range_iterator(o->start, o->stop, o->step);
+}
+
+
#if MICROPY_PY_BUILTINS_RANGE_ATTRS
// Load-only attribute access for range.start / range.stop / range.step.
STATIC void range_attr(mp_obj_t o_in, qstr attr, mp_obj_t *dest) {
    if (dest[0] != MP_OBJ_NULL) {
        // stores/deletes are not supported; leave dest untouched
        return;
    }
    mp_obj_range_t *self = MP_OBJ_TO_PTR(o_in);
    mp_int_t value;
    if (attr == MP_QSTR_start) {
        value = self->start;
    } else if (attr == MP_QSTR_stop) {
        value = self->stop;
    } else if (attr == MP_QSTR_step) {
        value = self->step;
    } else {
        return; // unknown attribute: signal "not found" by not writing dest
    }
    dest[0] = mp_obj_new_int(value);
}
#endif
+
// Type object for range.  subscr handles integer indexing (and slicing when
// MICROPY_PY_BUILTINS_SLICE is on); the attr handler for start/stop/step is
// compiled in only with MICROPY_PY_BUILTINS_RANGE_ATTRS.
const mp_obj_type_t mp_type_range = {
    { &mp_type_type },
    .name = MP_QSTR_range,
    .print = range_print,
    .make_new = range_make_new,
    .unary_op = range_unary_op,
    .subscr = range_subscr,
    .getiter = range_getiter,
#if MICROPY_PY_BUILTINS_RANGE_ATTRS
    .attr = range_attr,
#endif
};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objreversed.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,81 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_REVERSED
+
// A reversed() instance: walks an indexable sequence from the back by
// repeated subscription (used when the sequence has no __reversed__).
typedef struct _mp_obj_reversed_t {
    mp_obj_base_t base;
    mp_obj_t seq; // sequence object that we are reversing
    mp_uint_t cur_index; // current index, plus 1; 0=no more, 1=last one (index 0)
} mp_obj_reversed_t;
+
+STATIC mp_obj_t reversed_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ mp_arg_check_num(n_args, n_kw, 1, 1, false);
+
+ // check if __reversed__ exists, and if so delegate to it
+ mp_obj_t dest[2];
+ mp_load_method_maybe(args[0], MP_QSTR___reversed__, dest);
+ if (dest[0] != MP_OBJ_NULL) {
+ return mp_call_method_n_kw(0, 0, dest);
+ }
+
+ mp_obj_reversed_t *o = m_new_obj(mp_obj_reversed_t);
+ o->base.type = type;
+ o->seq = args[0];
+ o->cur_index = mp_obj_get_int(mp_obj_len(args[0])); // start at the end of the sequence
+
+ return MP_OBJ_FROM_PTR(o);
+}
+
// Yield the next element walking backwards; cur_index holds index-plus-one,
// so 0 means the front of the sequence has been passed.
STATIC mp_obj_t reversed_iternext(mp_obj_t self_in) {
    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_reversed));
    mp_obj_reversed_t *self = MP_OBJ_TO_PTR(self_in);

    // "raise" stop iteration if we are at the end (the start) of the sequence
    if (self->cur_index == 0) {
        return MP_OBJ_STOP_ITERATION;
    }

    // pre-decrement and index sequence
    self->cur_index -= 1;
    return mp_obj_subscr(self->seq, MP_OBJ_NEW_SMALL_INT(self->cur_index), MP_OBJ_SENTINEL);
}
+
// Type object for reversed; the instance is its own iterator (identity getiter).
const mp_obj_type_t mp_type_reversed = {
    { &mp_type_type },
    .name = MP_QSTR_reversed,
    .make_new = reversed_make_new,
    .getiter = mp_identity,
    .iternext = reversed_iternext,
};
+
+#endif // MICROPY_PY_BUILTINS_REVERSED
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objset.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,595 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/runtime.h"
+#include "py/runtime0.h"
+#include "py/builtin.h"
+
+#if MICROPY_PY_BUILTINS_SET
+
// A set or frozenset instance: a thin wrapper around the runtime's mp_set_t
// hash table of mp_obj_t elements.
typedef struct _mp_obj_set_t {
    mp_obj_base_t base;
    mp_set_t set;
} mp_obj_set_t;

// Iterator over a set, using the polymorphic-iterator protocol (iternext is
// stored in the instance).  cur is the next hash-table slot to examine.
// NOTE(review): nothing guards against the set being mutated mid-iteration.
typedef struct _mp_obj_set_it_t {
    mp_obj_base_t base;
    mp_fun_1_t iternext;
    mp_obj_set_t *set;
    mp_uint_t cur;
} mp_obj_set_it_t;

STATIC mp_obj_t set_it_iternext(mp_obj_t self_in);
+
+STATIC bool is_set_or_frozenset(mp_obj_t o) {
+ return MP_OBJ_IS_TYPE(o, &mp_type_set)
+#if MICROPY_PY_BUILTINS_FROZENSET
+ || MP_OBJ_IS_TYPE(o, &mp_type_frozenset)
+#endif
+ ;
+}
+
#if MICROPY_PY_BUILTINS_FROZENSET
// Raise TypeError unless o is a set or a frozenset.
STATIC void check_set_or_frozenset(mp_obj_t o) {
    if (!is_set_or_frozenset(o)) {
        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, "'set' object required"));
    }
}
#else
// Without frozenset support there is only one set type to check for.
#define check_set_or_frozenset(o) check_set(o)
#endif

// Raise unless o is a mutable set; used by all mutating methods (frozenset
// shares the locals dict, so this is what enforces its immutability).
STATIC void check_set(mp_obj_t o) {
    if (!MP_OBJ_IS_TYPE(o, &mp_type_set)) {
        // Emulate CPython behavior
        // AttributeError: 'frozenset' object has no attribute 'add'
        #if MICROPY_PY_BUILTINS_FROZENSET
        if (MP_OBJ_IS_TYPE(o, &mp_type_frozenset)) {
            nlr_raise(mp_obj_new_exception_msg(&mp_type_AttributeError, "'frozenset' has no such attribute"));
        }
        #endif
        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, "'set' object required"));
    }
}
+
// Print a set/frozenset.  An empty one prints as "set()"/"frozenset()";
// otherwise elements print repr-style inside braces, additionally wrapped
// in "frozenset(...)" for a frozenset.
STATIC void set_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
    (void)kind;
    mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
    #if MICROPY_PY_BUILTINS_FROZENSET
    bool is_frozen = MP_OBJ_IS_TYPE(self_in, &mp_type_frozenset);
    #endif
    if (self->set.used == 0) {
        #if MICROPY_PY_BUILTINS_FROZENSET
        if (is_frozen) {
            mp_print_str(print, "frozen");
        }
        #endif
        mp_print_str(print, "set()");
        return;
    }
    bool first = true;
    #if MICROPY_PY_BUILTINS_FROZENSET
    if (is_frozen) {
        mp_print_str(print, "frozenset(");
    }
    #endif
    mp_print_str(print, "{");
    // walk every hash-table slot, printing only the filled ones
    for (mp_uint_t i = 0; i < self->set.alloc; i++) {
        if (MP_SET_SLOT_IS_FILLED(&self->set, i)) {
            if (!first) {
                mp_print_str(print, ", ");
            }
            first = false;
            mp_obj_print_helper(print, self->set.table[i], PRINT_REPR);
        }
    }
    mp_print_str(print, "}");
    #if MICROPY_PY_BUILTINS_FROZENSET
    if (is_frozen) {
        mp_print_str(print, ")");
    }
    #endif
}
+
+STATIC mp_obj_t set_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ mp_arg_check_num(n_args, n_kw, 0, 1, false);
+
+ switch (n_args) {
+ case 0: {
+ // create a new, empty set
+ mp_obj_set_t *set = MP_OBJ_TO_PTR(mp_obj_new_set(0, NULL));
+ // set actual set/frozenset type
+ set->base.type = type;
+ return MP_OBJ_FROM_PTR(set);
+ }
+
+ case 1:
+ default: { // can only be 0 or 1 arg
+ // 1 argument, an iterable from which we make a new set
+ mp_obj_t set = mp_obj_new_set(0, NULL);
+ mp_obj_t iterable = mp_getiter(args[0]);
+ mp_obj_t item;
+ while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+ mp_obj_set_store(set, item);
+ }
+ // Set actual set/frozenset type
+ ((mp_obj_set_t*)MP_OBJ_TO_PTR(set))->base.type = type;
+ return set;
+ }
+ }
+}
+
+STATIC mp_obj_t set_it_iternext(mp_obj_t self_in) {
+ mp_obj_set_it_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_uint_t max = self->set->set.alloc;
+ mp_set_t *set = &self->set->set;
+
+ for (mp_uint_t i = self->cur; i < max; i++) {
+ if (MP_SET_SLOT_IS_FILLED(set, i)) {
+ self->cur = i + 1;
+ return set->table[i];
+ }
+ }
+
+ return MP_OBJ_STOP_ITERATION;
+}
+
+STATIC mp_obj_t set_getiter(mp_obj_t set_in) {
+ mp_obj_set_it_t *o = m_new_obj(mp_obj_set_it_t);
+ o->base.type = &mp_type_polymorph_iter;
+ o->iternext = set_it_iternext;
+ o->set = (mp_obj_set_t *)MP_OBJ_TO_PTR(set_in);
+ o->cur = 0;
+ return MP_OBJ_FROM_PTR(o);
+}
+
+
+/******************************************************************************/
+/* set methods */
+
+STATIC mp_obj_t set_add(mp_obj_t self_in, mp_obj_t item) {
+ check_set(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_set_lookup(&self->set, item, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_add_obj, set_add);
+
+STATIC mp_obj_t set_clear(mp_obj_t self_in) {
+ check_set(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+
+ mp_set_clear(&self->set);
+
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(set_clear_obj, set_clear);
+
// Make a mutable (mp_type_set) shallow copy of self: the slot table is
// memcpy'd wholesale, so elements are shared, not cloned.
// NOTE(review): assumes mp_set_init(alloc) provides a table of at least
// self->set.alloc slots with identical layout — confirm against py/map.c.
STATIC mp_obj_t set_copy_as_mutable(mp_obj_t self_in) {
    mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);

    mp_obj_set_t *other = m_new_obj(mp_obj_set_t);
    other->base.type = &mp_type_set;
    mp_set_init(&other->set, self->set.alloc);
    other->set.used = self->set.used;
    memcpy(other->set.table, self->set.table, self->set.alloc * sizeof(mp_obj_t));

    return MP_OBJ_FROM_PTR(other);
}

// set.copy()/frozenset.copy(): shallow copy preserving the receiver's type.
STATIC mp_obj_t set_copy(mp_obj_t self_in) {
    check_set_or_frozenset(self_in);

    mp_obj_t other = set_copy_as_mutable(self_in);
    ((mp_obj_base_t*)MP_OBJ_TO_PTR(other))->type = ((mp_obj_base_t*)MP_OBJ_TO_PTR(self_in))->type;

    return other;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(set_copy_obj, set_copy);
+
+STATIC mp_obj_t set_discard(mp_obj_t self_in, mp_obj_t item) {
+ check_set(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_set_lookup(&self->set, item, MP_MAP_LOOKUP_REMOVE_IF_FOUND);
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_discard_obj, set_discard);
+
// Common implementation of difference()/difference_update().
// args[0] is the receiver; args[1..] are iterables whose items are removed.
// update==true mutates args[0] in place; update==false works on a mutable
// copy and restores the receiver's type (set or frozenset) before returning.
STATIC mp_obj_t set_diff_int(size_t n_args, const mp_obj_t *args, bool update) {
    assert(n_args > 0);

    mp_obj_t self;
    if (update) {
        check_set(args[0]);
        self = args[0];
    } else {
        check_set_or_frozenset(args[0]);
        self = set_copy_as_mutable(args[0]);
    }


    for (mp_uint_t i = 1; i < n_args; i++) {
        mp_obj_t other = args[i];
        if (self == other) {
            // s.difference_update(s) empties s; also avoids iterating a set
            // while removing from it
            set_clear(self);
        } else {
            mp_obj_t iter = mp_getiter(other);
            mp_obj_t next;
            while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
                set_discard(self, next);
            }
        }
    }

    // restore the receiver's concrete type on the result/receiver
    ((mp_obj_base_t*)MP_OBJ_TO_PTR(self))->type = ((mp_obj_base_t*)MP_OBJ_TO_PTR(args[0]))->type;
    return self;
}

// set.difference(*others): non-mutating; returns a new set/frozenset.
STATIC mp_obj_t set_diff(size_t n_args, const mp_obj_t *args) {
    return set_diff_int(n_args, args, false);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR(set_diff_obj, 1, set_diff);

// set.difference_update(*others): in-place; returns None.
STATIC mp_obj_t set_diff_update(size_t n_args, const mp_obj_t *args) {
    set_diff_int(n_args, args, true);
    return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR(set_diff_update_obj, 1, set_diff_update);
+
// Common implementation of intersection()/intersection_update().
// Builds a fresh set of the items of other that are also in self; for the
// update variant, self's old table is freed and the new set's storage is
// stolen into self.
STATIC mp_obj_t set_intersect_int(mp_obj_t self_in, mp_obj_t other, bool update) {
    if (update) {
        check_set(self_in);
    } else {
        check_set_or_frozenset(self_in);
    }

    if (self_in == other) {
        // s & s: a plain copy (or nothing to do for the update variant)
        return update ? mp_const_none : set_copy(self_in);
    }

    mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
    mp_obj_set_t *out = MP_OBJ_TO_PTR(mp_obj_new_set(0, NULL));

    mp_obj_t iter = mp_getiter(other);
    mp_obj_t next;
    while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
        if (mp_set_lookup(&self->set, next, MP_MAP_LOOKUP)) {
            set_add(MP_OBJ_FROM_PTR(out), next);
        }
    }

    if (update) {
        // steal out's storage into self, freeing self's old table
        m_del(mp_obj_t, self->set.table, self->set.alloc);
        self->set.alloc = out->set.alloc;
        self->set.used = out->set.used;
        self->set.table = out->set.table;
    }

    return update ? mp_const_none : MP_OBJ_FROM_PTR(out);
}

// set.intersection(other): returns a new set.
// NOTE(review): unlike difference(), the result here is always a mutable
// set, even for a frozenset receiver (except the self==other shortcut,
// which preserves the type) — confirm this asymmetry is intended.
STATIC mp_obj_t set_intersect(mp_obj_t self_in, mp_obj_t other) {
    return set_intersect_int(self_in, other, false);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_intersect_obj, set_intersect);

// set.intersection_update(other): in-place; returns None.
STATIC mp_obj_t set_intersect_update(mp_obj_t self_in, mp_obj_t other) {
    return set_intersect_int(self_in, other, true);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_intersect_update_obj, set_intersect_update);
+
+STATIC mp_obj_t set_isdisjoint(mp_obj_t self_in, mp_obj_t other) {
+ check_set_or_frozenset(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+
+ mp_obj_t iter = mp_getiter(other);
+ mp_obj_t next;
+ while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+ if (mp_set_lookup(&self->set, next, MP_MAP_LOOKUP)) {
+ return mp_const_false;
+ }
+ }
+ return mp_const_true;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_isdisjoint_obj, set_isdisjoint);
+
// Core of the subset/superset comparisons.  Either operand may be an
// arbitrary iterable: non-set operands are first poured into a temporary
// set, which is emptied again afterwards (the temp object itself is not
// freed — see TODO below).  proper==true computes a strict subset.
STATIC mp_obj_t set_issubset_internal(mp_obj_t self_in, mp_obj_t other_in, bool proper) {
    mp_obj_set_t *self;
    bool cleanup_self = false;
    if (is_set_or_frozenset(self_in)) {
        self = MP_OBJ_TO_PTR(self_in);
    } else {
        self = MP_OBJ_TO_PTR(set_make_new(&mp_type_set, 1, 0, &self_in));
        cleanup_self = true;
    }

    mp_obj_set_t *other;
    bool cleanup_other = false;
    if (is_set_or_frozenset(other_in)) {
        other = MP_OBJ_TO_PTR(other_in);
    } else {
        other = MP_OBJ_TO_PTR(set_make_new(&mp_type_set, 1, 0, &other_in));
        cleanup_other = true;
    }
    bool out = true;
    if (proper && self->set.used == other->set.used) {
        // equal sizes can never be a proper subset
        out = false;
    } else {
        // subset iff every element of self is found in other
        mp_obj_t iter = set_getiter(MP_OBJ_FROM_PTR(self));
        mp_obj_t next;
        while ((next = set_it_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
            if (!mp_set_lookup(&other->set, next, MP_MAP_LOOKUP)) {
                out = false;
                break;
            }
        }
    }
    // TODO: Should free objects altogether
    if (cleanup_self) {
        set_clear(MP_OBJ_FROM_PTR(self));
    }
    if (cleanup_other) {
        set_clear(MP_OBJ_FROM_PTR(other));
    }
    return mp_obj_new_bool(out);
}
// set.issubset(other): self <= other.
STATIC mp_obj_t set_issubset(mp_obj_t self_in, mp_obj_t other_in) {
    return set_issubset_internal(self_in, other_in, false);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_issubset_obj, set_issubset);

// Strict subset (the < operator); not exposed as a named method.
STATIC mp_obj_t set_issubset_proper(mp_obj_t self_in, mp_obj_t other_in) {
    return set_issubset_internal(self_in, other_in, true);
}

// set.issuperset(other): implemented as other <= self.
STATIC mp_obj_t set_issuperset(mp_obj_t self_in, mp_obj_t other_in) {
    return set_issubset_internal(other_in, self_in, false);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_issuperset_obj, set_issuperset);

// Strict superset (the > operator); not exposed as a named method.
STATIC mp_obj_t set_issuperset_proper(mp_obj_t self_in, mp_obj_t other_in) {
    return set_issubset_internal(other_in, self_in, true);
}
+
+STATIC mp_obj_t set_equal(mp_obj_t self_in, mp_obj_t other_in) {
+ check_set_or_frozenset(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ if (!is_set_or_frozenset(other_in)) {
+ return mp_const_false;
+ }
+ mp_obj_set_t *other = MP_OBJ_TO_PTR(other_in);
+ if (self->set.used != other->set.used) {
+ return mp_const_false;
+ }
+ return set_issubset(self_in, other_in);
+}
+
+STATIC mp_obj_t set_pop(mp_obj_t self_in) {
+ check_set(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_t obj = mp_set_remove_first(&self->set);
+ if (obj == MP_OBJ_NULL) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_KeyError, "pop from an empty set"));
+ }
+ return obj;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(set_pop_obj, set_pop);
+
+STATIC mp_obj_t set_remove(mp_obj_t self_in, mp_obj_t item) {
+ check_set(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ if (mp_set_lookup(&self->set, item, MP_MAP_LOOKUP_REMOVE_IF_FOUND) == MP_OBJ_NULL) {
+ nlr_raise(mp_obj_new_exception(&mp_type_KeyError));
+ }
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_remove_obj, set_remove);
+
+STATIC mp_obj_t set_symmetric_difference_update(mp_obj_t self_in, mp_obj_t other_in) {
+ check_set(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_t iter = mp_getiter(other_in);
+ mp_obj_t next;
+ while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+ mp_set_lookup(&self->set, next, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND_OR_REMOVE_IF_FOUND);
+ }
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_symmetric_difference_update_obj, set_symmetric_difference_update);
+
+STATIC mp_obj_t set_symmetric_difference(mp_obj_t self_in, mp_obj_t other_in) {
+ check_set_or_frozenset(self_in);
+ mp_obj_t self_out = set_copy_as_mutable(self_in);
+ set_symmetric_difference_update(self_out, other_in);
+ ((mp_obj_base_t*)MP_OBJ_TO_PTR(self_out))->type = ((mp_obj_base_t*)MP_OBJ_TO_PTR(self_in))->type;
+ return self_out;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_symmetric_difference_obj, set_symmetric_difference);
+
+STATIC void set_update_int(mp_obj_set_t *self, mp_obj_t other_in) {
+ mp_obj_t iter = mp_getiter(other_in);
+ mp_obj_t next;
+ while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+ mp_set_lookup(&self->set, next, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+ }
+}
+
+STATIC mp_obj_t set_update(size_t n_args, const mp_obj_t *args) {
+ assert(n_args > 0);
+
+ for (mp_uint_t i = 1; i < n_args; i++) {
+ set_update_int(MP_OBJ_TO_PTR(args[0]), args[i]);
+ }
+
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR(set_update_obj, 1, set_update);
+
+STATIC mp_obj_t set_union(mp_obj_t self_in, mp_obj_t other_in) {
+ check_set_or_frozenset(self_in);
+ mp_obj_t self = set_copy(self_in);
+ set_update_int(MP_OBJ_TO_PTR(self), other_in);
+ return self;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_union_obj, set_union);
+
// Unary operators: bool(), len(), and — for frozenset only — hash().
STATIC mp_obj_t set_unary_op(mp_uint_t op, mp_obj_t self_in) {
    mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
    switch (op) {
        case MP_UNARY_OP_BOOL: return mp_obj_new_bool(self->set.used != 0);
        case MP_UNARY_OP_LEN: return MP_OBJ_NEW_SMALL_INT(self->set.used);
#if MICROPY_PY_BUILTINS_FROZENSET
        case MP_UNARY_OP_HASH:
            if (MP_OBJ_IS_TYPE(self_in, &mp_type_frozenset)) {
                // start hash with unique value
                mp_int_t hash = (mp_int_t)(uintptr_t)&mp_type_frozenset;
                mp_uint_t max = self->set.alloc;
                mp_set_t *set = &self->set;

                // order-independent combination: sum the element hashes
                for (mp_uint_t i = 0; i < max; i++) {
                    if (MP_SET_SLOT_IS_FILLED(set, i)) {
                        hash += MP_OBJ_SMALL_INT_VALUE(mp_unary_op(MP_UNARY_OP_HASH, set->table[i]));
                    }
                }
                return MP_OBJ_NEW_SMALL_INT(hash);
            }
            // fall through: mutable sets are unhashable
#endif
        default: return MP_OBJ_NULL; // op not supported
    }
}
+
// Binary operators for set/frozenset; lhs is the set (the runtime dispatches
// on its type).  <, >, <=, >= map onto the subset/superset helpers, == onto
// set_equal, and "in" onto a direct hash lookup.
// NOTE(review): the INPLACE_* cases return a fresh object rather than
// mutating lhs, so |= etc. behave like their non-inplace forms here —
// confirm the caller rebinds the result.
STATIC mp_obj_t set_binary_op(mp_uint_t op, mp_obj_t lhs, mp_obj_t rhs) {
    mp_obj_t args[] = {lhs, rhs};
    switch (op) {
        case MP_BINARY_OP_OR:
            return set_union(lhs, rhs);
        case MP_BINARY_OP_XOR:
            return set_symmetric_difference(lhs, rhs);
        case MP_BINARY_OP_AND:
            return set_intersect(lhs, rhs);
        case MP_BINARY_OP_SUBTRACT:
            return set_diff(2, args);
        case MP_BINARY_OP_INPLACE_OR:
            return set_union(lhs, rhs);
        case MP_BINARY_OP_INPLACE_XOR:
            return set_symmetric_difference(lhs, rhs);
        case MP_BINARY_OP_INPLACE_AND:
            return set_intersect(lhs, rhs);
        case MP_BINARY_OP_INPLACE_SUBTRACT:
            return set_diff(2, args);
        case MP_BINARY_OP_LESS:
            return set_issubset_proper(lhs, rhs);
        case MP_BINARY_OP_MORE:
            return set_issuperset_proper(lhs, rhs);
        case MP_BINARY_OP_EQUAL:
            return set_equal(lhs, rhs);
        case MP_BINARY_OP_LESS_EQUAL:
            return set_issubset(lhs, rhs);
        case MP_BINARY_OP_MORE_EQUAL:
            return set_issuperset(lhs, rhs);
        case MP_BINARY_OP_IN: {
            // membership test: is rhs an element of the set lhs
            mp_obj_set_t *o = MP_OBJ_TO_PTR(lhs);
            mp_obj_t elem = mp_set_lookup(&o->set, rhs, MP_MAP_LOOKUP);
            return mp_obj_new_bool(elem != MP_OBJ_NULL);
        }
        default:
            return MP_OBJ_NULL; // op not supported
    }
}
+
+/******************************************************************************/
+/* set constructors & public C API */
+
+
// Method table shared by set AND frozenset (see mp_type_frozenset below);
// the mutating entries rely on check_set() to raise for a frozenset receiver.
STATIC const mp_rom_map_elem_t set_locals_dict_table[] = {
    { MP_ROM_QSTR(MP_QSTR_add), MP_ROM_PTR(&set_add_obj) },
    { MP_ROM_QSTR(MP_QSTR_clear), MP_ROM_PTR(&set_clear_obj) },
    { MP_ROM_QSTR(MP_QSTR_copy), MP_ROM_PTR(&set_copy_obj) },
    { MP_ROM_QSTR(MP_QSTR_discard), MP_ROM_PTR(&set_discard_obj) },
    { MP_ROM_QSTR(MP_QSTR_difference), MP_ROM_PTR(&set_diff_obj) },
    { MP_ROM_QSTR(MP_QSTR_difference_update), MP_ROM_PTR(&set_diff_update_obj) },
    { MP_ROM_QSTR(MP_QSTR_intersection), MP_ROM_PTR(&set_intersect_obj) },
    { MP_ROM_QSTR(MP_QSTR_intersection_update), MP_ROM_PTR(&set_intersect_update_obj) },
    { MP_ROM_QSTR(MP_QSTR_isdisjoint), MP_ROM_PTR(&set_isdisjoint_obj) },
    { MP_ROM_QSTR(MP_QSTR_issubset), MP_ROM_PTR(&set_issubset_obj) },
    { MP_ROM_QSTR(MP_QSTR_issuperset), MP_ROM_PTR(&set_issuperset_obj) },
    { MP_ROM_QSTR(MP_QSTR_pop), MP_ROM_PTR(&set_pop_obj) },
    { MP_ROM_QSTR(MP_QSTR_remove), MP_ROM_PTR(&set_remove_obj) },
    { MP_ROM_QSTR(MP_QSTR_symmetric_difference), MP_ROM_PTR(&set_symmetric_difference_obj) },
    { MP_ROM_QSTR(MP_QSTR_symmetric_difference_update), MP_ROM_PTR(&set_symmetric_difference_update_obj) },
    { MP_ROM_QSTR(MP_QSTR_union), MP_ROM_PTR(&set_union_obj) },
    { MP_ROM_QSTR(MP_QSTR_update), MP_ROM_PTR(&set_update_obj) },
    { MP_ROM_QSTR(MP_QSTR___contains__), MP_ROM_PTR(&mp_op_contains_obj) },
};

STATIC MP_DEFINE_CONST_DICT(set_locals_dict, set_locals_dict_table);
+
// Type object for the mutable set.
const mp_obj_type_t mp_type_set = {
    { &mp_type_type },
    .name = MP_QSTR_set,
    .print = set_print,
    .make_new = set_make_new,
    .unary_op = set_unary_op,
    .binary_op = set_binary_op,
    .getiter = set_getiter,
    .locals_dict = (mp_obj_dict_t*)&set_locals_dict,
};

#if MICROPY_PY_BUILTINS_FROZENSET
// frozenset shares every handler and the locals dict with set; immutability
// is enforced at runtime by check_set() inside the mutating methods.
const mp_obj_type_t mp_type_frozenset = {
    { &mp_type_type },
    .name = MP_QSTR_frozenset,
    .print = set_print,
    .make_new = set_make_new,
    .unary_op = set_unary_op,
    .binary_op = set_binary_op,
    .getiter = set_getiter,
    .locals_dict = (mp_obj_dict_t*)&set_locals_dict,
};
#endif
+
+mp_obj_t mp_obj_new_set(mp_uint_t n_args, mp_obj_t *items) {
+ mp_obj_set_t *o = m_new_obj(mp_obj_set_t);
+ o->base.type = &mp_type_set;
+ mp_set_init(&o->set, n_args);
+ for (mp_uint_t i = 0; i < n_args; i++) {
+ mp_set_lookup(&o->set, items[i], MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+ }
+ return MP_OBJ_FROM_PTR(o);
+}
+
+void mp_obj_set_store(mp_obj_t self_in, mp_obj_t item) {
+ assert(MP_OBJ_IS_TYPE(self_in, &mp_type_set));
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_set_lookup(&self->set, item, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+}
+
+#endif // MICROPY_PY_BUILTINS_SET
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objsingleton.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,57 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/obj.h"
+#include "py/runtime0.h"
+
+/******************************************************************************/
+/* singleton objects defined by Python */
+
// A named singleton (e.g. Ellipsis): its only state is the qstr it prints as.
typedef struct _mp_obj_singleton_t {
    mp_obj_base_t base;
    qstr name;
} mp_obj_singleton_t;
+
+STATIC void singleton_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_singleton_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_printf(print, "%q", self->name);
+}
+
// Shared type for all named singletons; the type's own name is the empty qstr.
const mp_obj_type_t mp_type_singleton = {
    { &mp_type_type },
    .name = MP_QSTR_,
    .print = singleton_print,
};

// The Ellipsis ("...") singleton.
const mp_obj_singleton_t mp_const_ellipsis_obj = {{&mp_type_singleton}, MP_QSTR_Ellipsis};
#if MICROPY_PY_BUILTINS_NOTIMPLEMENTED
// The NotImplemented singleton.
const mp_obj_singleton_t mp_const_notimplemented_obj = {{&mp_type_singleton}, MP_QSTR_NotImplemented};
#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objslice.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,103 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/obj.h"
+#include "py/runtime0.h"
+
+/******************************************************************************/
+/* slice object */
+
+#if MICROPY_PY_BUILTINS_SLICE
+
// The slice object. All three bounds are stored as generic objects so each
// may be None (or an integer object).
// NOTE(review): the original TODO claimed only the 2-integer-arg variant is
// implemented, but the struct clearly carries start, stop AND step.
typedef struct _mp_obj_slice_t {
    mp_obj_base_t base; // object header (type pointer)
    mp_obj_t start;     // start bound, or mp_const_none
    mp_obj_t stop;      // stop bound, or mp_const_none
    mp_obj_t step;      // step, or mp_const_none
} mp_obj_slice_t;
+
+STATIC void slice_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_slice_t *o = MP_OBJ_TO_PTR(o_in);
+ mp_print_str(print, "slice(");
+ mp_obj_print_helper(print, o->start, PRINT_REPR);
+ mp_print_str(print, ", ");
+ mp_obj_print_helper(print, o->stop, PRINT_REPR);
+ mp_print_str(print, ", ");
+ mp_obj_print_helper(print, o->step, PRINT_REPR);
+ mp_print_str(print, ")");
+}
+
#if MICROPY_PY_BUILTINS_SLICE_ATTRS
STATIC void slice_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
    // Expose .start/.stop/.step as read-only attributes.
    // dest[0] == MP_OBJ_NULL signals an attribute load; anything else is a
    // store/delete, which slices don't support.
    if (dest[0] == MP_OBJ_NULL) {
        mp_obj_slice_t *slice = MP_OBJ_TO_PTR(self_in);
        if (attr == MP_QSTR_start) {
            dest[0] = slice->start;
        } else if (attr == MP_QSTR_stop) {
            dest[0] = slice->stop;
        } else if (attr == MP_QSTR_step) {
            dest[0] = slice->step;
        }
    }
}
#endif
+
// The slice type. Only .print (and optionally .attr) are provided here;
// instances are created via mp_obj_new_slice below.
const mp_obj_type_t mp_type_slice = {
    { &mp_type_type },
    .name = MP_QSTR_slice,
    .print = slice_print,
#if MICROPY_PY_BUILTINS_SLICE_ATTRS
    .attr = slice_attr,
#endif
};
+
+mp_obj_t mp_obj_new_slice(mp_obj_t ostart, mp_obj_t ostop, mp_obj_t ostep) {
+ mp_obj_slice_t *o = m_new_obj(mp_obj_slice_t);
+ o->base.type = &mp_type_slice;
+ o->start = ostart;
+ o->stop = ostop;
+ o->step = ostep;
+ return MP_OBJ_FROM_PTR(o);
+}
+
+void mp_obj_slice_get(mp_obj_t self_in, mp_obj_t *start, mp_obj_t *stop, mp_obj_t *step) {
+ assert(MP_OBJ_IS_TYPE(self_in, &mp_type_slice));
+ mp_obj_slice_t *self = MP_OBJ_TO_PTR(self_in);
+ *start = self->start;
+ *stop = self->stop;
+ *step = self->step;
+}
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objstr.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,2126 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/unicode.h"
+#include "py/objstr.h"
+#include "py/objlist.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+#include "py/stackctrl.h"
+
+STATIC mp_obj_t str_modulo_format(mp_obj_t pattern, mp_uint_t n_args, const mp_obj_t *args, mp_obj_t dict);
+
+STATIC mp_obj_t mp_obj_new_bytes_iterator(mp_obj_t str);
+STATIC NORETURN void bad_implicit_conversion(mp_obj_t self_in);
+
+/******************************************************************************/
+/* str */
+
+void mp_str_print_quoted(const mp_print_t *print, const byte *str_data, mp_uint_t str_len, bool is_bytes) {
+ // this escapes characters, but it will be very slow to print (calling print many times)
+ bool has_single_quote = false;
+ bool has_double_quote = false;
+ for (const byte *s = str_data, *top = str_data + str_len; !has_double_quote && s < top; s++) {
+ if (*s == '\'') {
+ has_single_quote = true;
+ } else if (*s == '"') {
+ has_double_quote = true;
+ }
+ }
+ int quote_char = '\'';
+ if (has_single_quote && !has_double_quote) {
+ quote_char = '"';
+ }
+ mp_printf(print, "%c", quote_char);
+ for (const byte *s = str_data, *top = str_data + str_len; s < top; s++) {
+ if (*s == quote_char) {
+ mp_printf(print, "\\%c", quote_char);
+ } else if (*s == '\\') {
+ mp_print_str(print, "\\\\");
+ } else if (*s >= 0x20 && *s != 0x7f && (!is_bytes || *s < 0x80)) {
+ // In strings, anything which is not ascii control character
+ // is printed as is, this includes characters in range 0x80-0xff
+ // (which can be non-Latin letters, etc.)
+ mp_printf(print, "%c", *s);
+ } else if (*s == '\n') {
+ mp_print_str(print, "\\n");
+ } else if (*s == '\r') {
+ mp_print_str(print, "\\r");
+ } else if (*s == '\t') {
+ mp_print_str(print, "\\t");
+ } else {
+ mp_printf(print, "\\x%02x", *s);
+ }
+ }
+ mp_printf(print, "%c", quote_char);
+}
+
#if MICROPY_PY_UJSON
void mp_str_print_json(const mp_print_t *print, const byte *str_data, size_t str_len) {
    // Print the data as a JSON string literal (RFC 4627 escaping rules).
    // A valid utf8-encoded input produces JSON-conforming output.
    mp_print_str(print, "\"");
    for (const byte *s = str_data, *top = str_data + str_len; s < top; s++) {
        byte c = *s;
        switch (c) {
            case '"':
            case '\\':
                mp_printf(print, "\\%c", c);
                break;
            case '\n':
                mp_print_str(print, "\\n");
                break;
            case '\r':
                mp_print_str(print, "\\r");
                break;
            case '\t':
                mp_print_str(print, "\\t");
                break;
            default:
                if (c >= 32) {
                    // Normal chars (including utf-8 sequences) pass through.
                    mp_printf(print, "%c", c);
                } else {
                    // Remaining control chars use the \uXXXX form.
                    mp_printf(print, "\\u%04x", c);
                }
                break;
        }
    }
    mp_print_str(print, "\"");
}
#endif
+
STATIC void str_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
    // Print a str/bytes object. PRINT_JSON delegates to the JSON printer,
    // PRINT_RAW (and PRINT_STR for non-bytes) emits the data verbatim, and
    // everything else produces a quoted repr (with a leading 'b' for bytes).
    GET_STR_DATA_LEN(self_in, str_data, str_len);
    #if MICROPY_PY_UJSON
    if (kind == PRINT_JSON) {
        mp_str_print_json(print, str_data, str_len);
        return;
    }
    #endif
    #if !MICROPY_PY_BUILTINS_STR_UNICODE
    bool is_bytes = MP_OBJ_IS_TYPE(self_in, &mp_type_bytes);
    #else
    // With unicode enabled, str printing is handled elsewhere
    // (objstrunicode), so anything reaching here is bytes.
    bool is_bytes = true;
    #endif
    if (kind == PRINT_RAW || (!MICROPY_PY_BUILTINS_STR_UNICODE && kind == PRINT_STR && !is_bytes)) {
        mp_printf(print, "%.*s", str_len, str_data);
    } else {
        if (is_bytes) {
            mp_print_str(print, "b");
        }
        mp_str_print_quoted(print, str_data, str_len, is_bytes);
    }
}
+
// Constructor for str: str(), str(object), str(bytes, encoding[, errors]).
mp_obj_t mp_obj_str_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
#if MICROPY_CPYTHON_COMPAT
    if (n_kw != 0) {
        mp_arg_error_unimpl_kw();
    }
#endif

    mp_arg_check_num(n_args, n_kw, 0, 3, false);

    switch (n_args) {
        case 0:
            // str() -> the (interned) empty string.
            return MP_OBJ_NEW_QSTR(MP_QSTR_);

        case 1: {
            // str(obj) -> obj's str() form, built via the print machinery.
            vstr_t vstr;
            mp_print_t print;
            vstr_init_print(&vstr, 16, &print);
            mp_obj_print_helper(&print, args[0], PRINT_STR);
            return mp_obj_new_str_from_vstr(type, &vstr);
        }

        default: // 2 or 3 args
            // TODO: validate 2nd/3rd args
            if (MP_OBJ_IS_TYPE(args[0], &mp_type_bytes)) {
                // "Decode" bytes by sharing its data and hash rather than
                // copying. NOTE(review): this aliases the bytes' buffer —
                // safe only because bytes objects are immutable.
                GET_STR_DATA_LEN(args[0], str_data, str_len);
                GET_STR_HASH(args[0], str_hash);
                mp_obj_str_t *o = MP_OBJ_TO_PTR(mp_obj_new_str_of_type(type, NULL, str_len));
                o->data = str_data;
                o->hash = str_hash;
                return MP_OBJ_FROM_PTR(o);
            } else {
                // Anything else exposing the buffer protocol gets copied.
                mp_buffer_info_t bufinfo;
                mp_get_buffer_raise(args[0], &bufinfo, MP_BUFFER_READ);
                return mp_obj_new_str(bufinfo.buf, bufinfo.len, false);
            }
    }
}
+
// Constructor for bytes: bytes(), bytes(str, encoding[, errors]),
// bytes(int), bytes(buffer), or bytes(iterable of ints).
STATIC mp_obj_t bytes_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
    (void)type_in;

    #if MICROPY_CPYTHON_COMPAT
    if (n_kw != 0) {
        mp_arg_error_unimpl_kw();
    }
    #else
    (void)n_kw;
    #endif

    if (n_args == 0) {
        // bytes() -> shared empty-bytes singleton.
        return mp_const_empty_bytes;
    }

    if (MP_OBJ_IS_STR(args[0])) {
        // bytes(str, encoding[, errors]): the encoding arg is required but
        // not otherwise inspected; the str's data and hash are shared, not
        // copied (safe because str objects are immutable).
        if (n_args < 2 || n_args > 3) {
            goto wrong_args;
        }
        GET_STR_DATA_LEN(args[0], str_data, str_len);
        GET_STR_HASH(args[0], str_hash);
        mp_obj_str_t *o = MP_OBJ_TO_PTR(mp_obj_new_str_of_type(&mp_type_bytes, NULL, str_len));
        o->data = str_data;
        o->hash = str_hash;
        return MP_OBJ_FROM_PTR(o);
    }

    // All remaining forms take exactly one argument.
    if (n_args > 1) {
        goto wrong_args;
    }

    if (MP_OBJ_IS_SMALL_INT(args[0])) {
        // bytes(n) -> n zero bytes.
        uint len = MP_OBJ_SMALL_INT_VALUE(args[0]);
        vstr_t vstr;
        vstr_init_len(&vstr, len);
        memset(vstr.buf, 0, len);
        return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr);
    }

    // check if argument has the buffer protocol
    mp_buffer_info_t bufinfo;
    if (mp_get_buffer(args[0], &bufinfo, MP_BUFFER_READ)) {
        return mp_obj_new_str_of_type(&mp_type_bytes, bufinfo.buf, bufinfo.len);
    }

    vstr_t vstr;
    // Try to create array of exact len if initializer len is known
    mp_obj_t len_in = mp_obj_len_maybe(args[0]);
    if (len_in == MP_OBJ_NULL) {
        vstr_init(&vstr, 16);
    } else {
        mp_int_t len = MP_OBJ_SMALL_INT_VALUE(len_in);
        vstr_init(&vstr, len);
    }

    // bytes(iterable): every item must be an int; the 0..255 range is only
    // enforced in CPython-compat builds.
    mp_obj_t iterable = mp_getiter(args[0]);
    mp_obj_t item;
    while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
        mp_int_t val = mp_obj_get_int(item);
        #if MICROPY_CPYTHON_COMPAT
        if (val < 0 || val > 255) {
            nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "bytes value out of range"));
        }
        #endif
        vstr_add_byte(&vstr, val);
    }

    return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr);

wrong_args:
    nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "wrong number of arguments"));
}
+
+// like strstr but with specified length and allows \0 bytes
+// TODO replace with something more efficient/standard
+const byte *find_subbytes(const byte *haystack, mp_uint_t hlen, const byte *needle, mp_uint_t nlen, mp_int_t direction) {
+ if (hlen >= nlen) {
+ mp_uint_t str_index, str_index_end;
+ if (direction > 0) {
+ str_index = 0;
+ str_index_end = hlen - nlen;
+ } else {
+ str_index = hlen - nlen;
+ str_index_end = 0;
+ }
+ for (;;) {
+ if (memcmp(&haystack[str_index], needle, nlen) == 0) {
+ //found
+ return haystack + str_index;
+ }
+ if (str_index == str_index_end) {
+ //not found
+ break;
+ }
+ str_index += direction;
+ }
+ }
+ return NULL;
+}
+
// Note: this function is used to check if an object is a str or bytes, which
// works because both those types use it as their binary_op method. Revisit
// MP_OBJ_IS_STR_OR_BYTES if this fact changes.
mp_obj_t mp_obj_str_binary_op(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
    // check for modulo
    if (op == MP_BINARY_OP_MODULO) {
        // str % args: rhs may be a tuple, a dict (named fields), or a
        // single object (treated as a 1-tuple).
        mp_obj_t *args;
        mp_uint_t n_args;
        mp_obj_t dict = MP_OBJ_NULL;
        if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_tuple)) {
            // TODO: Support tuple subclasses?
            mp_obj_tuple_get(rhs_in, &n_args, &args);
        } else if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_dict)) {
            args = NULL;
            n_args = 0;
            dict = rhs_in;
        } else {
            args = &rhs_in;
            n_args = 1;
        }
        return str_modulo_format(lhs_in, n_args, args, dict);
    }

    // from now on we need lhs type and data, so extract them
    mp_obj_type_t *lhs_type = mp_obj_get_type(lhs_in);
    GET_STR_DATA_LEN(lhs_in, lhs_data, lhs_len);

    // check for multiply
    if (op == MP_BINARY_OP_MULTIPLY) {
        mp_int_t n;
        if (!mp_obj_get_int_maybe(rhs_in, &n)) {
            return MP_OBJ_NULL; // op not supported
        }
        if (n <= 0) {
            // s * 0 (or negative) yields the empty str/bytes.
            if (lhs_type == &mp_type_str) {
                return MP_OBJ_NEW_QSTR(MP_QSTR_); // empty str
            } else {
                return mp_const_empty_bytes;
            }
        }
        vstr_t vstr;
        vstr_init_len(&vstr, lhs_len * n);
        mp_seq_multiply(lhs_data, sizeof(*lhs_data), lhs_len, n, vstr.buf);
        return mp_obj_new_str_from_vstr(lhs_type, &vstr);
    }

    // From now on all operations allow:
    //    - str with str
    //    - bytes with bytes
    //    - bytes with bytearray
    //    - bytes with array.array
    // To do this efficiently we use the buffer protocol to extract the raw
    // data for the rhs, but only if the lhs is a bytes object.
    //
    // NOTE: CPython does not allow comparison between bytes ard array.array
    // (even if the array is of type 'b'), even though it allows addition of
    // such types.  We are not compatible with this (we do allow comparison
    // of bytes with anything that has the buffer protocol).  It would be
    // easy to "fix" this with a bit of extra logic below, but it costs code
    // size and execution time so we don't.

    const byte *rhs_data;
    mp_uint_t rhs_len;
    if (lhs_type == mp_obj_get_type(rhs_in)) {
        // Same type on both sides: use the str/bytes data directly.
        GET_STR_DATA_LEN(rhs_in, rhs_data_, rhs_len_);
        rhs_data = rhs_data_;
        rhs_len = rhs_len_;
    } else if (lhs_type == &mp_type_bytes) {
        // bytes op buffer-protocol-object.
        mp_buffer_info_t bufinfo;
        if (!mp_get_buffer(rhs_in, &bufinfo, MP_BUFFER_READ)) {
            return MP_OBJ_NULL; // op not supported
        }
        rhs_data = bufinfo.buf;
        rhs_len = bufinfo.len;
    } else {
        // incompatible types
        return MP_OBJ_NULL; // op not supported
    }

    switch (op) {
        case MP_BINARY_OP_ADD:
        case MP_BINARY_OP_INPLACE_ADD: {
            // Concatenation: copy both operands into a fresh buffer.
            vstr_t vstr;
            vstr_init_len(&vstr, lhs_len + rhs_len);
            memcpy(vstr.buf, lhs_data, lhs_len);
            memcpy(vstr.buf + lhs_len, rhs_data, rhs_len);
            return mp_obj_new_str_from_vstr(lhs_type, &vstr);
        }

        case MP_BINARY_OP_IN:
            /* NOTE `a in b` is `b.__contains__(a)` */
            return mp_obj_new_bool(find_subbytes(lhs_data, lhs_len, rhs_data, rhs_len, 1) != NULL);

        //case MP_BINARY_OP_NOT_EQUAL: // This is never passed here
        case MP_BINARY_OP_EQUAL: // This will be passed only for bytes, str is dealt with in mp_obj_equal()
        case MP_BINARY_OP_LESS:
        case MP_BINARY_OP_LESS_EQUAL:
        case MP_BINARY_OP_MORE:
        case MP_BINARY_OP_MORE_EQUAL:
            // Lexicographic byte-wise comparison.
            return mp_obj_new_bool(mp_seq_cmp_bytes(op, lhs_data, lhs_len, rhs_data, rhs_len));
    }

    return MP_OBJ_NULL; // op not supported
}
+
+#if !MICROPY_PY_BUILTINS_STR_UNICODE
+// objstrunicode defines own version
+const byte *str_index_to_ptr(const mp_obj_type_t *type, const byte *self_data, size_t self_len,
+ mp_obj_t index, bool is_slice) {
+ mp_uint_t index_val = mp_get_index(type, self_len, index, is_slice);
+ return self_data + index_val;
+}
+#endif
+
// This is used for both bytes and 8-bit strings. This is not used for unicode strings.
STATIC mp_obj_t bytes_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
    // Subscript: s[i] or s[a:b]. Only loads are supported (immutable type);
    // stores/deletes return MP_OBJ_NULL.
    mp_obj_type_t *type = mp_obj_get_type(self_in);
    GET_STR_DATA_LEN(self_in, self_data, self_len);
    if (value == MP_OBJ_SENTINEL) {
        // load
#if MICROPY_PY_BUILTINS_SLICE
        if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
            mp_bound_slice_t slice;
            if (!mp_seq_get_fast_slice_indexes(self_len, index, &slice)) {
                mp_not_implemented("only slices with step=1 (aka None) are supported");
            }
            return mp_obj_new_str_of_type(type, self_data + slice.start, slice.stop - slice.start);
        }
#endif
        mp_uint_t index_val = mp_get_index(type, self_len, index, false);
        // If we have unicode enabled the type will always be bytes, so take the short cut.
        if (MICROPY_PY_BUILTINS_STR_UNICODE || type == &mp_type_bytes) {
            // bytes indexing yields the integer byte value.
            return MP_OBJ_NEW_SMALL_INT(self_data[index_val]);
        } else {
            // 8-bit str indexing yields a 1-character string.
            return mp_obj_new_str((char*)&self_data[index_val], 1, true);
        }
    } else {
        return MP_OBJ_NULL; // op not supported
    }
}
+
// sep.join(iterable): two passes - first sum the required length, then copy
// separator and elements into a single buffer.
STATIC mp_obj_t str_join(mp_obj_t self_in, mp_obj_t arg) {
    assert(MP_OBJ_IS_STR_OR_BYTES(self_in));
    const mp_obj_type_t *self_type = mp_obj_get_type(self_in);

    // get separation string
    GET_STR_DATA_LEN(self_in, sep_str, sep_len);

    // process args
    mp_uint_t seq_len;
    mp_obj_t *seq_items;
    if (MP_OBJ_IS_TYPE(arg, &mp_type_tuple)) {
        mp_obj_tuple_get(arg, &seq_len, &seq_items);
    } else {
        if (!MP_OBJ_IS_TYPE(arg, &mp_type_list)) {
            // arg is not a list, try to convert it to one
            // TODO: Try to optimize?
            arg = mp_type_list.make_new(&mp_type_list, 1, 0, &arg);
        }
        mp_obj_list_get(arg, &seq_len, &seq_items);
    }

    // count required length
    mp_uint_t required_len = 0;
    for (mp_uint_t i = 0; i < seq_len; i++) {
        // Every element must be the same str/bytes type as the separator.
        if (mp_obj_get_type(seq_items[i]) != self_type) {
            nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
                "join expects a list of str/bytes objects consistent with self object"));
        }
        if (i > 0) {
            required_len += sep_len;
        }
        GET_STR_LEN(seq_items[i], l);
        required_len += l;
    }

    // make joined string
    vstr_t vstr;
    vstr_init_len(&vstr, required_len);
    byte *data = (byte*)vstr.buf;
    for (mp_uint_t i = 0; i < seq_len; i++) {
        if (i > 0) {
            memcpy(data, sep_str, sep_len);
            data += sep_len;
        }
        GET_STR_DATA_LEN(seq_items[i], s, l);
        memcpy(data, s, l);
        data += l;
    }

    // return joined string
    return mp_obj_new_str_from_vstr(self_type, &vstr);
}
+
+enum {SPLIT = 0, KEEP = 1, SPLITLINES = 2};
+
+STATIC inline mp_obj_t str_split_internal(mp_uint_t n_args, const mp_obj_t *args, int type) {
+ const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);
+ mp_int_t splits = -1;
+ mp_obj_t sep = mp_const_none;
+ if (n_args > 1) {
+ sep = args[1];
+ if (n_args > 2) {
+ splits = mp_obj_get_int(args[2]);
+ }
+ }
+
+ mp_obj_t res = mp_obj_new_list(0, NULL);
+ GET_STR_DATA_LEN(args[0], s, len);
+ const byte *top = s + len;
+
+ if (sep == mp_const_none) {
+ // sep not given, so separate on whitespace
+
+ // Initial whitespace is not counted as split, so we pre-do it
+ while (s < top && unichar_isspace(*s)) s++;
+ while (s < top && splits != 0) {
+ const byte *start = s;
+ while (s < top && !unichar_isspace(*s)) s++;
+ mp_obj_list_append(res, mp_obj_new_str_of_type(self_type, start, s - start));
+ if (s >= top) {
+ break;
+ }
+ while (s < top && unichar_isspace(*s)) s++;
+ if (splits > 0) {
+ splits--;
+ }
+ }
+
+ if (s < top) {
+ mp_obj_list_append(res, mp_obj_new_str_of_type(self_type, s, top - s));
+ }
+
+ } else {
+ // sep given
+ if (mp_obj_get_type(sep) != self_type) {
+ bad_implicit_conversion(sep);
+ }
+
+ mp_uint_t sep_len;
+ const char *sep_str = mp_obj_str_get_data(sep, &sep_len);
+
+ if (sep_len == 0) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "empty separator"));
+ }
+
+ for (;;) {
+ const byte *start = s;
+ for (;;) {
+ if (splits == 0 || s + sep_len > top) {
+ s = top;
+ break;
+ } else if (memcmp(s, sep_str, sep_len) == 0) {
+ break;
+ }
+ s++;
+ }
+ mp_uint_t sub_len = s - start;
+ if (MP_LIKELY(!(sub_len == 0 && s == top && (type && SPLITLINES)))) {
+ if (start + sub_len != top && (type & KEEP)) {
+ sub_len++;
+ }
+ mp_obj_list_append(res, mp_obj_new_str_of_type(self_type, start, sub_len));
+ }
+ if (s >= top) {
+ break;
+ }
+ s += sep_len;
+ if (splits > 0) {
+ splits--;
+ }
+ }
+ }
+
+ return res;
+}
+
// str.split([sep[, maxsplit]]) - see str_split_internal for the semantics.
mp_obj_t mp_obj_str_split(size_t n_args, const mp_obj_t *args) {
    return str_split_internal(n_args, args, SPLIT);
}
+
#if MICROPY_PY_BUILTINS_STR_SPLITLINES
// splitlines(keepends=False): implemented as a split on "\n" with the
// SPLITLINES flag (plus KEEP when keepends is true).
STATIC mp_obj_t str_splitlines(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
    static const mp_arg_t allowed_args[] = {
        { MP_QSTR_keepends, MP_ARG_BOOL, {.u_bool = false} },
    };

    // parse args
    struct {
        mp_arg_val_t keepends;
    } args;
    mp_arg_parse_all(n_args - 1, pos_args + 1, kw_args,
        MP_ARRAY_SIZE(allowed_args), allowed_args, (mp_arg_val_t*)&args);

    // NOTE(review): only "\n" acts as a line break here, not the full
    // CPython set of line boundaries ("\r", "\r\n", etc.).
    mp_obj_t new_args[2] = {pos_args[0], MP_OBJ_NEW_QSTR(MP_QSTR__backslash_n)};
    return str_split_internal(2, new_args, SPLITLINES | (args.keepends.u_bool ? KEEP : 0));
}
#endif
+
// rsplit(sep, maxsplit): like split, but splits are counted from the end of
// the string. The result list is filled from its last slot backward.
STATIC mp_obj_t str_rsplit(size_t n_args, const mp_obj_t *args) {
    if (n_args < 3) {
        // If we don't have split limit, it doesn't matter from which side
        // we split.
        return mp_obj_str_split(n_args, args);
    }
    const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);
    mp_obj_t sep = args[1];
    GET_STR_DATA_LEN(args[0], s, len);

    mp_int_t splits = mp_obj_get_int(args[2]);
    mp_int_t org_splits = splits;
    // Preallocate list to the max expected # of elements, as we
    // will fill it from the end.
    mp_obj_list_t *res = MP_OBJ_TO_PTR(mp_obj_new_list(splits + 1, NULL));
    mp_int_t idx = splits;

    if (sep == mp_const_none) {
        mp_not_implemented("rsplit(None,n)");
    } else {
        mp_uint_t sep_len;
        const char *sep_str = mp_obj_str_get_data(sep, &sep_len);

        if (sep_len == 0) {
            nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "empty separator"));
        }

        const byte *beg = s;
        const byte *last = s + len;
        for (;;) {
            // Scan backward from `last` looking for the separator.
            s = last - sep_len;
            for (;;) {
                if (splits == 0 || s < beg) {
                    break;
                } else if (memcmp(s, sep_str, sep_len) == 0) {
                    break;
                }
                s--;
            }
            if (s < beg || splits == 0) {
                // No more separators (or limit reached): everything left of
                // `last` becomes the first element.
                res->items[idx] = mp_obj_new_str_of_type(self_type, beg, last - beg);
                break;
            }
            // Found a separator: the part after it becomes the next element
            // (filling from the back of the list).
            res->items[idx--] = mp_obj_new_str_of_type(self_type, s + sep_len, last - s - sep_len);
            last = s;
            if (splits > 0) {
                splits--;
            }
        }
        if (idx != 0) {
            // We split less parts than split limit, now go cleanup surplus
            mp_int_t used = org_splits + 1 - idx;
            memmove(res->items, &res->items[idx], used * sizeof(mp_obj_t));
            mp_seq_clear(res->items, used, res->alloc, sizeof(*res->items));
            res->len = used;
        }
    }

    return MP_OBJ_FROM_PTR(res);
}
+
// Shared implementation of find/rfind/index/rindex.
// args: (self, needle[, start[, end]]). direction selects forward (+1) or
// backward (-1) search; is_index selects raise-ValueError vs return -1 on
// failure.
STATIC mp_obj_t str_finder(mp_uint_t n_args, const mp_obj_t *args, mp_int_t direction, bool is_index) {
    const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);
    assert(2 <= n_args && n_args <= 4);
    assert(MP_OBJ_IS_STR_OR_BYTES(args[0]));

    // check argument type
    if (mp_obj_get_type(args[1]) != self_type) {
        bad_implicit_conversion(args[1]);
    }

    GET_STR_DATA_LEN(args[0], haystack, haystack_len);
    GET_STR_DATA_LEN(args[1], needle, needle_len);

    // Narrow the search window to the optional [start, end) arguments
    // (slice-style index semantics).
    const byte *start = haystack;
    const byte *end = haystack + haystack_len;
    if (n_args >= 3 && args[2] != mp_const_none) {
        start = str_index_to_ptr(self_type, haystack, haystack_len, args[2], true);
    }
    if (n_args >= 4 && args[3] != mp_const_none) {
        end = str_index_to_ptr(self_type, haystack, haystack_len, args[3], true);
    }

    const byte *p = find_subbytes(start, end - start, needle, needle_len, direction);
    if (p == NULL) {
        // not found
        if (is_index) {
            nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "substring not found"));
        } else {
            return MP_OBJ_NEW_SMALL_INT(-1);
        }
    } else {
        // found
        #if MICROPY_PY_BUILTINS_STR_UNICODE
        if (self_type == &mp_type_str) {
            // Convert the byte offset into a character index for unicode str.
            return MP_OBJ_NEW_SMALL_INT(utf8_ptr_to_index(haystack, p));
        }
        #endif
        return MP_OBJ_NEW_SMALL_INT(p - haystack);
    }
}
+
// find/rfind return -1 when not found; index/rindex raise ValueError.
STATIC mp_obj_t str_find(size_t n_args, const mp_obj_t *args) {
    return str_finder(n_args, args, 1, false);
}

STATIC mp_obj_t str_rfind(size_t n_args, const mp_obj_t *args) {
    return str_finder(n_args, args, -1, false);
}

STATIC mp_obj_t str_index(size_t n_args, const mp_obj_t *args) {
    return str_finder(n_args, args, 1, true);
}

STATIC mp_obj_t str_rindex(size_t n_args, const mp_obj_t *args) {
    return str_finder(n_args, args, -1, true);
}
+
+// TODO: (Much) more variety in args
+STATIC mp_obj_t str_startswith(size_t n_args, const mp_obj_t *args) {
+ const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);
+ GET_STR_DATA_LEN(args[0], str, str_len);
+ GET_STR_DATA_LEN(args[1], prefix, prefix_len);
+ const byte *start = str;
+ if (n_args > 2) {
+ start = str_index_to_ptr(self_type, str, str_len, args[2], true);
+ }
+ if (prefix_len + (start - str) > str_len) {
+ return mp_const_false;
+ }
+ return mp_obj_new_bool(memcmp(start, prefix, prefix_len) == 0);
+}
+
+STATIC mp_obj_t str_endswith(size_t n_args, const mp_obj_t *args) {
+ GET_STR_DATA_LEN(args[0], str, str_len);
+ GET_STR_DATA_LEN(args[1], suffix, suffix_len);
+ if (n_args > 2) {
+ mp_not_implemented("start/end indices");
+ }
+
+ if (suffix_len > str_len) {
+ return mp_const_false;
+ }
+ return mp_obj_new_bool(memcmp(str + (str_len - suffix_len), suffix, suffix_len) == 0);
+}
+
+enum { LSTRIP, RSTRIP, STRIP };
+
+STATIC mp_obj_t str_uni_strip(int type, mp_uint_t n_args, const mp_obj_t *args) {
+ assert(1 <= n_args && n_args <= 2);
+ assert(MP_OBJ_IS_STR_OR_BYTES(args[0]));
+ const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);
+
+ const byte *chars_to_del;
+ uint chars_to_del_len;
+ static const byte whitespace[] = " \t\n\r\v\f";
+
+ if (n_args == 1) {
+ chars_to_del = whitespace;
+ chars_to_del_len = sizeof(whitespace);
+ } else {
+ if (mp_obj_get_type(args[1]) != self_type) {
+ bad_implicit_conversion(args[1]);
+ }
+ GET_STR_DATA_LEN(args[1], s, l);
+ chars_to_del = s;
+ chars_to_del_len = l;
+ }
+
+ GET_STR_DATA_LEN(args[0], orig_str, orig_str_len);
+
+ mp_uint_t first_good_char_pos = 0;
+ bool first_good_char_pos_set = false;
+ mp_uint_t last_good_char_pos = 0;
+ mp_uint_t i = 0;
+ mp_int_t delta = 1;
+ if (type == RSTRIP) {
+ i = orig_str_len - 1;
+ delta = -1;
+ }
+ for (mp_uint_t len = orig_str_len; len > 0; len--) {
+ if (find_subbytes(chars_to_del, chars_to_del_len, &orig_str[i], 1, 1) == NULL) {
+ if (!first_good_char_pos_set) {
+ first_good_char_pos_set = true;
+ first_good_char_pos = i;
+ if (type == LSTRIP) {
+ last_good_char_pos = orig_str_len - 1;
+ break;
+ } else if (type == RSTRIP) {
+ first_good_char_pos = 0;
+ last_good_char_pos = i;
+ break;
+ }
+ }
+ last_good_char_pos = i;
+ }
+ i += delta;
+ }
+
+ if (!first_good_char_pos_set) {
+ // string is all whitespace, return ''
+ if (self_type == &mp_type_str) {
+ return MP_OBJ_NEW_QSTR(MP_QSTR_);
+ } else {
+ return mp_const_empty_bytes;
+ }
+ }
+
+ assert(last_good_char_pos >= first_good_char_pos);
+ //+1 to accomodate the last character
+ mp_uint_t stripped_len = last_good_char_pos - first_good_char_pos + 1;
+ if (stripped_len == orig_str_len) {
+ // If nothing was stripped, don't bother to dup original string
+ // TODO: watch out for this case when we'll get to bytearray.strip()
+ assert(first_good_char_pos == 0);
+ return args[0];
+ }
+ return mp_obj_new_str_of_type(self_type, orig_str + first_good_char_pos, stripped_len);
+}
+
// strip([chars]) / lstrip / rstrip - thin wrappers over str_uni_strip.
STATIC mp_obj_t str_strip(size_t n_args, const mp_obj_t *args) {
    return str_uni_strip(STRIP, n_args, args);
}

STATIC mp_obj_t str_lstrip(size_t n_args, const mp_obj_t *args) {
    return str_uni_strip(LSTRIP, n_args, args);
}

STATIC mp_obj_t str_rstrip(size_t n_args, const mp_obj_t *args) {
    return str_uni_strip(RSTRIP, n_args, args);
}
+
+// Takes an int arg, but only parses unsigned numbers, and only changes
+// *num if at least one digit was parsed.
+STATIC const char *str_to_int(const char *str, const char *top, int *num) {
+ if (str < top && '0' <= *str && *str <= '9') {
+ *num = 0;
+ do {
+ *num = *num * 10 + (*str - '0');
+ str++;
+ }
+ while (str < top && '0' <= *str && *str <= '9');
+ }
+ return str;
+}
+
+STATIC bool isalignment(char ch) {
+ return ch && strchr("<>=^", ch) != NULL;
+}
+
+STATIC bool istype(char ch) {
+ return ch && strchr("bcdeEfFgGnosxX%", ch) != NULL;
+}
+
// True for bool and int - the types accepted by integer format conversions.
STATIC bool arg_looks_integer(mp_obj_t arg) {
    return MP_OBJ_IS_TYPE(arg, &mp_type_bool) || MP_OBJ_IS_INT(arg);
}

// True for integers and, when floats are enabled, floats.
STATIC bool arg_looks_numeric(mp_obj_t arg) {
    return arg_looks_integer(arg)
#if MICROPY_PY_BUILTINS_FLOAT
        || mp_obj_is_float(arg)
#endif
        ;
}

// Coerce a float argument to an int (for integer format conversions);
// any other object is returned unchanged.
STATIC mp_obj_t arg_as_int(mp_obj_t arg) {
#if MICROPY_PY_BUILTINS_FLOAT
    if (mp_obj_is_float(arg)) {
        return mp_obj_new_int_from_float(mp_obj_float_get(arg));
    }
#endif
    return arg;
}
+
// Raise the single generic ValueError used for all format-string errors
// when terse error reporting is configured (saves ROM for the messages).
STATIC NORETURN void terse_str_format_value_error(void) {
    nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "bad format string"));
}
+
+STATIC vstr_t mp_obj_str_format_helper(const char *str, const char *top, int *arg_i, mp_uint_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
+ vstr_t vstr;
+ mp_print_t print;
+ vstr_init_print(&vstr, 16, &print);
+
+ for (; str < top; str++) {
+ if (*str == '}') {
+ str++;
+ if (str < top && *str == '}') {
+ vstr_add_byte(&vstr, '}');
+ continue;
+ }
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ terse_str_format_value_error();
+ } else {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+ "single '}' encountered in format string"));
+ }
+ }
+ if (*str != '{') {
+ vstr_add_byte(&vstr, *str);
+ continue;
+ }
+
+ str++;
+ if (str < top && *str == '{') {
+ vstr_add_byte(&vstr, '{');
+ continue;
+ }
+
+ // replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"
+
+ const char *field_name = NULL;
+ const char *field_name_top = NULL;
+ char conversion = '\0';
+ const char *format_spec = NULL;
+
+ if (str < top && *str != '}' && *str != '!' && *str != ':') {
+ field_name = (const char *)str;
+ while (str < top && *str != '}' && *str != '!' && *str != ':') {
+ ++str;
+ }
+ field_name_top = (const char *)str;
+ }
+
+ // conversion ::= "r" | "s"
+
+ if (str < top && *str == '!') {
+ str++;
+ if (str < top && (*str == 'r' || *str == 's')) {
+ conversion = *str++;
+ } else {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ terse_str_format_value_error();
+ } else if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NORMAL) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+ "bad conversion specifier"));
+ } else {
+ if (str >= top) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+ "end of format while looking for conversion specifier"));
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError,
+ "unknown conversion specifier %c", *str));
+ }
+ }
+ }
+ }
+
+ if (str < top && *str == ':') {
+ str++;
+ // {:} is the same as {}, which is the same as {!s}
+ // This makes a difference when passing in a True or False
+ // '{}'.format(True) returns 'True'
+ // '{:d}'.format(True) returns '1'
+ // So we treat {:} as {} and this later gets treated to be {!s}
+ if (*str != '}') {
+ format_spec = str;
+ for (int nest = 1; str < top;) {
+ if (*str == '{') {
+ ++nest;
+ } else if (*str == '}') {
+ if (--nest == 0) {
+ break;
+ }
+ }
+ ++str;
+ }
+ }
+ }
+ if (str >= top) {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ terse_str_format_value_error();
+ } else {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+ "unmatched '{' in format"));
+ }
+ }
+ if (*str != '}') {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ terse_str_format_value_error();
+ } else {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+ "expected ':' after format specifier"));
+ }
+ }
+
+ mp_obj_t arg = mp_const_none;
+
+ if (field_name) {
+ int index = 0;
+ if (MP_LIKELY(unichar_isdigit(*field_name))) {
+ if (*arg_i > 0) {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ terse_str_format_value_error();
+ } else {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+ "can't switch from automatic field numbering to manual field specification"));
+ }
+ }
+ field_name = str_to_int(field_name, field_name_top, &index);
+ if ((uint)index >= n_args - 1) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_IndexError, "tuple index out of range"));
+ }
+ arg = args[index + 1];
+ *arg_i = -1;
+ } else {
+ const char *lookup;
+ for (lookup = field_name; lookup < field_name_top && *lookup != '.' && *lookup != '['; lookup++);
+ mp_obj_t field_q = mp_obj_new_str(field_name, lookup - field_name, true/*?*/);
+ field_name = lookup;
+ mp_map_elem_t *key_elem = mp_map_lookup(kwargs, field_q, MP_MAP_LOOKUP);
+ if (key_elem == NULL) {
+ nlr_raise(mp_obj_new_exception_arg1(&mp_type_KeyError, field_q));
+ }
+ arg = key_elem->value;
+ }
+ if (field_name < field_name_top) {
+ mp_not_implemented("attributes not supported yet");
+ }
+ } else {
+ if (*arg_i < 0) {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ terse_str_format_value_error();
+ } else {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+ "can't switch from manual field specification to automatic field numbering"));
+ }
+ }
+ if ((uint)*arg_i >= n_args - 1) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_IndexError, "tuple index out of range"));
+ }
+ arg = args[(*arg_i) + 1];
+ (*arg_i)++;
+ }
+ if (!format_spec && !conversion) {
+ conversion = 's';
+ }
+ if (conversion) {
+ mp_print_kind_t print_kind;
+ if (conversion == 's') {
+ print_kind = PRINT_STR;
+ } else {
+ assert(conversion == 'r');
+ print_kind = PRINT_REPR;
+ }
+ vstr_t arg_vstr;
+ mp_print_t arg_print;
+ vstr_init_print(&arg_vstr, 16, &arg_print);
+ mp_obj_print_helper(&arg_print, arg, print_kind);
+ arg = mp_obj_new_str_from_vstr(&mp_type_str, &arg_vstr);
+ }
+
+ char sign = '\0';
+ char fill = '\0';
+ char align = '\0';
+ int width = -1;
+ int precision = -1;
+ char type = '\0';
+ int flags = 0;
+
+ if (format_spec) {
+ // The format specifier (from http://docs.python.org/2/library/string.html#formatspec)
+ //
+ // [[fill]align][sign][#][0][width][,][.precision][type]
+ // fill ::= <any character>
+ // align ::= "<" | ">" | "=" | "^"
+ // sign ::= "+" | "-" | " "
+ // width ::= integer
+ // precision ::= integer
+ // type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"
+
+ // recursively call the formatter to format any nested specifiers
+ MP_STACK_CHECK();
+ vstr_t format_spec_vstr = mp_obj_str_format_helper(format_spec, str, arg_i, n_args, args, kwargs);
+ const char *s = vstr_null_terminated_str(&format_spec_vstr);
+ const char *stop = s + format_spec_vstr.len;
+ if (isalignment(*s)) {
+ align = *s++;
+ } else if (*s && isalignment(s[1])) {
+ fill = *s++;
+ align = *s++;
+ }
+ if (*s == '+' || *s == '-' || *s == ' ') {
+ if (*s == '+') {
+ flags |= PF_FLAG_SHOW_SIGN;
+ } else if (*s == ' ') {
+ flags |= PF_FLAG_SPACE_SIGN;
+ }
+ sign = *s++;
+ }
+ if (*s == '#') {
+ flags |= PF_FLAG_SHOW_PREFIX;
+ s++;
+ }
+ if (*s == '0') {
+ if (!align) {
+ align = '=';
+ }
+ if (!fill) {
+ fill = '0';
+ }
+ }
+ s = str_to_int(s, stop, &width);
+ if (*s == ',') {
+ flags |= PF_FLAG_SHOW_COMMA;
+ s++;
+ }
+ if (*s == '.') {
+ s++;
+ s = str_to_int(s, stop, &precision);
+ }
+ if (istype(*s)) {
+ type = *s++;
+ }
+ if (*s) {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ terse_str_format_value_error();
+ } else {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+ "invalid format specifier"));
+ }
+ }
+ vstr_clear(&format_spec_vstr);
+ }
+ if (!align) {
+ if (arg_looks_numeric(arg)) {
+ align = '>';
+ } else {
+ align = '<';
+ }
+ }
+ if (!fill) {
+ fill = ' ';
+ }
+
+ if (sign) {
+ if (type == 's') {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ terse_str_format_value_error();
+ } else {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+ "sign not allowed in string format specifier"));
+ }
+ }
+ if (type == 'c') {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ terse_str_format_value_error();
+ } else {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+ "sign not allowed with integer format specifier 'c'"));
+ }
+ }
+ } else {
+ sign = '-';
+ }
+
+ switch (align) {
+ case '<': flags |= PF_FLAG_LEFT_ADJUST; break;
+ case '=': flags |= PF_FLAG_PAD_AFTER_SIGN; break;
+ case '^': flags |= PF_FLAG_CENTER_ADJUST; break;
+ }
+
+ if (arg_looks_integer(arg)) {
+ switch (type) {
+ case 'b':
+ mp_print_mp_int(&print, arg, 2, 'a', flags, fill, width, 0);
+ continue;
+
+ case 'c':
+ {
+ char ch = mp_obj_get_int(arg);
+ mp_print_strn(&print, &ch, 1, flags, fill, width);
+ continue;
+ }
+
+ case '\0': // No explicit format type implies 'd'
+ case 'n': // I don't think we support locales in uPy so use 'd'
+ case 'd':
+ mp_print_mp_int(&print, arg, 10, 'a', flags, fill, width, 0);
+ continue;
+
+ case 'o':
+ if (flags & PF_FLAG_SHOW_PREFIX) {
+ flags |= PF_FLAG_SHOW_OCTAL_LETTER;
+ }
+
+ mp_print_mp_int(&print, arg, 8, 'a', flags, fill, width, 0);
+ continue;
+
+ case 'X':
+ case 'x':
+ mp_print_mp_int(&print, arg, 16, type - ('X' - 'A'), flags, fill, width, 0);
+ continue;
+
+ case 'e':
+ case 'E':
+ case 'f':
+ case 'F':
+ case 'g':
+ case 'G':
+ case '%':
+ // The floating point formatters all work with anything that
+ // looks like an integer
+ break;
+
+ default:
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ terse_str_format_value_error();
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError,
+ "unknown format code '%c' for object of type '%s'",
+ type, mp_obj_get_type_str(arg)));
+ }
+ }
+ }
+
+ // NOTE: no else here. We need the e, f, g etc formats for integer
+ // arguments (from above if) to take this if.
+ if (arg_looks_numeric(arg)) {
+ if (!type) {
+
+ // Even though the docs say that an unspecified type is the same
+ // as 'g', there is one subtle difference, when the exponent
+ // is one less than the precision.
+ //
+ // '{:10.1}'.format(0.0) ==> '0e+00'
+ // '{:10.1g}'.format(0.0) ==> '0'
+ //
+ // TODO: Figure out how to deal with this.
+ //
+ // A proper solution would involve adding a special flag
+ // or something to format_float, and create a format_double
+ // to deal with doubles. In order to fix this when using
+ // sprintf, we'd need to use the e format and tweak the
+ // returned result to strip trailing zeros like the g format
+ // does.
+ //
+ // {:10.3} and {:10.2e} with 1.23e2 both produce 1.23e+02
+ // but with 1.e2 you get 1e+02 and 1.00e+02
+ //
+ // Stripping the trailing 0's (like g) does would make the
+ // e format give us the right format.
+ //
+ // CPython sources say:
+ // Omitted type specifier. Behaves in the same way as repr(x)
+ // and str(x) if no precision is given, else like 'g', but with
+ // at least one digit after the decimal point. */
+
+ type = 'g';
+ }
+ if (type == 'n') {
+ type = 'g';
+ }
+
+ switch (type) {
+#if MICROPY_PY_BUILTINS_FLOAT
+ case 'e':
+ case 'E':
+ case 'f':
+ case 'F':
+ case 'g':
+ case 'G':
+ mp_print_float(&print, mp_obj_get_float(arg), type, flags, fill, width, precision);
+ break;
+
+ case '%':
+ flags |= PF_FLAG_ADD_PERCENT;
+ #if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+ #define F100 100.0F
+ #else
+ #define F100 100.0
+ #endif
+ mp_print_float(&print, mp_obj_get_float(arg) * F100, 'f', flags, fill, width, precision);
+ #undef F100
+ break;
+#endif
+
+ default:
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ terse_str_format_value_error();
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError,
+ "unknown format code '%c' for object of type 'float'",
+ type, mp_obj_get_type_str(arg)));
+ }
+ }
+ } else {
+ // arg doesn't look like a number
+
+ if (align == '=') {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ terse_str_format_value_error();
+ } else {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+ "'=' alignment not allowed in string format specifier"));
+ }
+ }
+
+ switch (type) {
+ case '\0': // no explicit format type implies 's'
+ case 's': {
+ mp_uint_t slen;
+ const char *s = mp_obj_str_get_data(arg, &slen);
+ if (precision < 0) {
+ precision = slen;
+ }
+ if (slen > (mp_uint_t)precision) {
+ slen = precision;
+ }
+ mp_print_strn(&print, s, slen, flags, fill, width);
+ break;
+ }
+
+ default:
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ terse_str_format_value_error();
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError,
+ "unknown format code '%c' for object of type 'str'",
+ type, mp_obj_get_type_str(arg)));
+ }
+ }
+ }
+ }
+
+ return vstr;
+}
+
+mp_obj_t mp_obj_str_format(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
+ assert(MP_OBJ_IS_STR_OR_BYTES(args[0]));
+
+ GET_STR_DATA_LEN(args[0], str, len);
+ int arg_i = 0;
+ vstr_t vstr = mp_obj_str_format_helper((const char*)str, (const char*)str + len, &arg_i, n_args, args, kwargs);
+ return mp_obj_new_str_from_vstr(&mp_type_str, &vstr);
+}
+
// Implement printf-style formatting: pattern % args.  `pattern` is a str or
// bytes format string, args/n_args hold the positional values, and `dict`
// supplies values for %(key)s lookups.  Returns a new str (or bytes, when the
// pattern is bytes).
STATIC mp_obj_t str_modulo_format(mp_obj_t pattern, mp_uint_t n_args, const mp_obj_t *args, mp_obj_t dict) {
    assert(MP_OBJ_IS_STR_OR_BYTES(pattern));

    GET_STR_DATA_LEN(pattern, str, len);
    const byte *start_str = str; // kept only for offsets in error messages
    bool is_bytes = MP_OBJ_IS_TYPE(pattern, &mp_type_bytes);
    int arg_i = 0; // index of next positional argument to consume
    vstr_t vstr;
    mp_print_t print;
    vstr_init_print(&vstr, 16, &print);

    for (const byte *top = str + len; str < top; str++) {
        mp_obj_t arg = MP_OBJ_NULL;
        // ordinary characters are copied through unchanged
        if (*str != '%') {
            vstr_add_byte(&vstr, *str);
            continue;
        }
        if (++str >= top) {
            goto incomplete_format;
        }
        // %% is an escaped literal percent
        if (*str == '%') {
            vstr_add_byte(&vstr, '%');
            continue;
        }

        // Dictionary value lookup
        // NOTE(review): *str is dereferenced before the str >= top check in
        // the loop below; this appears to rely on strings being
        // NUL-terminated so that reading at `top` is safe — confirm.
        if (*str == '(') {
            const byte *key = ++str;
            while (*str != ')') {
                if (str >= top) {
                    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
                        terse_str_format_value_error();
                    } else {
                        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
                            "incomplete format key"));
                    }
                }
                ++str;
            }
            mp_obj_t k_obj = mp_obj_new_str((const char*)key, str - key, true);
            arg = mp_obj_dict_get(dict, k_obj);
            str++;
        }

        // parse conversion flags: -, +, space, #, 0
        int flags = 0;
        char fill = ' ';
        int alt = 0;
        while (str < top) {
            if (*str == '-') flags |= PF_FLAG_LEFT_ADJUST;
            else if (*str == '+') flags |= PF_FLAG_SHOW_SIGN;
            else if (*str == ' ') flags |= PF_FLAG_SPACE_SIGN;
            else if (*str == '#') alt = PF_FLAG_SHOW_PREFIX;
            else if (*str == '0') {
                flags |= PF_FLAG_PAD_AFTER_SIGN;
                fill = '0';
            } else break;
            str++;
        }
        // parse width, if it exists; '*' takes the width from the next argument
        int width = 0;
        if (str < top) {
            if (*str == '*') {
                if ((uint)arg_i >= n_args) {
                    goto not_enough_args;
                }
                width = mp_obj_get_int(args[arg_i++]);
                str++;
            } else {
                str = (const byte*)str_to_int((const char*)str, (const char*)top, &width);
            }
        }
        // parse precision after '.'; -1 means "not specified"
        int prec = -1;
        if (str < top && *str == '.') {
            if (++str < top) {
                if (*str == '*') {
                    if ((uint)arg_i >= n_args) {
                        goto not_enough_args;
                    }
                    prec = mp_obj_get_int(args[arg_i++]);
                    str++;
                } else {
                    prec = 0;
                    str = (const byte*)str_to_int((const char*)str, (const char*)top, &prec);
                }
            }
        }

        if (str >= top) {
incomplete_format:
            if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
                terse_str_format_value_error();
            } else {
                nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
                    "incomplete format"));
            }
        }

        // Tuple value lookup (only when no %(key)s lookup already set arg)
        if (arg == MP_OBJ_NULL) {
            if ((uint)arg_i >= n_args) {
not_enough_args:
                nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "not enough arguments for format string"));
            }
            arg = args[arg_i++];
        }
        // dispatch on the conversion character
        switch (*str) {
            case 'c':
                if (MP_OBJ_IS_STR(arg)) {
                    mp_uint_t slen;
                    const char *s = mp_obj_str_get_data(arg, &slen);
                    if (slen != 1) {
                        nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
                            "%%c requires int or char"));
                    }
                    mp_print_strn(&print, s, 1, flags, ' ', width);
                } else if (arg_looks_integer(arg)) {
                    char ch = mp_obj_get_int(arg);
                    mp_print_strn(&print, &ch, 1, flags, ' ', width);
                } else {
                    nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
                        "integer required"));
                }
                break;

            case 'd':
            case 'i':
            case 'u':
                mp_print_mp_int(&print, arg_as_int(arg), 10, 'a', flags, fill, width, prec);
                break;

#if MICROPY_PY_BUILTINS_FLOAT
            case 'e':
            case 'E':
            case 'f':
            case 'F':
            case 'g':
            case 'G':
                mp_print_float(&print, mp_obj_get_float(arg), *str, flags, fill, width, prec);
                break;
#endif

            case 'o':
                // NOTE(review): unlike 'd', this passes arg without the
                // arg_as_int() coercion — presumably intentional so that
                // non-integers raise; confirm against CPython behavior.
                if (alt) {
                    flags |= (PF_FLAG_SHOW_PREFIX | PF_FLAG_SHOW_OCTAL_LETTER);
                }
                mp_print_mp_int(&print, arg, 8, 'a', flags, fill, width, prec);
                break;

            case 'r':
            case 's':
            {
                vstr_t arg_vstr;
                mp_print_t arg_print;
                vstr_init_print(&arg_vstr, 16, &arg_print);
                mp_print_kind_t print_kind = (*str == 'r' ? PRINT_REPR : PRINT_STR);
                if (print_kind == PRINT_STR && is_bytes && MP_OBJ_IS_TYPE(arg, &mp_type_bytes)) {
                    // If we have something like b"%s" % b"1", bytes arg should be
                    // printed undecorated.
                    print_kind = PRINT_RAW;
                }
                mp_obj_print_helper(&arg_print, arg, print_kind);
                uint vlen = arg_vstr.len;
                // precision truncates the printed representation
                if (prec < 0) {
                    prec = vlen;
                }
                if (vlen > (uint)prec) {
                    vlen = prec;
                }
                mp_print_strn(&print, arg_vstr.buf, vlen, flags, ' ', width);
                vstr_clear(&arg_vstr);
                break;
            }

            case 'X':
            case 'x':
                // 'x' - ('X'-'A') == 'a', 'X' - ('X'-'A') == 'A': selects hex digit case
                mp_print_mp_int(&print, arg, 16, *str - ('X' - 'A'), flags | alt, fill, width, prec);
                break;

            default:
                if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
                    terse_str_format_value_error();
                } else {
                    nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError,
                        "unsupported format character '%c' (0x%x) at index %d",
                        *str, *str, str - start_str));
                }
        }
    }

    // all positional arguments must have been consumed
    if ((uint)arg_i != n_args) {
        nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "not all arguments converted during string formatting"));
    }

    return mp_obj_new_str_from_vstr(is_bytes ? &mp_type_bytes : &mp_type_str, &vstr);
}
+
+// The implementation is optimized, returning the original string if there's
+// nothing to replace.
// str.replace(old, new[, count]): replace occurrences of `old` with `new`,
// at most `count` times when given.  Returns the original object unchanged
// when there is nothing to replace.  Implemented as two passes over the
// string: pass 1 (data == NULL) only measures the result length, pass 2
// writes into the freshly allocated buffer.
STATIC mp_obj_t str_replace(size_t n_args, const mp_obj_t *args) {
    assert(MP_OBJ_IS_STR_OR_BYTES(args[0]));

    mp_int_t max_rep = -1;
    if (n_args == 4) {
        max_rep = mp_obj_get_int(args[3]);
        if (max_rep == 0) {
            // zero replacements requested: nothing to do
            return args[0];
        } else if (max_rep < 0) {
            max_rep = -1;
        }
    }

    // if max_rep is still -1 by this point we will need to do all possible replacements

    // check argument types

    const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);

    if (mp_obj_get_type(args[1]) != self_type) {
        bad_implicit_conversion(args[1]);
    }

    if (mp_obj_get_type(args[2]) != self_type) {
        bad_implicit_conversion(args[2]);
    }

    // extract string data

    GET_STR_DATA_LEN(args[0], str, str_len);
    GET_STR_DATA_LEN(args[1], old, old_len);
    GET_STR_DATA_LEN(args[2], new, new_len);

    // old won't exist in str if it's longer, so nothing to replace
    if (old_len > str_len) {
        return args[0];
    }

    // data for the replaced string; NULL during the measuring pass
    byte *data = NULL;
    vstr_t vstr;

    // do 2 passes over the string:
    //   first pass computes the required length of the replaced string
    //   second pass does the replacements
    for (;;) {
        mp_uint_t replaced_str_index = 0;
        mp_uint_t num_replacements_done = 0;
        const byte *old_occurrence;
        const byte *offset_ptr = str;
        mp_uint_t str_len_remain = str_len;
        if (old_len == 0) {
            // if old_str is empty, copy new_str to start of replaced string
            // copy the replacement string
            if (data != NULL) {
                memcpy(data, new, new_len);
            }
            replaced_str_index += new_len;
            num_replacements_done++;
        }
        while (num_replacements_done != (mp_uint_t)max_rep && str_len_remain > 0 && (old_occurrence = find_subbytes(offset_ptr, str_len_remain, old, old_len, 1)) != NULL) {
            if (old_len == 0) {
                // empty `old` matches between every character: advance by one
                old_occurrence += 1;
            }
            // copy from just after end of last occurrence of to-be-replaced string to right before start of next occurrence
            if (data != NULL) {
                memcpy(data + replaced_str_index, offset_ptr, old_occurrence - offset_ptr);
            }
            replaced_str_index += old_occurrence - offset_ptr;
            // copy the replacement string
            if (data != NULL) {
                memcpy(data + replaced_str_index, new, new_len);
            }
            replaced_str_index += new_len;
            offset_ptr = old_occurrence + old_len;
            str_len_remain = str + str_len - offset_ptr;
            num_replacements_done++;
        }

        // copy from just after end of last occurrence of to-be-replaced string to end of old string
        if (data != NULL) {
            memcpy(data + replaced_str_index, offset_ptr, str_len_remain);
        }
        replaced_str_index += str_len_remain;

        if (data == NULL) {
            // first pass
            if (num_replacements_done == 0) {
                // no substr found, return original string
                return args[0];
            } else {
                // substr found, allocate new string
                vstr_init_len(&vstr, replaced_str_index);
                data = (byte*)vstr.buf;
                assert(data != NULL);
            }
        } else {
            // second pass, we are done
            break;
        }
    }

    return mp_obj_new_str_from_vstr(self_type, &vstr);
}
+
// str.count(sub[, start[, end]]): count non-overlapping occurrences of `sub`
// within the (optionally sliced) string.  An empty needle counts the gaps
// between characters, i.e. charlen + 1, matching CPython.
STATIC mp_obj_t str_count(size_t n_args, const mp_obj_t *args) {
    const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);
    assert(2 <= n_args && n_args <= 4);
    assert(MP_OBJ_IS_STR_OR_BYTES(args[0]));

    // check argument type
    if (mp_obj_get_type(args[1]) != self_type) {
        bad_implicit_conversion(args[1]);
    }

    GET_STR_DATA_LEN(args[0], haystack, haystack_len);
    GET_STR_DATA_LEN(args[1], needle, needle_len);

    // resolve optional start/end indices into byte pointers
    const byte *start = haystack;
    const byte *end = haystack + haystack_len;
    if (n_args >= 3 && args[2] != mp_const_none) {
        start = str_index_to_ptr(self_type, haystack, haystack_len, args[2], true);
    }
    if (n_args >= 4 && args[3] != mp_const_none) {
        end = str_index_to_ptr(self_type, haystack, haystack_len, args[3], true);
    }

    // if needle_len is zero then we count each gap between characters as an occurrence
    if (needle_len == 0) {
        return MP_OBJ_NEW_SMALL_INT(unichar_charlen((const char*)start, end - start) + 1);
    }

    // count the occurrences; after a match skip the whole needle (matches
    // don't overlap), otherwise advance by one character (utf8-aware step)
    mp_int_t num_occurrences = 0;
    for (const byte *haystack_ptr = start; haystack_ptr + needle_len <= end;) {
        if (memcmp(haystack_ptr, needle, needle_len) == 0) {
            num_occurrences++;
            haystack_ptr += needle_len;
        } else {
            haystack_ptr = utf8_next_char(haystack_ptr);
        }
    }

    return MP_OBJ_NEW_SMALL_INT(num_occurrences);
}
+
// Common implementation for str.partition (direction > 0, first occurrence)
// and str.rpartition (direction < 0, last occurrence).  Always returns a
// 3-tuple (before, sep, after).
STATIC mp_obj_t str_partitioner(mp_obj_t self_in, mp_obj_t arg, mp_int_t direction) {
    assert(MP_OBJ_IS_STR_OR_BYTES(self_in));
    mp_obj_type_t *self_type = mp_obj_get_type(self_in);
    if (self_type != mp_obj_get_type(arg)) {
        bad_implicit_conversion(arg);
    }

    GET_STR_DATA_LEN(self_in, str, str_len);
    GET_STR_DATA_LEN(arg, sep, sep_len);

    if (sep_len == 0) {
        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "empty separator"));
    }

    // pre-fill with empty strings of the matching type (qstr "" for str,
    // the shared empty-bytes singleton for bytes)
    mp_obj_t result[3];
    if (self_type == &mp_type_str) {
        result[0] = MP_OBJ_NEW_QSTR(MP_QSTR_);
        result[1] = MP_OBJ_NEW_QSTR(MP_QSTR_);
        result[2] = MP_OBJ_NEW_QSTR(MP_QSTR_);
    } else {
        result[0] = mp_const_empty_bytes;
        result[1] = mp_const_empty_bytes;
        result[2] = mp_const_empty_bytes;
    }

    // when the separator is absent: partition puts the whole string first,
    // rpartition puts it last (CPython semantics)
    if (direction > 0) {
        result[0] = self_in;
    } else {
        result[2] = self_in;
    }

    const byte *position_ptr = find_subbytes(str, str_len, sep, sep_len, direction);
    if (position_ptr != NULL) {
        mp_uint_t position = position_ptr - str;
        result[0] = mp_obj_new_str_of_type(self_type, str, position);
        result[1] = arg;
        result[2] = mp_obj_new_str_of_type(self_type, str + position + sep_len, str_len - position - sep_len);
    }

    return mp_obj_new_tuple(3, result);
}
+
+STATIC mp_obj_t str_partition(mp_obj_t self_in, mp_obj_t arg) {
+ return str_partitioner(self_in, arg, 1);
+}
+
+STATIC mp_obj_t str_rpartition(mp_obj_t self_in, mp_obj_t arg) {
+ return str_partitioner(self_in, arg, -1);
+}
+
+// Supposedly not too critical operations, so optimize for code size
+STATIC mp_obj_t str_caseconv(unichar (*op)(unichar), mp_obj_t self_in) {
+ GET_STR_DATA_LEN(self_in, self_data, self_len);
+ vstr_t vstr;
+ vstr_init_len(&vstr, self_len);
+ byte *data = (byte*)vstr.buf;
+ for (mp_uint_t i = 0; i < self_len; i++) {
+ *data++ = op(*self_data++);
+ }
+ return mp_obj_new_str_from_vstr(mp_obj_get_type(self_in), &vstr);
+}
+
// str.lower(): byte-wise lowercase via str_caseconv.
STATIC mp_obj_t str_lower(mp_obj_t self_in) {
    return str_caseconv(unichar_tolower, self_in);
}

// str.upper(): byte-wise uppercase via str_caseconv.
STATIC mp_obj_t str_upper(mp_obj_t self_in) {
    return str_caseconv(unichar_toupper, self_in);
}
+
// Shared implementation of the str.is*() predicates: tests predicate f over
// the string's characters.  Empty strings are always False.  isupper/islower
// are special-cased per CPython: only alphabetic characters are tested, and
// at least one alphabetic character must be present.
STATIC mp_obj_t str_uni_istype(bool (*f)(unichar), mp_obj_t self_in) {
    GET_STR_DATA_LEN(self_in, self_data, self_len);

    if (self_len == 0) {
        return mp_const_false; // default to False for empty str
    }

    if (f != unichar_isupper && f != unichar_islower) {
        // simple case: every character must satisfy the predicate
        for (mp_uint_t i = 0; i < self_len; i++) {
            if (!f(*self_data++)) {
                return mp_const_false;
            }
        }
    } else {
        bool contains_alpha = false;

        for (mp_uint_t i = 0; i < self_len; i++) { // only check alphanumeric characters
            if (unichar_isalpha(*self_data++)) {
                contains_alpha = true;
                if (!f(*(self_data - 1))) { // -1 because we already incremented above
                    return mp_const_false;
                }
            }
        }

        // a string with no letters at all is neither upper nor lower
        if (!contains_alpha) {
            return mp_const_false;
        }
    }

    return mp_const_true;
}
+
// Thin wrappers binding each str.is*() method to its character predicate.
STATIC mp_obj_t str_isspace(mp_obj_t self_in) {
    return str_uni_istype(unichar_isspace, self_in);
}

STATIC mp_obj_t str_isalpha(mp_obj_t self_in) {
    return str_uni_istype(unichar_isalpha, self_in);
}

STATIC mp_obj_t str_isdigit(mp_obj_t self_in) {
    return str_uni_istype(unichar_isdigit, self_in);
}

STATIC mp_obj_t str_isupper(mp_obj_t self_in) {
    return str_uni_istype(unichar_isupper, self_in);
}

STATIC mp_obj_t str_islower(mp_obj_t self_in) {
    return str_uni_istype(unichar_islower, self_in);
}
+
+#if MICROPY_CPYTHON_COMPAT
+// These methods are superfluous in the presense of str() and bytes()
+// constructors.
+// TODO: should accept kwargs too
// bytes.decode([encoding[, errors]]): defaults encoding to "utf-8" then
// defers to the str constructor.  A third (errors) argument, if supplied, is
// passed straight through to mp_obj_str_make_new.
STATIC mp_obj_t bytes_decode(size_t n_args, const mp_obj_t *args) {
    mp_obj_t new_args[2];
    if (n_args == 1) {
        // no encoding given: substitute the default "utf-8"
        new_args[0] = args[0];
        new_args[1] = MP_OBJ_NEW_QSTR(MP_QSTR_utf_hyphen_8);
        args = new_args;
        n_args++;
    }
    return mp_obj_str_make_new(&mp_type_str, n_args, 0, args);
}
+
+// TODO: should accept kwargs too
// TODO: should accept kwargs too
// str.encode([encoding[, errors]]): mirror of bytes_decode, defaulting the
// encoding to "utf-8" and deferring to the bytes constructor.
STATIC mp_obj_t str_encode(size_t n_args, const mp_obj_t *args) {
    mp_obj_t new_args[2];
    if (n_args == 1) {
        // no encoding given: substitute the default "utf-8"
        new_args[0] = args[0];
        new_args[1] = MP_OBJ_NEW_QSTR(MP_QSTR_utf_hyphen_8);
        args = new_args;
        n_args++;
    }
    return bytes_make_new(NULL, n_args, 0, args);
}
+#endif
+
+mp_int_t mp_obj_str_get_buffer(mp_obj_t self_in, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
+ if (flags == MP_BUFFER_READ) {
+ GET_STR_DATA_LEN(self_in, str_data, str_len);
+ bufinfo->buf = (void*)str_data;
+ bufinfo->len = str_len;
+ bufinfo->typecode = 'b';
+ return 0;
+ } else {
+ // can't write to a string
+ bufinfo->buf = NULL;
+ bufinfo->len = 0;
+ bufinfo->typecode = -1;
+ return 1;
+ }
+}
+
// Function objects wrapping each str/bytes method, referenced by the
// locals dict table below.
#if MICROPY_CPYTHON_COMPAT
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(bytes_decode_obj, 1, 3, bytes_decode);
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_encode_obj, 1, 3, str_encode);
#endif
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_find_obj, 2, 4, str_find);
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_rfind_obj, 2, 4, str_rfind);
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_index_obj, 2, 4, str_index);
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_rindex_obj, 2, 4, str_rindex);
MP_DEFINE_CONST_FUN_OBJ_2(str_join_obj, str_join);
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_split_obj, 1, 3, mp_obj_str_split);
#if MICROPY_PY_BUILTINS_STR_SPLITLINES
MP_DEFINE_CONST_FUN_OBJ_KW(str_splitlines_obj, 1, str_splitlines);
#endif
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_rsplit_obj, 1, 3, str_rsplit);
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_startswith_obj, 2, 3, str_startswith);
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_endswith_obj, 2, 3, str_endswith);
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_strip_obj, 1, 2, str_strip);
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_lstrip_obj, 1, 2, str_lstrip);
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_rstrip_obj, 1, 2, str_rstrip);
MP_DEFINE_CONST_FUN_OBJ_KW(str_format_obj, 1, mp_obj_str_format);
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_replace_obj, 3, 4, str_replace);
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_count_obj, 2, 4, str_count);
MP_DEFINE_CONST_FUN_OBJ_2(str_partition_obj, str_partition);
MP_DEFINE_CONST_FUN_OBJ_2(str_rpartition_obj, str_rpartition);
MP_DEFINE_CONST_FUN_OBJ_1(str_lower_obj, str_lower);
MP_DEFINE_CONST_FUN_OBJ_1(str_upper_obj, str_upper);
MP_DEFINE_CONST_FUN_OBJ_1(str_isspace_obj, str_isspace);
MP_DEFINE_CONST_FUN_OBJ_1(str_isalpha_obj, str_isalpha);
MP_DEFINE_CONST_FUN_OBJ_1(str_isdigit_obj, str_isdigit);
MP_DEFINE_CONST_FUN_OBJ_1(str_isupper_obj, str_isupper);
MP_DEFINE_CONST_FUN_OBJ_1(str_islower_obj, str_islower);
+
// Method table shared by the 8-bit str type and bytes (see the comment
// inside regarding encode/decode availability).
STATIC const mp_rom_map_elem_t str8_locals_dict_table[] = {
#if MICROPY_CPYTHON_COMPAT
    { MP_ROM_QSTR(MP_QSTR_decode), MP_ROM_PTR(&bytes_decode_obj) },
    #if !MICROPY_PY_BUILTINS_STR_UNICODE
    // If we have separate unicode type, then here we have methods only
    // for bytes type, and it should not have encode() methods. Otherwise,
    // we have non-compliant-but-practical bytestring type, which shares
    // method table with bytes, so they both have encode() and decode()
    // methods (which should do type checking at runtime).
    { MP_ROM_QSTR(MP_QSTR_encode), MP_ROM_PTR(&str_encode_obj) },
    #endif
#endif
    { MP_ROM_QSTR(MP_QSTR_find), MP_ROM_PTR(&str_find_obj) },
    { MP_ROM_QSTR(MP_QSTR_rfind), MP_ROM_PTR(&str_rfind_obj) },
    { MP_ROM_QSTR(MP_QSTR_index), MP_ROM_PTR(&str_index_obj) },
    { MP_ROM_QSTR(MP_QSTR_rindex), MP_ROM_PTR(&str_rindex_obj) },
    { MP_ROM_QSTR(MP_QSTR_join), MP_ROM_PTR(&str_join_obj) },
    { MP_ROM_QSTR(MP_QSTR_split), MP_ROM_PTR(&str_split_obj) },
    #if MICROPY_PY_BUILTINS_STR_SPLITLINES
    { MP_ROM_QSTR(MP_QSTR_splitlines), MP_ROM_PTR(&str_splitlines_obj) },
    #endif
    { MP_ROM_QSTR(MP_QSTR_rsplit), MP_ROM_PTR(&str_rsplit_obj) },
    { MP_ROM_QSTR(MP_QSTR_startswith), MP_ROM_PTR(&str_startswith_obj) },
    { MP_ROM_QSTR(MP_QSTR_endswith), MP_ROM_PTR(&str_endswith_obj) },
    { MP_ROM_QSTR(MP_QSTR_strip), MP_ROM_PTR(&str_strip_obj) },
    { MP_ROM_QSTR(MP_QSTR_lstrip), MP_ROM_PTR(&str_lstrip_obj) },
    { MP_ROM_QSTR(MP_QSTR_rstrip), MP_ROM_PTR(&str_rstrip_obj) },
    { MP_ROM_QSTR(MP_QSTR_format), MP_ROM_PTR(&str_format_obj) },
    { MP_ROM_QSTR(MP_QSTR_replace), MP_ROM_PTR(&str_replace_obj) },
    { MP_ROM_QSTR(MP_QSTR_count), MP_ROM_PTR(&str_count_obj) },
    { MP_ROM_QSTR(MP_QSTR_partition), MP_ROM_PTR(&str_partition_obj) },
    { MP_ROM_QSTR(MP_QSTR_rpartition), MP_ROM_PTR(&str_rpartition_obj) },
    { MP_ROM_QSTR(MP_QSTR_lower), MP_ROM_PTR(&str_lower_obj) },
    { MP_ROM_QSTR(MP_QSTR_upper), MP_ROM_PTR(&str_upper_obj) },
    { MP_ROM_QSTR(MP_QSTR_isspace), MP_ROM_PTR(&str_isspace_obj) },
    { MP_ROM_QSTR(MP_QSTR_isalpha), MP_ROM_PTR(&str_isalpha_obj) },
    { MP_ROM_QSTR(MP_QSTR_isdigit), MP_ROM_PTR(&str_isdigit_obj) },
    { MP_ROM_QSTR(MP_QSTR_isupper), MP_ROM_PTR(&str_isupper_obj) },
    { MP_ROM_QSTR(MP_QSTR_islower), MP_ROM_PTR(&str_islower_obj) },
};

STATIC MP_DEFINE_CONST_DICT(str8_locals_dict, str8_locals_dict_table);
+
#if !MICROPY_PY_BUILTINS_STR_UNICODE
STATIC mp_obj_t mp_obj_new_str_iterator(mp_obj_t str);

// 8-bit (non-unicode) str type; only compiled when unicode support is off —
// otherwise a separate unicode str type is defined elsewhere.
const mp_obj_type_t mp_type_str = {
    { &mp_type_type },
    .name = MP_QSTR_str,
    .print = str_print,
    .make_new = mp_obj_str_make_new,
    .binary_op = mp_obj_str_binary_op,
    .subscr = bytes_subscr, // byte-wise subscripting, shared with bytes
    .getiter = mp_obj_new_str_iterator,
    .buffer_p = { .get_buffer = mp_obj_str_get_buffer },
    .locals_dict = (mp_obj_dict_t*)&str8_locals_dict,
};
#endif
+
// Reuses most of methods from str
const mp_obj_type_t mp_type_bytes = {
    { &mp_type_type },
    .name = MP_QSTR_bytes,
    .print = str_print,
    .make_new = bytes_make_new,
    .binary_op = mp_obj_str_binary_op,
    .subscr = bytes_subscr,
    .getiter = mp_obj_new_bytes_iterator, // yields small ints, not 1-char strings
    .buffer_p = { .get_buffer = mp_obj_str_get_buffer },
    .locals_dict = (mp_obj_dict_t*)&str8_locals_dict,
};

// the zero-length bytes (singleton: hash 0, len 0, no data)
const mp_obj_str_t mp_const_empty_bytes_obj = {{&mp_type_bytes}, 0, 0, NULL};
+
+// Create a str/bytes object using the given data. New memory is allocated and
+// the data is copied across.
+mp_obj_t mp_obj_new_str_of_type(const mp_obj_type_t *type, const byte* data, size_t len) {
+ mp_obj_str_t *o = m_new_obj(mp_obj_str_t);
+ o->base.type = type;
+ o->len = len;
+ if (data) {
+ o->hash = qstr_compute_hash(data, len);
+ byte *p = m_new(byte, len + 1);
+ o->data = p;
+ memcpy(p, data, len * sizeof(byte));
+ p[len] = '\0'; // for now we add null for compatibility with C ASCIIZ strings
+ }
+ return MP_OBJ_FROM_PTR(o);
+}
+
+// Create a str/bytes object from the given vstr. The vstr buffer is resized to
+// the exact length required and then reused for the str/bytes object. The vstr
+// is cleared and can safely be passed to vstr_free if it was heap allocated.
// Create a str/bytes object from the given vstr. The vstr buffer is resized to
// the exact length required and then reused for the str/bytes object. The vstr
// is cleared and can safely be passed to vstr_free if it was heap allocated.
// NOTE(review): this takes ownership of vstr->buf and may m_renew it, so it
// presumably must not be passed a fixed-buffer vstr — confirm.
mp_obj_t mp_obj_new_str_from_vstr(const mp_obj_type_t *type, vstr_t *vstr) {
    // if not a bytes object, look if a qstr with this data already exists
    if (type == &mp_type_str) {
        qstr q = qstr_find_strn(vstr->buf, vstr->len);
        if (q != MP_QSTR_NULL) {
            // reuse the interned string and discard the vstr buffer
            vstr_clear(vstr);
            vstr->alloc = 0;
            return MP_OBJ_NEW_QSTR(q);
        }
    }

    // make a new str/bytes object
    mp_obj_str_t *o = m_new_obj(mp_obj_str_t);
    o->base.type = type;
    o->len = vstr->len;
    o->hash = qstr_compute_hash((byte*)vstr->buf, vstr->len);
    if (vstr->len + 1 == vstr->alloc) {
        // buffer is already exactly the right size (incl. null terminator)
        o->data = (byte*)vstr->buf;
    } else {
        // shrink the allocation to len + 1
        o->data = (byte*)m_renew(char, vstr->buf, vstr->alloc, vstr->len + 1);
    }
    ((byte*)o->data)[o->len] = '\0'; // add null byte
    // detach the buffer from the vstr (ownership transferred to the object)
    vstr->buf = NULL;
    vstr->alloc = 0;
    return MP_OBJ_FROM_PTR(o);
}
+
+mp_obj_t mp_obj_new_str(const char* data, mp_uint_t len, bool make_qstr_if_not_already) {
+ if (make_qstr_if_not_already) {
+ // use existing, or make a new qstr
+ return MP_OBJ_NEW_QSTR(qstr_from_strn(data, len));
+ } else {
+ qstr q = qstr_find_strn(data, len);
+ if (q != MP_QSTR_NULL) {
+ // qstr with this data already exists
+ return MP_OBJ_NEW_QSTR(q);
+ } else {
+ // no existing qstr, don't make one
+ return mp_obj_new_str_of_type(&mp_type_str, (const byte*)data, len);
+ }
+ }
+}
+
// Intern a str object: return the qstr form of its data (creating the qstr
// if it doesn't already exist in the pool).
mp_obj_t mp_obj_str_intern(mp_obj_t str) {
    GET_STR_DATA_LEN(str, data, len);
    return MP_OBJ_NEW_QSTR(qstr_from_strn((const char*)data, len));
}
+
// Create a bytes object, copying the given data (see mp_obj_new_str_of_type).
mp_obj_t mp_obj_new_bytes(const byte* data, mp_uint_t len) {
    return mp_obj_new_str_of_type(&mp_type_bytes, data, len);
}
+
+bool mp_obj_str_equal(mp_obj_t s1, mp_obj_t s2) {
+ if (MP_OBJ_IS_QSTR(s1) && MP_OBJ_IS_QSTR(s2)) {
+ return s1 == s2;
+ } else {
+ GET_STR_HASH(s1, h1);
+ GET_STR_HASH(s2, h2);
+ // If any of hashes is 0, it means it's not valid
+ if (h1 != 0 && h2 != 0 && h1 != h2) {
+ return false;
+ }
+ GET_STR_DATA_LEN(s1, d1, l1);
+ GET_STR_DATA_LEN(s2, d2, l2);
+ if (l1 != l2) {
+ return false;
+ }
+ return memcmp(d1, d2, l1) == 0;
+ }
+}
+
// Raise TypeError for an argument that can't be implicitly converted to str.
// Does not return (nlr_raise performs a non-local jump).
STATIC void bad_implicit_conversion(mp_obj_t self_in) {
    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
        nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
            "can't convert to str implicitly"));
    } else {
        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
            "can't convert '%s' object to str implicitly",
            mp_obj_get_type_str(self_in)));
    }
}
+
+// use this if you will anyway convert the string to a qstr
+// will be more efficient for the case where it's already a qstr
// use this if you will anyway convert the string to a qstr
// will be more efficient for the case where it's already a qstr
qstr mp_obj_str_get_qstr(mp_obj_t self_in) {
    if (MP_OBJ_IS_QSTR(self_in)) {
        return MP_OBJ_QSTR_VALUE(self_in);
    } else if (MP_OBJ_IS_TYPE(self_in, &mp_type_str)) {
        mp_obj_str_t *self = MP_OBJ_TO_PTR(self_in);
        return qstr_from_strn((char*)self->data, self->len);
    } else {
        // raises TypeError; does not return, so falling off the end of this
        // non-void function here is unreachable
        bad_implicit_conversion(self_in);
    }
}
+
+// only use this function if you need the str data to be zero terminated
+// at the moment all strings are zero terminated to help with C ASCIIZ compatibility
// only use this function if you need the str data to be zero terminated
// at the moment all strings are zero terminated to help with C ASCIIZ compatibility
// Raises TypeError (via bad_implicit_conversion) for non-str/bytes input.
const char *mp_obj_str_get_str(mp_obj_t self_in) {
    if (MP_OBJ_IS_STR_OR_BYTES(self_in)) {
        GET_STR_DATA_LEN(self_in, s, l);
        (void)l; // len unused
        return (const char*)s;
    } else {
        // raises; does not return
        bad_implicit_conversion(self_in);
    }
}
+
// Get the data pointer and length of a str/bytes object.  Raises TypeError
// (via bad_implicit_conversion, which does not return) for other types.
const char *mp_obj_str_get_data(mp_obj_t self_in, mp_uint_t *len) {
    if (MP_OBJ_IS_STR_OR_BYTES(self_in)) {
        GET_STR_DATA_LEN(self_in, s, l);
        *len = l;
        return (const char*)s;
    } else {
        // raises; does not return
        bad_implicit_conversion(self_in);
    }
}
+
#if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C
// Like mp_obj_str_get_data but with no type check: caller must guarantee
// self_in is a qstr or an mp_obj_str_t.  Only used for object repr C.
const byte *mp_obj_str_get_data_no_check(mp_obj_t self_in, size_t *len) {
    if (MP_OBJ_IS_QSTR(self_in)) {
        return qstr_data(MP_OBJ_QSTR_VALUE(self_in), len);
    } else {
        *len = ((mp_obj_str_t*)self_in)->len;
        return ((mp_obj_str_t*)self_in)->data;
    }
}
#endif
+
+/******************************************************************************/
+/* str iterator */
+
// Iterator state shared by the 8-bit str iterator and the bytes iterator.
typedef struct _mp_obj_str8_it_t {
    mp_obj_base_t base;
    mp_fun_1_t iternext; // polymorphic-iterator callback
    mp_obj_t str;        // the str/bytes object being iterated
    mp_uint_t cur;       // current byte offset into str
} mp_obj_str8_it_t;
+
+#if !MICROPY_PY_BUILTINS_STR_UNICODE
+STATIC mp_obj_t str_it_iternext(mp_obj_t self_in) {
+ mp_obj_str8_it_t *self = self_in;
+ GET_STR_DATA_LEN(self->str, str, len);
+ if (self->cur < len) {
+ mp_obj_t o_out = mp_obj_new_str((const char*)str + self->cur, 1, true);
+ self->cur += 1;
+ return o_out;
+ } else {
+ return MP_OBJ_STOP_ITERATION;
+ }
+}
+
+STATIC mp_obj_t mp_obj_new_str_iterator(mp_obj_t str) {
+ mp_obj_str8_it_t *o = m_new_obj(mp_obj_str8_it_t);
+ o->base.type = &mp_type_polymorph_iter;
+ o->iternext = str_it_iternext;
+ o->str = str;
+ o->cur = 0;
+ return o;
+}
+#endif
+
+STATIC mp_obj_t bytes_it_iternext(mp_obj_t self_in) {
+ mp_obj_str8_it_t *self = MP_OBJ_TO_PTR(self_in);
+ GET_STR_DATA_LEN(self->str, str, len);
+ if (self->cur < len) {
+ mp_obj_t o_out = MP_OBJ_NEW_SMALL_INT(str[self->cur]);
+ self->cur += 1;
+ return o_out;
+ } else {
+ return MP_OBJ_STOP_ITERATION;
+ }
+}
+
+mp_obj_t mp_obj_new_bytes_iterator(mp_obj_t str) {
+ mp_obj_str8_it_t *o = m_new_obj(mp_obj_str8_it_t);
+ o->base.type = &mp_type_polymorph_iter;
+ o->iternext = bytes_it_iternext;
+ o->str = str;
+ o->cur = 0;
+ return MP_OBJ_FROM_PTR(o);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objstr.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,103 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_OBJSTR_H__
+#define __MICROPY_INCLUDED_PY_OBJSTR_H__
+
+#include "py/obj.h"
+
+// In-memory representation of a (non-qstr) str/bytes object.
+typedef struct _mp_obj_str_t {
+    mp_obj_base_t base;
+    mp_uint_t hash; // string hash (read via GET_STR_HASH below)
+    // len == number of bytes used in data, alloc = len + 1 because (at the moment) we also append a null byte
+    mp_uint_t len;
+    const byte *data;
+} mp_obj_str_t;
+
+// Define a static str object from a C string literal (hash field starts at 0).
+#define MP_DEFINE_STR_OBJ(obj_name, str) mp_obj_str_t obj_name = {{&mp_type_str}, 0, sizeof(str) - 1, (const byte*)str}
+
+// use this macro to extract the string hash (handles both qstr and heap strings)
+#define GET_STR_HASH(str_obj_in, str_hash) \
+    mp_uint_t str_hash; if (MP_OBJ_IS_QSTR(str_obj_in)) \
+        { str_hash = qstr_hash(MP_OBJ_QSTR_VALUE(str_obj_in)); } else { str_hash = ((mp_obj_str_t*)MP_OBJ_TO_PTR(str_obj_in))->hash; }
+
+// use this macro to extract the string length (handles both qstr and heap strings)
+#define GET_STR_LEN(str_obj_in, str_len) \
+    size_t str_len; if (MP_OBJ_IS_QSTR(str_obj_in)) \
+        { str_len = qstr_len(MP_OBJ_QSTR_VALUE(str_obj_in)); } else { str_len = ((mp_obj_str_t*)MP_OBJ_TO_PTR(str_obj_in))->len; }
+
+// use this macro to extract the string data and length
+// (repr C cannot do the qstr test inline, so it calls an out-of-line helper)
+#if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C
+const byte *mp_obj_str_get_data_no_check(mp_obj_t self_in, size_t *len);
+#define GET_STR_DATA_LEN(str_obj_in, str_data, str_len) \
+    size_t str_len; const byte *str_data = mp_obj_str_get_data_no_check(str_obj_in, &str_len);
+#else
+#define GET_STR_DATA_LEN(str_obj_in, str_data, str_len) \
+    const byte *str_data; size_t str_len; if (MP_OBJ_IS_QSTR(str_obj_in)) \
+        { str_data = qstr_data(MP_OBJ_QSTR_VALUE(str_obj_in), &str_len); } \
+    else { str_len = ((mp_obj_str_t*)MP_OBJ_TO_PTR(str_obj_in))->len; str_data = ((mp_obj_str_t*)MP_OBJ_TO_PTR(str_obj_in))->data; }
+#endif
+
+mp_obj_t mp_obj_str_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args);
+void mp_str_print_json(const mp_print_t *print, const byte *str_data, size_t str_len);
+mp_obj_t mp_obj_str_format(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs);
+mp_obj_t mp_obj_str_split(size_t n_args, const mp_obj_t *args);
+mp_obj_t mp_obj_new_str_of_type(const mp_obj_type_t *type, const byte* data, size_t len);
+
+mp_obj_t mp_obj_str_binary_op(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in);
+mp_int_t mp_obj_str_get_buffer(mp_obj_t self_in, mp_buffer_info_t *bufinfo, mp_uint_t flags);
+
+const byte *str_index_to_ptr(const mp_obj_type_t *type, const byte *self_data, size_t self_len,
+ mp_obj_t index, bool is_slice);
+const byte *find_subbytes(const byte *haystack, mp_uint_t hlen, const byte *needle, mp_uint_t nlen, mp_int_t direction);
+
+MP_DECLARE_CONST_FUN_OBJ(str_encode_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_find_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_rfind_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_index_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_rindex_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_join_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_split_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_splitlines_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_rsplit_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_startswith_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_endswith_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_strip_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_lstrip_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_rstrip_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_format_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_replace_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_count_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_partition_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_rpartition_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_lower_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_upper_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_isspace_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_isalpha_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_isdigit_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_isupper_obj);
+MP_DECLARE_CONST_FUN_OBJ(str_islower_obj);
+
+#endif // __MICROPY_INCLUDED_PY_OBJSTR_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objstringio.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,193 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "py/nlr.h"
+#include "py/objstr.h"
+#include "py/runtime.h"
+#include "py/stream.h"
+
+#if MICROPY_PY_IO
+
+// Instance state for io.StringIO / io.BytesIO: a growable byte buffer plus
+// one cursor.
+typedef struct _mp_obj_stringio_t {
+    mp_obj_base_t base;
+    vstr_t *vstr; // backing buffer; NULL after close() when MICROPY_CPYTHON_COMPAT
+    // StringIO has single pointer used for both reading and writing
+    mp_uint_t pos;
+} mp_obj_stringio_t;
+
+#if MICROPY_CPYTHON_COMPAT
+// Raise ValueError if the stream has been closed (stringio_close frees the
+// vstr and sets it to NULL in this configuration).
+STATIC void check_stringio_is_open(const mp_obj_stringio_t *o) {
+    if (o->vstr == NULL) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "I/O operation on closed file"));
+    }
+}
+#else
+// without CPython compatibility the check is compiled away entirely
+#define check_stringio_is_open(o)
+#endif
+
+// repr: show which flavour (StringIO vs BytesIO) and the object address.
+STATIC void stringio_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_stringio_t *self = MP_OBJ_TO_PTR(self_in);
+    const char *fmt = "<io.BytesIO 0x%x>";
+    if (self->base.type == &mp_type_stringio) {
+        fmt = "<io.StringIO 0x%x>";
+    }
+    mp_printf(print, fmt, self);
+}
+
+// Stream read: copy up to `size` bytes from the current position; the read is
+// shortened at end of buffer.  Never sets *errcode.
+STATIC mp_uint_t stringio_read(mp_obj_t o_in, void *buf, mp_uint_t size, int *errcode) {
+    (void)errcode;
+    mp_obj_stringio_t *self = MP_OBJ_TO_PTR(o_in);
+    check_stringio_is_open(self);
+    mp_uint_t avail = self->vstr->len - self->pos;
+    mp_uint_t n = (size < avail) ? size : avail;
+    memcpy(buf, self->vstr->buf + self->pos, n);
+    self->pos += n;
+    return n;
+}
+
+// Stream write: copy `size` bytes at the current position, growing the
+// underlying vstr when the write extends past the allocated space.
+STATIC mp_uint_t stringio_write(mp_obj_t o_in, const void *buf, mp_uint_t size, int *errcode) {
+    (void)errcode; // this implementation never reports an error
+    mp_obj_stringio_t *o = MP_OBJ_TO_PTR(o_in);
+    check_stringio_is_open(o);
+    mp_uint_t remaining = o->vstr->alloc - o->pos;
+    if (size > remaining) {
+        // Take all what's already allocated...
+        o->vstr->len = o->vstr->alloc;
+        // ... and add more
+        vstr_add_len(o->vstr, size - remaining);
+    }
+    memcpy(o->vstr->buf + o->pos, buf, size);
+    o->pos += size;
+    if (o->pos > o->vstr->len) {
+        // wrote past the previous end: logical length grows to the write position
+        o->vstr->len = o->pos;
+    }
+    return size;
+}
+
+// Map a stream object to the type of its contents: StringIO -> str, BytesIO -> bytes.
+#define STREAM_TO_CONTENT_TYPE(o) (((o)->base.type == &mp_type_stringio) ? &mp_type_str : &mp_type_bytes)
+
+// getvalue(): return a copy of the buffer contents as str or bytes,
+// depending on the stream flavour.
+STATIC mp_obj_t stringio_getvalue(mp_obj_t self_in) {
+    mp_obj_stringio_t *o = MP_OBJ_TO_PTR(self_in);
+    check_stringio_is_open(o);
+    vstr_t *vstr = o->vstr;
+    return mp_obj_new_str_of_type(STREAM_TO_CONTENT_TYPE(o), (byte*)vstr->buf, vstr->len);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(stringio_getvalue_obj, stringio_getvalue);
+
+// close(): with CPython compatibility the vstr is freed and NULLed so later
+// operations raise ValueError; otherwise the buffer is just emptied.
+STATIC mp_obj_t stringio_close(mp_obj_t self_in) {
+    mp_obj_stringio_t *self = MP_OBJ_TO_PTR(self_in);
+#if MICROPY_CPYTHON_COMPAT
+    vstr_free(self->vstr);
+    self->vstr = NULL; // checked by check_stringio_is_open()
+#else
+    vstr_clear(self->vstr);
+    self->vstr->alloc = 0; // NOTE(review): assumes alloc=0 forces vstr_add_len to reallocate on next write -- confirm against vstr implementation
+    self->vstr->len = 0;
+    self->pos = 0;
+#endif
+    return mp_const_none;
+}
+
+// __exit__: leaving a `with` block closes the stream; the exception-info
+// arguments are ignored.
+STATIC mp_obj_t stringio___exit__(size_t n_args, const mp_obj_t *args) {
+    (void)n_args; // always 4: self plus the exception triple
+    return stringio_close(args[0]);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(stringio___exit___obj, 4, 4, stringio___exit__);
+
+// Allocate a fresh, empty StringIO/BytesIO object of the given type.
+STATIC mp_obj_stringio_t *stringio_new(const mp_obj_type_t *type) {
+    mp_obj_stringio_t *self = m_new_obj(mp_obj_stringio_t);
+    self->base.type = type;
+    self->vstr = vstr_new();
+    self->pos = 0;
+    return self;
+}
+
+// Constructor: io.StringIO([initial]) / io.BytesIO([initial]).
+STATIC mp_obj_t stringio_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)n_kw; // TODO check n_kw==0
+    mp_obj_stringio_t *self = stringio_new(type_in);
+    if (n_args != 0) {
+        // pre-load the buffer with the supplied initial contents
+        mp_buffer_info_t bufinfo;
+        mp_get_buffer_raise(args[0], &bufinfo, MP_BUFFER_READ);
+        stringio_write(MP_OBJ_FROM_PTR(self), bufinfo.buf, bufinfo.len, NULL);
+        // Cur ptr is always at the beginning of buffer at the construction
+        self->pos = 0;
+    }
+    return MP_OBJ_FROM_PTR(self);
+}
+
+// Method table shared by StringIO and BytesIO instances.
+STATIC const mp_rom_map_elem_t stringio_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_read), MP_ROM_PTR(&mp_stream_read_obj) },
+    { MP_ROM_QSTR(MP_QSTR_readall), MP_ROM_PTR(&mp_stream_readall_obj) },
+    { MP_ROM_QSTR(MP_QSTR_readline), MP_ROM_PTR(&mp_stream_unbuffered_readline_obj) },
+    { MP_ROM_QSTR(MP_QSTR_write), MP_ROM_PTR(&mp_stream_write_obj) },
+    { MP_ROM_QSTR(MP_QSTR_close), MP_ROM_PTR(&stringio_close_obj) },
+    { MP_ROM_QSTR(MP_QSTR_getvalue), MP_ROM_PTR(&stringio_getvalue_obj) },
+    { MP_ROM_QSTR(MP_QSTR___enter__), MP_ROM_PTR(&mp_identity_obj) }, // __enter__ returns self
+    { MP_ROM_QSTR(MP_QSTR___exit__), MP_ROM_PTR(&stringio___exit___obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(stringio_locals_dict, stringio_locals_dict_table);
+
+// Stream protocol bindings: StringIO is a text stream, BytesIO a binary one;
+// both share the same read/write implementations.
+STATIC const mp_stream_p_t stringio_stream_p = {
+    .read = stringio_read,
+    .write = stringio_write,
+    .is_text = true,
+};
+
+STATIC const mp_stream_p_t bytesio_stream_p = {
+    .read = stringio_read,
+    .write = stringio_write,
+};
+
+const mp_obj_type_t mp_type_stringio = {
+    { &mp_type_type },
+    .name = MP_QSTR_StringIO,
+    .print = stringio_print,
+    .make_new = stringio_make_new,
+    .getiter = mp_identity, // the stream is its own iterator
+    .iternext = mp_stream_unbuffered_iter,
+    .stream_p = &stringio_stream_p,
+    .locals_dict = (mp_obj_dict_t*)&stringio_locals_dict,
+};
+
+#if MICROPY_PY_IO_BYTESIO
+// BytesIO differs from StringIO only in name and stream flavour.
+const mp_obj_type_t mp_type_bytesio = {
+    { &mp_type_type },
+    .name = MP_QSTR_BytesIO,
+    .print = stringio_print,
+    .make_new = stringio_make_new,
+    .getiter = mp_identity,
+    .iternext = mp_stream_unbuffered_iter,
+    .stream_p = &bytesio_stream_p,
+    .locals_dict = (mp_obj_dict_t*)&stringio_locals_dict,
+};
+#endif
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objstrunicode.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,298 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/objstr.h"
+#include "py/objlist.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_STR_UNICODE
+
+STATIC mp_obj_t mp_obj_new_str_iterator(mp_obj_t str);
+
+/******************************************************************************/
+/* str */
+
+// Print `str_data` as a quoted, escaped string literal (repr form).
+// The quote character is ' by default, switching to " when the string
+// contains single quotes but no double quotes.
+STATIC void uni_print_quoted(const mp_print_t *print, const byte *str_data, uint str_len) {
+    // this escapes characters, but it will be very slow to print (calling print many times)
+    bool has_single_quote = false;
+    bool has_double_quote = false;
+    for (const byte *s = str_data, *top = str_data + str_len; !has_double_quote && s < top; s++) {
+        if (*s == '\'') {
+            has_single_quote = true;
+        } else if (*s == '"') {
+            has_double_quote = true;
+        }
+    }
+    unichar quote_char = '\'';
+    if (has_single_quote && !has_double_quote) {
+        quote_char = '"';
+    }
+    mp_printf(print, "%c", quote_char);
+    const byte *s = str_data, *top = str_data + str_len;
+    while (s < top) {
+        unichar ch;
+        ch = utf8_get_char(s);
+        s = utf8_next_char(s);
+        if (ch == quote_char) {
+            mp_printf(print, "\\%c", quote_char);
+        } else if (ch == '\\') {
+            mp_print_str(print, "\\\\");
+        } else if (32 <= ch && ch <= 126) {
+            // printable ASCII is emitted verbatim
+            mp_printf(print, "%c", ch);
+        } else if (ch == '\n') {
+            mp_print_str(print, "\\n");
+        } else if (ch == '\r') {
+            mp_print_str(print, "\\r");
+        } else if (ch == '\t') {
+            mp_print_str(print, "\\t");
+        } else if (ch < 0x100) {
+            mp_printf(print, "\\x%02x", ch);
+        } else if (ch < 0x10000) {
+            mp_printf(print, "\\u%04x", ch);
+        } else {
+            // code points beyond the BMP
+            mp_printf(print, "\\U%08x", ch);
+        }
+    }
+    mp_printf(print, "%c", quote_char);
+}
+
+// Print a unicode str: raw data for PRINT_STR, JSON-escaped for PRINT_JSON
+// (when enabled), quoted repr form otherwise.
+STATIC void uni_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    GET_STR_DATA_LEN(self_in, str_data, str_len);
+    #if MICROPY_PY_UJSON
+    if (kind == PRINT_JSON) {
+        mp_str_print_json(print, str_data, str_len);
+        return;
+    }
+    #endif
+    if (kind != PRINT_STR) {
+        uni_print_quoted(print, str_data, str_len);
+    } else {
+        mp_printf(print, "%.*s", str_len, str_data);
+    }
+}
+
+// Unary operations on unicode str: truth value and character (not byte) length.
+STATIC mp_obj_t uni_unary_op(mp_uint_t op, mp_obj_t self_in) {
+    GET_STR_DATA_LEN(self_in, str_data, str_len);
+    if (op == MP_UNARY_OP_BOOL) {
+        return mp_obj_new_bool(str_len != 0);
+    }
+    if (op == MP_UNARY_OP_LEN) {
+        // len() counts characters, not UTF-8 bytes
+        return MP_OBJ_NEW_SMALL_INT(unichar_charlen((const char *)str_data, str_len));
+    }
+    return MP_OBJ_NULL; // op not supported
+}
+
+// Convert an index into a pointer to its lead byte. Out of bounds indexing will raise IndexError or
+// be capped to the first/last character of the string, depending on is_slice.
+const byte *str_index_to_ptr(const mp_obj_type_t *type, const byte *self_data, size_t self_len,
+                             mp_obj_t index, bool is_slice) {
+    (void)type;
+    mp_int_t i;
+    // Copied from mp_get_index; I don't want bounds checking, just give me
+    // the integer as-is. (I can't bounds-check without scanning the whole
+    // string; an out-of-bounds index will be caught in the loops below.)
+    if (MP_OBJ_IS_SMALL_INT(index)) {
+        i = MP_OBJ_SMALL_INT_VALUE(index);
+    } else if (!mp_obj_get_int_maybe(index, &i)) {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, "string indices must be integers, not %s", mp_obj_get_type_str(index)));
+    }
+    const byte *s, *top = self_data + self_len;
+    if (i < 0)
+    {
+        // Negative indexing is performed by counting from the end of the string.
+        for (s = top - 1; i; --s) {
+            if (s < self_data) {
+                if (is_slice) {
+                    return self_data;
+                }
+                nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_IndexError, "string index out of range"));
+            }
+            if (!UTF8_IS_CONT(*s)) {
+                // each UTF-8 lead byte marks one character; count i up towards zero
+                ++i;
+            }
+        }
+        ++s; // the loop decremented one past the target lead byte; step back onto it
+    } else if (!i) {
+        return self_data; // Shortcut - str[0] is its base pointer
+    } else {
+        // Positive indexing, correspondingly, counts from the start of the string.
+        // It's assumed that negative indexing will generally be used with small
+        // absolute values (eg str[-1], not str[-1000000]), which means it'll be
+        // more efficient this way.
+        for (s = self_data; true; ++s) {
+            if (s >= top) {
+                if (is_slice) {
+                    return top;
+                }
+                nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_IndexError, "string index out of range"));
+            }
+            while (UTF8_IS_CONT(*s)) {
+                // skip continuation bytes so s always rests on a lead byte
+                ++s;
+            }
+            if (!i--) {
+                return s;
+            }
+        }
+    }
+    return s;
+}
+
+// Subscript load on a unicode str: str[i] or str[slice].  Only load is
+// supported (value == MP_OBJ_SENTINEL); strings are immutable.
+STATIC mp_obj_t str_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+    mp_obj_type_t *type = mp_obj_get_type(self_in);
+    assert(type == &mp_type_str);
+    GET_STR_DATA_LEN(self_in, self_data, self_len);
+    if (value == MP_OBJ_SENTINEL) {
+        // load
+#if MICROPY_PY_BUILTINS_SLICE
+        if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
+            mp_obj_t ostart, ostop, ostep;
+            mp_obj_slice_get(index, &ostart, &ostop, &ostep);
+            if (ostep != mp_const_none && ostep != MP_OBJ_NEW_SMALL_INT(1)) {
+                mp_not_implemented("only slices with step=1 (aka None) are supported");
+            }
+
+            // resolve the slice bounds to byte pointers (clamped, since is_slice=true)
+            const byte *pstart, *pstop;
+            if (ostart != mp_const_none) {
+                pstart = str_index_to_ptr(type, self_data, self_len, ostart, true);
+            } else {
+                pstart = self_data;
+            }
+            if (ostop != mp_const_none) {
+                // pstop will point just after the stop character. This depends on
+                // the \0 at the end of the string.
+                pstop = str_index_to_ptr(type, self_data, self_len, ostop, true);
+            } else {
+                pstop = self_data + self_len;
+            }
+            if (pstop < pstart) {
+                // empty slice: return the interned empty string
+                return MP_OBJ_NEW_QSTR(MP_QSTR_);
+            }
+            return mp_obj_new_str_of_type(type, (const byte *)pstart, pstop - pstart);
+        }
+#endif
+        const byte *s = str_index_to_ptr(type, self_data, self_len, index, false);
+        int len = 1;
+        if (UTF8_IS_NONASCII(*s)) {
+            // Count the number of 1 bits (after the first)
+            // (the UTF-8 lead byte encodes the sequence length in its high bits)
+            for (char mask = 0x40; *s & mask; mask >>= 1) {
+                ++len;
+            }
+        }
+        return mp_obj_new_str((const char*)s, len, true); // This will create a one-character string
+    } else {
+        return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// Method table for the unicode str type (methods implemented in objstr.c).
+STATIC const mp_rom_map_elem_t struni_locals_dict_table[] = {
+#if MICROPY_CPYTHON_COMPAT
+    { MP_ROM_QSTR(MP_QSTR_encode), MP_ROM_PTR(&str_encode_obj) },
+#endif
+    { MP_ROM_QSTR(MP_QSTR_find), MP_ROM_PTR(&str_find_obj) },
+    { MP_ROM_QSTR(MP_QSTR_rfind), MP_ROM_PTR(&str_rfind_obj) },
+    { MP_ROM_QSTR(MP_QSTR_index), MP_ROM_PTR(&str_index_obj) },
+    { MP_ROM_QSTR(MP_QSTR_rindex), MP_ROM_PTR(&str_rindex_obj) },
+    { MP_ROM_QSTR(MP_QSTR_join), MP_ROM_PTR(&str_join_obj) },
+    { MP_ROM_QSTR(MP_QSTR_split), MP_ROM_PTR(&str_split_obj) },
+    #if MICROPY_PY_BUILTINS_STR_SPLITLINES
+    { MP_ROM_QSTR(MP_QSTR_splitlines), MP_ROM_PTR(&str_splitlines_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_rsplit), MP_ROM_PTR(&str_rsplit_obj) },
+    { MP_ROM_QSTR(MP_QSTR_startswith), MP_ROM_PTR(&str_startswith_obj) },
+    { MP_ROM_QSTR(MP_QSTR_endswith), MP_ROM_PTR(&str_endswith_obj) },
+    { MP_ROM_QSTR(MP_QSTR_strip), MP_ROM_PTR(&str_strip_obj) },
+    { MP_ROM_QSTR(MP_QSTR_lstrip), MP_ROM_PTR(&str_lstrip_obj) },
+    { MP_ROM_QSTR(MP_QSTR_rstrip), MP_ROM_PTR(&str_rstrip_obj) },
+    { MP_ROM_QSTR(MP_QSTR_format), MP_ROM_PTR(&str_format_obj) },
+    { MP_ROM_QSTR(MP_QSTR_replace), MP_ROM_PTR(&str_replace_obj) },
+    { MP_ROM_QSTR(MP_QSTR_count), MP_ROM_PTR(&str_count_obj) },
+    { MP_ROM_QSTR(MP_QSTR_partition), MP_ROM_PTR(&str_partition_obj) },
+    { MP_ROM_QSTR(MP_QSTR_rpartition), MP_ROM_PTR(&str_rpartition_obj) },
+    { MP_ROM_QSTR(MP_QSTR_lower), MP_ROM_PTR(&str_lower_obj) },
+    { MP_ROM_QSTR(MP_QSTR_upper), MP_ROM_PTR(&str_upper_obj) },
+    { MP_ROM_QSTR(MP_QSTR_isspace), MP_ROM_PTR(&str_isspace_obj) },
+    { MP_ROM_QSTR(MP_QSTR_isalpha), MP_ROM_PTR(&str_isalpha_obj) },
+    { MP_ROM_QSTR(MP_QSTR_isdigit), MP_ROM_PTR(&str_isdigit_obj) },
+    { MP_ROM_QSTR(MP_QSTR_isupper), MP_ROM_PTR(&str_isupper_obj) },
+    { MP_ROM_QSTR(MP_QSTR_islower), MP_ROM_PTR(&str_islower_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(struni_locals_dict, struni_locals_dict_table);
+
+// The built-in `str` type (unicode variant).
+const mp_obj_type_t mp_type_str = {
+    { &mp_type_type },
+    .name = MP_QSTR_str,
+    .print = uni_print,
+    .make_new = mp_obj_str_make_new,
+    .unary_op = uni_unary_op,
+    .binary_op = mp_obj_str_binary_op,
+    .subscr = str_subscr,
+    .getiter = mp_obj_new_str_iterator,
+    .buffer_p = { .get_buffer = mp_obj_str_get_buffer },
+    .locals_dict = (mp_obj_dict_t*)&struni_locals_dict,
+};
+
+/******************************************************************************/
+/* str iterator */
+
+// Iterator state for unicode strings; `cur` is a byte offset that is always
+// advanced to the next UTF-8 lead byte by str_it_iternext.
+typedef struct _mp_obj_str_it_t {
+    mp_obj_base_t base;
+    mp_fun_1_t iternext; // polymorphic-iterator advance function
+    mp_obj_t str;        // string being iterated
+    mp_uint_t cur;       // current byte offset into the string data
+} mp_obj_str_it_t;
+
+// Iterator advance: yield the next (possibly multi-byte) UTF-8 character
+// as a one-character str.
+STATIC mp_obj_t str_it_iternext(mp_obj_t self_in) {
+    mp_obj_str_it_t *it = MP_OBJ_TO_PTR(self_in);
+    GET_STR_DATA_LEN(it->str, data, data_len);
+    if (it->cur >= data_len) {
+        return MP_OBJ_STOP_ITERATION;
+    }
+    const byte *start = data + it->cur;
+    mp_uint_t nbytes = utf8_next_char(start) - start;
+    it->cur += nbytes;
+    return mp_obj_new_str((const char*)start, nbytes, true);
+}
+
+// Create a new iterator object over the characters of `str`.
+STATIC mp_obj_t mp_obj_new_str_iterator(mp_obj_t str) {
+    mp_obj_str_it_t *it = m_new_obj(mp_obj_str_it_t);
+    it->base.type = &mp_type_polymorph_iter;
+    it->iternext = str_it_iternext;
+    it->str = str;
+    it->cur = 0;
+    return MP_OBJ_FROM_PTR(it);
+}
+
+#endif // MICROPY_PY_BUILTINS_STR_UNICODE
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objtuple.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,296 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/objtuple.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+
+STATIC mp_obj_t mp_obj_new_tuple_iterator(mp_obj_tuple_t *tuple, mp_uint_t cur);
+
+/******************************************************************************/
+/* tuple */
+
+// Print a tuple: "(a, b)" repr form, or "[a, b]" when emitting JSON.
+void mp_obj_tuple_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    mp_obj_tuple_t *t = MP_OBJ_TO_PTR(o_in);
+    bool json = MICROPY_PY_UJSON && kind == PRINT_JSON;
+    if (json) {
+        mp_print_str(print, "[");
+    } else {
+        mp_print_str(print, "(");
+        kind = PRINT_REPR; // elements always print in repr form
+    }
+    for (mp_uint_t i = 0; i < t->len; i++) {
+        if (i != 0) {
+            mp_print_str(print, ", ");
+        }
+        mp_obj_print_helper(print, t->items[i], kind);
+    }
+    if (json) {
+        mp_print_str(print, "]");
+    } else {
+        if (t->len == 1) {
+            mp_print_str(print, ","); // trailing comma distinguishes the 1-tuple
+        }
+        mp_print_str(print, ")");
+    }
+}
+
+// tuple() constructor: no args -> the shared empty tuple; one arg -> a tuple
+// built from the iterable.
+STATIC mp_obj_t mp_obj_tuple_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+
+    mp_arg_check_num(n_args, n_kw, 0, 1, false);
+
+    switch (n_args) {
+        case 0:
+            // return a empty tuple
+            return mp_const_empty_tuple;
+
+        case 1:
+        default: {
+            // 1 argument, an iterable from which we make a new tuple
+            if (MP_OBJ_IS_TYPE(args[0], &mp_type_tuple)) {
+                // a tuple argument can be returned as-is
+                return args[0];
+            }
+
+            // TODO optimise for cases where we know the length of the iterator
+
+            mp_uint_t alloc = 4;
+            mp_uint_t len = 0;
+            mp_obj_t *items = m_new(mp_obj_t, alloc);
+
+            mp_obj_t iterable = mp_getiter(args[0]);
+            mp_obj_t item;
+            while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+                if (len >= alloc) {
+                    // amortised growth: double the scratch buffer when full
+                    items = m_renew(mp_obj_t, items, alloc, alloc * 2);
+                    alloc *= 2;
+                }
+                items[len++] = item;
+            }
+
+            // copy the collected items into a right-sized tuple, free the scratch buffer
+            mp_obj_t tuple = mp_obj_new_tuple(len, items);
+            m_del(mp_obj_t, items, alloc);
+
+            return tuple;
+        }
+    }
+}
+
+// Don't pass MP_BINARY_OP_NOT_EQUAL here
+// Compare a tuple against another object with the given equality/ordering op.
+// A RHS that is neither a tuple nor castable to one compares false.
+STATIC bool tuple_cmp_helper(mp_uint_t op, mp_obj_t self_in, mp_obj_t another_in) {
+    mp_obj_type_t *self_type = mp_obj_get_type(self_in);
+    if (self_type->getiter != mp_obj_tuple_getiter) {
+        // caller contract violated: self must be a tuple (identified by its getiter slot)
+        assert(0);
+    }
+    mp_obj_type_t *another_type = mp_obj_get_type(another_in);
+    mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
+    if (another_type->getiter != mp_obj_tuple_getiter) {
+        // Slow path for user subclasses
+        another_in = mp_instance_cast_to_native_base(another_in, MP_OBJ_FROM_PTR(&mp_type_tuple));
+        if (another_in == MP_OBJ_NULL) {
+            return false;
+        }
+    }
+    mp_obj_tuple_t *another = MP_OBJ_TO_PTR(another_in);
+
+    return mp_seq_cmp_objs(op, self->items, self->len, another->items, another->len);
+}
+
+// Unary operations on tuples: truth value, hash and length.
+mp_obj_t mp_obj_tuple_unary_op(mp_uint_t op, mp_obj_t self_in) {
+    mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
+    if (op == MP_UNARY_OP_BOOL) {
+        return mp_obj_new_bool(self->len != 0);
+    }
+    if (op == MP_UNARY_OP_LEN) {
+        return MP_OBJ_NEW_SMALL_INT(self->len);
+    }
+    if (op == MP_UNARY_OP_HASH) {
+        // start hash with pointer to empty tuple, to make it fairly unique
+        mp_int_t hash = (mp_int_t)mp_const_empty_tuple;
+        for (mp_uint_t i = 0; i < self->len; i++) {
+            hash += MP_OBJ_SMALL_INT_VALUE(mp_unary_op(MP_UNARY_OP_HASH, self->items[i]));
+        }
+        return MP_OBJ_NEW_SMALL_INT(hash);
+    }
+    return MP_OBJ_NULL; // op not supported
+}
+
+// Binary operations on tuples: concatenation (+), repetition (*) and the
+// equality/ordering comparisons.  Returns MP_OBJ_NULL for unsupported ops.
+mp_obj_t mp_obj_tuple_binary_op(mp_uint_t op, mp_obj_t lhs, mp_obj_t rhs) {
+    mp_obj_tuple_t *o = MP_OBJ_TO_PTR(lhs);
+    switch (op) {
+        case MP_BINARY_OP_ADD: {
+            // tuple + tuple (or tuple subclass): concatenate into a new tuple
+            if (!mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(rhs)), MP_OBJ_FROM_PTR(&mp_type_tuple))) {
+                return MP_OBJ_NULL; // op not supported
+            }
+            mp_obj_tuple_t *p = MP_OBJ_TO_PTR(rhs);
+            mp_obj_tuple_t *s = MP_OBJ_TO_PTR(mp_obj_new_tuple(o->len + p->len, NULL));
+            mp_seq_cat(s->items, o->items, o->len, p->items, p->len, mp_obj_t);
+            return MP_OBJ_FROM_PTR(s);
+        }
+        case MP_BINARY_OP_MULTIPLY: {
+            // tuple * n: repeat the contents n times; n <= 0 yields the empty tuple
+            mp_int_t n;
+            if (!mp_obj_get_int_maybe(rhs, &n)) {
+                return MP_OBJ_NULL; // op not supported
+            }
+            if (n <= 0) {
+                return mp_const_empty_tuple;
+            }
+            mp_obj_tuple_t *s = MP_OBJ_TO_PTR(mp_obj_new_tuple(o->len * n, NULL));
+            mp_seq_multiply(o->items, sizeof(*o->items), o->len, n, s->items);
+            return MP_OBJ_FROM_PTR(s);
+        }
+        case MP_BINARY_OP_EQUAL:
+        case MP_BINARY_OP_LESS:
+        case MP_BINARY_OP_LESS_EQUAL:
+        case MP_BINARY_OP_MORE:
+        case MP_BINARY_OP_MORE_EQUAL:
+            // element-wise comparison (NOT_EQUAL must not reach tuple_cmp_helper)
+            return mp_obj_new_bool(tuple_cmp_helper(op, lhs, rhs));
+
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// Subscript load: tuple[i] or tuple[slice].  Stores are rejected (tuples are
+// immutable), so anything other than value == MP_OBJ_SENTINEL returns MP_OBJ_NULL.
+mp_obj_t mp_obj_tuple_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+    if (value == MP_OBJ_SENTINEL) {
+        // load
+        mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
+#if MICROPY_PY_BUILTINS_SLICE
+        if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
+            mp_bound_slice_t slice;
+            if (!mp_seq_get_fast_slice_indexes(self->len, index, &slice)) {
+                mp_not_implemented("only slices with step=1 (aka None) are supported");
+            }
+            // copy the selected range into a fresh tuple
+            mp_obj_tuple_t *res = MP_OBJ_TO_PTR(mp_obj_new_tuple(slice.stop - slice.start, NULL));
+            mp_seq_copy(res->items, self->items + slice.start, res->len, mp_obj_t);
+            return MP_OBJ_FROM_PTR(res);
+        }
+#endif
+        // plain integer index, bounds-checked by mp_get_index
+        mp_uint_t index_value = mp_get_index(self->base.type, self->len, index, false);
+        return self->items[index_value];
+    } else {
+        return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// Return a fresh iterator positioned at the first element.
+mp_obj_t mp_obj_tuple_getiter(mp_obj_t o_in) {
+    mp_obj_tuple_t *tuple = MP_OBJ_TO_PTR(o_in);
+    return mp_obj_new_tuple_iterator(tuple, 0);
+}
+
+// tuple.count(value): number of items equal to value.
+STATIC mp_obj_t tuple_count(mp_obj_t self_in, mp_obj_t value) {
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_tuple));
+    mp_obj_tuple_t *t = MP_OBJ_TO_PTR(self_in);
+    return mp_seq_count_obj(t->items, t->len, value);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(tuple_count_obj, tuple_count);
+
+// tuple.index(value[, start[, stop]]): delegate to the generic sequence search.
+STATIC mp_obj_t tuple_index(size_t n_args, const mp_obj_t *args) {
+    assert(MP_OBJ_IS_TYPE(args[0], &mp_type_tuple));
+    mp_obj_tuple_t *t = MP_OBJ_TO_PTR(args[0]);
+    return mp_seq_index_obj(t->items, t->len, n_args, args);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(tuple_index_obj, 2, 4, tuple_index);
+
+// Methods exposed on tuple instances.
+STATIC const mp_rom_map_elem_t tuple_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_count), MP_ROM_PTR(&tuple_count_obj) },
+    { MP_ROM_QSTR(MP_QSTR_index), MP_ROM_PTR(&tuple_index_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(tuple_locals_dict, tuple_locals_dict_table);
+
+// The built-in `tuple` type.
+const mp_obj_type_t mp_type_tuple = {
+    { &mp_type_type },
+    .name = MP_QSTR_tuple,
+    .print = mp_obj_tuple_print,
+    .make_new = mp_obj_tuple_make_new,
+    .unary_op = mp_obj_tuple_unary_op,
+    .binary_op = mp_obj_tuple_binary_op,
+    .subscr = mp_obj_tuple_subscr,
+    .getiter = mp_obj_tuple_getiter,
+    .locals_dict = (mp_obj_dict_t*)&tuple_locals_dict,
+};
+
+// the zero-length tuple, shared by all code that needs an empty tuple
+const mp_obj_tuple_t mp_const_empty_tuple_obj = {{&mp_type_tuple}, 0};
+
+// Create a new tuple with n items, optionally copying them from `items`
+// (pass NULL to leave the slots for the caller to fill).  n == 0 returns the
+// shared empty tuple, which is safe because tuples are immutable.
+mp_obj_t mp_obj_new_tuple(mp_uint_t n, const mp_obj_t *items) {
+    if (n == 0) {
+        return mp_const_empty_tuple;
+    }
+    mp_obj_tuple_t *t = m_new_obj_var(mp_obj_tuple_t, mp_obj_t, n);
+    t->base.type = &mp_type_tuple;
+    t->len = n;
+    if (items != NULL) {
+        memcpy(t->items, items, n * sizeof(mp_obj_t));
+    }
+    return MP_OBJ_FROM_PTR(t);
+}
+
+// Expose a tuple's length and items array to C callers.
+void mp_obj_tuple_get(mp_obj_t self_in, mp_uint_t *len, mp_obj_t **items) {
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_tuple));
+    mp_obj_tuple_t *t = MP_OBJ_TO_PTR(self_in);
+    *len = t->len;
+    *items = t->items;
+}
+
+// Free a tuple's storage (object header plus the inline items array).
+void mp_obj_tuple_del(mp_obj_t self_in) {
+    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_tuple));
+    mp_obj_tuple_t *t = MP_OBJ_TO_PTR(self_in);
+    m_del_var(mp_obj_tuple_t, mp_obj_t, t->len, t);
+}
+
+/******************************************************************************/
+/* tuple iterator */
+
+// Iterator state for tuples.
+typedef struct _mp_obj_tuple_it_t {
+    mp_obj_base_t base;
+    mp_fun_1_t iternext;   // polymorphic-iterator advance function
+    mp_obj_tuple_t *tuple; // tuple being iterated
+    mp_uint_t cur;         // index of the next item to yield
+} mp_obj_tuple_it_t;
+
+// Advance the tuple iterator: return the next item, or MP_OBJ_STOP_ITERATION
+// when exhausted.
+STATIC mp_obj_t tuple_it_iternext(mp_obj_t self_in) {
+    mp_obj_tuple_it_t *it = MP_OBJ_TO_PTR(self_in);
+    if (it->cur >= it->tuple->len) {
+        return MP_OBJ_STOP_ITERATION;
+    }
+    mp_obj_t o_out = it->tuple->items[it->cur];
+    it->cur += 1;
+    return o_out;
+}
+
+// Build an iterator over `tuple`, starting at index `cur`.
+STATIC mp_obj_t mp_obj_new_tuple_iterator(mp_obj_tuple_t *tuple, mp_uint_t cur) {
+    mp_obj_tuple_it_t *it = m_new_obj(mp_obj_tuple_it_t);
+    it->base.type = &mp_type_polymorph_iter;
+    it->iternext = tuple_it_iternext;
+    it->tuple = tuple;
+    it->cur = cur;
+    return MP_OBJ_FROM_PTR(it);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objtuple.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,64 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_OBJTUPLE_H__
+#define __MICROPY_INCLUDED_PY_OBJTUPLE_H__
+
+#include "py/obj.h"
+
+// In-RAM tuple: length plus elements stored inline via a flexible array member.
+typedef struct _mp_obj_tuple_t {
+ mp_obj_base_t base;
+ mp_uint_t len;
+ mp_obj_t items[];
+} mp_obj_tuple_t;
+
+// ROM-able tuple variant: elements are mp_rom_obj_t so a const instance can
+// be placed entirely in flash.
+typedef struct _mp_rom_obj_tuple_t {
+ mp_obj_base_t base;
+ mp_uint_t len;
+ mp_rom_obj_t items[];
+} mp_rom_obj_tuple_t;
+
+// Type-slot implementations exported so tuple subtypes (e.g. attrtuple,
+// namedtuple) can reuse them.
+void mp_obj_tuple_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind);
+mp_obj_t mp_obj_tuple_unary_op(mp_uint_t op, mp_obj_t self_in);
+mp_obj_t mp_obj_tuple_binary_op(mp_uint_t op, mp_obj_t lhs, mp_obj_t rhs);
+mp_obj_t mp_obj_tuple_subscr(mp_obj_t base, mp_obj_t index, mp_obj_t value);
+mp_obj_t mp_obj_tuple_getiter(mp_obj_t o_in);
+
+extern const mp_obj_type_t mp_type_attrtuple;
+
+// Define a const attrtuple (tuple whose fields are also accessible by name).
+// The pointer to the field-name array is stored as a hidden extra item after
+// the nitems visible items, so .len stays nitems.
+#define MP_DEFINE_ATTRTUPLE(tuple_obj_name, fields, nitems, ...) \
+ const mp_rom_obj_tuple_t tuple_obj_name = { \
+ .base = {&mp_type_attrtuple}, \
+ .len = nitems, \
+ .items = { __VA_ARGS__ , MP_ROM_PTR((void*)fields) } \
+ }
+
+#if MICROPY_PY_COLLECTIONS
+// Helper shared with namedtuple printing: prints "field=value" pairs.
+void mp_obj_attrtuple_print_helper(const mp_print_t *print, const qstr *fields, mp_obj_tuple_t *o);
+#endif
+
+// Create an attrtuple with n items and the given field-name array
+// (fields must outlive the returned object, e.g. point into ROM).
+mp_obj_t mp_obj_new_attrtuple(const qstr *fields, mp_uint_t n, const mp_obj_t *items);
+
+#endif // __MICROPY_INCLUDED_PY_OBJTUPLE_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objtype.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,1130 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stddef.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/objtype.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+
+#if 0 // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+// Forward declaration: constructor shared by staticmethod and classmethod.
+STATIC mp_obj_t static_class_method_make_new(const mp_obj_type_t *self_in, size_t n_args, size_t n_kw, const mp_obj_t *args);
+
+/******************************************************************************/
+// instance object
+
+// Allocate a bare instance of a user class, with room for subobjs native
+// sub-objects; members map starts empty and sub-object slots are cleared.
+STATIC mp_obj_t mp_obj_new_instance(const mp_obj_type_t *class, uint subobjs) {
+ mp_obj_instance_t *o = m_new_obj_var(mp_obj_instance_t, mp_obj_t, subobjs);
+ o->base.type = class;
+ mp_map_init(&o->members, 0);
+ mp_seq_clear(o->subobj, 0, subobjs, sizeof(*o->subobj));
+ return MP_OBJ_FROM_PTR(o);
+}
+
+// Recursively count the native (non-user) base types of type, recording the
+// last one seen in *last_native_base. MicroPython supports at most one
+// native base per class, so callers assert the result is < 2.
+STATIC int instance_count_native_bases(const mp_obj_type_t *type, const mp_obj_type_t **last_native_base) {
+ mp_uint_t len = type->bases_tuple->len;
+ mp_obj_t *items = type->bases_tuple->items;
+
+ int count = 0;
+ for (uint i = 0; i < len; i++) {
+ assert(MP_OBJ_IS_TYPE(items[i], &mp_type_type));
+ const mp_obj_type_t *bt = (const mp_obj_type_t *)MP_OBJ_TO_PTR(items[i]);
+ if (bt == &mp_type_object) {
+ // Not a "real" type
+ continue;
+ }
+ if (mp_obj_is_native_type(bt)) {
+ *last_native_base = bt;
+ count++;
+ } else {
+ // user-defined base: recurse into its own bases
+ count += instance_count_native_bases(bt, last_native_base);
+ }
+ }
+
+ return count;
+}
+
+// TODO
+// This implements depth-first left-to-right MRO, which is not compliant with Python3 MRO
+// http://python-history.blogspot.com/2010/06/method-resolution-order.html
+// https://www.python.org/download/releases/2.3/mro/
+//
+// will keep lookup->dest[0]'s value (should be MP_OBJ_NULL on invocation) if attribute
+// is not found
+// will set lookup->dest[0] to MP_OBJ_SENTINEL if special method was found in a native
+// type base via slot id (as specified by lookup->meth_offset). As there can be only one
+// native base, it's known that it applies to instance->subobj[0]. In most cases, we also
+// don't need to know which type it was - because instance->subobj[0] is of that type.
+// The only exception is when object is not yet constructed, then we need to know base
+// native type to construct its instance->subobj[0] from. But this case is handled via
+// instance_count_native_bases(), which returns a native base which it saw.
+struct class_lookup_data {
+ mp_obj_instance_t *obj; // instance being accessed (or the type itself when is_type)
+ qstr attr; // attribute name to look up
+ mp_uint_t meth_offset; // offset of matching slot within mp_obj_type_t, or 0 for none
+ mp_obj_t *dest; // 2-element result buffer, as for mp_load_method_maybe
+ bool is_type; // true when looking up on a class rather than an instance
+};
+
+// Search type and its bases (depth-first, left-to-right; see TODO above) for
+// lookup->attr. On a locals_dict hit, fills lookup->dest via
+// mp_convert_member_lookup(); on a native special-method slot hit, sets
+// dest[0] = MP_OBJ_SENTINEL; otherwise leaves dest[0] unchanged (not found).
+STATIC void mp_obj_class_lookup(struct class_lookup_data *lookup, const mp_obj_type_t *type) {
+ assert(lookup->dest[0] == MP_OBJ_NULL);
+ assert(lookup->dest[1] == MP_OBJ_NULL);
+ for (;;) {
+ // Optimize special method lookup for native types
+ // This avoids extra method_name => slot lookup. On the other hand,
+ // this should not be applied to class types, as will result in extra
+ // lookup either.
+ if (lookup->meth_offset != 0 && mp_obj_is_native_type(type)) {
+ if (*(void**)((char*)type + lookup->meth_offset) != NULL) {
+ DEBUG_printf("mp_obj_class_lookup: matched special meth slot for %s\n", qstr_str(lookup->attr));
+ lookup->dest[0] = MP_OBJ_SENTINEL;
+ return;
+ }
+ }
+
+ if (type->locals_dict != NULL) {
+ // search locals_dict (the set of methods/attributes)
+ assert(type->locals_dict->base.type == &mp_type_dict); // MicroPython restriction, for now
+ mp_map_t *locals_map = &type->locals_dict->map;
+ mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(lookup->attr), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ if (lookup->is_type) {
+ // If we look up a class method, we need to return original type for which we
+ // do a lookup, not a (base) type in which we found the class method.
+ const mp_obj_type_t *org_type = (const mp_obj_type_t*)lookup->obj;
+ mp_convert_member_lookup(MP_OBJ_NULL, org_type, elem->value, lookup->dest);
+ } else {
+ mp_obj_instance_t *obj = lookup->obj;
+ mp_obj_t obj_obj;
+ if (obj != NULL && mp_obj_is_native_type(type) && type != &mp_type_object /* object is not a real type */) {
+ // If we're dealing with native base class, then it applies to native sub-object
+ obj_obj = obj->subobj[0];
+ } else {
+ obj_obj = MP_OBJ_FROM_PTR(obj);
+ }
+ mp_convert_member_lookup(obj_obj, type, elem->value, lookup->dest);
+ }
+#if DEBUG_PRINT
+ printf("mp_obj_class_lookup: Returning: ");
+ mp_obj_print(lookup->dest[0], PRINT_REPR); printf(" ");
+ mp_obj_print(lookup->dest[1], PRINT_REPR); printf("\n");
+#endif
+ return;
+ }
+ }
+
+ // Previous code block takes care about attributes defined in .locals_dict,
+ // but some attributes of native types may be handled using .load_attr method,
+ // so make sure we try to lookup those too.
+ if (lookup->obj != NULL && !lookup->is_type && mp_obj_is_native_type(type) && type != &mp_type_object /* object is not a real type */) {
+ mp_load_method_maybe(lookup->obj->subobj[0], lookup->attr, lookup->dest);
+ if (lookup->dest[0] != MP_OBJ_NULL) {
+ return;
+ }
+ }
+
+ // attribute not found, keep searching base classes
+
+ // for a const struct, this entry might be NULL
+ if (type->bases_tuple == NULL) {
+ return;
+ }
+
+ mp_uint_t len = type->bases_tuple->len;
+ mp_obj_t *items = type->bases_tuple->items;
+ if (len == 0) {
+ return;
+ }
+ for (uint i = 0; i < len - 1; i++) {
+ assert(MP_OBJ_IS_TYPE(items[i], &mp_type_type));
+ mp_obj_type_t *bt = (mp_obj_type_t*)MP_OBJ_TO_PTR(items[i]);
+ if (bt == &mp_type_object) {
+ // Not a "real" type
+ continue;
+ }
+ mp_obj_class_lookup(lookup, bt);
+ if (lookup->dest[0] != MP_OBJ_NULL) {
+ return;
+ }
+ }
+
+ // search last base (simple tail recursion elimination)
+ assert(MP_OBJ_IS_TYPE(items[len - 1], &mp_type_type));
+ type = (mp_obj_type_t*)MP_OBJ_TO_PTR(items[len - 1]);
+ if (type == &mp_type_object) {
+ // Not a "real" type
+ return;
+ }
+ }
+}
+
+// Print an instance: use __str__ (falling back to __repr__), a native base's
+// print slot, or the generic "<name object at addr>" form, in that order.
+STATIC void instance_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+ qstr meth = (kind == PRINT_STR) ? MP_QSTR___str__ : MP_QSTR___repr__;
+ mp_obj_t member[2] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .attr = meth,
+ .meth_offset = offsetof(mp_obj_type_t, print),
+ .dest = member,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self->base.type);
+ if (member[0] == MP_OBJ_NULL && kind == PRINT_STR) {
+ // If there's no __str__, fall back to __repr__
+ lookup.attr = MP_QSTR___repr__;
+ lookup.meth_offset = 0;
+ mp_obj_class_lookup(&lookup, self->base.type);
+ }
+
+ if (member[0] == MP_OBJ_SENTINEL) {
+ // Handle Exception subclasses specially
+ if (mp_obj_is_native_exception_instance(self->subobj[0])) {
+ if (kind != PRINT_STR) {
+ mp_print_str(print, qstr_str(self->base.type->name));
+ }
+ mp_obj_print_helper(print, self->subobj[0], kind | PRINT_EXC_SUBCLASS);
+ } else {
+ mp_obj_print_helper(print, self->subobj[0], kind);
+ }
+ return;
+ }
+
+ if (member[0] != MP_OBJ_NULL) {
+ // call the Python __str__/__repr__ method and print its result
+ mp_obj_t r = mp_call_function_1(member[0], self_in);
+ mp_obj_print_helper(print, r, PRINT_STR);
+ return;
+ }
+
+ // TODO: CPython prints fully-qualified type name
+ mp_printf(print, "<%s object at %p>", mp_obj_get_type_str(self_in), self);
+}
+
+// Construct an instance of user class self: allocate the object, run
+// __new__ (or the single native base's constructor), then __init__ if
+// __new__ returned an instance of self. Raises TypeError if __init__
+// returns something other than None.
+mp_obj_t mp_obj_instance_make_new(const mp_obj_type_t *self, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ assert(mp_obj_is_instance_type(self));
+
+ const mp_obj_type_t *native_base;
+ uint num_native_bases = instance_count_native_bases(self, &native_base);
+ assert(num_native_bases < 2);
+
+ mp_obj_instance_t *o = MP_OBJ_TO_PTR(mp_obj_new_instance(self, num_native_bases));
+
+ // This executes only the "__new__" part of object creation.
+ // TODO: This won't work well for classes with native bases.
+ // TODO: This is a hack, should be resolved along the lines of
+ // https://github.com/micropython/micropython/issues/606#issuecomment-43685883
+ if (n_args == 1 && *args == MP_OBJ_SENTINEL) {
+ return MP_OBJ_FROM_PTR(o);
+ }
+
+ // look for __new__ function
+ mp_obj_t init_fn[2] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = NULL,
+ .attr = MP_QSTR___new__,
+ .meth_offset = offsetof(mp_obj_type_t, make_new),
+ .dest = init_fn,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self);
+
+ mp_obj_t new_ret = MP_OBJ_FROM_PTR(o);
+ if (init_fn[0] == MP_OBJ_SENTINEL) {
+ // Native type's constructor is what wins - it gets all our arguments,
+ // and no Python base classes are initialized at all.
+ o->subobj[0] = native_base->make_new(native_base, n_args, n_kw, args);
+ } else if (init_fn[0] != MP_OBJ_NULL) {
+ // now call Python class __new__ function with all args
+ if (n_args == 0 && n_kw == 0) {
+ mp_obj_t args2[1] = {MP_OBJ_FROM_PTR(self)};
+ new_ret = mp_call_function_n_kw(init_fn[0], 1, 0, args2);
+ } else {
+ // prepend the class itself to the caller's positional/keyword args
+ mp_obj_t *args2 = m_new(mp_obj_t, 1 + n_args + 2 * n_kw);
+ args2[0] = MP_OBJ_FROM_PTR(self);
+ memcpy(args2 + 1, args, (n_args + 2 * n_kw) * sizeof(mp_obj_t));
+ new_ret = mp_call_function_n_kw(init_fn[0], n_args + 1, n_kw, args2);
+ m_del(mp_obj_t, args2, 1 + n_args + 2 * n_kw);
+ }
+
+ }
+
+ // https://docs.python.org/3.4/reference/datamodel.html#object.__new__
+ // "If __new__() does not return an instance of cls, then the new instance’s __init__() method will not be invoked."
+ if (mp_obj_get_type(new_ret) != self) {
+ return new_ret;
+ }
+
+ o = MP_OBJ_TO_PTR(new_ret);
+
+ // now call Python class __init__ function with all args
+ init_fn[0] = init_fn[1] = MP_OBJ_NULL;
+ lookup.obj = o;
+ lookup.attr = MP_QSTR___init__;
+ lookup.meth_offset = 0;
+ mp_obj_class_lookup(&lookup, self);
+ if (init_fn[0] != MP_OBJ_NULL) {
+ mp_obj_t init_ret;
+ if (n_args == 0 && n_kw == 0) {
+ init_ret = mp_call_method_n_kw(0, 0, init_fn);
+ } else {
+ // prepend the bound-method pair to the caller's args
+ mp_obj_t *args2 = m_new(mp_obj_t, 2 + n_args + 2 * n_kw);
+ args2[0] = init_fn[0];
+ args2[1] = init_fn[1];
+ memcpy(args2 + 2, args, (n_args + 2 * n_kw) * sizeof(mp_obj_t));
+ init_ret = mp_call_method_n_kw(n_args, n_kw, args2);
+ m_del(mp_obj_t, args2, 2 + n_args + 2 * n_kw);
+ }
+ if (init_ret != mp_const_none) {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+ "__init__() should return None"));
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+ "__init__() should return None, not '%s'", mp_obj_get_type_str(init_ret)));
+ }
+ }
+
+ }
+
+ return MP_OBJ_FROM_PTR(o);
+}
+
+// Map unary opcode -> special-method name; entries left at 0 have no
+// corresponding Python method.
+const qstr mp_unary_op_method_name[] = {
+ [MP_UNARY_OP_BOOL] = MP_QSTR___bool__,
+ [MP_UNARY_OP_LEN] = MP_QSTR___len__,
+ [MP_UNARY_OP_HASH] = MP_QSTR___hash__,
+ #if MICROPY_PY_ALL_SPECIAL_METHODS
+ [MP_UNARY_OP_POSITIVE] = MP_QSTR___pos__,
+ [MP_UNARY_OP_NEGATIVE] = MP_QSTR___neg__,
+ [MP_UNARY_OP_INVERT] = MP_QSTR___invert__,
+ #endif
+ [MP_UNARY_OP_NOT] = MP_QSTR_, // don't need to implement this, used to make sure array has full size
+};
+
+// Apply a unary operation to an instance: dispatch to the special method
+// (or a native base's unary_op slot); returns MP_OBJ_NULL if unsupported.
+// For __hash__, falls back to identity hash when only the default __eq__ exists.
+STATIC mp_obj_t instance_unary_op(mp_uint_t op, mp_obj_t self_in) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+ qstr op_name = mp_unary_op_method_name[op];
+ /* Still try to lookup native slot
+ if (op_name == 0) {
+ return MP_OBJ_NULL;
+ }
+ */
+ mp_obj_t member[2] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .attr = op_name,
+ .meth_offset = offsetof(mp_obj_type_t, unary_op),
+ .dest = member,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self->base.type);
+ if (member[0] == MP_OBJ_SENTINEL) {
+ return mp_unary_op(op, self->subobj[0]);
+ } else if (member[0] != MP_OBJ_NULL) {
+ mp_obj_t val = mp_call_function_1(member[0], self_in);
+ // __hash__ must return a small int
+ if (op == MP_UNARY_OP_HASH) {
+ val = MP_OBJ_NEW_SMALL_INT(mp_obj_get_int_truncated(val));
+ }
+ return val;
+ } else {
+ if (op == MP_UNARY_OP_HASH) {
+ lookup.attr = MP_QSTR___eq__;
+ mp_obj_class_lookup(&lookup, self->base.type);
+ if (member[0] == MP_OBJ_NULL) {
+ // https://docs.python.org/3/reference/datamodel.html#object.__hash__
+ // "User-defined classes have __eq__() and __hash__() methods by default;
+ // with them, all objects compare unequal (except with themselves) and
+ // x.__hash__() returns an appropriate value such that x == y implies
+ // both that x is y and hash(x) == hash(y)."
+ return MP_OBJ_NEW_SMALL_INT((mp_uint_t)self_in);
+ }
+ // "A class that overrides __eq__() and does not define __hash__() will have its __hash__() implicitly set to None.
+ // When the __hash__() method of a class is None, instances of the class will raise an appropriate TypeError"
+ }
+
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+// Map binary opcode -> special-method name; commented-out entries are ops
+// that are (currently) not dispatched to Python special methods.
+const qstr mp_binary_op_method_name[] = {
+ /*
+ MP_BINARY_OP_OR,
+ MP_BINARY_OP_XOR,
+ MP_BINARY_OP_AND,
+ MP_BINARY_OP_LSHIFT,
+ MP_BINARY_OP_RSHIFT,
+ */
+ [MP_BINARY_OP_ADD] = MP_QSTR___add__,
+ [MP_BINARY_OP_SUBTRACT] = MP_QSTR___sub__,
+ #if MICROPY_PY_ALL_SPECIAL_METHODS
+ [MP_BINARY_OP_MULTIPLY] = MP_QSTR___mul__,
+ [MP_BINARY_OP_FLOOR_DIVIDE] = MP_QSTR___floordiv__,
+ [MP_BINARY_OP_TRUE_DIVIDE] = MP_QSTR___truediv__,
+ #endif
+ /*
+ MP_BINARY_OP_MODULO,
+ MP_BINARY_OP_POWER,
+ MP_BINARY_OP_DIVMOD,
+ MP_BINARY_OP_INPLACE_OR,
+ MP_BINARY_OP_INPLACE_XOR,
+ MP_BINARY_OP_INPLACE_AND,
+ MP_BINARY_OP_INPLACE_LSHIFT,
+ MP_BINARY_OP_INPLACE_RSHIFT,*/
+ #if MICROPY_PY_ALL_SPECIAL_METHODS
+ [MP_BINARY_OP_INPLACE_ADD] = MP_QSTR___iadd__,
+ [MP_BINARY_OP_INPLACE_SUBTRACT] = MP_QSTR___isub__,
+ #endif
+ /*MP_BINARY_OP_INPLACE_MULTIPLY,
+ MP_BINARY_OP_INPLACE_FLOOR_DIVIDE,
+ MP_BINARY_OP_INPLACE_TRUE_DIVIDE,
+ MP_BINARY_OP_INPLACE_MODULO,
+ MP_BINARY_OP_INPLACE_POWER,*/
+ [MP_BINARY_OP_LESS] = MP_QSTR___lt__,
+ [MP_BINARY_OP_MORE] = MP_QSTR___gt__,
+ [MP_BINARY_OP_EQUAL] = MP_QSTR___eq__,
+ [MP_BINARY_OP_LESS_EQUAL] = MP_QSTR___le__,
+ [MP_BINARY_OP_MORE_EQUAL] = MP_QSTR___ge__,
+ /*
+ MP_BINARY_OP_NOT_EQUAL, // a != b calls a == b and inverts result
+ */
+ [MP_BINARY_OP_IN] = MP_QSTR___contains__,
+ /*
+ MP_BINARY_OP_IS,
+ */
+ [MP_BINARY_OP_EXCEPTION_MATCH] = MP_QSTR_, // not implemented, used to make sure array has full size
+};
+
+// Apply a binary operation with an instance on the left-hand side: dispatch
+// to the class's special method or a native base's binary_op slot.
+// Returns MP_OBJ_NULL if the op is unsupported (no reflected-op fallback here).
+STATIC mp_obj_t instance_binary_op(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+ // Note: For ducktyping, CPython does not look in the instance members or use
+ // __getattr__ or __getattribute__. It only looks in the class dictionary.
+ mp_obj_instance_t *lhs = MP_OBJ_TO_PTR(lhs_in);
+ qstr op_name = mp_binary_op_method_name[op];
+ /* Still try to lookup native slot
+ if (op_name == 0) {
+ return MP_OBJ_NULL;
+ }
+ */
+ mp_obj_t dest[3] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = lhs,
+ .attr = op_name,
+ .meth_offset = offsetof(mp_obj_type_t, binary_op),
+ .dest = dest,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, lhs->base.type);
+ if (dest[0] == MP_OBJ_SENTINEL) {
+ return mp_binary_op(op, lhs->subobj[0], rhs_in);
+ } else if (dest[0] != MP_OBJ_NULL) {
+ dest[2] = rhs_in;
+ return mp_call_method_n_kw(1, 0, dest);
+ } else {
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+// Load attribute attr of an instance into dest: instance members first, then
+// the class hierarchy (handling properties and descriptors), finally
+// __getattr__. dest[0] is left MP_OBJ_NULL if the attribute is not found.
+STATIC void mp_obj_instance_load_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+ // logic: look in instance members then class locals
+ assert(mp_obj_is_instance_type(mp_obj_get_type(self_in)));
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+
+ mp_map_elem_t *elem = mp_map_lookup(&self->members, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ // object member, always treated as a value
+ // TODO should we check for properties?
+ dest[0] = elem->value;
+ return;
+ }
+#if MICROPY_CPYTHON_COMPAT
+ if (attr == MP_QSTR___dict__) {
+ // Create a new dict with a copy of the instance's map items.
+ // This creates, unlike CPython, a 'read-only' __dict__: modifying
+ // it will not result in modifications to the actual instance members.
+ mp_map_t *map = &self->members;
+ mp_obj_t attr_dict = mp_obj_new_dict(map->used);
+ for (mp_uint_t i = 0; i < map->alloc; ++i) {
+ if (MP_MAP_SLOT_IS_FILLED(map, i)) {
+ mp_obj_dict_store(attr_dict, map->table[i].key, map->table[i].value);
+ }
+ }
+ dest[0] = attr_dict;
+ return;
+ }
+#endif
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .attr = attr,
+ .meth_offset = 0,
+ .dest = dest,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self->base.type);
+ mp_obj_t member = dest[0];
+ if (member != MP_OBJ_NULL) {
+ #if MICROPY_PY_BUILTINS_PROPERTY
+ if (MP_OBJ_IS_TYPE(member, &mp_type_property)) {
+ // object member is a property; delegate the load to the property
+ // Note: This is an optimisation for code size and execution time.
+ // The proper way to do it is have the functionality just below
+ // in a __get__ method of the property object, and then it would
+ // be called by the descriptor code down below. But that way
+ // requires overhead for the nested mp_call's and overhead for
+ // the code.
+ const mp_obj_t *proxy = mp_obj_property_get(member);
+ if (proxy[0] == mp_const_none) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_AttributeError, "unreadable attribute"));
+ } else {
+ dest[0] = mp_call_function_n_kw(proxy[0], 1, 0, &self_in);
+ }
+ return;
+ }
+ #endif
+
+ #if MICROPY_PY_DESCRIPTORS
+ // found a class attribute; if it has a __get__ method then call it with the
+ // class instance and class as arguments and return the result
+ // Note that this is functionally correct but very slow: each load_attr
+ // requires an extra mp_load_method_maybe to check for the __get__.
+ mp_obj_t attr_get_method[4];
+ mp_load_method_maybe(member, MP_QSTR___get__, attr_get_method);
+ if (attr_get_method[0] != MP_OBJ_NULL) {
+ attr_get_method[2] = self_in;
+ attr_get_method[3] = MP_OBJ_FROM_PTR(mp_obj_get_type(self_in));
+ dest[0] = mp_call_method_n_kw(2, 0, attr_get_method);
+ }
+ #endif
+ return;
+ }
+
+ // try __getattr__
+ if (attr != MP_QSTR___getattr__) {
+ mp_obj_t dest2[3];
+ mp_load_method_maybe(self_in, MP_QSTR___getattr__, dest2);
+ if (dest2[0] != MP_OBJ_NULL) {
+ // __getattr__ exists, call it and return its result
+ // XXX if this fails to load the requested attr, should we catch the attribute error and return silently?
+ dest2[2] = MP_OBJ_NEW_QSTR(attr);
+ dest[0] = mp_call_method_n_kw(1, 0, dest2);
+ return;
+ }
+ }
+}
+
+// Store (or, when value == MP_OBJ_NULL, delete) attribute attr of an
+// instance, delegating to a property setter/deleter or a data descriptor's
+// __set__/__delete__ when the class defines one. Returns true on success.
+STATIC bool mp_obj_instance_store_attr(mp_obj_t self_in, qstr attr, mp_obj_t value) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+
+ #if MICROPY_PY_BUILTINS_PROPERTY || MICROPY_PY_DESCRIPTORS
+ // With property and/or descriptors enabled we need to do a lookup
+ // first in the class dict for the attribute to see if the store should
+ // be delegated.
+ // Note: this makes all stores slow... how to fix?
+ mp_obj_t member[2] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .attr = attr,
+ .meth_offset = 0,
+ .dest = member,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self->base.type);
+
+ if (member[0] != MP_OBJ_NULL) {
+ #if MICROPY_PY_BUILTINS_PROPERTY
+ if (MP_OBJ_IS_TYPE(member[0], &mp_type_property)) {
+ // attribute exists and is a property; delegate the store/delete
+ // Note: This is an optimisation for code size and execution time.
+ // The proper way to do it is have the functionality just below in
+ // a __set__/__delete__ method of the property object, and then it
+ // would be called by the descriptor code down below. But that way
+ // requires overhead for the nested mp_call's and overhead for
+ // the code.
+ const mp_obj_t *proxy = mp_obj_property_get(member[0]);
+ mp_obj_t dest[2] = {self_in, value};
+ if (value == MP_OBJ_NULL) {
+ // delete attribute
+ if (proxy[2] == mp_const_none) {
+ // TODO better error message?
+ return false;
+ } else {
+ mp_call_function_n_kw(proxy[2], 1, 0, dest);
+ return true;
+ }
+ } else {
+ // store attribute
+ if (proxy[1] == mp_const_none) {
+ // TODO better error message?
+ return false;
+ } else {
+ mp_call_function_n_kw(proxy[1], 2, 0, dest);
+ return true;
+ }
+ }
+ }
+ #endif
+
+ #if MICROPY_PY_DESCRIPTORS
+ // found a class attribute; if it has a __set__/__delete__ method then
+ // call it with the class instance (and value) as arguments
+ if (value == MP_OBJ_NULL) {
+ // delete attribute
+ mp_obj_t attr_delete_method[3];
+ mp_load_method_maybe(member[0], MP_QSTR___delete__, attr_delete_method);
+ if (attr_delete_method[0] != MP_OBJ_NULL) {
+ attr_delete_method[2] = self_in;
+ mp_call_method_n_kw(1, 0, attr_delete_method);
+ return true;
+ }
+ } else {
+ // store attribute
+ mp_obj_t attr_set_method[4];
+ mp_load_method_maybe(member[0], MP_QSTR___set__, attr_set_method);
+ if (attr_set_method[0] != MP_OBJ_NULL) {
+ attr_set_method[2] = self_in;
+ attr_set_method[3] = value;
+ mp_call_method_n_kw(2, 0, attr_set_method);
+ return true;
+ }
+ }
+ #endif
+ }
+ #endif
+
+ if (value == MP_OBJ_NULL) {
+ // delete attribute
+ mp_map_elem_t *elem = mp_map_lookup(&self->members, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP_REMOVE_IF_FOUND);
+ return elem != NULL;
+ } else {
+ // store attribute
+ mp_map_lookup(&self->members, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = value;
+ return true;
+ }
+}
+
+// Combined attr slot for instances: dest[0]==MP_OBJ_NULL means load,
+// otherwise store/delete dest[1]; success is signalled by dest[0]=MP_OBJ_NULL.
+void mp_obj_instance_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+ if (dest[0] == MP_OBJ_NULL) {
+ mp_obj_instance_load_attr(self_in, attr, dest);
+ } else {
+ if (mp_obj_instance_store_attr(self_in, attr, dest[1])) {
+ dest[0] = MP_OBJ_NULL; // indicate success
+ }
+ }
+}
+
+// Subscript an instance: value==MP_OBJ_SENTINEL loads via __getitem__,
+// value==MP_OBJ_NULL deletes via __delitem__, otherwise stores via
+// __setitem__; falls through to a native base's subscr slot when matched.
+STATIC mp_obj_t instance_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_t member[2] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .meth_offset = offsetof(mp_obj_type_t, subscr),
+ .dest = member,
+ .is_type = false,
+ };
+ uint meth_args; // number of positional args passed to the special method
+ if (value == MP_OBJ_NULL) {
+ // delete item
+ lookup.attr = MP_QSTR___delitem__;
+ mp_obj_class_lookup(&lookup, self->base.type);
+ meth_args = 2;
+ } else if (value == MP_OBJ_SENTINEL) {
+ // load item
+ lookup.attr = MP_QSTR___getitem__;
+ mp_obj_class_lookup(&lookup, self->base.type);
+ meth_args = 2;
+ } else {
+ // store item
+ lookup.attr = MP_QSTR___setitem__;
+ mp_obj_class_lookup(&lookup, self->base.type);
+ meth_args = 3;
+ }
+ if (member[0] == MP_OBJ_SENTINEL) {
+ return mp_obj_subscr(self->subobj[0], index, value);
+ } else if (member[0] != MP_OBJ_NULL) {
+ mp_obj_t args[3] = {self_in, index, value};
+ // TODO probably need to call mp_convert_member_lookup, and use mp_call_method_n_kw
+ mp_obj_t ret = mp_call_function_n_kw(member[0], meth_args, 0, args);
+ if (value == MP_OBJ_SENTINEL) {
+ return ret;
+ } else {
+ return mp_const_none;
+ }
+ } else {
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+// Look up __call__ on an instance's class; returns the callable, or
+// MP_OBJ_SENTINEL when a native base's call slot matched, or MP_OBJ_NULL.
+STATIC mp_obj_t mp_obj_instance_get_call(mp_obj_t self_in) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_t member[2] = {MP_OBJ_NULL, MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .attr = MP_QSTR___call__,
+ .meth_offset = offsetof(mp_obj_type_t, call),
+ .dest = member,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self->base.type);
+ return member[0];
+}
+
+// True if the instance's class (or a native base) defines __call__.
+bool mp_obj_instance_is_callable(mp_obj_t self_in) {
+ return mp_obj_instance_get_call(self_in) != MP_OBJ_NULL;
+}
+
+// Call an instance like a function via its __call__ (or a native base's
+// call slot); raises TypeError if the object is not callable.
+mp_obj_t mp_obj_instance_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ mp_obj_t call = mp_obj_instance_get_call(self_in);
+ if (call == MP_OBJ_NULL) {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+ "object not callable"));
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+ "'%s' object is not callable", mp_obj_get_type_str(self_in)));
+ }
+ }
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+ if (call == MP_OBJ_SENTINEL) {
+ // native base is callable: delegate to the native sub-object
+ return mp_call_function_n_kw(self->subobj[0], n_args, n_kw, args);
+ }
+ mp_obj_t meth = mp_obj_new_bound_meth(call, self_in);
+ return mp_call_function_n_kw(meth, n_args, n_kw, args);
+}
+
+// Get an iterator for an instance via its __iter__ (or a native base's
+// getiter slot); returns MP_OBJ_NULL if the object is not iterable.
+STATIC mp_obj_t instance_getiter(mp_obj_t self_in) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_t member[2] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .attr = MP_QSTR___iter__,
+ .meth_offset = offsetof(mp_obj_type_t, getiter),
+ .dest = member,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self->base.type);
+ if (member[0] == MP_OBJ_NULL) {
+ return MP_OBJ_NULL;
+ } else if (member[0] == MP_OBJ_SENTINEL) {
+ // native base provides the iterator
+ mp_obj_type_t *type = mp_obj_get_type(self->subobj[0]);
+ return type->getiter(self->subobj[0]);
+ } else {
+ return mp_call_method_n_kw(0, 0, member);
+ }
+}
+
+// Buffer-protocol slot for instances: only a native base can supply the
+// buffer; returns non-zero (failure) if no native base implements it.
+STATIC mp_int_t instance_get_buffer(mp_obj_t self_in, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_t member[2] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .attr = MP_QSTR_, // don't actually look for a method
+ .meth_offset = offsetof(mp_obj_type_t, buffer_p.get_buffer),
+ .dest = member,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self->base.type);
+ if (member[0] == MP_OBJ_SENTINEL) {
+ mp_obj_type_t *type = mp_obj_get_type(self->subobj[0]);
+ return type->buffer_p.get_buffer(self->subobj[0], bufinfo, flags);
+ } else {
+ return 1; // object does not support buffer protocol
+ }
+}
+
+/******************************************************************************/
+// type object
+// - the struct is mp_obj_type_t and is defined in obj.h so const types can be made
+// - there is a constant mp_obj_type_t (called mp_type_type) for the 'type' object
+// - creating a new class (a new type) creates a new mp_obj_type_t
+
+// Print a class object as "<class 'name'>".
+STATIC void type_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_type_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_printf(print, "<class '%q'>", self->name);
+}
+
+// Constructor for the builtin 'type': type(obj) returns obj's class,
+// type(name, bases, dict) creates a new class. 2 args raises TypeError.
+STATIC mp_obj_t type_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ (void)type_in;
+
+ mp_arg_check_num(n_args, n_kw, 1, 3, false);
+
+ switch (n_args) {
+ case 1:
+ return MP_OBJ_FROM_PTR(mp_obj_get_type(args[0]));
+
+ case 3:
+ // args[0] = name
+ // args[1] = bases tuple
+ // args[2] = locals dict
+ return mp_obj_new_type(mp_obj_str_get_qstr(args[0]), args[1], args[2]);
+
+ default:
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "type takes 1 or 3 arguments"));
+ }
+}
+
+// Calling a class instantiates it: delegate to the class's make_new slot;
+// raises TypeError for types without a constructor.
+STATIC mp_obj_t type_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ // instantiate an instance of a class
+
+ mp_obj_type_t *self = MP_OBJ_TO_PTR(self_in);
+
+ if (self->make_new == NULL) {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "cannot create instance"));
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+ "cannot create '%q' instances", self->name));
+ }
+ }
+
+ // make new instance
+ mp_obj_t o = self->make_new(self, n_args, n_kw, args);
+
+ // return new instance
+ return o;
+}
+
+// Attr slot for class objects: load searches the class hierarchy (plus the
+// __name__ pseudo-attribute); store/delete modifies the class's locals_dict
+// (silently fails if the dict lives in ROM).
+STATIC void type_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+ assert(MP_OBJ_IS_TYPE(self_in, &mp_type_type));
+ mp_obj_type_t *self = MP_OBJ_TO_PTR(self_in);
+
+ if (dest[0] == MP_OBJ_NULL) {
+ // load attribute
+ #if MICROPY_CPYTHON_COMPAT
+ if (attr == MP_QSTR___name__) {
+ dest[0] = MP_OBJ_NEW_QSTR(self->name);
+ return;
+ }
+ #endif
+ struct class_lookup_data lookup = {
+ .obj = (mp_obj_instance_t*)self,
+ .attr = attr,
+ .meth_offset = 0,
+ .dest = dest,
+ .is_type = true,
+ };
+ mp_obj_class_lookup(&lookup, self);
+ } else {
+ // delete/store attribute
+
+ // TODO CPython allows STORE_ATTR to a class, but is this the correct implementation?
+
+ if (self->locals_dict != NULL) {
+ assert(self->locals_dict->base.type == &mp_type_dict); // MicroPython restriction, for now
+ mp_map_t *locals_map = &self->locals_dict->map;
+ if (dest[1] == MP_OBJ_NULL) {
+ // delete attribute
+ mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP_REMOVE_IF_FOUND);
+ // note that locals_map may be in ROM, so remove will fail in that case
+ if (elem != NULL) {
+ dest[0] = MP_OBJ_NULL; // indicate success
+ }
+ } else {
+ // store attribute
+ mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+ // note that locals_map may be in ROM, so add will fail in that case
+ if (elem != NULL) {
+ elem->value = dest[1];
+ dest[0] = MP_OBJ_NULL; // indicate success
+ }
+ }
+ }
+ }
+}
+
+// The 'type' type itself: the metatype of all MicroPython type objects.
+// Its make_new implements the type(...) builtin (1-arg and 3-arg forms) and
+// its call slot instantiates classes.
+const mp_obj_type_t mp_type_type = {
+ { &mp_type_type },
+ .name = MP_QSTR_type,
+ .print = type_print,
+ .make_new = type_make_new,
+ .call = type_call,
+ .unary_op = mp_generic_unary_op,
+ .attr = type_attr,
+};
+
+// Create a new user-defined class object with the given name, tuple of base
+// classes and locals dict (the 3-arg type(...) form / class statement).
+// Validates the bases, fills in the generic instance_* slots, rejects layouts
+// with more than one native base, and wraps a plain-function __new__ in a
+// staticmethod (matching CPython's implicit staticmethod treatment of __new__).
+// Raises TypeError on an unacceptable base or a base layout conflict.
+mp_obj_t mp_obj_new_type(qstr name, mp_obj_t bases_tuple, mp_obj_t locals_dict) {
+ assert(MP_OBJ_IS_TYPE(bases_tuple, &mp_type_tuple)); // Micro Python restriction, for now
+ assert(MP_OBJ_IS_TYPE(locals_dict, &mp_type_dict)); // Micro Python restriction, for now
+
+ // TODO might need to make a copy of locals_dict; at least that's how CPython does it
+
+ // Basic validation of base classes
+ mp_uint_t len;
+ mp_obj_t *items;
+ mp_obj_tuple_get(bases_tuple, &len, &items);
+ for (uint i = 0; i < len; i++) {
+ assert(MP_OBJ_IS_TYPE(items[i], &mp_type_type));
+ mp_obj_type_t *t = MP_OBJ_TO_PTR(items[i]);
+ // TODO: Verify with CPy, tested on function type
+ // a base with no make_new cannot be instantiated, so cannot be subclassed
+ if (t->make_new == NULL) {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+ "type is not an acceptable base type"));
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+ "type '%q' is not an acceptable base type", t->name));
+ }
+ }
+ }
+
+ // allocate and populate the new type, using the generic instance slots
+ mp_obj_type_t *o = m_new0(mp_obj_type_t, 1);
+ o->base.type = &mp_type_type;
+ o->name = name;
+ o->print = instance_print;
+ o->make_new = mp_obj_instance_make_new;
+ o->call = mp_obj_instance_call;
+ o->unary_op = instance_unary_op;
+ o->binary_op = instance_binary_op;
+ o->attr = mp_obj_instance_attr;
+ o->subscr = instance_subscr;
+ o->getiter = instance_getiter;
+ //o->iternext = ; not implemented
+ o->buffer_p.get_buffer = instance_get_buffer;
+ //o->stream_p = ; not implemented
+ o->bases_tuple = MP_OBJ_TO_PTR(bases_tuple);
+ o->locals_dict = MP_OBJ_TO_PTR(locals_dict);
+
+ // at most one native (non-instance) base is allowed, since the instance
+ // stores only a single native sub-object
+ const mp_obj_type_t *native_base;
+ uint num_native_bases = instance_count_native_bases(o, &native_base);
+ if (num_native_bases > 1) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "multiple bases have instance lay-out conflict"));
+ }
+
+ mp_map_t *locals_map = &o->locals_dict->map;
+ mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(MP_QSTR___new__), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ // __new__ slot exists; check if it is a function
+ if (MP_OBJ_IS_FUN(elem->value)) {
+ // __new__ is a function, wrap it in a staticmethod decorator
+ elem->value = static_class_method_make_new(&mp_type_staticmethod, 1, 0, &elem->value);
+ }
+ }
+
+ return MP_OBJ_FROM_PTR(o);
+}
+
+/******************************************************************************/
+// super object
+
+// super object: holds the (type, obj) pair created by super(type, obj) and
+// resolves attribute loads against the bases of 'type' rather than the class
+// of 'obj' itself.
+typedef struct _mp_obj_super_t {
+ mp_obj_base_t base;
+ mp_obj_t type; // the class whose bases are searched
+ mp_obj_t obj; // the bound instance
+} mp_obj_super_t;
+
+// Print a super object as "<super: type, obj>".
+STATIC void super_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_super_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_print_str(print, "<super: ");
+ mp_obj_print_helper(print, self->type, PRINT_STR);
+ mp_print_str(print, ", ");
+ mp_obj_print_helper(print, self->obj, PRINT_STR);
+ mp_print_str(print, ">");
+}
+
+// Constructor for super(type, obj); requires exactly 2 positional args.
+STATIC mp_obj_t super_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ (void)type_in;
+ // 0 arguments are turned into 2 in the compiler
+ // 1 argument is not yet implemented
+ mp_arg_check_num(n_args, n_kw, 2, 2, false);
+ return mp_obj_new_super(args[0], args[1]);
+}
+
+// Attribute handler for super objects: load-only. Searches each base of
+// self->type in order, then falls back to mp_type_object. Store/delete are
+// silently ignored (dest left untouched signals "not handled").
+STATIC void super_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+ if (dest[0] != MP_OBJ_NULL) {
+ // not load attribute
+ return;
+ }
+
+ assert(MP_OBJ_IS_TYPE(self_in, &mp_type_super));
+ mp_obj_super_t *self = MP_OBJ_TO_PTR(self_in);
+
+ assert(MP_OBJ_IS_TYPE(self->type, &mp_type_type));
+
+ mp_obj_type_t *type = MP_OBJ_TO_PTR(self->type);
+
+ // for a const struct, this entry might be NULL
+ if (type->bases_tuple == NULL) {
+ return;
+ }
+
+ mp_uint_t len = type->bases_tuple->len;
+ mp_obj_t *items = type->bases_tuple->items;
+ struct class_lookup_data lookup = {
+ .obj = MP_OBJ_TO_PTR(self->obj),
+ .attr = attr,
+ .meth_offset = 0,
+ .dest = dest,
+ .is_type = false,
+ };
+ // search bases left-to-right; first match wins
+ for (uint i = 0; i < len; i++) {
+ assert(MP_OBJ_IS_TYPE(items[i], &mp_type_type));
+ mp_obj_class_lookup(&lookup, (mp_obj_type_t*)MP_OBJ_TO_PTR(items[i]));
+ if (dest[0] != MP_OBJ_NULL) {
+ return;
+ }
+ }
+ // nothing found in the explicit bases; try the implicit root 'object'
+ mp_obj_class_lookup(&lookup, &mp_type_object);
+}
+
+// Type table for the super builtin.
+const mp_obj_type_t mp_type_super = {
+ { &mp_type_type },
+ .name = MP_QSTR_super,
+ .print = super_print,
+ .make_new = super_make_new,
+ .attr = super_attr,
+};
+
+// Allocate a new super object binding 'type' and 'obj'.
+mp_obj_t mp_obj_new_super(mp_obj_t type, mp_obj_t obj) {
+ mp_obj_super_t *o = m_new_obj(mp_obj_super_t);
+ *o = (mp_obj_super_t){{&mp_type_super}, type, obj};
+ return MP_OBJ_FROM_PTR(o);
+}
+
+/******************************************************************************/
+// subclassing and built-ins specific to types
+
+// object and classinfo should be type objects
+// (but the function will fail gracefully if they are not)
+// object and classinfo should be type objects
+// (but the function will fail gracefully if they are not)
+// Depth-first search of 'object' and its transitive bases for 'classinfo'.
+// Returns true iff object is classinfo or a (possibly indirect) subclass.
+// The last base is handled by looping instead of recursing, so a long
+// single-inheritance chain uses constant stack.
+bool mp_obj_is_subclass_fast(mp_const_obj_t object, mp_const_obj_t classinfo) {
+ for (;;) {
+ if (object == classinfo) {
+ return true;
+ }
+
+ // not equivalent classes, keep searching base classes
+
+ // object should always be a type object, but just return false if it's not
+ if (!MP_OBJ_IS_TYPE(object, &mp_type_type)) {
+ return false;
+ }
+
+ const mp_obj_type_t *self = MP_OBJ_TO_PTR(object);
+
+ // for a const struct, this entry might be NULL
+ if (self->bases_tuple == NULL) {
+ return false;
+ }
+
+ // get the base objects (they should be type objects)
+ mp_uint_t len = self->bases_tuple->len;
+ mp_obj_t *items = self->bases_tuple->items;
+ if (len == 0) {
+ return false;
+ }
+
+ // iterate through the base objects
+ for (uint i = 0; i < len - 1; i++) {
+ if (mp_obj_is_subclass_fast(items[i], classinfo)) {
+ return true;
+ }
+ }
+
+ // search last base (simple tail recursion elimination)
+ object = items[len - 1];
+ }
+}
+
+// Shared backend for issubclass()/isinstance(): classinfo may be a single
+// class or a tuple of classes. Returns mp_const_true/mp_const_false; raises
+// TypeError if classinfo is neither a class nor a tuple.
+STATIC mp_obj_t mp_obj_is_subclass(mp_obj_t object, mp_obj_t classinfo) {
+ mp_uint_t len;
+ mp_obj_t *items;
+ if (MP_OBJ_IS_TYPE(classinfo, &mp_type_type)) {
+ // single class: treat as a 1-element array
+ len = 1;
+ items = &classinfo;
+ } else if (MP_OBJ_IS_TYPE(classinfo, &mp_type_tuple)) {
+ mp_obj_tuple_get(classinfo, &len, &items);
+ } else {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "issubclass() arg 2 must be a class or a tuple of classes"));
+ }
+
+ for (uint i = 0; i < len; i++) {
+ // We explicitly check for 'object' here since no-one explicitly derives from it
+ if (items[i] == MP_OBJ_FROM_PTR(&mp_type_object) || mp_obj_is_subclass_fast(object, items[i])) {
+ return mp_const_true;
+ }
+ }
+ return mp_const_false;
+}
+
+// issubclass(cls, classinfo) builtin: first arg must itself be a class.
+STATIC mp_obj_t mp_builtin_issubclass(mp_obj_t object, mp_obj_t classinfo) {
+ if (!MP_OBJ_IS_TYPE(object, &mp_type_type)) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "issubclass() arg 1 must be a class"));
+ }
+ return mp_obj_is_subclass(object, classinfo);
+}
+
+MP_DEFINE_CONST_FUN_OBJ_2(mp_builtin_issubclass_obj, mp_builtin_issubclass);
+
+// isinstance(obj, classinfo) builtin: checks the object's type against classinfo.
+STATIC mp_obj_t mp_builtin_isinstance(mp_obj_t object, mp_obj_t classinfo) {
+ return mp_obj_is_subclass(MP_OBJ_FROM_PTR(mp_obj_get_type(object)), classinfo);
+}
+
+MP_DEFINE_CONST_FUN_OBJ_2(mp_builtin_isinstance_obj, mp_builtin_isinstance);
+
+// If self_in is an instance of a class derived from native_type, return the
+// stored native sub-object (subobj[0]); otherwise return MP_OBJ_NULL.
+// NOTE(review): assumes self_in is an mp_obj_instance_t when the subclass
+// check passes — the cast below is unchecked.
+mp_obj_t mp_instance_cast_to_native_base(mp_const_obj_t self_in, mp_const_obj_t native_type) {
+ mp_obj_type_t *self_type = mp_obj_get_type(self_in);
+ if (!mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(self_type), native_type)) {
+ return MP_OBJ_NULL;
+ }
+ mp_obj_instance_t *self = (mp_obj_instance_t*)MP_OBJ_TO_PTR(self_in);
+ return self->subobj[0];
+}
+
+/******************************************************************************/
+// staticmethod and classmethod types (probably should go in a different file)
+
+// Shared constructor for staticmethod and classmethod: wraps the single
+// callable argument in an mp_obj_static_class_method_t tagged with whichever
+// of the two types it was invoked as.
+STATIC mp_obj_t static_class_method_make_new(const mp_obj_type_t *self, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ assert(self == &mp_type_staticmethod || self == &mp_type_classmethod);
+
+ mp_arg_check_num(n_args, n_kw, 1, 1, false);
+
+ mp_obj_static_class_method_t *o = m_new_obj(mp_obj_static_class_method_t);
+ *o = (mp_obj_static_class_method_t){{self}, args[0]};
+ return MP_OBJ_FROM_PTR(o);
+}
+
+// Type table for the staticmethod decorator.
+const mp_obj_type_t mp_type_staticmethod = {
+ { &mp_type_type },
+ .name = MP_QSTR_staticmethod,
+ .make_new = static_class_method_make_new,
+};
+
+// Type table for the classmethod decorator (same constructor as staticmethod).
+const mp_obj_type_t mp_type_classmethod = {
+ { &mp_type_type },
+ .name = MP_QSTR_classmethod,
+ .make_new = static_class_method_make_new,
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objtype.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,52 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_OBJTYPE_H__
+#define __MICROPY_INCLUDED_PY_OBJTYPE_H__
+
+#include "py/obj.h"
+
+// instance object
+// creating an instance of a class makes one of these objects
+typedef struct _mp_obj_instance_t {
+ mp_obj_base_t base;
+ mp_map_t members; // per-instance attribute map
+ mp_obj_t subobj[]; // flexible array holding native base sub-object(s)
+ // TODO maybe cache __getattr__ and __setattr__ for efficient lookup of them
+} mp_obj_instance_t;
+
+// this needs to be exposed for MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE to work
+void mp_obj_instance_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest);
+
+// these need to be exposed so mp_obj_is_callable can work correctly
+bool mp_obj_instance_is_callable(mp_obj_t self_in);
+mp_obj_t mp_obj_instance_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args);
+
+// a type is a user-defined (instance) type iff its make_new is the generic one
+#define mp_obj_is_instance_type(type) ((type)->make_new == mp_obj_instance_make_new)
+#define mp_obj_is_native_type(type) ((type)->make_new != mp_obj_instance_make_new)
+// this needs to be exposed for the above macros to work correctly
+mp_obj_t mp_obj_instance_make_new(const mp_obj_type_t *self_in, size_t n_args, size_t n_kw, const mp_obj_t *args);
+
+#endif // __MICROPY_INCLUDED_PY_OBJTYPE_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/objzip.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,76 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/objtuple.h"
+#include "py/runtime.h"
+
+// zip object: stores one iterator per constructor argument in a flexible array.
+typedef struct _mp_obj_zip_t {
+ mp_obj_base_t base;
+ mp_uint_t n_iters; // number of iterators in iters[]
+ mp_obj_t iters[]; // flexible array of iterator objects
+} mp_obj_zip_t;
+
+// Constructor zip(*iterables): obtains an iterator for each argument.
+// Raises TypeError (via mp_getiter) if an argument is not iterable.
+STATIC mp_obj_t zip_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ mp_arg_check_num(n_args, n_kw, 0, MP_OBJ_FUN_ARGS_MAX, false);
+
+ // variable-size allocation: header plus n_args iterator slots
+ mp_obj_zip_t *o = m_new_obj_var(mp_obj_zip_t, mp_obj_t, n_args);
+ o->base.type = type;
+ o->n_iters = n_args;
+ for (mp_uint_t i = 0; i < n_args; i++) {
+ o->iters[i] = mp_getiter(args[i]);
+ }
+ return MP_OBJ_FROM_PTR(o);
+}
+
+// Produce the next tuple of the zip, or MP_OBJ_STOP_ITERATION when any of the
+// underlying iterators (or the zip itself, if empty) is exhausted.
+STATIC mp_obj_t zip_iternext(mp_obj_t self_in) {
+ assert(MP_OBJ_IS_TYPE(self_in, &mp_type_zip));
+ mp_obj_zip_t *self = MP_OBJ_TO_PTR(self_in);
+ if (self->n_iters == 0) {
+ return MP_OBJ_STOP_ITERATION;
+ }
+ mp_obj_tuple_t *tuple = MP_OBJ_TO_PTR(mp_obj_new_tuple(self->n_iters, NULL));
+
+ for (mp_uint_t i = 0; i < self->n_iters; i++) {
+ mp_obj_t next = mp_iternext(self->iters[i]);
+ if (next == MP_OBJ_STOP_ITERATION) {
+ // one source ran out: discard the partially-built tuple and stop
+ mp_obj_tuple_del(MP_OBJ_FROM_PTR(tuple));
+ return MP_OBJ_STOP_ITERATION;
+ }
+ tuple->items[i] = next;
+ }
+ return MP_OBJ_FROM_PTR(tuple);
+}
+
+// Type table for the zip builtin; zip is its own iterator (getiter=identity).
+const mp_obj_type_t mp_type_zip = {
+ { &mp_type_type },
+ .name = MP_QSTR_zip,
+ .make_new = zip_make_new,
+ .getiter = mp_identity,
+ .iternext = zip_iternext,
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/opmethods.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,52 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/runtime0.h"
+#include "py/builtin.h"
+
+// Function objects wrapping the subscript/contains type slots, so that bound
+// special methods like __getitem__ can be looked up as ordinary attributes.
+
+// obj[key] -> value, via the type's subscr slot with MP_OBJ_SENTINEL (load).
+STATIC mp_obj_t op_getitem(mp_obj_t self_in, mp_obj_t key_in) {
+ mp_obj_type_t *type = mp_obj_get_type(self_in);
+ return type->subscr(self_in, key_in, MP_OBJ_SENTINEL);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_op_getitem_obj, op_getitem);
+
+// obj[key] = value, via the type's subscr slot with the value to store.
+STATIC mp_obj_t op_setitem(mp_obj_t self_in, mp_obj_t key_in, mp_obj_t value_in) {
+ mp_obj_type_t *type = mp_obj_get_type(self_in);
+ return type->subscr(self_in, key_in, value_in);
+}
+MP_DEFINE_CONST_FUN_OBJ_3(mp_op_setitem_obj, op_setitem);
+
+// del obj[key], via the type's subscr slot with MP_OBJ_NULL (delete).
+STATIC mp_obj_t op_delitem(mp_obj_t self_in, mp_obj_t key_in) {
+ mp_obj_type_t *type = mp_obj_get_type(self_in);
+ return type->subscr(self_in, key_in, MP_OBJ_NULL);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_op_delitem_obj, op_delitem);
+
+// rhs in lhs, via the type's binary_op slot with MP_BINARY_OP_IN.
+STATIC mp_obj_t op_contains(mp_obj_t lhs_in, mp_obj_t rhs_in) {
+ mp_obj_type_t *type = mp_obj_get_type(lhs_in);
+ return type->binary_op(MP_BINARY_OP_IN, lhs_in, rhs_in);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_op_contains_obj, op_contains);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/parse.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,1097 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2015 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <unistd.h> // for ssize_t
+#include <assert.h>
+#include <string.h>
+
+#include "py/nlr.h"
+#include "py/lexer.h"
+#include "py/parse.h"
+#include "py/parsenum.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+#include "py/objint.h"
+#include "py/builtin.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+// Encoding of grammar-rule metadata. Each rule's 'act' byte packs an argument
+// count (low nibble), a kind (OR/AND/LIST), and two flag bits; each 16-bit
+// rule argument packs a kind (token / rule / optional rule) with its id.
+#define RULE_ACT_ARG_MASK (0x0f)
+#define RULE_ACT_KIND_MASK (0x30)
+#define RULE_ACT_ALLOW_IDENT (0x40)
+#define RULE_ACT_ADD_BLANK (0x80)
+#define RULE_ACT_OR (0x10)
+#define RULE_ACT_AND (0x20)
+#define RULE_ACT_LIST (0x30)
+
+#define RULE_ARG_KIND_MASK (0xf000)
+#define RULE_ARG_ARG_MASK (0x0fff)
+#define RULE_ARG_TOK (0x1000)
+#define RULE_ARG_RULE (0x2000)
+#define RULE_ARG_OPT_RULE (0x3000)
+
+// true if matching this rule should push an extra blank node
+#define ADD_BLANK_NODE(rule) ((rule->act & RULE_ACT_ADD_BLANK) != 0)
+
+// (un)comment to use rule names; for debugging
+//#define USE_RULE_NAME (1)
+
+// One grammar rule: id, packed action byte, optional debug name, and a
+// flexible array of packed arguments (tokens / sub-rules).
+typedef struct _rule_t {
+ byte rule_id;
+ byte act;
+#ifdef USE_RULE_NAME
+ const char *rule_name;
+#endif
+ uint16_t arg[];
+} rule_t;
+
+// Rule ids: one per DEF_RULE in py/grammar.h, followed by special pseudo-rule
+// ids used only for parse-tree nodes (they have no grammar entry).
+enum {
+#define DEF_RULE(rule, comp, kind, ...) RULE_##rule,
+#include "py/grammar.h"
+#undef DEF_RULE
+ RULE_maximum_number_of,
+ RULE_string, // special node for non-interned string
+ RULE_bytes, // special node for non-interned bytes
+ RULE_const_object, // special node for a constant, generic Python object
+};
+
+#define ident (RULE_ACT_ALLOW_IDENT)
+#define blank (RULE_ACT_ADD_BLANK)
+#define or(n) (RULE_ACT_OR | n)
+#define and(n) (RULE_ACT_AND | n)
+#define one_or_more (RULE_ACT_LIST | 2)
+#define list (RULE_ACT_LIST | 1)
+#define list_with_end (RULE_ACT_LIST | 3)
+#define tok(t) (RULE_ARG_TOK | MP_TOKEN_##t)
+#define rule(r) (RULE_ARG_RULE | RULE_##r)
+#define opt_rule(r) (RULE_ARG_OPT_RULE | RULE_##r)
+#ifdef USE_RULE_NAME
+#define DEF_RULE(rule, comp, kind, ...) static const rule_t rule_##rule = { RULE_##rule, kind, #rule, { __VA_ARGS__ } };
+#else
+#define DEF_RULE(rule, comp, kind, ...) static const rule_t rule_##rule = { RULE_##rule, kind, { __VA_ARGS__ } };
+#endif
+#include "py/grammar.h"
+#undef or
+#undef and
+#undef list
+#undef list_with_end
+#undef tok
+#undef rule
+#undef opt_rule
+#undef one_or_more
+#undef DEF_RULE
+
+STATIC const rule_t *rules[] = {
+#define DEF_RULE(rule, comp, kind, ...) &rule_##rule,
+#include "py/grammar.h"
+#undef DEF_RULE
+};
+
+// Entry on the rule stack: which rule is in progress, which argument of it
+// we are up to, and the source line it started on (bit-packed into one word).
+typedef struct _rule_stack_t {
+ size_t src_line : 8 * sizeof(size_t) - 8; // maximum bits storing source line number
+ size_t rule_id : 8; // this must be large enough to fit largest rule number
+ size_t arg_i; // this dictates the maximum nodes in a "list" of things
+} rule_stack_t;
+
+// Chunk of bump-allocated memory for parse nodes. While a chunk is current,
+// union_.used tracks the fill level; once retired it is linked into the
+// parse tree's chunk list via union_.next.
+typedef struct _mp_parse_chunk_t {
+ size_t alloc;
+ union {
+ size_t used;
+ struct _mp_parse_chunk_t *next;
+ } union_;
+ byte data[];
+} mp_parse_chunk_t;
+
+// Parser error states; memory errors are recorded here rather than raised
+// immediately, so parsing can unwind cleanly.
+typedef enum {
+ PARSE_ERROR_NONE = 0,
+ PARSE_ERROR_MEMORY,
+ PARSE_ERROR_CONST,
+} parse_error_t;
+
+// Complete parser state: the rule stack (grammar position), the result stack
+// (parse nodes built so far), the lexer, and the output tree with its chunk
+// allocator.
+typedef struct _parser_t {
+ parse_error_t parse_error;
+
+ size_t rule_stack_alloc;
+ size_t rule_stack_top;
+ rule_stack_t *rule_stack;
+
+ size_t result_stack_alloc;
+ size_t result_stack_top;
+ mp_parse_node_t *result_stack;
+
+ mp_lexer_t *lexer;
+
+ mp_parse_tree_t tree;
+ mp_parse_chunk_t *cur_chunk;
+
+ #if MICROPY_COMP_CONST
+ mp_map_t consts; // table of dynamic constants found during parsing
+ #endif
+} parser_t;
+
+// Bump allocator for parse nodes: returns num_bytes from the current chunk,
+// growing the chunk in place when possible, otherwise retiring it onto the
++// tree's chunk list and starting a fresh chunk. Returned memory is not
+// individually freed; the whole chunk list is released with the tree.
+STATIC void *parser_alloc(parser_t *parser, size_t num_bytes) {
+ // use a custom memory allocator to store parse nodes sequentially in large chunks
+
+ mp_parse_chunk_t *chunk = parser->cur_chunk;
+
+ if (chunk != NULL && chunk->union_.used + num_bytes > chunk->alloc) {
+ // not enough room at end of previously allocated chunk so try to grow
+ mp_parse_chunk_t *new_data = (mp_parse_chunk_t*)m_renew_maybe(byte, chunk,
+ sizeof(mp_parse_chunk_t) + chunk->alloc,
+ sizeof(mp_parse_chunk_t) + chunk->alloc + num_bytes, false);
+ if (new_data == NULL) {
+ // could not grow existing memory; shrink it to fit previous
+ (void)m_renew_maybe(byte, chunk, sizeof(mp_parse_chunk_t) + chunk->alloc,
+ sizeof(mp_parse_chunk_t) + chunk->union_.used, false);
+ chunk->alloc = chunk->union_.used;
+ chunk->union_.next = parser->tree.chunk;
+ parser->tree.chunk = chunk;
+ chunk = NULL;
+ } else {
+ // could grow existing memory
+ chunk->alloc += num_bytes;
+ }
+ }
+
+ if (chunk == NULL) {
+ // no previous chunk, allocate a new chunk
+ size_t alloc = MICROPY_ALLOC_PARSE_CHUNK_INIT;
+ if (alloc < num_bytes) {
+ alloc = num_bytes;
+ }
+ chunk = (mp_parse_chunk_t*)m_new(byte, sizeof(mp_parse_chunk_t) + alloc);
+ chunk->alloc = alloc;
+ chunk->union_.used = 0;
+ parser->cur_chunk = chunk;
+ }
+
+ byte *ret = chunk->data + chunk->union_.used;
+ chunk->union_.used += num_bytes;
+ return ret;
+}
+
+// Push a (rule, arg index, source line) frame onto the rule stack, growing it
+// as needed. On allocation failure sets parser->parse_error instead of
+// raising; a no-op if the parser is already in an error state.
+STATIC void push_rule(parser_t *parser, size_t src_line, const rule_t *rule, size_t arg_i) {
+ if (parser->parse_error) {
+ return;
+ }
+ if (parser->rule_stack_top >= parser->rule_stack_alloc) {
+ rule_stack_t *rs = m_renew_maybe(rule_stack_t, parser->rule_stack, parser->rule_stack_alloc, parser->rule_stack_alloc + MICROPY_ALLOC_PARSE_RULE_INC, true);
+ if (rs == NULL) {
+ parser->parse_error = PARSE_ERROR_MEMORY;
+ return;
+ }
+ parser->rule_stack = rs;
+ parser->rule_stack_alloc += MICROPY_ALLOC_PARSE_RULE_INC;
+ }
+ rule_stack_t *rs = &parser->rule_stack[parser->rule_stack_top++];
+ rs->src_line = src_line;
+ rs->rule_id = rule->rule_id;
+ rs->arg_i = arg_i;
+}
+
+// Push the rule referenced by a packed rule argument (RULE_ARG_RULE or
+// RULE_ARG_OPT_RULE), starting at its first argument.
+STATIC void push_rule_from_arg(parser_t *parser, size_t arg) {
+ assert((arg & RULE_ARG_KIND_MASK) == RULE_ARG_RULE || (arg & RULE_ARG_KIND_MASK) == RULE_ARG_OPT_RULE);
+ size_t rule_id = arg & RULE_ARG_ARG_MASK;
+ assert(rule_id < RULE_maximum_number_of);
+ push_rule(parser, parser->lexer->tok_line, rules[rule_id], 0);
+}
+
+// Pop the top rule-stack frame into *rule, *arg_i and *src_line.
+// Must not be called while the parser is in an error state.
+STATIC void pop_rule(parser_t *parser, const rule_t **rule, size_t *arg_i, size_t *src_line) {
+ assert(!parser->parse_error);
+ parser->rule_stack_top -= 1;
+ *rule = rules[parser->rule_stack[parser->rule_stack_top].rule_id];
+ *arg_i = parser->rule_stack[parser->rule_stack_top].arg_i;
+ *src_line = parser->rule_stack[parser->rule_stack_top].src_line;
+}
+
+// Build a leaf parse node by packing kind and argument into one word.
+// Small ints get a 1-bit tag (maximising their range); all other leaf kinds
+// use a 4-bit tag.
+mp_parse_node_t mp_parse_node_new_leaf(size_t kind, mp_int_t arg) {
+ if (kind == MP_PARSE_NODE_SMALL_INT) {
+ return (mp_parse_node_t)(kind | (arg << 1));
+ }
+ return (mp_parse_node_t)(kind | (arg << 4));
+}
+
+// If pn holds an integer (small-int leaf or const-object node containing an
+// int), store it as an mp_obj_t in *o and return true; otherwise return false.
+bool mp_parse_node_get_int_maybe(mp_parse_node_t pn, mp_obj_t *o) {
+ if (MP_PARSE_NODE_IS_SMALL_INT(pn)) {
+ *o = MP_OBJ_NEW_SMALL_INT(MP_PARSE_NODE_LEAF_SMALL_INT(pn));
+ return true;
+ } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, RULE_const_object)) {
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+ #if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+ // nodes are 32-bit pointers, but need to extract 64-bit object
+ *o = (uint64_t)pns->nodes[0] | ((uint64_t)pns->nodes[1] << 32);
+ #else
+ *o = (mp_obj_t)pns->nodes[0];
+ #endif
+ return MP_OBJ_IS_INT(*o);
+ } else {
+ return false;
+ }
+}
+
+// View *pn as a list of nodes of the given rule kind: sets *nodes to the
+// node array and returns its length. A null node yields 0, a non-matching
+// single node yields 1 (pointing at pn itself).
+int mp_parse_node_extract_list(mp_parse_node_t *pn, size_t pn_kind, mp_parse_node_t **nodes) {
+ if (MP_PARSE_NODE_IS_NULL(*pn)) {
+ *nodes = NULL;
+ return 0;
+ } else if (MP_PARSE_NODE_IS_LEAF(*pn)) {
+ *nodes = pn;
+ return 1;
+ } else {
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)(*pn);
+ if (MP_PARSE_NODE_STRUCT_KIND(pns) != pn_kind) {
+ *nodes = pn;
+ return 1;
+ } else {
+ *nodes = pns->nodes;
+ return MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+ }
+ }
+}
+
+#if MICROPY_DEBUG_PRINTERS
+// Debug pretty-printer: recursively dump a parse (sub)tree to stdout, with
+// the source line of struct nodes in the left margin and children indented
+// by two extra spaces per level.
+void mp_parse_node_print(mp_parse_node_t pn, size_t indent) {
+ if (MP_PARSE_NODE_IS_STRUCT(pn)) {
+ printf("[% 4d] ", (int)((mp_parse_node_struct_t*)pn)->source_line);
+ } else {
+ printf(" ");
+ }
+ for (size_t i = 0; i < indent; i++) {
+ printf(" ");
+ }
+ if (MP_PARSE_NODE_IS_NULL(pn)) {
+ printf("NULL\n");
+ } else if (MP_PARSE_NODE_IS_SMALL_INT(pn)) {
+ mp_int_t arg = MP_PARSE_NODE_LEAF_SMALL_INT(pn);
+ printf("int(" INT_FMT ")\n", arg);
+ } else if (MP_PARSE_NODE_IS_LEAF(pn)) {
+ uintptr_t arg = MP_PARSE_NODE_LEAF_ARG(pn);
+ switch (MP_PARSE_NODE_LEAF_KIND(pn)) {
+ case MP_PARSE_NODE_ID: printf("id(%s)\n", qstr_str(arg)); break;
+ case MP_PARSE_NODE_STRING: printf("str(%s)\n", qstr_str(arg)); break;
+ case MP_PARSE_NODE_BYTES: printf("bytes(%s)\n", qstr_str(arg)); break;
+ case MP_PARSE_NODE_TOKEN: printf("tok(%u)\n", (uint)arg); break;
+ default: assert(0);
+ }
+ } else {
+ // node must be a mp_parse_node_struct_t
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+ if (MP_PARSE_NODE_STRUCT_KIND(pns) == RULE_string) {
+ printf("literal str(%.*s)\n", (int)pns->nodes[1], (char*)pns->nodes[0]);
+ } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == RULE_bytes) {
+ printf("literal bytes(%.*s)\n", (int)pns->nodes[1], (char*)pns->nodes[0]);
+ } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == RULE_const_object) {
+ #if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+ // 64-bit object stored across two 32-bit node slots
+ printf("literal const(%016llx)\n", (uint64_t)pns->nodes[0] | ((uint64_t)pns->nodes[1] << 32));
+ #else
+ printf("literal const(%p)\n", (mp_obj_t)pns->nodes[0]);
+ #endif
+ } else {
+ size_t n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+#ifdef USE_RULE_NAME
+ printf("%s(%u) (n=%u)\n", rules[MP_PARSE_NODE_STRUCT_KIND(pns)]->rule_name, (uint)MP_PARSE_NODE_STRUCT_KIND(pns), (uint)n);
+#else
+ printf("rule(%u) (n=%u)\n", (uint)MP_PARSE_NODE_STRUCT_KIND(pns), (uint)n);
+#endif
+ for (size_t i = 0; i < n; i++) {
+ mp_parse_node_print(pns->nodes[i], indent + 2);
+ }
+ }
+ }
+}
+#endif // MICROPY_DEBUG_PRINTERS
+
+/*
+STATIC void result_stack_show(parser_t *parser) {
+ printf("result stack, most recent first\n");
+ for (ssize_t i = parser->result_stack_top - 1; i >= 0; i--) {
+ mp_parse_node_print(parser->result_stack[i], 0);
+ }
+}
+*/
+
+// Pop the top node off the result stack; returns a null node when the parser
+// is already in an error state (so callers can unwind without checking).
+STATIC mp_parse_node_t pop_result(parser_t *parser) {
+ if (parser->parse_error) {
+ return MP_PARSE_NODE_NULL;
+ }
+ assert(parser->result_stack_top > 0);
+ return parser->result_stack[--parser->result_stack_top];
+}
+
+// Peek at the node 'pos' entries below the top of the result stack (0 = top),
+// without popping; null node when in an error state.
+STATIC mp_parse_node_t peek_result(parser_t *parser, size_t pos) {
+ if (parser->parse_error) {
+ return MP_PARSE_NODE_NULL;
+ }
+ assert(parser->result_stack_top > pos);
+ return parser->result_stack[parser->result_stack_top - 1 - pos];
+}
+
+// Push a node onto the result stack, growing the stack as needed; on
+// allocation failure records PARSE_ERROR_MEMORY instead of raising.
+STATIC void push_result_node(parser_t *parser, mp_parse_node_t pn) {
+ if (parser->parse_error) {
+ return;
+ }
+ if (parser->result_stack_top >= parser->result_stack_alloc) {
+ mp_parse_node_t *stack = m_renew_maybe(mp_parse_node_t, parser->result_stack, parser->result_stack_alloc, parser->result_stack_alloc + MICROPY_ALLOC_PARSE_RESULT_INC, true);
+ if (stack == NULL) {
+ parser->parse_error = PARSE_ERROR_MEMORY;
+ return;
+ }
+ parser->result_stack = stack;
+ parser->result_stack_alloc += MICROPY_ALLOC_PARSE_RESULT_INC;
+ }
+ parser->result_stack[parser->result_stack_top++] = pn;
+}
+
+// Build a RULE_string/RULE_bytes node holding a heap copy of non-interned
+// string/bytes data: nodes[0] = pointer to the copied data, nodes[1] = length.
+// On allocation failure records PARSE_ERROR_MEMORY and returns a null node.
+STATIC mp_parse_node_t make_node_string_bytes(parser_t *parser, size_t src_line, size_t rule_kind, const char *str, size_t len) {
+ mp_parse_node_struct_t *pn = parser_alloc(parser, sizeof(mp_parse_node_struct_t) + sizeof(mp_parse_node_t) * 2);
+ if (pn == NULL) {
+ parser->parse_error = PARSE_ERROR_MEMORY;
+ return MP_PARSE_NODE_NULL;
+ }
+ pn->source_line = src_line;
+ pn->kind_num_nodes = rule_kind | (2 << 8);
+ // copy the lexer's buffer, since it is reused for subsequent tokens
+ char *p = m_new(char, len);
+ memcpy(p, str, len);
+ pn->nodes[0] = (uintptr_t)p;
+ pn->nodes[1] = len;
+ return (mp_parse_node_t)pn;
+}
+
+// Build a RULE_const_object node wrapping an arbitrary constant object.
+// On allocation failure records PARSE_ERROR_MEMORY and returns a null node.
+STATIC mp_parse_node_t make_node_const_object(parser_t *parser, size_t src_line, mp_obj_t obj) {
+ mp_parse_node_struct_t *pn = parser_alloc(parser, sizeof(mp_parse_node_struct_t) + sizeof(mp_obj_t));
+ if (pn == NULL) {
+ parser->parse_error = PARSE_ERROR_MEMORY;
+ return MP_PARSE_NODE_NULL;
+ }
+ pn->source_line = src_line;
+ #if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+ // nodes are 32-bit pointers, but need to store 64-bit object
+ pn->kind_num_nodes = RULE_const_object | (2 << 8);
+ pn->nodes[0] = (uint64_t)obj;
+ pn->nodes[1] = (uint64_t)obj >> 32;
+ #else
+ pn->kind_num_nodes = RULE_const_object | (1 << 8);
+ pn->nodes[0] = (uintptr_t)obj;
+ #endif
+ return (mp_parse_node_t)pn;
+}
+
+// Convert the lexer's current token into a parse node and push it onto the
+// result stack: names become ID leaves (or folded dynamic constants),
+// integers become small-int leaves or const-object nodes, floats become
+// const-object nodes, strings/bytes are interned when short or already
+// known, and all other tokens become TOKEN leaves.
+STATIC void push_result_token(parser_t *parser) {
+ mp_parse_node_t pn;
+ mp_lexer_t *lex = parser->lexer;
+ if (lex->tok_kind == MP_TOKEN_NAME) {
+ qstr id = qstr_from_strn(lex->vstr.buf, lex->vstr.len);
+ #if MICROPY_COMP_CONST
+ // lookup identifier in table of dynamic constants
+ mp_map_elem_t *elem = mp_map_lookup(&parser->consts, MP_OBJ_NEW_QSTR(id), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ // known constant: substitute its small-int value directly
+ pn = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, MP_OBJ_SMALL_INT_VALUE(elem->value));
+ } else
+ #endif
+ {
+ pn = mp_parse_node_new_leaf(MP_PARSE_NODE_ID, id);
+ }
+ } else if (lex->tok_kind == MP_TOKEN_INTEGER) {
+ mp_obj_t o = mp_parse_num_integer(lex->vstr.buf, lex->vstr.len, 0, lex);
+ if (MP_OBJ_IS_SMALL_INT(o)) {
+ pn = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, MP_OBJ_SMALL_INT_VALUE(o));
+ } else {
+ // doesn't fit in a small int: store the big-int object itself
+ pn = make_node_const_object(parser, lex->tok_line, o);
+ }
+ } else if (lex->tok_kind == MP_TOKEN_FLOAT_OR_IMAG) {
+ mp_obj_t o = mp_parse_num_decimal(lex->vstr.buf, lex->vstr.len, true, false, lex);
+ pn = make_node_const_object(parser, lex->tok_line, o);
+ } else if (lex->tok_kind == MP_TOKEN_STRING || lex->tok_kind == MP_TOKEN_BYTES) {
+ // Don't automatically intern all strings/bytes. doc strings (which are usually large)
+ // will be discarded by the compiler, and so we shouldn't intern them.
+ qstr qst = MP_QSTR_NULL;
+ if (lex->vstr.len <= MICROPY_ALLOC_PARSE_INTERN_STRING_LEN) {
+ // intern short strings
+ qst = qstr_from_strn(lex->vstr.buf, lex->vstr.len);
+ } else {
+ // check if this string is already interned
+ qst = qstr_find_strn(lex->vstr.buf, lex->vstr.len);
+ }
+ if (qst != MP_QSTR_NULL) {
+ // qstr exists, make a leaf node
+ pn = mp_parse_node_new_leaf(lex->tok_kind == MP_TOKEN_STRING ? MP_PARSE_NODE_STRING : MP_PARSE_NODE_BYTES, qst);
+ } else {
+ // not interned, make a node holding a pointer to the string/bytes data
+ pn = make_node_string_bytes(parser, lex->tok_line, lex->tok_kind == MP_TOKEN_STRING ? RULE_string : RULE_bytes, lex->vstr.buf, lex->vstr.len);
+ }
+ } else {
+ pn = mp_parse_node_new_leaf(MP_PARSE_NODE_TOKEN, lex->tok_kind);
+ }
+ push_result_node(parser, pn);
+}
+
+#if MICROPY_COMP_MODULE_CONST
+// Table of module-level constants the parser may fold at compile time;
+// ports can extend it via MICROPY_PORT_CONSTANTS.
+STATIC const mp_rom_map_elem_t mp_constants_table[] = {
+ #if MICROPY_PY_UCTYPES
+ { MP_ROM_QSTR(MP_QSTR_uctypes), MP_ROM_PTR(&mp_module_uctypes) },
+ #endif
+ // Extra constants as defined by a port
+ MICROPY_PORT_CONSTANTS
+};
+STATIC MP_DEFINE_CONST_MAP(mp_constants_map, mp_constants_table);
+#endif
+
#if MICROPY_COMP_CONST_FOLDING
// Try to constant-fold the rule whose arguments are the top num_args entries
// of the parser's result stack.  On success the arguments are popped, a
// single constant node is pushed in their place, and true is returned.  On
// any failure (non-integer operand, unfoldable operator, invalid rhs) nothing
// is popped and false is returned, so the caller emits the rule normally.
STATIC bool fold_constants(parser_t *parser, const rule_t *rule, size_t num_args) {
    // this code does folding of arbitrary integer expressions, eg 1 + 2 * 3 + 4
    // it does not do partial folding, eg 1 + 2 + x -> 3 + x

    mp_obj_t arg0;
    if (rule->rule_id == RULE_expr
        || rule->rule_id == RULE_xor_expr
        || rule->rule_id == RULE_and_expr) {
        // folding for binary ops: | ^ &
        // results are stacked left-to-right, so peek(num_args-1) is the
        // leftmost operand
        mp_parse_node_t pn = peek_result(parser, num_args - 1);
        if (!mp_parse_node_get_int_maybe(pn, &arg0)) {
            return false;
        }
        mp_binary_op_t op;
        if (rule->rule_id == RULE_expr) {
            op = MP_BINARY_OP_OR;
        } else if (rule->rule_id == RULE_xor_expr) {
            op = MP_BINARY_OP_XOR;
        } else {
            op = MP_BINARY_OP_AND;
        }
        for (ssize_t i = num_args - 2; i >= 0; --i) {
            pn = peek_result(parser, i);
            mp_obj_t arg1;
            if (!mp_parse_node_get_int_maybe(pn, &arg1)) {
                return false;
            }
            arg0 = mp_binary_op(op, arg0, arg1);
        }
    } else if (rule->rule_id == RULE_shift_expr
        || rule->rule_id == RULE_arith_expr
        || rule->rule_id == RULE_term) {
        // folding for binary ops: << >> + - * / % //
        // the stack alternates operand/operator: even offsets are operands,
        // odd offsets are the operator tokens between them
        mp_parse_node_t pn = peek_result(parser, num_args - 1);
        if (!mp_parse_node_get_int_maybe(pn, &arg0)) {
            return false;
        }
        for (ssize_t i = num_args - 2; i >= 1; i -= 2) {
            pn = peek_result(parser, i - 1);
            mp_obj_t arg1;
            if (!mp_parse_node_get_int_maybe(pn, &arg1)) {
                return false;
            }
            mp_token_kind_t tok = MP_PARSE_NODE_LEAF_ARG(peek_result(parser, i));
            // Maps token kind (offset from MP_TOKEN_OP_PLUS) to a binary op;
            // 255 marks operators that are deliberately not folded (eg **, /).
            // NOTE(review): assumes the lexer's operator token kinds are
            // contiguous and in exactly this order -- confirm against lexer.h.
            static const uint8_t token_to_op[] = {
                MP_BINARY_OP_ADD,
                MP_BINARY_OP_SUBTRACT,
                MP_BINARY_OP_MULTIPLY,
                255,//MP_BINARY_OP_POWER,
                255,//MP_BINARY_OP_TRUE_DIVIDE,
                MP_BINARY_OP_FLOOR_DIVIDE,
                MP_BINARY_OP_MODULO,
                255,//MP_BINARY_OP_LESS
                MP_BINARY_OP_LSHIFT,
                255,//MP_BINARY_OP_MORE
                MP_BINARY_OP_RSHIFT,
            };
            mp_binary_op_t op = token_to_op[tok - MP_TOKEN_OP_PLUS];
            if (op == (mp_binary_op_t)255) {
                return false;
            }
            int rhs_sign = mp_obj_int_sign(arg1);
            // NOTE(review): these range checks rely on the ordering of the
            // mp_binary_op_t enum (shift ops at or below RSHIFT, floor-divide
            // and modulo at or above FLOOR_DIVIDE) -- confirm in the runtime.
            if (op <= MP_BINARY_OP_RSHIFT) {
                // << and >> can't have negative rhs
                if (rhs_sign < 0) {
                    return false;
                }
            } else if (op >= MP_BINARY_OP_FLOOR_DIVIDE) {
                // % and // can't have zero rhs
                if (rhs_sign == 0) {
                    return false;
                }
            }
            arg0 = mp_binary_op(op, arg0, arg1);
        }
    } else if (rule->rule_id == RULE_factor_2) {
        // folding for unary ops: + - ~
        mp_parse_node_t pn = peek_result(parser, 0);
        if (!mp_parse_node_get_int_maybe(pn, &arg0)) {
            return false;
        }
        mp_token_kind_t tok = MP_PARSE_NODE_LEAF_ARG(peek_result(parser, 1));
        mp_unary_op_t op;
        if (tok == MP_TOKEN_OP_PLUS) {
            op = MP_UNARY_OP_POSITIVE;
        } else if (tok == MP_TOKEN_OP_MINUS) {
            op = MP_UNARY_OP_NEGATIVE;
        } else {
            assert(tok == MP_TOKEN_OP_TILDE); // should be
            op = MP_UNARY_OP_INVERT;
        }
        arg0 = mp_unary_op(op, arg0);

    #if MICROPY_COMP_CONST
    } else if (rule->rule_id == RULE_expr_stmt) {
        // handle "id = const(value)": record value in parser->consts so later
        // uses of id fold to an integer; always returns false because the
        // assignment statement itself must remain in the parse tree
        mp_parse_node_t pn1 = peek_result(parser, 0);
        if (!MP_PARSE_NODE_IS_NULL(pn1)
            && !(MP_PARSE_NODE_IS_STRUCT_KIND(pn1, RULE_expr_stmt_augassign)
            || MP_PARSE_NODE_IS_STRUCT_KIND(pn1, RULE_expr_stmt_assign_list))) {
            // this node is of the form <x> = <y>
            mp_parse_node_t pn0 = peek_result(parser, 1);
            if (MP_PARSE_NODE_IS_ID(pn0)
                && MP_PARSE_NODE_IS_STRUCT_KIND(pn1, RULE_power)
                && MP_PARSE_NODE_IS_ID(((mp_parse_node_struct_t*)pn1)->nodes[0])
                && MP_PARSE_NODE_LEAF_ARG(((mp_parse_node_struct_t*)pn1)->nodes[0]) == MP_QSTR_const
                && MP_PARSE_NODE_IS_STRUCT_KIND(((mp_parse_node_struct_t*)pn1)->nodes[1], RULE_trailer_paren)
                && MP_PARSE_NODE_IS_NULL(((mp_parse_node_struct_t*)pn1)->nodes[2])
                ) {
                // code to assign dynamic constants: id = const(value)

                // get the id
                qstr id = MP_PARSE_NODE_LEAF_ARG(pn0);

                // get the value
                mp_parse_node_t pn_value = ((mp_parse_node_struct_t*)((mp_parse_node_struct_t*)pn1)->nodes[1])->nodes[0];
                if (!MP_PARSE_NODE_IS_SMALL_INT(pn_value)) {
                    parser->parse_error = PARSE_ERROR_CONST;
                    return false;
                }
                mp_int_t value = MP_PARSE_NODE_LEAF_SMALL_INT(pn_value);

                // store the value in the table of dynamic constants
                mp_map_elem_t *elem = mp_map_lookup(&parser->consts, MP_OBJ_NEW_QSTR(id), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
                assert(elem->value == MP_OBJ_NULL);
                elem->value = MP_OBJ_NEW_SMALL_INT(value);

                // replace const(value) with value
                pop_result(parser);
                push_result_node(parser, pn_value);

                // finished folding this assignment, but we still want it to be part of the tree
                return false;
            }
        }
        return false;
    #endif

    #if MICROPY_COMP_MODULE_CONST
    } else if (rule->rule_id == RULE_power) {
        mp_parse_node_t pn0 = peek_result(parser, 2);
        mp_parse_node_t pn1 = peek_result(parser, 1);
        mp_parse_node_t pn2 = peek_result(parser, 0);
        if (!(MP_PARSE_NODE_IS_ID(pn0)
            && MP_PARSE_NODE_IS_STRUCT_KIND(pn1, RULE_trailer_period)
            && MP_PARSE_NODE_IS_NULL(pn2))) {
            return false;
        }
        // id1.id2
        // look it up in constant table, see if it can be replaced with an integer
        mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t*)pn1;
        assert(MP_PARSE_NODE_IS_ID(pns1->nodes[0]));
        qstr q_base = MP_PARSE_NODE_LEAF_ARG(pn0);
        qstr q_attr = MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]);
        mp_map_elem_t *elem = mp_map_lookup((mp_map_t*)&mp_constants_map, MP_OBJ_NEW_QSTR(q_base), MP_MAP_LOOKUP);
        if (elem == NULL) {
            return false;
        }
        // only fold if the attribute exists, is an integer, and is not a
        // bound method (dest[1] would be non-NULL)
        mp_obj_t dest[2];
        mp_load_method_maybe(elem->value, q_attr, dest);
        if (!(dest[0] != MP_OBJ_NULL && MP_OBJ_IS_INT(dest[0]) && dest[1] == MP_OBJ_NULL)) {
            return false;
        }
        arg0 = dest[0];
    #endif

    } else {
        return false;
    }

    // success folding this rule

    for (size_t i = num_args; i > 0; i--) {
        pop_result(parser);
    }
    if (MP_OBJ_IS_SMALL_INT(arg0)) {
        push_result_node(parser, mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, MP_OBJ_SMALL_INT_VALUE(arg0)));
    } else {
        // TODO reuse memory for parse node struct?
        push_result_node(parser, make_node_const_object(parser, 0, arg0));
    }

    return true;
}
#endif
+
+STATIC void push_result_rule(parser_t *parser, size_t src_line, const rule_t *rule, size_t num_args) {
+ // optimise away parenthesis around an expression if possible
+ if (rule->rule_id == RULE_atom_paren) {
+ // there should be just 1 arg for this rule
+ mp_parse_node_t pn = peek_result(parser, 0);
+ if (MP_PARSE_NODE_IS_NULL(pn)) {
+ // need to keep parenthesis for ()
+ } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, RULE_testlist_comp)) {
+ // need to keep parenthesis for (a, b, ...)
+ } else {
+ // parenthesis around a single expression, so it's just the expression
+ return;
+ }
+ }
+
+ #if MICROPY_COMP_CONST_FOLDING
+ if (fold_constants(parser, rule, num_args)) {
+ // we folded this rule so return straight away
+ return;
+ }
+ #endif
+
+ mp_parse_node_struct_t *pn = parser_alloc(parser, sizeof(mp_parse_node_struct_t) + sizeof(mp_parse_node_t) * num_args);
+ if (pn == NULL) {
+ parser->parse_error = PARSE_ERROR_MEMORY;
+ return;
+ }
+ pn->source_line = src_line;
+ pn->kind_num_nodes = (rule->rule_id & 0xff) | (num_args << 8);
+ for (size_t i = num_args; i > 0; i--) {
+ pn->nodes[i - 1] = pop_result(parser);
+ }
+ push_result_node(parser, (mp_parse_node_t)pn);
+}
+
// Parse the token stream from lex according to the top-level grammar rule
// selected by input_kind, and return the resulting parse tree.  On error a
// SyntaxError / IndentationError / MemoryError is raised via nlr_raise.
// In all cases the lexer is freed on behalf of the caller before this
// function returns or raises.
mp_parse_tree_t mp_parse(mp_lexer_t *lex, mp_parse_input_kind_t input_kind) {

    // initialise parser and allocate memory for its stacks

    parser_t parser;

    parser.parse_error = PARSE_ERROR_NONE;

    parser.rule_stack_alloc = MICROPY_ALLOC_PARSE_RULE_INIT;
    parser.rule_stack_top = 0;
    parser.rule_stack = m_new_maybe(rule_stack_t, parser.rule_stack_alloc);

    parser.result_stack_alloc = MICROPY_ALLOC_PARSE_RESULT_INIT;
    parser.result_stack_top = 0;
    parser.result_stack = m_new_maybe(mp_parse_node_t, parser.result_stack_alloc);

    parser.lexer = lex;

    parser.tree.chunk = NULL;
    parser.cur_chunk = NULL;

    #if MICROPY_COMP_CONST
    mp_map_init(&parser.consts, 0);
    #endif

    // check if we could allocate the stacks
    if (parser.rule_stack == NULL || parser.result_stack == NULL) {
        goto memory_error;
    }

    // work out the top-level rule to use, and push it on the stack
    size_t top_level_rule;
    switch (input_kind) {
        case MP_PARSE_SINGLE_INPUT: top_level_rule = RULE_single_input; break;
        case MP_PARSE_EVAL_INPUT: top_level_rule = RULE_eval_input; break;
        default: top_level_rule = RULE_file_input;
    }
    push_rule(&parser, lex->tok_line, rules[top_level_rule], 0);

    // parse!
    // The grammar is matched iteratively: an explicit stack of pending rules
    // (push_rule/pop_rule) replaces recursion, and `backtrack` carries
    // match-failure back up to the enclosing rule.

    size_t n, i; // state for the current rule
    size_t rule_src_line; // source line for the first token matched by the current rule
    bool backtrack = false;
    const rule_t *rule = NULL;

    for (;;) {
        next_rule:
        if (parser.rule_stack_top == 0 || parser.parse_error) {
            break;
        }

        pop_rule(&parser, &rule, &i, &rule_src_line);
        n = rule->act & RULE_ACT_ARG_MASK;

        /*
        // debugging
        printf("depth=%d ", parser.rule_stack_top);
        for (int j = 0; j < parser.rule_stack_top; ++j) {
            printf(" ");
        }
        printf("%s n=%d i=%d bt=%d\n", rule->rule_name, n, i, backtrack);
        */

        switch (rule->act & RULE_ACT_KIND_MASK) {
            case RULE_ACT_OR:
                // alternation: try each alternative until one matches
                if (i > 0 && !backtrack) {
                    goto next_rule;
                } else {
                    backtrack = false;
                }
                for (; i < n; ++i) {
                    uint16_t kind = rule->arg[i] & RULE_ARG_KIND_MASK;
                    if (kind == RULE_ARG_TOK) {
                        if (lex->tok_kind == (rule->arg[i] & RULE_ARG_ARG_MASK)) {
                            push_result_token(&parser);
                            mp_lexer_to_next(lex);
                            goto next_rule;
                        }
                    } else {
                        assert(kind == RULE_ARG_RULE);
                        if (i + 1 < n) {
                            push_rule(&parser, rule_src_line, rule, i + 1); // save this or-rule
                        }
                        push_rule_from_arg(&parser, rule->arg[i]); // push child of or-rule
                        goto next_rule;
                    }
                }
                // no alternative matched
                backtrack = true;
                break;

            case RULE_ACT_AND: {
                // sequence: all args must match in order

                // failed, backtrack if we can, else syntax error
                if (backtrack) {
                    assert(i > 0);
                    if ((rule->arg[i - 1] & RULE_ARG_KIND_MASK) == RULE_ARG_OPT_RULE) {
                        // an optional rule that failed, so continue with next arg
                        push_result_node(&parser, MP_PARSE_NODE_NULL);
                        backtrack = false;
                    } else {
                        // a mandatory rule that failed, so propagate backtrack
                        if (i > 1) {
                            // already eaten tokens so can't backtrack
                            goto syntax_error;
                        } else {
                            goto next_rule;
                        }
                    }
                }

                // progress through the rule
                for (; i < n; ++i) {
                    switch (rule->arg[i] & RULE_ARG_KIND_MASK) {
                        case RULE_ARG_TOK: {
                            // need to match a token
                            mp_token_kind_t tok_kind = rule->arg[i] & RULE_ARG_ARG_MASK;
                            if (lex->tok_kind == tok_kind) {
                                // matched token
                                if (tok_kind == MP_TOKEN_NAME) {
                                    push_result_token(&parser);
                                }
                                mp_lexer_to_next(lex);
                            } else {
                                // failed to match token
                                if (i > 0) {
                                    // already eaten tokens so can't backtrack
                                    goto syntax_error;
                                } else {
                                    // this rule failed, so backtrack
                                    backtrack = true;
                                    goto next_rule;
                                }
                            }
                            break;
                        }
                        case RULE_ARG_RULE:
                        case RULE_ARG_OPT_RULE:
                        rule_and_no_other_choice:
                            push_rule(&parser, rule_src_line, rule, i + 1); // save this and-rule
                            push_rule_from_arg(&parser, rule->arg[i]); // push child of and-rule
                            goto next_rule;
                        default:
                            assert(0);
                            goto rule_and_no_other_choice; // to help flow control analysis
                    }
                }

                assert(i == n);

                // matched the rule, so now build the corresponding parse_node

                // count number of arguments for the parse_node
                i = 0;
                bool emit_rule = false;
                for (size_t x = 0; x < n; ++x) {
                    if ((rule->arg[x] & RULE_ARG_KIND_MASK) == RULE_ARG_TOK) {
                        mp_token_kind_t tok_kind = rule->arg[x] & RULE_ARG_ARG_MASK;
                        if (tok_kind >= MP_TOKEN_NAME) {
                            emit_rule = true;
                        }
                        if (tok_kind == MP_TOKEN_NAME) {
                            // only tokens which were names are pushed to stack
                            i += 1;
                        }
                    } else {
                        // rules are always pushed
                        i += 1;
                    }
                }

                #if !MICROPY_ENABLE_DOC_STRING
                // this code discards lonely statements, such as doc strings
                if (input_kind != MP_PARSE_SINGLE_INPUT && rule->rule_id == RULE_expr_stmt && peek_result(&parser, 0) == MP_PARSE_NODE_NULL) {
                    mp_parse_node_t p = peek_result(&parser, 1);
                    if ((MP_PARSE_NODE_IS_LEAF(p) && !MP_PARSE_NODE_IS_ID(p)) || MP_PARSE_NODE_IS_STRUCT_KIND(p, RULE_string)) {
                        pop_result(&parser); // MP_PARSE_NODE_NULL
                        mp_parse_node_t pn = pop_result(&parser); // possibly RULE_string
                        if (MP_PARSE_NODE_IS_STRUCT(pn)) {
                            mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
                            if (MP_PARSE_NODE_STRUCT_KIND(pns) == RULE_string) {
                                // free the string data that make_node_string_bytes allocated
                                m_del(char, (char*)pns->nodes[0], (size_t)pns->nodes[1]);
                            }
                        }
                        push_result_rule(&parser, rule_src_line, rules[RULE_pass_stmt], 0);
                        break;
                    }
                }
                #endif

                // always emit these rules, even if they have only 1 argument
                if (rule->rule_id == RULE_expr_stmt || rule->rule_id == RULE_yield_stmt) {
                    emit_rule = true;
                }

                // if a rule has the RULE_ACT_ALLOW_IDENT bit set then this
                // rule should not be emitted if it has only 1 argument
                if (rule->act & RULE_ACT_ALLOW_IDENT) {
                    emit_rule = false;
                }

                // always emit these rules, and add an extra blank node at the end (to be used by the compiler to store data)
                if (ADD_BLANK_NODE(rule)) {
                    emit_rule = true;
                    push_result_node(&parser, MP_PARSE_NODE_NULL);
                    i += 1;
                }

                size_t num_not_nil = 0;
                for (size_t x = 0; x < i; ++x) {
                    if (peek_result(&parser, x) != MP_PARSE_NODE_NULL) {
                        num_not_nil += 1;
                    }
                }
                if (emit_rule || num_not_nil != 1) {
                    // need to add rule when num_not_nil==0 for, eg, atom_paren, testlist_comp_3b
                    push_result_rule(&parser, rule_src_line, rule, i);
                } else {
                    // single result, leave it on stack
                    mp_parse_node_t pn = MP_PARSE_NODE_NULL;
                    for (size_t x = 0; x < i; ++x) {
                        mp_parse_node_t pn2 = pop_result(&parser);
                        if (pn2 != MP_PARSE_NODE_NULL) {
                            pn = pn2;
                        }
                    }
                    push_result_node(&parser, pn);
                }
                break;
            }

            case RULE_ACT_LIST: {
                // n=2 is: item item*
                // n=1 is: item (sep item)*
                // n=3 is: item (sep item)* [sep]
                bool had_trailing_sep;
                if (backtrack) {
                    list_backtrack:
                    had_trailing_sep = false;
                    if (n == 2) {
                        if (i == 1) {
                            // fail on item, first time round; propagate backtrack
                            goto next_rule;
                        } else {
                            // fail on item, in later rounds; finish with this rule
                            backtrack = false;
                        }
                    } else {
                        if (i == 1) {
                            // fail on item, first time round; propagate backtrack
                            goto next_rule;
                        } else if ((i & 1) == 1) {
                            // fail on item, in later rounds; have eaten tokens so can't backtrack
                            if (n == 3) {
                                // list allows trailing separator; finish parsing list
                                had_trailing_sep = true;
                                backtrack = false;
                            } else {
                                // list doesn't allowing trailing separator; fail
                                goto syntax_error;
                            }
                        } else {
                            // fail on separator; finish parsing list
                            backtrack = false;
                        }
                    }
                } else {
                    for (;;) {
                        // NOTE(review): the index expression selects the item
                        // or separator arg according to the list form encoded
                        // in n (see comment above) -- confirm against the
                        // generated rules table.
                        size_t arg = rule->arg[i & 1 & n];
                        switch (arg & RULE_ARG_KIND_MASK) {
                            case RULE_ARG_TOK:
                                if (lex->tok_kind == (arg & RULE_ARG_ARG_MASK)) {
                                    if (i & 1 & n) {
                                        // separators which are tokens are not pushed to result stack
                                    } else {
                                        push_result_token(&parser);
                                    }
                                    mp_lexer_to_next(lex);
                                    // got element of list, so continue parsing list
                                    i += 1;
                                } else {
                                    // couldn't get element of list
                                    i += 1;
                                    backtrack = true;
                                    goto list_backtrack;
                                }
                                break;
                            case RULE_ARG_RULE:
                            rule_list_no_other_choice:
                                push_rule(&parser, rule_src_line, rule, i + 1); // save this list-rule
                                push_rule_from_arg(&parser, arg); // push child of list-rule
                                goto next_rule;
                            default:
                                assert(0);
                                goto rule_list_no_other_choice; // to help flow control analysis
                        }
                    }
                }
                assert(i >= 1);

                // compute number of elements in list, result in i
                i -= 1;
                if ((n & 1) && (rule->arg[1] & RULE_ARG_KIND_MASK) == RULE_ARG_TOK) {
                    // don't count separators when they are tokens
                    i = (i + 1) / 2;
                }

                if (i == 1) {
                    // list matched single item
                    if (had_trailing_sep) {
                        // if there was a trailing separator, make a list of a single item
                        push_result_rule(&parser, rule_src_line, rule, i);
                    } else {
                        // just leave single item on stack (ie don't wrap in a list)
                    }
                } else {
                    push_result_rule(&parser, rule_src_line, rule, i);
                }
                break;
            }

            default:
                assert(0);
        }
    }

    #if MICROPY_COMP_CONST
    mp_map_deinit(&parser.consts);
    #endif

    // truncate final chunk and link into chain of chunks
    if (parser.cur_chunk != NULL) {
        (void)m_renew(byte, parser.cur_chunk,
            sizeof(mp_parse_chunk_t) + parser.cur_chunk->alloc,
            sizeof(mp_parse_chunk_t) + parser.cur_chunk->union_.used);
        parser.cur_chunk->alloc = parser.cur_chunk->union_.used;
        parser.cur_chunk->union_.next = parser.tree.chunk;
        parser.tree.chunk = parser.cur_chunk;
    }

    mp_obj_t exc;

    if (parser.parse_error) {
        #if MICROPY_COMP_CONST
        if (parser.parse_error == PARSE_ERROR_CONST) {
            exc = mp_obj_new_exception_msg(&mp_type_SyntaxError,
                "constant must be an integer");
        } else
        #endif
        {
            assert(parser.parse_error == PARSE_ERROR_MEMORY);
        // note: jumped to from the stack-allocation check near the top
        memory_error:
            exc = mp_obj_new_exception_msg(&mp_type_MemoryError,
                "parser could not allocate enough memory");
        }
        parser.tree.root = MP_PARSE_NODE_NULL;
    } else if (
        lex->tok_kind != MP_TOKEN_END // check we are at the end of the token stream
        || parser.result_stack_top == 0 // check that we got a node (can fail on empty input)
        ) {
    // note: also jumped to from inside the parsing loop above
    syntax_error:
        if (lex->tok_kind == MP_TOKEN_INDENT) {
            exc = mp_obj_new_exception_msg(&mp_type_IndentationError,
                "unexpected indent");
        } else if (lex->tok_kind == MP_TOKEN_DEDENT_MISMATCH) {
            exc = mp_obj_new_exception_msg(&mp_type_IndentationError,
                "unindent does not match any outer indentation level");
        } else {
            exc = mp_obj_new_exception_msg(&mp_type_SyntaxError,
                "invalid syntax");
        }
        parser.tree.root = MP_PARSE_NODE_NULL;
    } else {
        // no errors

        //result_stack_show(parser);
        //printf("rule stack alloc: %d\n", parser.rule_stack_alloc);
        //printf("result stack alloc: %d\n", parser.result_stack_alloc);
        //printf("number of parse nodes allocated: %d\n", num_parse_nodes_allocated);

        // get the root parse node that we created
        assert(parser.result_stack_top == 1);
        exc = MP_OBJ_NULL;
        parser.tree.root = parser.result_stack[0];
    }

    // free the memory that we don't need anymore
    m_del(rule_stack_t, parser.rule_stack, parser.rule_stack_alloc);
    m_del(mp_parse_node_t, parser.result_stack, parser.result_stack_alloc);
    // we also free the lexer on behalf of the caller (see below)

    if (exc != MP_OBJ_NULL) {
        // had an error so raise the exception
        // add traceback to give info about file name and location
        // we don't have a 'block' name, so just pass the NULL qstr to indicate this
        mp_obj_exception_add_traceback(exc, lex->source_name, lex->tok_line, MP_QSTR_NULL);
        mp_lexer_free(lex);
        nlr_raise(exc);
    } else {
        mp_lexer_free(lex);
        return parser.tree;
    }
}
+
+void mp_parse_tree_clear(mp_parse_tree_t *tree) {
+ mp_parse_chunk_t *chunk = tree->chunk;
+ while (chunk != NULL) {
+ mp_parse_chunk_t *next = chunk->union_.next;
+ m_del(byte, chunk, sizeof(mp_parse_chunk_t) + chunk->alloc);
+ chunk = next;
+ }
+}
+
+#endif // MICROPY_ENABLE_COMPILER
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/parse.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,100 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
#ifndef __MICROPY_INCLUDED_PY_PARSE_H__
#define __MICROPY_INCLUDED_PY_PARSE_H__

#include <stddef.h>
#include <stdint.h>

#include "py/obj.h"

struct _mp_lexer_t; // forward declaration; only pointers to it are used here

// a mp_parse_node_t is:
// - 0000...0000: no node
// - xxxx...xxx1: a small integer; bits 1 and above are the signed value, 2's complement
// - xxxx...xx00: pointer to mp_parse_node_struct_t
// - xx...xx0010: an identifier; bits 4 and above are the qstr
// - xx...xx0110: a string; bits 4 and above are the qstr holding the value
// - xx...xx1010: a string of bytes; bits 4 and above are the qstr holding the value
// - xx...xx1110: a token; bits 4 and above are mp_token_kind_t

#define MP_PARSE_NODE_NULL (0)
#define MP_PARSE_NODE_SMALL_INT (0x1)
#define MP_PARSE_NODE_ID (0x02)
#define MP_PARSE_NODE_STRING (0x06)
#define MP_PARSE_NODE_BYTES (0x0a)
#define MP_PARSE_NODE_TOKEN (0x0e)

typedef uintptr_t mp_parse_node_t; // must be pointer size

// A non-leaf parse node: rule kind and child count are packed into
// kind_num_nodes (kind in the low 8 bits, count in the bits above).
typedef struct _mp_parse_node_struct_t {
    uint32_t source_line; // line number in source file
    uint32_t kind_num_nodes; // parse node kind, and number of nodes
    mp_parse_node_t nodes[]; // nodes
} mp_parse_node_struct_t;

// macros for mp_parse_node_t usage
// some of these evaluate their argument more than once

#define MP_PARSE_NODE_IS_NULL(pn) ((pn) == MP_PARSE_NODE_NULL)
// a node is a leaf iff either of the low 2 bits is set (see encoding above)
#define MP_PARSE_NODE_IS_LEAF(pn) ((pn) & 3)
#define MP_PARSE_NODE_IS_STRUCT(pn) ((pn) != MP_PARSE_NODE_NULL && ((pn) & 3) == 0)
#define MP_PARSE_NODE_IS_STRUCT_KIND(pn, k) ((pn) != MP_PARSE_NODE_NULL && ((pn) & 3) == 0 && MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t*)(pn)) == (k))

#define MP_PARSE_NODE_IS_SMALL_INT(pn) (((pn) & 0x1) == MP_PARSE_NODE_SMALL_INT)
#define MP_PARSE_NODE_IS_ID(pn) (((pn) & 0x0f) == MP_PARSE_NODE_ID)
#define MP_PARSE_NODE_IS_TOKEN(pn) (((pn) & 0x0f) == MP_PARSE_NODE_TOKEN)
#define MP_PARSE_NODE_IS_TOKEN_KIND(pn, k) ((pn) == (MP_PARSE_NODE_TOKEN | ((k) << 4)))

#define MP_PARSE_NODE_LEAF_KIND(pn) ((pn) & 0x0f)
#define MP_PARSE_NODE_LEAF_ARG(pn) (((uintptr_t)(pn)) >> 4)
#define MP_PARSE_NODE_LEAF_SMALL_INT(pn) (((mp_int_t)(intptr_t)(pn)) >> 1)
#define MP_PARSE_NODE_STRUCT_KIND(pns) ((pns)->kind_num_nodes & 0xff)
#define MP_PARSE_NODE_STRUCT_NUM_NODES(pns) ((pns)->kind_num_nodes >> 8)

mp_parse_node_t mp_parse_node_new_leaf(size_t kind, mp_int_t arg);
bool mp_parse_node_get_int_maybe(mp_parse_node_t pn, mp_obj_t *o);
int mp_parse_node_extract_list(mp_parse_node_t *pn, size_t pn_kind, mp_parse_node_t **nodes);
void mp_parse_node_print(mp_parse_node_t pn, size_t indent);

typedef enum {
    MP_PARSE_SINGLE_INPUT,
    MP_PARSE_FILE_INPUT,
    MP_PARSE_EVAL_INPUT,
} mp_parse_input_kind_t;

// Result of mp_parse: the root node plus the chain of memory chunks backing
// all parse nodes (released together by mp_parse_tree_clear).
typedef struct _mp_parse_t {
    mp_parse_node_t root;
    struct _mp_parse_chunk_t *chunk;
} mp_parse_tree_t;

// the parser will raise an exception if an error occurred
// the parser will free the lexer before it returns
mp_parse_tree_t mp_parse(struct _mp_lexer_t *lex, mp_parse_input_kind_t input_kind);
void mp_parse_tree_clear(mp_parse_tree_t *tree);

#endif // __MICROPY_INCLUDED_PY_PARSE_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/parsenum.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,308 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <stdlib.h>
+
+#include "py/nlr.h"
+#include "py/parsenumbase.h"
+#include "py/parsenum.h"
+#include "py/smallint.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#include <math.h>
+#endif
+
+STATIC NORETURN void raise_exc(mp_obj_t exc, mp_lexer_t *lex) {
+ // if lex!=NULL then the parser called us and we need to convert the
+ // exception's type from ValueError to SyntaxError and add traceback info
+ if (lex != NULL) {
+ ((mp_obj_base_t*)MP_OBJ_TO_PTR(exc))->type = &mp_type_SyntaxError;
+ mp_obj_exception_add_traceback(exc, lex->source_name, lex->tok_line, MP_QSTR_NULL);
+ }
+ nlr_raise(exc);
+}
+
+mp_obj_t mp_parse_num_integer(const char *restrict str_, size_t len, int base, mp_lexer_t *lex) {
+ const byte *restrict str = (const byte *)str_;
+ const byte *restrict top = str + len;
+ bool neg = false;
+ mp_obj_t ret_val;
+
+ // check radix base
+ if ((base != 0 && base < 2) || base > 36) {
+ // this won't be reached if lex!=NULL
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "int() arg 2 must be >= 2 and <= 36"));
+ }
+
+ // skip leading space
+ for (; str < top && unichar_isspace(*str); str++) {
+ }
+
+ // parse optional sign
+ if (str < top) {
+ if (*str == '+') {
+ str++;
+ } else if (*str == '-') {
+ str++;
+ neg = true;
+ }
+ }
+
+ // parse optional base prefix
+ str += mp_parse_num_base((const char*)str, top - str, &base);
+
+ // string should be an integer number
+ mp_int_t int_val = 0;
+ const byte *restrict str_val_start = str;
+ for (; str < top; str++) {
+ // get next digit as a value
+ mp_uint_t dig = *str;
+ if (unichar_isdigit(dig) && (int)dig - '0' < base) {
+ // 0-9 digit
+ dig = dig - '0';
+ } else if (base == 16) {
+ dig |= 0x20;
+ if ('a' <= dig && dig <= 'f') {
+ // a-f hex digit
+ dig = dig - 'a' + 10;
+ } else {
+ // unknown character
+ break;
+ }
+ } else {
+ // unknown character
+ break;
+ }
+
+ // add next digi and check for overflow
+ if (mp_small_int_mul_overflow(int_val, base)) {
+ goto overflow;
+ }
+ int_val = int_val * base + dig;
+ if (!MP_SMALL_INT_FITS(int_val)) {
+ goto overflow;
+ }
+ }
+
+ // negate value if needed
+ if (neg) {
+ int_val = -int_val;
+ }
+
+ // create the small int
+ ret_val = MP_OBJ_NEW_SMALL_INT(int_val);
+
+have_ret_val:
+ // check we parsed something
+ if (str == str_val_start) {
+ goto value_error;
+ }
+
+ // skip trailing space
+ for (; str < top && unichar_isspace(*str); str++) {
+ }
+
+ // check we reached the end of the string
+ if (str != top) {
+ goto value_error;
+ }
+
+ // return the object
+ return ret_val;
+
+overflow:
+ // reparse using long int
+ {
+ const char *s2 = (const char*)str_val_start;
+ ret_val = mp_obj_new_int_from_str_len(&s2, top - str_val_start, neg, base);
+ str = (const byte*)s2;
+ goto have_ret_val;
+ }
+
+value_error:
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ mp_obj_t exc = mp_obj_new_exception_msg(&mp_type_ValueError,
+ "invalid syntax for integer");
+ raise_exc(exc, lex);
+ } else if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NORMAL) {
+ mp_obj_t exc = mp_obj_new_exception_msg_varg(&mp_type_ValueError,
+ "invalid syntax for integer with base %d", base);
+ raise_exc(exc, lex);
+ } else {
+ vstr_t vstr;
+ mp_print_t print;
+ vstr_init_print(&vstr, 50, &print);
+ mp_printf(&print, "invalid syntax for integer with base %d: ", base);
+ mp_str_print_quoted(&print, str_val_start, top - str_val_start, true);
+ mp_obj_t exc = mp_obj_new_exception_arg1(&mp_type_ValueError,
+ mp_obj_new_str_from_vstr(&mp_type_str, &vstr));
+ raise_exc(exc, lex);
+ }
+}
+
// Scanner state for mp_parse_num_decimal: which part of the literal
// (integer digits, fractional digits, exponent digits) is being consumed.
typedef enum {
    PARSE_DEC_IN_INTG,
    PARSE_DEC_IN_FRAC,
    PARSE_DEC_IN_EXP,
} parse_dec_in_t;
+
// Parse a decimal number (float or complex) from str/len, including the
// special forms "inf"/"infinity"/"nan" (case insensitive), an optional
// exponent, and surrounding whitespace.  allow_imag permits a trailing
// 'j'/'J' to make an imaginary literal; force_complex yields a complex even
// without 'j'.  Raises via raise_exc on malformed input (SyntaxError when
// lex != NULL, else ValueError).
mp_obj_t mp_parse_num_decimal(const char *str, size_t len, bool allow_imag, bool force_complex, mp_lexer_t *lex) {
#if MICROPY_PY_BUILTINS_FLOAT
    const char *top = str + len;
    mp_float_t dec_val = 0;
    bool dec_neg = false;
    bool imag = false;

    // skip leading space
    for (; str < top && unichar_isspace(*str); str++) {
    }

    // parse optional sign
    if (str < top) {
        if (*str == '+') {
            str++;
        } else if (*str == '-') {
            str++;
            dec_neg = true;
        }
    }

    const char *str_val_start = str;

    // determine what the string is
    if (str < top && (str[0] | 0x20) == 'i') {
        // string starts with 'i', should be 'inf' or 'infinity' (case insensitive)
        if (str + 2 < top && (str[1] | 0x20) == 'n' && (str[2] | 0x20) == 'f') {
            // inf
            str += 3;
            dec_val = INFINITY;
            if (str + 4 < top && (str[0] | 0x20) == 'i' && (str[1] | 0x20) == 'n' && (str[2] | 0x20) == 'i' && (str[3] | 0x20) == 't' && (str[4] | 0x20) == 'y') {
                // infinity
                str += 5;
            }
        }
    } else if (str < top && (str[0] | 0x20) == 'n') {
        // string starts with 'n', should be 'nan' (case insensitive)
        if (str + 2 < top && (str[1] | 0x20) == 'a' && (str[2] | 0x20) == 'n') {
            // NaN
            str += 3;
            dec_val = MICROPY_FLOAT_C_FUN(nan)("");
        }
    } else {
        // string should be a decimal number
        parse_dec_in_t in = PARSE_DEC_IN_INTG;
        bool exp_neg = false;
        mp_float_t frac_mult = 0.1;
        mp_int_t exp_val = 0;
        while (str < top) {
            mp_uint_t dig = *str++;
            if ('0' <= dig && dig <= '9') {
                dig -= '0';
                if (in == PARSE_DEC_IN_EXP) {
                    exp_val = 10 * exp_val + dig;
                } else {
                    if (in == PARSE_DEC_IN_FRAC) {
                        // NOTE(review): digits are accumulated directly in
                        // floating point, which can lose precision on long
                        // mantissas -- presumably an accepted trade-off here.
                        dec_val += dig * frac_mult;
                        frac_mult *= 0.1;
                    } else {
                        dec_val = 10 * dec_val + dig;
                    }
                }
            } else if (in == PARSE_DEC_IN_INTG && dig == '.') {
                in = PARSE_DEC_IN_FRAC;
            } else if (in != PARSE_DEC_IN_EXP && ((dig | 0x20) == 'e')) {
                in = PARSE_DEC_IN_EXP;
                if (str < top) {
                    if (str[0] == '+') {
                        str++;
                    } else if (str[0] == '-') {
                        str++;
                        exp_neg = true;
                    }
                }
                if (str == top) {
                    // 'e' (with optional sign) at end of string: malformed
                    goto value_error;
                }
            } else if (allow_imag && (dig | 0x20) == 'j') {
                imag = true;
                break;
            } else {
                // unknown character
                str--;
                break;
            }
        }

        // work out the exponent
        if (exp_neg) {
            exp_val = -exp_val;
        }

        // apply the exponent
        dec_val *= MICROPY_FLOAT_C_FUN(pow)(10, exp_val);
    }

    // negate value if needed
    if (dec_neg) {
        dec_val = -dec_val;
    }

    // check we parsed something
    if (str == str_val_start) {
        goto value_error;
    }

    // skip trailing space
    for (; str < top && unichar_isspace(*str); str++) {
    }

    // check we reached the end of the string
    if (str != top) {
        goto value_error;
    }

    // return the object
    // note: the #if below bridges an if/else chain -- exactly one branch set
    // compiles in either configuration
#if MICROPY_PY_BUILTINS_COMPLEX
    if (imag) {
        return mp_obj_new_complex(0, dec_val);
    } else if (force_complex) {
        return mp_obj_new_complex(dec_val, 0);
#else
    if (imag || force_complex) {
        raise_exc(mp_obj_new_exception_msg(&mp_type_ValueError, "complex values not supported"), lex);
#endif
    } else {
        return mp_obj_new_float(dec_val);
    }

value_error:
    raise_exc(mp_obj_new_exception_msg(&mp_type_ValueError, "invalid syntax for number"), lex);

#else
    raise_exc(mp_obj_new_exception_msg(&mp_type_ValueError, "decimal numbers not supported"), lex);
#endif
}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/py/parsenum.h	Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,37 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_PARSENUM_H__
+#define __MICROPY_INCLUDED_PY_PARSENUM_H__
+
+#include "py/mpconfig.h"
+#include "py/lexer.h"
+#include "py/obj.h"
+
+// these functions raise a SyntaxError if lex!=NULL, else a ValueError
+mp_obj_t mp_parse_num_integer(const char *restrict str, size_t len, int base, mp_lexer_t *lex);
+mp_obj_t mp_parse_num_decimal(const char *str, size_t len, bool allow_imag, bool force_complex, mp_lexer_t *lex);
+
+#endif // __MICROPY_INCLUDED_PY_PARSENUM_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/parsenumbase.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,71 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/parsenumbase.h"
+
+// find real radix base, and strip preceding '0x', '0o' and '0b'
+// puts base in *base, and returns number of bytes to skip the prefix
// Determine the real radix of a numeric literal and strip an optional
// '0x'/'0o'/'0b' prefix (case-insensitive).
// On entry *base is 0 (auto-detect) or the caller-requested base (2/8/10/16);
// on exit it holds the concrete base (auto-detect resolves to 10 when no
// prefix is present).  Returns the number of leading bytes to skip (0 or 2).
size_t mp_parse_num_base(const char *str, size_t len, int *base) {
    size_t prefix_len = 0;
    // a radix prefix needs at least two characters: '0' plus a letter
    if (len > 1 && str[0] == '0') {
        int radix_char = str[1] | 32; // fold ASCII letters to lower case
        if ((*base == 0 || *base == 16) && radix_char == 'x') {
            *base = 16;
            prefix_len = 2;
        } else if (*base == 0 && radix_char == 'o') {
            *base = 8;
            prefix_len = 2;
        } else if (*base == 0 && radix_char == 'b') {
            *base = 2;
            prefix_len = 2;
        } else if ((*base == 8 && radix_char == 'o')
                || (*base == 2 && radix_char == 'b')) {
            // explicit base with a matching redundant prefix: just skip it
            prefix_len = 2;
        }
        // any other character after '0' is not a prefix: skip nothing
    }
    if (*base == 0) {
        *base = 10; // no prefix and no explicit base: decimal
    }
    return prefix_len;
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/parsenumbase.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,33 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_PARSENUMBASE_H__
+#define __MICROPY_INCLUDED_PY_PARSENUMBASE_H__
+
+#include "py/mpconfig.h"
+
+size_t mp_parse_num_base(const char *str, size_t len, int *base);
+
+#endif // __MICROPY_INCLUDED_PY_PARSENUMBASE_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/py.mk Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,237 @@ +# where py object files go (they have a name prefix to prevent filename clashes) +PY_BUILD = $(BUILD)/py + +# where autogenerated header files go +HEADER_BUILD = $(BUILD)/genhdr + +# file containing qstr defs for the core Python bit +PY_QSTR_DEFS = $(PY_SRC)/qstrdefs.h + +# some code is performance bottleneck and compiled with other optimization options +CSUPEROPT = -O3 + +INC += -I../lib/netutils + +ifeq ($(MICROPY_PY_USSL),1) +CFLAGS_MOD += -DMICROPY_PY_USSL=1 -I../lib/axtls/ssl -I../lib/axtls/crypto -I../lib/axtls/config +LDFLAGS_MOD += -L../lib/axtls/_stage -laxtls +endif + +#ifeq ($(MICROPY_PY_LWIP),1) +#CFLAGS_MOD += -DMICROPY_PY_LWIP=1 -I../lib/lwip/src/include -I../lib/lwip/src/include/ipv4 -I../extmod/lwip-include +#endif + +ifeq ($(MICROPY_PY_LWIP),1) +LWIP_DIR = lib/lwip/src +INC += -I../lib/lwip/src/include -I../lib/lwip/src/include/ipv4 -I../extmod/lwip-include +CFLAGS_MOD += -DMICROPY_PY_LWIP=1 +SRC_MOD += extmod/modlwip.c lib/netutils/netutils.c +SRC_MOD += $(addprefix $(LWIP_DIR)/,\ + core/def.c \ + core/dns.c \ + core/init.c \ + core/mem.c \ + core/memp.c \ + core/netif.c \ + core/pbuf.c \ + core/raw.c \ + core/stats.c \ + core/sys.c \ + core/tcp.c \ + core/tcp_in.c \ + core/tcp_out.c \ + core/timers.c \ + core/udp.c \ + core/ipv4/autoip.c \ + core/ipv4/icmp.c \ + core/ipv4/igmp.c \ + core/ipv4/inet.c \ + core/ipv4/inet_chksum.c \ + core/ipv4/ip_addr.c \ + core/ipv4/ip.c \ + core/ipv4/ip_frag.c \ + ) +ifeq ($(MICROPY_PY_LWIP_SLIP),1) +CFLAGS_MOD += -DMICROPY_PY_LWIP_SLIP=1 +SRC_MOD += $(LWIP_DIR)/netif/slipif.c +endif +endif + +# py object files +PY_O_BASENAME = \ + mpstate.o \ + nlrx86.o \ + nlrx64.o \ + nlrthumb.o \ + nlrxtensa.o \ + nlrsetjmp.o \ + malloc.o \ + gc.o \ + qstr.o \ + vstr.o \ + mpprint.o \ + unicode.o \ + mpz.o \ + lexer.o \ + lexerstr.o \ + lexerunix.o \ + parse.o \ + scope.o \ + compile.o \ + emitcommon.o \ + emitbc.o \ + 
asmx64.o \ + emitnx64.o \ + asmx86.o \ + emitnx86.o \ + asmthumb.o \ + emitnthumb.o \ + emitinlinethumb.o \ + asmarm.o \ + emitnarm.o \ + formatfloat.o \ + parsenumbase.o \ + parsenum.o \ + emitglue.o \ + runtime.o \ + nativeglue.o \ + stackctrl.o \ + argcheck.o \ + warning.o \ + map.o \ + obj.o \ + objarray.o \ + objattrtuple.o \ + objbool.o \ + objboundmeth.o \ + objcell.o \ + objclosure.o \ + objcomplex.o \ + objdict.o \ + objenumerate.o \ + objexcept.o \ + objfilter.o \ + objfloat.o \ + objfun.o \ + objgenerator.o \ + objgetitemiter.o \ + objint.o \ + objint_longlong.o \ + objint_mpz.o \ + objlist.o \ + objmap.o \ + objmodule.o \ + objobject.o \ + objpolyiter.o \ + objproperty.o \ + objnone.o \ + objnamedtuple.o \ + objrange.o \ + objreversed.o \ + objset.o \ + objsingleton.o \ + objslice.o \ + objstr.o \ + objstrunicode.o \ + objstringio.o \ + objtuple.o \ + objtype.o \ + objzip.o \ + opmethods.o \ + sequence.o \ + stream.o \ + binary.o \ + builtinimport.o \ + builtinevex.o \ + modarray.o \ + modbuiltins.o \ + modcollections.o \ + modgc.o \ + modio.o \ + modmath.o \ + modcmath.o \ + modmicropython.o \ + modstruct.o \ + modsys.o \ + vm.o \ + bc.o \ + showbc.o \ + repl.o \ + smallint.o \ + frozenmod.o \ + ../extmod/moductypes.o \ + ../extmod/modujson.o \ + ../extmod/modure.o \ + ../extmod/moduzlib.o \ + ../extmod/moduheapq.o \ + ../extmod/moduhashlib.o \ + ../extmod/modubinascii.o \ + ../extmod/machine_mem.o \ + ../extmod/modussl.o \ + ../extmod/modurandom.o \ + ../extmod/modwebsocket.o \ + ../extmod/fsusermount.o \ + ../extmod/vfs_fat.o \ + ../extmod/vfs_fat_ffconf.o \ + ../extmod/vfs_fat_diskio.o \ + ../extmod/vfs_fat_file.o \ + ../extmod/vfs_fat_lexer.o \ + ../extmod/vfs_fat_misc.o \ + ../extmod/moduos_dupterm.o \ + +# prepend the build destination prefix to the py object files +PY_O = $(addprefix $(PY_BUILD)/, $(PY_O_BASENAME)) + +# Anything that depends on FORCE will be considered out-of-date +FORCE: +.PHONY: FORCE + +$(HEADER_BUILD)/mpversion.h: FORCE | 
$(HEADER_BUILD) + $(Q)$(PYTHON) $(PY_SRC)/makeversionhdr.py $@ + +# mpconfigport.mk is optional, but changes to it may drastically change +# overall config, so they need to be caught +MPCONFIGPORT_MK = $(wildcard mpconfigport.mk) + +# qstr data + +# Adding an order only dependency on $(HEADER_BUILD) causes $(HEADER_BUILD) to get +# created before we run the script to generate the .h +# Note: we need to protect the qstr names from the preprocessor, so we wrap +# the lines in "" and then unwrap after the preprocessor is finished. +$(HEADER_BUILD)/qstrdefs.generated.h: $(PY_QSTR_DEFS) $(QSTR_DEFS) $(PY_SRC)/makeqstrdata.py mpconfigport.h $(MPCONFIGPORT_MK) $(PY_SRC)/mpconfig.h | $(HEADER_BUILD) + $(ECHO) "GEN $@" + $(Q)cat $(PY_QSTR_DEFS) $(QSTR_DEFS) | $(SED) 's/^Q(.*)/"&"/' | $(CPP) $(CFLAGS) - | sed 's/^"\(Q(.*)\)"/\1/' > $(HEADER_BUILD)/qstrdefs.preprocessed.h + $(Q)$(PYTHON) $(PY_SRC)/makeqstrdata.py $(HEADER_BUILD)/qstrdefs.preprocessed.h > $@ + +# emitters + +$(PY_BUILD)/emitnx64.o: CFLAGS += -DN_X64 +$(PY_BUILD)/emitnx64.o: py/emitnative.c + $(call compile_c) + +$(PY_BUILD)/emitnx86.o: CFLAGS += -DN_X86 +$(PY_BUILD)/emitnx86.o: py/emitnative.c + $(call compile_c) + +$(PY_BUILD)/emitnthumb.o: CFLAGS += -DN_THUMB +$(PY_BUILD)/emitnthumb.o: py/emitnative.c + $(call compile_c) + +$(PY_BUILD)/emitnarm.o: CFLAGS += -DN_ARM +$(PY_BUILD)/emitnarm.o: py/emitnative.c + $(call compile_c) + +# optimising gc for speed; 5ms down to 4ms on pybv2 +$(PY_BUILD)/gc.o: CFLAGS += $(CSUPEROPT) + +# optimising vm for speed, adds only a small amount to code size but makes a huge difference to speed (20% faster) +$(PY_BUILD)/vm.o: CFLAGS += $(CSUPEROPT) +# Optimizing vm.o for modern deeply pipelined CPUs with branch predictors +# may require disabling tail jump optimization. This will make sure that +# each opcode has its own dispatching jump which will improve branch +# branch predictor efficiency. 
+# http://article.gmane.org/gmane.comp.lang.lua.general/75426 +# http://hg.python.org/cpython/file/b127046831e2/Python/ceval.c#l828 +# http://www.emulators.com/docs/nx25_nostradamus.htm +#-fno-crossjumping
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/qstr.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,284 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "py/mpstate.h"
+#include "py/qstr.h"
+#include "py/gc.h"
+
+// NOTE: we are using linear arrays to store and search for qstr's (unique strings, interned strings)
+// ultimately we will replace this with a static hash table of some kind
+// also probably need to include the length in the string data, to allow null bytes in the string
+
+#if 0 // print debugging info
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#endif
+
+// A qstr is an index into the qstr pool.
+// The data for a qstr contains (hash, length, data):
+// - hash (configurable number of bytes)
+// - length (configurable number of bytes)
+// - data ("length" number of bytes)
+// - \0 terminated (so they can be printed using printf)
+
+#if MICROPY_QSTR_BYTES_IN_HASH == 1
+ #define Q_HASH_MASK (0xff)
+ #define Q_GET_HASH(q) ((mp_uint_t)(q)[0])
+ #define Q_SET_HASH(q, hash) do { (q)[0] = (hash); } while (0)
+#elif MICROPY_QSTR_BYTES_IN_HASH == 2
+ #define Q_HASH_MASK (0xffff)
+ #define Q_GET_HASH(q) ((mp_uint_t)(q)[0] | ((mp_uint_t)(q)[1] << 8))
+ #define Q_SET_HASH(q, hash) do { (q)[0] = (hash); (q)[1] = (hash) >> 8; } while (0)
+#else
+ #error unimplemented qstr hash decoding
+#endif
+#define Q_GET_ALLOC(q) (MICROPY_QSTR_BYTES_IN_HASH + MICROPY_QSTR_BYTES_IN_LEN + Q_GET_LENGTH(q) + 1)
+#define Q_GET_DATA(q) ((q) + MICROPY_QSTR_BYTES_IN_HASH + MICROPY_QSTR_BYTES_IN_LEN)
+#if MICROPY_QSTR_BYTES_IN_LEN == 1
+ #define Q_GET_LENGTH(q) ((q)[MICROPY_QSTR_BYTES_IN_HASH])
+ #define Q_SET_LENGTH(q, len) do { (q)[MICROPY_QSTR_BYTES_IN_HASH] = (len); } while (0)
+#elif MICROPY_QSTR_BYTES_IN_LEN == 2
+ #define Q_GET_LENGTH(q) ((q)[MICROPY_QSTR_BYTES_IN_HASH] | ((q)[MICROPY_QSTR_BYTES_IN_HASH + 1] << 8))
+ #define Q_SET_LENGTH(q, len) do { (q)[MICROPY_QSTR_BYTES_IN_HASH] = (len); (q)[MICROPY_QSTR_BYTES_IN_HASH + 1] = (len) >> 8; } while (0)
+#else
+ #error unimplemented qstr length decoding
+#endif
+
+// this must match the equivalent function in makeqstrdata.py
+mp_uint_t qstr_compute_hash(const byte *data, size_t len) { // hash of a byte string, truncated to the width stored in a qstr record
+ // djb2 algorithm; see http://www.cse.yorku.ca/~oz/hash.html
+ mp_uint_t hash = 5381;
+ for (const byte *top = data + len; data < top; data++) {
+ hash = ((hash << 5) + hash) ^ (*data); // hash * 33 ^ data
+ }
+ hash &= Q_HASH_MASK; // keep only MICROPY_QSTR_BYTES_IN_HASH bytes
+ // Make sure that valid hash is never zero, zero means "hash not computed"
+ if (hash == 0) {
+ hash++;
+ }
+ return hash;
+}
+
+STATIC const qstr_pool_t const_pool = { // ROM pool holding every compile-time interned string
+ NULL, // no previous pool
+ 0, // no previous pool
+ 10, // set so that the first dynamically allocated pool is twice this size; must be <= the len (just below)
+ MP_QSTR_number_of, // corresponds to number of strings in array just below
+ {
+#define QDEF(id, str) str,
+#include "genhdr/qstrdefs.generated.h"
+#undef QDEF
+ },
+};
+
+void qstr_init(void) { // reset qstr runtime state; must run before any qstr_from_*() call
+ MP_STATE_VM(last_pool) = (qstr_pool_t*)&const_pool; // we won't modify the const_pool since it has no allocated room left
+ MP_STATE_VM(qstr_last_chunk) = NULL; // no partially-used allocation chunk yet
+}
+
+STATIC const byte *find_qstr(qstr q) { // map a qstr index to its (hash,len,data) record; 0 if out of range
+ // search pool for this qstr
+ for (qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL; pool = pool->prev) {
+ if (q >= pool->total_prev_len) { // pools are walked newest-first; the first pool whose base index is <= q owns it
+ return pool->qstrs[q - pool->total_prev_len];
+ }
+ }
+
+ // not found
+ return 0;
+}
+
+STATIC qstr qstr_add(const byte *q_ptr) { // record an already-built (hash,len,data) buffer in the pool; returns its new qstr id
+ DEBUG_printf("QSTR: add hash=%d len=%d data=%.*s\n", Q_GET_HASH(q_ptr), Q_GET_LENGTH(q_ptr), Q_GET_LENGTH(q_ptr), Q_GET_DATA(q_ptr));
+
+ // make sure we have room in the pool for a new qstr
+ if (MP_STATE_VM(last_pool)->len >= MP_STATE_VM(last_pool)->alloc) {
+ qstr_pool_t *pool = m_new_obj_var(qstr_pool_t, const char*, MP_STATE_VM(last_pool)->alloc * 2); // grow: each new pool doubles the current capacity
+ pool->prev = MP_STATE_VM(last_pool);
+ pool->total_prev_len = MP_STATE_VM(last_pool)->total_prev_len + MP_STATE_VM(last_pool)->len;
+ pool->alloc = MP_STATE_VM(last_pool)->alloc * 2;
+ pool->len = 0;
+ MP_STATE_VM(last_pool) = pool;
+ DEBUG_printf("QSTR: allocate new pool of size %d\n", MP_STATE_VM(last_pool)->alloc);
+ }
+
+ // add the new qstr
+ MP_STATE_VM(last_pool)->qstrs[MP_STATE_VM(last_pool)->len++] = q_ptr;
+
+ // return id for the newly-added qstr
+ return MP_STATE_VM(last_pool)->total_prev_len + MP_STATE_VM(last_pool)->len - 1;
+}
+
+qstr qstr_find_strn(const char *str, size_t str_len) { // return existing qstr id for str[0..str_len), or 0 (MP_QSTR_NULL) if not interned
+ // work out hash of str
+ mp_uint_t str_hash = qstr_compute_hash((const byte*)str, str_len);
+
+ // search pools for the data
+ for (qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL; pool = pool->prev) {
+ for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
+ if (Q_GET_HASH(*q) == str_hash && Q_GET_LENGTH(*q) == str_len && memcmp(Q_GET_DATA(*q), str, str_len) == 0) { // cheapest checks first: hash, then length, then bytes
+ return pool->total_prev_len + (q - pool->qstrs);
+ }
+ }
+ }
+
+ // not found; return null qstr
+ return 0;
+}
+
+qstr qstr_from_str(const char *str) { // intern a NUL-terminated C string
+ return qstr_from_strn(str, strlen(str));
+}
+
+qstr qstr_from_strn(const char *str, size_t len) { // intern str[0..len): return the existing id, or copy the bytes and add a new qstr
+ assert(len < (1 << (8 * MICROPY_QSTR_BYTES_IN_LEN))); // length must fit in the per-qstr length field
+ qstr q = qstr_find_strn(str, len);
+ if (q == 0) {
+ // qstr does not exist in interned pool so need to add it
+
+ // compute number of bytes needed to intern this string
+ size_t n_bytes = MICROPY_QSTR_BYTES_IN_HASH + MICROPY_QSTR_BYTES_IN_LEN + len + 1;
+
+ if (MP_STATE_VM(qstr_last_chunk) != NULL && MP_STATE_VM(qstr_last_used) + n_bytes > MP_STATE_VM(qstr_last_alloc)) {
+ // not enough room at end of previously interned string so try to grow
+ byte *new_p = m_renew_maybe(byte, MP_STATE_VM(qstr_last_chunk), MP_STATE_VM(qstr_last_alloc), MP_STATE_VM(qstr_last_alloc) + n_bytes, false); // false: don't move the chunk, existing qstrs point into it
+ if (new_p == NULL) {
+ // could not grow existing memory; shrink it to fit previous
+ (void)m_renew(byte, MP_STATE_VM(qstr_last_chunk), MP_STATE_VM(qstr_last_alloc), MP_STATE_VM(qstr_last_used));
+ MP_STATE_VM(qstr_last_chunk) = NULL;
+ } else {
+ // could grow existing memory
+ MP_STATE_VM(qstr_last_alloc) += n_bytes;
+ }
+ }
+
+ if (MP_STATE_VM(qstr_last_chunk) == NULL) {
+ // no existing memory for the interned string so allocate a new chunk
+ size_t al = n_bytes;
+ if (al < MICROPY_ALLOC_QSTR_CHUNK_INIT) {
+ al = MICROPY_ALLOC_QSTR_CHUNK_INIT; // over-allocate so later interns can reuse the chunk
+ }
+ MP_STATE_VM(qstr_last_chunk) = m_new_maybe(byte, al);
+ if (MP_STATE_VM(qstr_last_chunk) == NULL) {
+ // failed to allocate a large chunk so try with exact size
+ MP_STATE_VM(qstr_last_chunk) = m_new(byte, n_bytes); // m_new raises on failure, so this is the last resort
+ al = n_bytes;
+ }
+ MP_STATE_VM(qstr_last_alloc) = al;
+ MP_STATE_VM(qstr_last_used) = 0;
+ }
+
+ // allocate memory from the chunk for this new interned string's data
+ byte *q_ptr = MP_STATE_VM(qstr_last_chunk) + MP_STATE_VM(qstr_last_used);
+ MP_STATE_VM(qstr_last_used) += n_bytes;
+
+ // store the interned strings' data
+ mp_uint_t hash = qstr_compute_hash((const byte*)str, len);
+ Q_SET_HASH(q_ptr, hash);
+ Q_SET_LENGTH(q_ptr, len);
+ memcpy(q_ptr + MICROPY_QSTR_BYTES_IN_HASH + MICROPY_QSTR_BYTES_IN_LEN, str, len);
+ q_ptr[MICROPY_QSTR_BYTES_IN_HASH + MICROPY_QSTR_BYTES_IN_LEN + len] = '\0'; // NUL-terminate so qstr_str() can hand out a C string
+ q = qstr_add(q_ptr);
+ }
+ return q;
+}
+
+byte *qstr_build_start(size_t len, byte **q_ptr) { // allocate an empty qstr record; caller writes len bytes into the returned buffer
+ assert(len < (1 << (8 * MICROPY_QSTR_BYTES_IN_LEN))); // length must fit in the per-qstr length field
+ *q_ptr = m_new(byte, MICROPY_QSTR_BYTES_IN_HASH + MICROPY_QSTR_BYTES_IN_LEN + len + 1);
+ Q_SET_LENGTH(*q_ptr, len);
+ return Q_GET_DATA(*q_ptr); // hash is filled in later, by qstr_build_end
+}
+
+qstr qstr_build_end(byte *q_ptr) { // finish an in-place build: intern the buffer, or free it if an equal qstr already exists
+ qstr q = qstr_find_strn((const char*)Q_GET_DATA(q_ptr), Q_GET_LENGTH(q_ptr));
+ if (q == 0) {
+ size_t len = Q_GET_LENGTH(q_ptr);
+ mp_uint_t hash = qstr_compute_hash(Q_GET_DATA(q_ptr), len);
+ Q_SET_HASH(q_ptr, hash);
+ q_ptr[MICROPY_QSTR_BYTES_IN_HASH + MICROPY_QSTR_BYTES_IN_LEN + len] = '\0'; // NUL-terminate the stored data
+ q = qstr_add(q_ptr);
+ } else {
+ m_del(byte, q_ptr, Q_GET_ALLOC(q_ptr)); // duplicate: discard the buffer, reuse the existing qstr
+ }
+ return q;
+}
+
+mp_uint_t qstr_hash(qstr q) { // precomputed hash of an existing qstr, read from its record
+ return Q_GET_HASH(find_qstr(q));
+}
+
+size_t qstr_len(qstr q) { // length in bytes of an existing qstr
+ const byte *qd = find_qstr(q);
+ return Q_GET_LENGTH(qd);
+}
+
+// XXX to remove!
+const char *qstr_str(qstr q) { // data as a C string; stored data is NUL-terminated, but embedded NULs would truncate — prefer qstr_data()
+ const byte *qd = find_qstr(q);
+ return (const char*)Q_GET_DATA(qd);
+}
+
+const byte *qstr_data(qstr q, size_t *len) { // length-aware accessor: returns the data pointer and stores the byte length in *len
+ const byte *qd = find_qstr(q);
+ *len = Q_GET_LENGTH(qd);
+ return Q_GET_DATA(qd);
+}
+
+void qstr_pool_info(size_t *n_pool, size_t *n_qstr, size_t *n_str_data_bytes, size_t *n_total_bytes) { // tally pools/qstrs/bytes over the RAM pools; const_pool is excluded
+ *n_pool = 0;
+ *n_qstr = 0;
+ *n_str_data_bytes = 0;
+ *n_total_bytes = 0;
+ for (qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL && pool != &const_pool; pool = pool->prev) {
+ *n_pool += 1;
+ *n_qstr += pool->len;
+ for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
+ *n_str_data_bytes += Q_GET_ALLOC(*q);
+ }
+ #if MICROPY_ENABLE_GC
+ *n_total_bytes += gc_nbytes(pool); // this counts actual bytes used in heap
+ #else
+ *n_total_bytes += sizeof(qstr_pool_t) + sizeof(qstr) * pool->alloc;
+ #endif
+ }
+ *n_total_bytes += *n_str_data_bytes; // grand total = pool bookkeeping + string data
+}
+
+#if MICROPY_PY_MICROPYTHON_MEM_INFO
+void qstr_dump_data(void) { // print every RAM-interned string in Q(...) form (const_pool excluded)
+ for (qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL && pool != &const_pool; pool = pool->prev) {
+ for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
+ mp_printf(&mp_plat_print, "Q(%s)\n", Q_GET_DATA(*q));
+ }
+ }
+}
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/qstr.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,77 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_QSTR_H__
+#define __MICROPY_INCLUDED_PY_QSTR_H__
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+
+// See qstrdefs.h for a list of qstr's that are available as constants.
+// Reference them as MP_QSTR_xxxx.
+//
+// Note: it would be possible to define MP_QSTR_xxx as qstr_from_str_static("xxx")
+// for qstrs that are referenced this way, but you don't want to have them in ROM.
+
+// first entry in enum will be MP_QSTR_NULL=0, which indicates invalid/no qstr
+enum {
+#define QDEF(id, str) id,
+#include "genhdr/qstrdefs.generated.h"
+#undef QDEF
+ MP_QSTR_number_of,
+};
+
+typedef size_t qstr; // index into the chain of qstr pools; 0 is the invalid/null qstr
+
+typedef struct _qstr_pool_t {
+ struct _qstr_pool_t *prev; // next-older pool (linked list, newest first)
+ size_t total_prev_len; // total qstrs in all older pools == base index of this pool
+ size_t alloc; // capacity of qstrs[]
+ size_t len; // number of entries currently used
+ const byte *qstrs[]; // flexible array of pointers to (hash, length, data) records
+} qstr_pool_t;
+
+#define QSTR_FROM_STR_STATIC(s) (qstr_from_strn((s), strlen(s)))
+
+void qstr_init(void);
+
+mp_uint_t qstr_compute_hash(const byte *data, size_t len);
+qstr qstr_find_strn(const char *str, size_t str_len); // returns MP_QSTR_NULL if not found
+
+qstr qstr_from_str(const char *str);
+qstr qstr_from_strn(const char *str, size_t len);
+
+byte *qstr_build_start(size_t len, byte **q_ptr);
+qstr qstr_build_end(byte *q_ptr);
+
+mp_uint_t qstr_hash(qstr q);
+const char *qstr_str(qstr q);
+size_t qstr_len(qstr q);
+const byte *qstr_data(qstr q, size_t *len);
+
+void qstr_pool_info(size_t *n_pool, size_t *n_qstr, size_t *n_str_data_bytes, size_t *n_total_bytes);
+void qstr_dump_data(void);
+
+#endif // __MICROPY_INCLUDED_PY_QSTR_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/qstrdefs.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,763 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpconfig.h"
+
+// All the qstr definitions in this file are available as constants.
+// That is, they are in ROM and you can reference them simply as MP_QSTR_xxxx.
+
+// qstr configuration passed to makeqstrdata.py of the form QCFG(key, value)
+QCFG(BYTES_IN_LEN, MICROPY_QSTR_BYTES_IN_LEN)
+QCFG(BYTES_IN_HASH, MICROPY_QSTR_BYTES_IN_HASH)
+
+Q()
+Q(*)
+Q(_)
+Q(__build_class__)
+Q(__class__)
+Q(__doc__)
+Q(__import__)
+Q(__init__)
+Q(__new__)
+Q(__locals__)
+Q(__main__)
+Q(__module__)
+Q(__name__)
+Q(__dict__)
+Q(__hash__)
+Q(__next__)
+Q(__qualname__)
+Q(__path__)
+Q(__repl_print__)
+#if MICROPY_PY___FILE__
+Q(__file__)
+#endif
+
+Q(__bool__)
+Q(__contains__)
+Q(__enter__)
+Q(__exit__)
+Q(__len__)
+Q(__iter__)
+Q(__getitem__)
+Q(__setitem__)
+Q(__delitem__)
+Q(__add__)
+Q(__sub__)
+Q(__repr__)
+Q(__str__)
+#if MICROPY_PY_DESCRIPTORS
+Q(__get__)
+Q(__set__)
+Q(__delete__)
+#endif
+Q(__getattr__)
+Q(__del__)
+Q(__call__)
+Q(__lt__)
+Q(__gt__)
+Q(__eq__)
+Q(__le__)
+Q(__ge__)
+Q(__reversed__)
+#if MICROPY_PY_ALL_SPECIAL_METHODS
+Q(__mul__)
+Q(__truediv__)
+Q(__floordiv__)
+Q(__iadd__)
+Q(__isub__)
+Q(__invert__)
+Q(__neg__)
+Q(__pos__)
+#endif
+
+Q(micropython)
+Q(bytecode)
+Q(const)
+
+#if MICROPY_EMIT_NATIVE
+Q(native)
+Q(viper)
+Q(uint)
+Q(ptr)
+Q(ptr8)
+Q(ptr16)
+Q(ptr32)
+#endif
+
+#if MICROPY_EMIT_INLINE_THUMB
+Q(asm_thumb)
+Q(label)
+Q(align)
+Q(data)
+Q(uint)
+Q(nop)
+Q(mov)
+Q(and_)
+Q(cmp)
+Q(add)
+Q(sub)
+Q(lsl)
+Q(lsr)
+Q(asr)
+Q(ldr)
+Q(ldrb)
+Q(ldrh)
+Q(str)
+Q(strb)
+Q(strh)
+Q(b)
+Q(bl)
+Q(bx)
+Q(push)
+Q(pop)
+Q(cpsid)
+Q(cpsie)
+Q(wfi)
+Q(clz)
+Q(rbit)
+Q(movw)
+Q(movt)
+Q(movwt)
+Q(mrs)
+Q(sdiv)
+Q(udiv)
+Q(ldrex)
+Q(strex)
+#if MICROPY_EMIT_INLINE_THUMB_FLOAT
+Q(vcmp)
+Q(vneg)
+Q(vcvt_f32_s32)
+Q(vcvt_s32_f32)
+Q(vsqrt)
+Q(vmov)
+Q(vmrs)
+Q(vldr)
+Q(vstr)
+#endif
+#endif
+
+Q(builtins)
+
+Q(Ellipsis)
+Q(StopIteration)
+#if MICROPY_PY_BUILTINS_NOTIMPLEMENTED
+Q(NotImplemented)
+#endif
+
+Q(BaseException)
+Q(ArithmeticError)
+Q(AssertionError)
+Q(AttributeError)
+Q(BufferError)
+Q(EOFError)
+Q(Exception)
+Q(FileExistsError)
+Q(FileNotFoundError)
+Q(FloatingPointError)
+Q(GeneratorExit)
+Q(ImportError)
+Q(IndentationError)
+Q(IndexError)
+Q(KeyboardInterrupt)
+Q(KeyError)
+Q(LookupError)
+Q(MemoryError)
+Q(NameError)
+Q(NotImplementedError)
+Q(OSError)
+#if MICROPY_PY_BUILTINS_TIMEOUTERROR
+Q(TimeoutError)
+#endif
+Q(OverflowError)
+Q(RuntimeError)
+Q(SyntaxError)
+Q(SystemExit)
+Q(TypeError)
+Q(UnboundLocalError)
+Q(ValueError)
+#if MICROPY_EMIT_NATIVE
+Q(ViperTypeError)
+#endif
+Q(ZeroDivisionError)
+#if MICROPY_PY_BUILTINS_STR_UNICODE
+Q(UnicodeError)
+#endif
+
+Q(None)
+Q(False)
+Q(True)
+Q(object)
+
+Q(NoneType)
+
+#if MICROPY_PY_COLLECTIONS_ORDEREDDICT
+Q(OrderedDict)
+#endif
+
+Q(abs)
+Q(all)
+Q(any)
+Q(args)
+#if MICROPY_PY_ARRAY
+Q(array)
+#endif
+Q(bin)
+Q({:#b})
+Q(bool)
+#if MICROPY_PY_BUILTINS_BYTEARRAY
+Q(bytearray)
+#endif
+#if MICROPY_PY_BUILTINS_MEMORYVIEW
+Q(memoryview)
+#endif
+Q(bytes)
+Q(callable)
+Q(chr)
+Q(classmethod)
+Q(_collections)
+#if MICROPY_PY_BUILTINS_COMPLEX
+Q(complex)
+Q(real)
+Q(imag)
+#endif
+Q(dict)
+Q(dir)
+Q(divmod)
+#if MICROPY_PY_BUILTINS_ENUMERATE
+Q(enumerate)
+#endif
+Q(eval)
+Q(exec)
+#if MICROPY_PY_BUILTINS_EXECFILE
+Q(execfile)
+#endif
+#if MICROPY_PY_BUILTINS_FILTER
+Q(filter)
+#endif
+#if MICROPY_PY_BUILTINS_FLOAT
+Q(float)
+#endif
+Q(from_bytes)
+Q(getattr)
+Q(setattr)
+Q(globals)
+Q(hasattr)
+Q(hash)
+Q(hex)
+Q(%#x)
+Q(id)
+Q(int)
+Q(isinstance)
+Q(issubclass)
+Q(iter)
+Q(len)
+Q(list)
+Q(locals)
+Q(map)
+#if MICROPY_PY_BUILTINS_MIN_MAX
+Q(max)
+Q(min)
+Q(default)
+#endif
+Q(namedtuple)
+Q(next)
+Q(oct)
+Q(%#o)
+Q(open)
+Q(ord)
+Q(path)
+Q(pow)
+Q(print)
+Q(range)
+Q(read)
+Q(repr)
+Q(reversed)
+Q(round)
+Q(sorted)
+Q(staticmethod)
+Q(sum)
+Q(super)
+Q(str)
+Q(sys)
+Q(to_bytes)
+Q(tuple)
+Q(type)
+Q(value)
+Q(write)
+Q(zip)
+
+#if MICROPY_PY_BUILTINS_COMPILE
+Q(compile)
+Q(code)
+Q(single)
+#endif
+
+Q(sep)
+Q(end)
+
+#if MICROPY_PY_BUILTINS_RANGE_ATTRS
+Q(step)
+Q(stop)
+#endif
+
+Q(clear)
+Q(copy)
+Q(fromkeys)
+Q(get)
+Q(items)
+Q(keys)
+Q(pop)
+Q(popitem)
+Q(setdefault)
+Q(update)
+Q(values)
+Q(append)
+Q(close)
+Q(send)
+Q(throw)
+Q(count)
+Q(extend)
+Q(index)
+Q(remove)
+Q(insert)
+Q(pop)
+Q(sort)
+Q(join)
+Q(strip)
+Q(lstrip)
+Q(rstrip)
+Q(format)
+Q(key)
+Q(reverse)
+Q(add)
+Q(clear)
+Q(copy)
+Q(pop)
+Q(remove)
+Q(find)
+Q(rfind)
+Q(rindex)
+Q(split)
+#if MICROPY_PY_BUILTINS_STR_SPLITLINES
+Q(splitlines)
+Q(keepends)
+Q(\n)
+#endif
+Q(rsplit)
+Q(startswith)
+Q(endswith)
+Q(replace)
+Q(partition)
+Q(rpartition)
+Q(lower)
+Q(upper)
+Q(isspace)
+Q(isalpha)
+Q(isdigit)
+Q(isupper)
+Q(islower)
+Q(iterable)
+Q(start)
+
+Q(bound_method)
+Q(closure)
+Q(dict_view)
+Q(function)
+Q(generator)
+Q(iterator)
+Q(module)
+Q(slice)
+
+#if MICROPY_PY_BUILTINS_SET
+Q(discard)
+Q(difference)
+Q(difference_update)
+Q(intersection)
+Q(intersection_update)
+Q(isdisjoint)
+Q(issubset)
+Q(issuperset)
+Q(set)
+Q(symmetric_difference)
+Q(symmetric_difference_update)
+Q(union)
+Q(update)
+#endif
+
+#if MICROPY_PY_BUILTINS_FROZENSET
+Q(frozenset)
+#endif
+
+#if MICROPY_PY_MATH || MICROPY_PY_CMATH
+Q(math)
+Q(e)
+Q(pi)
+Q(sqrt)
+Q(pow)
+Q(exp)
+#if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+Q(expm1)
+#endif
+Q(log)
+#if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+Q(log2)
+Q(log10)
+Q(cosh)
+Q(sinh)
+Q(tanh)
+Q(acosh)
+Q(asinh)
+Q(atanh)
+#endif
+Q(cos)
+Q(sin)
+Q(tan)
+Q(acos)
+Q(asin)
+Q(atan)
+Q(atan2)
+Q(ceil)
+Q(copysign)
+Q(fabs)
+Q(fmod)
+Q(floor)
+Q(isfinite)
+Q(isinf)
+Q(isnan)
+Q(trunc)
+Q(modf)
+Q(frexp)
+Q(ldexp)
+Q(degrees)
+Q(radians)
+#if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+Q(erf)
+Q(erfc)
+Q(gamma)
+Q(lgamma)
+#endif
+#endif
+
+#if MICROPY_PY_CMATH
+Q(cmath)
+Q(phase)
+Q(polar)
+Q(rect)
+#endif
+
+#if MICROPY_PY_MICROPYTHON_MEM_INFO
+#if MICROPY_MEM_STATS
+Q(mem_total)
+Q(mem_current)
+Q(mem_peak)
+#endif
+Q(mem_info)
+Q(qstr_info)
+#if MICROPY_STACK_CHECK
+Q(stack_use)
+#endif
+#endif
+#if MICROPY_ENABLE_GC
+Q(heap_lock)
+Q(heap_unlock)
+#endif
+
+#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && (MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0)
+Q(alloc_emergency_exception_buf)
+#endif
+Q(maximum recursion depth exceeded)
+
+Q(<module>)
+Q(<lambda>)
+Q(<listcomp>)
+Q(<dictcomp>)
+Q(<setcomp>)
+Q(<genexpr>)
+Q(<string>)
+Q(<stdin>)
+
+#if MICROPY_CPYTHON_COMPAT
+Q(encode)
+Q(decode)
+Q(utf-8)
+#endif
+
+#if MICROPY_PY_SYS
+Q(argv)
+Q(byteorder)
+Q(big)
+Q(exit)
+Q(little)
+#ifdef MICROPY_PY_SYS_PLATFORM
+Q(platform)
+#endif
+Q(stdin)
+Q(stdout)
+Q(stderr)
+#if MICROPY_PY_SYS_STDIO_BUFFER
+Q(buffer)
+#endif
+Q(version)
+Q(version_info)
+#if MICROPY_PY_ATTRTUPLE
+Q(name)
+#endif
+Q(implementation)
+#if MICROPY_PY_SYS_MAXSIZE
+Q(maxsize)
+#endif
+#if MICROPY_PY_SYS_MODULES
+Q(modules)
+#endif
+#if MICROPY_PY_SYS_EXC_INFO
+Q(exc_info)
+#endif
+Q(print_exception)
+#endif
+
+#if MICROPY_PY_STRUCT
+Q(struct)
+Q(ustruct)
+Q(pack)
+Q(pack_into)
+Q(unpack)
+Q(unpack_from)
+Q(calcsize)
+#endif
+
+#if MICROPY_PY_UCTYPES
+Q(uctypes)
+Q(struct)
+Q(sizeof)
+Q(addressof)
+Q(bytes_at)
+Q(bytearray_at)
+
+Q(NATIVE)
+Q(LITTLE_ENDIAN)
+Q(BIG_ENDIAN)
+
+Q(VOID)
+
+Q(UINT8)
+Q(INT8)
+Q(UINT16)
+Q(INT16)
+Q(UINT32)
+Q(INT32)
+Q(UINT64)
+Q(INT64)
+
+Q(BFUINT8)
+Q(BFINT8)
+Q(BFUINT16)
+Q(BFINT16)
+Q(BFUINT32)
+Q(BFINT32)
+
+Q(FLOAT32)
+Q(FLOAT64)
+
+Q(ARRAY)
+Q(PTR)
+//Q(BITFIELD)
+
+Q(BF_POS)
+Q(BF_LEN)
+#endif
+
+#if MICROPY_PY_IO
+Q(_io)
+Q(readall)
+Q(readinto)
+Q(readline)
+Q(readlines)
+Q(seek)
+Q(tell)
+Q(FileIO)
+Q(TextIOWrapper)
+Q(StringIO)
+Q(BytesIO)
+Q(getvalue)
+Q(file)
+Q(mode)
+Q(r)
+Q(encoding)
+#if MICROPY_PY_IO_BUFFEREDWRITER
+Q(BufferedWriter)
+#endif
+#endif
+
+#if MICROPY_PY_GC
+Q(gc)
+Q(collect)
+Q(disable)
+Q(enable)
+Q(isenabled)
+Q(mem_free)
+Q(mem_alloc)
+#endif
+
+#if MICROPY_PY_BUILTINS_PROPERTY
+Q(property)
+Q(getter)
+Q(setter)
+Q(deleter)
+Q(doc)
+#endif
+
+#if MICROPY_PY_UZLIB
+Q(uzlib)
+Q(decompress)
+#endif
+
+#if MICROPY_PY_UJSON
+Q(ujson)
+Q(dumps)
+Q(loads)
+#endif
+
+#if MICROPY_PY_URE
+Q(ure)
+Q(compile)
+Q(match)
+Q(search)
+Q(group)
+Q(DEBUG)
+#endif
+
+#if MICROPY_PY_UHEAPQ
+Q(uheapq)
+Q(heappush)
+Q(heappop)
+Q(heapify)
+#endif
+
+#if MICROPY_PY_UHASHLIB
+Q(uhashlib)
+Q(update)
+Q(digest)
+Q(sha256)
+Q(sha1)
+#endif
+
+#if MICROPY_PY_UBINASCII
+Q(ubinascii)
+Q(hexlify)
+Q(unhexlify)
+Q(a2b_base64)
+Q(b2a_base64)
+#endif
+
+#if MICROPY_PY_MACHINE
+Q(umachine)
+Q(mem)
+Q(mem8)
+Q(mem16)
+Q(mem32)
+#endif
+
+#if MICROPY_PY_USSL
+Q(ussl)
+Q(wrap_socket)
+#endif
+
+#if MICROPY_PY_LWIP
+// for lwip module
+Q(lwip)
+Q(reset)
+Q(callback)
+Q(socket)
+Q(AF_INET)
+Q(AF_INET6)
+Q(SOCK_STREAM)
+Q(SOCK_DGRAM)
+Q(SOCK_RAW)
+Q(SOL_SOCKET)
+Q(SO_REUSEADDR)
+// for lwip.socket
+Q(close)
+Q(bind)
+Q(listen)
+Q(accept)
+Q(connect)
+Q(send)
+Q(recv)
+Q(sendto)
+Q(recvfrom)
+Q(settimeout)
+Q(setsockopt)
+Q(makefile)
+#if MICROPY_PY_LWIP_SLIP
+// for lwip.slip
+Q(slip)
+Q(status)
+#endif
+#endif
+
+#if MICROPY_FSUSERMOUNT
+// for user-mountable block devices
+Q(mount)
+Q(umount)
+Q(readonly)
+Q(mkfs)
+Q(listdir)
+Q(mkdir)
+Q(remove)
+Q(rename)
+Q(readblocks)
+Q(writeblocks)
+Q(ioctl)
+Q(sync)
+Q(count)
+#endif
+
+#if MICROPY_PY_OS_DUPTERM
+Q(dupterm)
+#endif
+
+#if MICROPY_PY_URANDOM
+Q(urandom)
+Q(getrandbits)
+Q(seed)
+#if MICROPY_PY_URANDOM_EXTRA_FUNCS
+Q(randrange)
+Q(randint)
+Q(choice)
+Q(random)
+Q(uniform)
+#endif
+#endif
+
+#if MICROPY_VFS_FAT
+Q(VfsFat)
+Q(flush)
+#endif
+
+#if MICROPY_PY_WEBSOCKET
+Q(websocket)
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/repl.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,264 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2015 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include "py/obj.h"
+#include "py/runtime.h"
+#include "py/repl.h"
+
+#if MICROPY_HELPER_REPL
+
+// Return true if `str` begins with the complete word `head`: all of `head`
+// must match, and the character in `str` following it (if any) must not be
+// an identifier character (so "if" matches "if x" but not "ifx").
+STATIC bool str_startswith_word(const char *str, const char *head) {
+ mp_uint_t i;
+ for (i = 0; str[i] && head[i]; i++) {
+ if (str[i] != head[i]) {
+ return false;
+ }
+ }
+ // head fully consumed, and the keyword is not a prefix of a longer identifier
+ return head[i] == '\0' && (str[i] == '\0' || !unichar_isident(str[i]));
+}
+
+// Decide whether the REPL should prompt for a continuation line.
+// Returns true when `input` is syntactically incomplete: an unterminated
+// bracket/quote, a trailing backslash, or a compound statement (if/while/
+// for/try/with/def/class or a decorator) whose last line is not yet blank.
+bool mp_repl_continue_with_input(const char *input) {
+ // check for blank input
+ if (input[0] == '\0') {
+ return false;
+ }
+
+ // check if input starts with a certain keyword
+ bool starts_with_compound_keyword =
+ input[0] == '@'
+ || str_startswith_word(input, "if")
+ || str_startswith_word(input, "while")
+ || str_startswith_word(input, "for")
+ || str_startswith_word(input, "try")
+ || str_startswith_word(input, "with")
+ || str_startswith_word(input, "def")
+ || str_startswith_word(input, "class")
+ ;
+
+ // check for unmatched open bracket, quote or escape quote
+ // Quote-state values are chosen so that `Q_x - in_quote` toggles between
+ // Q_NONE and the corresponding quote state (enter on NONE, leave on match).
+ #define Q_NONE (0)
+ #define Q_1_SINGLE (1)
+ #define Q_1_DOUBLE (2)
+ #define Q_3_SINGLE (3)
+ #define Q_3_DOUBLE (4)
+ int n_paren = 0;
+ int n_brack = 0;
+ int n_brace = 0;
+ int in_quote = Q_NONE;
+ const char *i;
+ for (i = input; *i; i++) {
+ if (*i == '\'') {
+ if ((in_quote == Q_NONE || in_quote == Q_3_SINGLE) && i[1] == '\'' && i[2] == '\'') {
+ i += 2;
+ in_quote = Q_3_SINGLE - in_quote;
+ } else if (in_quote == Q_NONE || in_quote == Q_1_SINGLE) {
+ in_quote = Q_1_SINGLE - in_quote;
+ }
+ } else if (*i == '"') {
+ if ((in_quote == Q_NONE || in_quote == Q_3_DOUBLE) && i[1] == '"' && i[2] == '"') {
+ i += 2;
+ in_quote = Q_3_DOUBLE - in_quote;
+ } else if (in_quote == Q_NONE || in_quote == Q_1_DOUBLE) {
+ in_quote = Q_1_DOUBLE - in_quote;
+ }
+ } else if (*i == '\\' && (i[1] == '\'' || i[1] == '"')) {
+ // skip an escaped quote so it doesn't toggle the quote state
+ if (in_quote != Q_NONE) {
+ i++;
+ }
+ } else if (in_quote == Q_NONE) {
+ // only count brackets when outside any string literal
+ switch (*i) {
+ case '(': n_paren += 1; break;
+ case ')': n_paren -= 1; break;
+ case '[': n_brack += 1; break;
+ case ']': n_brack -= 1; break;
+ case '{': n_brace += 1; break;
+ case '}': n_brace -= 1; break;
+ default: break;
+ }
+ }
+ }
+
+ // continue if unmatched brackets or quotes
+ // (only triple-quoted strings may span lines, so single-quote state is ignored)
+ if (n_paren > 0 || n_brack > 0 || n_brace > 0 || in_quote == Q_3_SINGLE || in_quote == Q_3_DOUBLE) {
+ return true;
+ }
+
+ // continue if last character was backslash (for line continuation)
+ // (i now points at the NUL terminator; input is non-empty so i[-1] is valid)
+ if (i[-1] == '\\') {
+ return true;
+ }
+
+ // continue if compound keyword and last line was not empty
+ if (starts_with_compound_keyword && i[-1] != '\n') {
+ return true;
+ }
+
+ // otherwise, don't continue
+ return false;
+}
+
+// Tab-completion for the REPL. `str`/`len` is the current input line; the
+// trailing "a.b.c"-style dotted name is resolved attribute-by-attribute,
+// starting from the locals dict. On a unique match (or common prefix of
+// several matches) *compl_str is set to the remaining characters and their
+// count is returned. Returns 0 for no match, (mp_uint_t)-1 after printing a
+// table of multiple candidates via `print`.
+mp_uint_t mp_repl_autocomplete(const char *str, mp_uint_t len, const mp_print_t *print, const char **compl_str) {
+ // scan backwards to find start of "a.b.c" chain
+ // NOTE(review): when no delimiter is found, --s steps to str-1 before the
+ // comparison stops the loop; technically one-before-the-array, as upstream.
+ const char *top = str + len;
+ for (const char *s = top; --s >= str;) {
+ if (!(unichar_isalpha(*s) || unichar_isdigit(*s) || *s == '_' || *s == '.')) {
+ ++s;
+ str = s;
+ break;
+ }
+ }
+
+ // begin search in locals dict
+ mp_obj_dict_t *dict = mp_locals_get();
+
+ for (;;) {
+ // get next word in string to complete
+ const char *s_start = str;
+ while (str < top && *str != '.') {
+ ++str;
+ }
+ mp_uint_t s_len = str - s_start;
+
+ if (str < top) {
+ // a complete word, lookup in current dict
+
+ // linear scan over the map slots; keys are qstr strings
+ mp_obj_t obj = MP_OBJ_NULL;
+ for (mp_uint_t i = 0; i < dict->map.alloc; i++) {
+ if (MP_MAP_SLOT_IS_FILLED(&dict->map, i)) {
+ mp_uint_t d_len;
+ const char *d_str = mp_obj_str_get_data(dict->map.table[i].key, &d_len);
+ if (s_len == d_len && strncmp(s_start, d_str, d_len) == 0) {
+ obj = dict->map.table[i].value;
+ break;
+ }
+ }
+ }
+
+ if (obj == MP_OBJ_NULL) {
+ // lookup failed
+ return 0;
+ }
+
+ // found an object of this name; try to get its dict
+ if (MP_OBJ_IS_TYPE(obj, &mp_type_module)) {
+ dict = mp_obj_module_get_globals(obj);
+ } else {
+ // for an instance, complete against its type's locals_dict
+ mp_obj_type_t *type;
+ if (MP_OBJ_IS_TYPE(obj, &mp_type_type)) {
+ type = MP_OBJ_TO_PTR(obj);
+ } else {
+ type = mp_obj_get_type(obj);
+ }
+ if (type->locals_dict != NULL && type->locals_dict->base.type == &mp_type_dict) {
+ dict = type->locals_dict;
+ } else {
+ // obj has no dict
+ return 0;
+ }
+ }
+
+ // skip '.' to move to next word
+ ++str;
+
+ } else {
+ // end of string, do completion on this partial name
+
+ // look for matches
+ int n_found = 0;
+ const char *match_str = NULL;
+ mp_uint_t match_len = 0;
+ for (mp_uint_t i = 0; i < dict->map.alloc; i++) {
+ if (MP_MAP_SLOT_IS_FILLED(&dict->map, i)) {
+ mp_uint_t d_len;
+ const char *d_str = mp_obj_str_get_data(dict->map.table[i].key, &d_len);
+ if (s_len <= d_len && strncmp(s_start, d_str, s_len) == 0) {
+ if (match_str == NULL) {
+ match_str = d_str;
+ match_len = d_len;
+ } else {
+ // search for longest common prefix of match_str and d_str
+ // (assumes these strings are null-terminated)
+ for (mp_uint_t j = s_len; j <= match_len && j <= d_len; ++j) {
+ if (match_str[j] != d_str[j]) {
+ match_len = j;
+ break;
+ }
+ }
+ }
+ ++n_found;
+ }
+ }
+ }
+
+ // nothing found
+ if (n_found == 0) {
+ return 0;
+ }
+
+ // 1 match found, or multiple matches with a common prefix
+ if (n_found == 1 || match_len > s_len) {
+ *compl_str = match_str + s_len;
+ return match_len - s_len;
+ }
+
+ // multiple matches found, print them out
+
+ // candidates are laid out in 16-char columns, 4 columns per line
+ #define WORD_SLOT_LEN (16)
+ #define MAX_LINE_LEN (4 * WORD_SLOT_LEN)
+
+ int line_len = MAX_LINE_LEN; // force a newline for first word
+ for (mp_uint_t i = 0; i < dict->map.alloc; i++) {
+ if (MP_MAP_SLOT_IS_FILLED(&dict->map, i)) {
+ mp_uint_t d_len;
+ const char *d_str = mp_obj_str_get_data(dict->map.table[i].key, &d_len);
+ if (s_len <= d_len && strncmp(s_start, d_str, s_len) == 0) {
+ // gap = spaces needed to reach the next column boundary
+ int gap = (line_len + WORD_SLOT_LEN - 1) / WORD_SLOT_LEN * WORD_SLOT_LEN - line_len;
+ if (gap < 2) {
+ gap += WORD_SLOT_LEN;
+ }
+ if (line_len + gap + d_len <= MAX_LINE_LEN) {
+ // TODO optimise printing of gap?
+ for (int j = 0; j < gap; ++j) {
+ mp_print_str(print, " ");
+ }
+ mp_print_str(print, d_str);
+ line_len += gap + d_len;
+ } else {
+ mp_printf(print, "\n%s", d_str);
+ line_len = d_len;
+ }
+ }
+ }
+ }
+ mp_print_str(print, "\n");
+
+ return (mp_uint_t)(-1); // indicate many matches
+ }
+ }
+}
+
+#endif // MICROPY_HELPER_REPL
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/repl.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,38 @@ +/* + * This file is part of the Micro Python project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013, 2014 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef __MICROPY_INCLUDED_PY_REPL_H__ +#define __MICROPY_INCLUDED_PY_REPL_H__ + +#include "py/mpconfig.h" +#include "py/misc.h" +#include "py/mpprint.h" + +#if MICROPY_HELPER_REPL +bool mp_repl_continue_with_input(const char *input); +mp_uint_t mp_repl_autocomplete(const char *str, mp_uint_t len, const mp_print_t *print, const char **compl_str); +#endif + +#endif // __MICROPY_INCLUDED_PY_REPL_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/ringbuf.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,72 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_RINGBUF_H__
+#define __MICROPY_INCLUDED_PY_RINGBUF_H__
+
+// Simple single-producer/single-consumer byte ring buffer. The buffer is
+// "full" when iput+1 == iget, so capacity is size-1 bytes.
+typedef struct _ringbuf_t {
+ uint8_t *buf;
+ uint16_t size;
+ uint16_t iget;
+ uint16_t iput;
+} ringbuf_t;
+
+// Static initalization:
+// byte buf_array[N];
+// ringbuf_t buf = {buf_array, sizeof(buf_array)};
+
+// Dynamic initialization. This creates root pointer!
+// (the allocated buf must be reachable by the GC — see m_new)
+#define ringbuf_alloc(r, sz) \
+{ \
+ (r)->buf = m_new(uint8_t, sz); \
+ (r)->size = sz; \
+ (r)->iget = (r)->iput = 0; \
+}
+
+// Pop one byte from the ring buffer; returns it (0..255), or -1 if empty.
+static inline int ringbuf_get(ringbuf_t *r) {
+ if (r->iget == r->iput) {
+ return -1;
+ }
+ uint8_t v = r->buf[r->iget++];
+ // wrap the read index
+ if (r->iget >= r->size) {
+ r->iget = 0;
+ }
+ return v;
+}
+
+// Push one byte into the ring buffer; returns 0 on success, -1 if full.
+// The new write index is computed first so the buffer keeps one slot free
+// to distinguish full from empty.
+static inline int ringbuf_put(ringbuf_t *r, uint8_t v) {
+ uint32_t iput_new = r->iput + 1;
+ if (iput_new >= r->size) {
+ iput_new = 0;
+ }
+ if (iput_new == r->iget) {
+ return -1;
+ }
+ r->buf[r->iput] = v;
+ r->iput = iput_new;
+ return 0;
+}
+
+#endif // __MICROPY_INCLUDED_PY_RINGBUF_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/runtime.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,1369 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/mpstate.h"
+#include "py/nlr.h"
+#include "py/parsenum.h"
+#include "py/compile.h"
+#include "py/objstr.h"
+#include "py/objtuple.h"
+#include "py/objlist.h"
+#include "py/objmodule.h"
+#include "py/objgenerator.h"
+#include "py/smallint.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/stackctrl.h"
+#include "py/gc.h"
+
+#if 0 // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#define DEBUG_OP_printf(...) DEBUG_printf(__VA_ARGS__)
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#define DEBUG_OP_printf(...) (void)0
+#endif
+
+// The __main__ module; its globals dict lives in VM state (dict_main).
+const mp_obj_module_t mp_module___main__ = {
+ .base = { &mp_type_module },
+ .name = MP_QSTR___main__,
+ .globals = (mp_obj_dict_t*)&MP_STATE_VM(dict_main),
+};
+
+// Initialise the MicroPython runtime: qstr pool, VM state, built-in modules
+// and the __main__ module. Must be called once before executing any code.
+void mp_init(void) {
+ qstr_init();
+
+ // no pending exceptions to start with
+ MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
+
+#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+ mp_init_emergency_exception_buf();
+#endif
+
+ // call port specific initialization if any
+#ifdef MICROPY_PORT_INIT_FUNC
+ MICROPY_PORT_INIT_FUNC;
+#endif
+
+ // optimization disabled by default
+ MP_STATE_VM(mp_optimise_value) = 0;
+
+ // init global module stuff
+ mp_module_init();
+
+ // initialise the __main__ module
+ mp_obj_dict_init(&MP_STATE_VM(dict_main), 1);
+ mp_obj_dict_store(MP_OBJ_FROM_PTR(&MP_STATE_VM(dict_main)), MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR___main__));
+
+ // locals = globals for outer module (see Objects/frameobject.c/PyFrame_New())
+ MP_STATE_CTX(dict_locals) = MP_STATE_CTX(dict_globals) = &MP_STATE_VM(dict_main);
+
+ #if MICROPY_CAN_OVERRIDE_BUILTINS
+ // start with no extensions to builtins
+ MP_STATE_VM(mp_module_builtins_override_dict) = NULL;
+ #endif
+}
+
+// Tear down the runtime; counterpart of mp_init.
+void mp_deinit(void) {
+ //mp_obj_dict_free(&dict_main);
+ mp_module_deinit();
+
+ // call port specific deinitialization if any
+ // (guarded by the INIT macro so deinit is only attempted when init was)
+#ifdef MICROPY_PORT_INIT_FUNC
+ MICROPY_PORT_DEINIT_FUNC;
+#endif
+}
+
+// Look up `qst` following Python name resolution: locals, then globals,
+// then builtins (the latter two via mp_load_global). Raises NameError if
+// not found (from mp_load_global).
+mp_obj_t mp_load_name(qstr qst) {
+ // logic: search locals, globals, builtins
+ DEBUG_OP_printf("load name %s\n", qstr_str(qst));
+ // If we're at the outer scope (locals == globals), dispatch to load_global right away
+ if (MP_STATE_CTX(dict_locals) != MP_STATE_CTX(dict_globals)) {
+ mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_CTX(dict_locals)->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ return elem->value;
+ }
+ }
+ return mp_load_global(qst);
+}
+
+// Look up `qst` in globals, then (optionally overridden) builtins.
+// Raises NameError when the name is nowhere to be found.
+mp_obj_t mp_load_global(qstr qst) {
+ // logic: search globals, builtins
+ DEBUG_OP_printf("load global %s\n", qstr_str(qst));
+ mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_CTX(dict_globals)->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
+ if (elem == NULL) {
+ #if MICROPY_CAN_OVERRIDE_BUILTINS
+ if (MP_STATE_VM(mp_module_builtins_override_dict) != NULL) {
+ // lookup in additional dynamic table of builtins first
+ elem = mp_map_lookup(&MP_STATE_VM(mp_module_builtins_override_dict)->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ return elem->value;
+ }
+ }
+ #endif
+ elem = mp_map_lookup((mp_map_t*)&mp_module_builtins_globals.map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
+ if (elem == NULL) {
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_NameError,
+ "name not defined"));
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_NameError,
+ "name '%q' is not defined", qst));
+ }
+ }
+ }
+ return elem->value;
+}
+
+// Return the __build_class__ builtin, honouring any override installed in
+// the dynamic builtins-override dict.
+mp_obj_t mp_load_build_class(void) {
+ DEBUG_OP_printf("load_build_class\n");
+ #if MICROPY_CAN_OVERRIDE_BUILTINS
+ if (MP_STATE_VM(mp_module_builtins_override_dict) != NULL) {
+ // lookup in additional dynamic table of builtins first
+ mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_VM(mp_module_builtins_override_dict)->map, MP_OBJ_NEW_QSTR(MP_QSTR___build_class__), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ return elem->value;
+ }
+ }
+ #endif
+ return MP_OBJ_FROM_PTR(&mp_builtin___build_class___obj);
+}
+
+// Bind `qst` to `obj` in the current locals dict.
+void mp_store_name(qstr qst, mp_obj_t obj) {
+ DEBUG_OP_printf("store name %s <- %p\n", qstr_str(qst), obj);
+ mp_obj_dict_store(MP_OBJ_FROM_PTR(MP_STATE_CTX(dict_locals)), MP_OBJ_NEW_QSTR(qst), obj);
+}
+
+// Remove `qst` from the current locals dict (raises KeyError if absent).
+void mp_delete_name(qstr qst) {
+ DEBUG_OP_printf("delete name %s\n", qstr_str(qst));
+ // TODO convert KeyError to NameError if qst not found
+ mp_obj_dict_delete(MP_OBJ_FROM_PTR(MP_STATE_CTX(dict_locals)), MP_OBJ_NEW_QSTR(qst));
+}
+
+// Bind `qst` to `obj` in the current globals dict.
+void mp_store_global(qstr qst, mp_obj_t obj) {
+ DEBUG_OP_printf("store global %s <- %p\n", qstr_str(qst), obj);
+ mp_obj_dict_store(MP_OBJ_FROM_PTR(MP_STATE_CTX(dict_globals)), MP_OBJ_NEW_QSTR(qst), obj);
+}
+
+// Remove `qst` from the current globals dict (raises KeyError if absent).
+void mp_delete_global(qstr qst) {
+ DEBUG_OP_printf("delete global %s\n", qstr_str(qst));
+ // TODO convert KeyError to NameError if qst not found
+ mp_obj_dict_delete(MP_OBJ_FROM_PTR(MP_STATE_CTX(dict_globals)), MP_OBJ_NEW_QSTR(qst));
+}
+
+// Apply unary operator `op` to `arg`. Small ints are handled inline with
+// overflow checking; other types dispatch to type->unary_op. Raises
+// TypeError when the type doesn't support the operator.
+mp_obj_t mp_unary_op(mp_uint_t op, mp_obj_t arg) {
+ DEBUG_OP_printf("unary " UINT_FMT " %p\n", op, arg);
+
+ if (op == MP_UNARY_OP_NOT) {
+ // "not x" is the negative of whether "x" is true per Python semantics
+ return mp_obj_new_bool(mp_obj_is_true(arg) == 0);
+ } else if (MP_OBJ_IS_SMALL_INT(arg)) {
+ mp_int_t val = MP_OBJ_SMALL_INT_VALUE(arg);
+ switch (op) {
+ case MP_UNARY_OP_BOOL:
+ return mp_obj_new_bool(val != 0);
+ case MP_UNARY_OP_HASH:
+ return arg;
+ case MP_UNARY_OP_POSITIVE:
+ return arg;
+ case MP_UNARY_OP_NEGATIVE:
+ // check for overflow
+ // -MP_SMALL_INT_MIN doesn't fit in a small int, so allocate a
+ // full int object (the negation itself presumably fits in
+ // mp_int_t since small ints are narrower — upstream relies on this)
+ if (val == MP_SMALL_INT_MIN) {
+ return mp_obj_new_int(-val);
+ } else {
+ return MP_OBJ_NEW_SMALL_INT(-val);
+ }
+ case MP_UNARY_OP_INVERT:
+ return MP_OBJ_NEW_SMALL_INT(~val);
+ default:
+ assert(0);
+ return arg;
+ }
+ } else if (op == MP_UNARY_OP_HASH && MP_OBJ_IS_STR_OR_BYTES(arg)) {
+ // fast path for hashing str/bytes
+ GET_STR_HASH(arg, h);
+ return MP_OBJ_NEW_SMALL_INT(h);
+ } else {
+ mp_obj_type_t *type = mp_obj_get_type(arg);
+ if (type->unary_op != NULL) {
+ mp_obj_t result = type->unary_op(op, arg);
+ if (result != MP_OBJ_NULL) {
+ return result;
+ }
+ }
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+ "unsupported type for operator"));
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+ "unsupported type for %q: '%s'",
+ mp_unary_op_method_name[op], mp_obj_get_type_str(arg)));
+ }
+ }
+}
+
+// Apply binary operator `op` to (lhs, rhs). Handles `is`, ==/!=, exception
+// matching and small-int arithmetic (with explicit overflow checks per
+// CERT INT32-C) inline; everything else dispatches to the lhs type's
+// binary_op slot. Raises TypeError for unsupported combinations and
+// ZeroDivisionError for division/modulo by zero.
+mp_obj_t mp_binary_op(mp_uint_t op, mp_obj_t lhs, mp_obj_t rhs) {
+ DEBUG_OP_printf("binary " UINT_FMT " %p %p\n", op, lhs, rhs);
+
+ // TODO correctly distinguish inplace operators for mutable objects
+ // lookup logic that CPython uses for +=:
+ // check for implemented +=
+ // then check for implemented +
+ // then check for implemented seq.inplace_concat
+ // then check for implemented seq.concat
+ // then fail
+ // note that list does not implement + or +=, so that inplace_concat is reached first for +=
+
+ // deal with is
+ // (identity: object pointer equality, no type dispatch needed)
+ if (op == MP_BINARY_OP_IS) {
+ return mp_obj_new_bool(lhs == rhs);
+ }
+
+ // deal with == and != for all types
+ if (op == MP_BINARY_OP_EQUAL || op == MP_BINARY_OP_NOT_EQUAL) {
+ if (mp_obj_equal(lhs, rhs)) {
+ if (op == MP_BINARY_OP_EQUAL) {
+ return mp_const_true;
+ } else {
+ return mp_const_false;
+ }
+ } else {
+ if (op == MP_BINARY_OP_EQUAL) {
+ return mp_const_false;
+ } else {
+ return mp_const_true;
+ }
+ }
+ }
+
+ // deal with exception_match for all types
+ if (op == MP_BINARY_OP_EXCEPTION_MATCH) {
+ // rhs must be issubclass(rhs, BaseException)
+ if (mp_obj_is_exception_type(rhs)) {
+ if (mp_obj_exception_match(lhs, rhs)) {
+ return mp_const_true;
+ } else {
+ return mp_const_false;
+ }
+ } else if (MP_OBJ_IS_TYPE(rhs, &mp_type_tuple)) {
+ // except (E1, E2, ...): every element must be an exception type
+ mp_obj_tuple_t *tuple = MP_OBJ_TO_PTR(rhs);
+ for (mp_uint_t i = 0; i < tuple->len; i++) {
+ rhs = tuple->items[i];
+ if (!mp_obj_is_exception_type(rhs)) {
+ goto unsupported_op;
+ }
+ if (mp_obj_exception_match(lhs, rhs)) {
+ return mp_const_true;
+ }
+ }
+ return mp_const_false;
+ }
+ goto unsupported_op;
+ }
+
+ if (MP_OBJ_IS_SMALL_INT(lhs)) {
+ mp_int_t lhs_val = MP_OBJ_SMALL_INT_VALUE(lhs);
+ if (MP_OBJ_IS_SMALL_INT(rhs)) {
+ mp_int_t rhs_val = MP_OBJ_SMALL_INT_VALUE(rhs);
+ // This is a binary operation: lhs_val op rhs_val
+ // We need to be careful to handle overflow; see CERT INT32-C
+ // Operations that can overflow:
+ // + result always fits in mp_int_t, then handled by SMALL_INT check
+ // - result always fits in mp_int_t, then handled by SMALL_INT check
+ // * checked explicitly
+ // / if lhs=MIN and rhs=-1; result always fits in mp_int_t, then handled by SMALL_INT check
+ // % if lhs=MIN and rhs=-1; result always fits in mp_int_t, then handled by SMALL_INT check
+ // << checked explicitly
+ switch (op) {
+ case MP_BINARY_OP_OR:
+ case MP_BINARY_OP_INPLACE_OR: lhs_val |= rhs_val; break;
+ case MP_BINARY_OP_XOR:
+ case MP_BINARY_OP_INPLACE_XOR: lhs_val ^= rhs_val; break;
+ case MP_BINARY_OP_AND:
+ case MP_BINARY_OP_INPLACE_AND: lhs_val &= rhs_val; break;
+ case MP_BINARY_OP_LSHIFT:
+ case MP_BINARY_OP_INPLACE_LSHIFT: {
+ if (rhs_val < 0) {
+ // negative shift not allowed
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "negative shift count"));
+ } else if (rhs_val >= (mp_int_t)BITS_PER_WORD || lhs_val > (MP_SMALL_INT_MAX >> rhs_val) || lhs_val < (MP_SMALL_INT_MIN >> rhs_val)) {
+ // left-shift will overflow, so use higher precision integer
+ lhs = mp_obj_new_int_from_ll(lhs_val);
+ goto generic_binary_op;
+ } else {
+ // use standard precision
+ lhs_val <<= rhs_val;
+ }
+ break;
+ }
+ case MP_BINARY_OP_RSHIFT:
+ case MP_BINARY_OP_INPLACE_RSHIFT:
+ if (rhs_val < 0) {
+ // negative shift not allowed
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "negative shift count"));
+ } else {
+ // standard precision is enough for right-shift
+ if (rhs_val >= (mp_int_t)BITS_PER_WORD) {
+ // Shifting to big amounts is underfined behavior
+ // in C and is CPU-dependent; propagate sign bit.
+ rhs_val = BITS_PER_WORD - 1;
+ }
+ lhs_val >>= rhs_val;
+ }
+ break;
+ case MP_BINARY_OP_ADD:
+ case MP_BINARY_OP_INPLACE_ADD: lhs_val += rhs_val; break;
+ case MP_BINARY_OP_SUBTRACT:
+ case MP_BINARY_OP_INPLACE_SUBTRACT: lhs_val -= rhs_val; break;
+ case MP_BINARY_OP_MULTIPLY:
+ case MP_BINARY_OP_INPLACE_MULTIPLY: {
+
+ // If long long type exists and is larger than mp_int_t, then
+ // we can use the following code to perform overflow-checked multiplication.
+ // Otherwise (eg in x64 case) we must use mp_small_int_mul_overflow.
+ #if 0
+ // compute result using long long precision
+ long long res = (long long)lhs_val * (long long)rhs_val;
+ if (res > MP_SMALL_INT_MAX || res < MP_SMALL_INT_MIN) {
+ // result overflowed SMALL_INT, so return higher precision integer
+ return mp_obj_new_int_from_ll(res);
+ } else {
+ // use standard precision
+ lhs_val = (mp_int_t)res;
+ }
+ #endif
+
+ if (mp_small_int_mul_overflow(lhs_val, rhs_val)) {
+ // use higher precision
+ lhs = mp_obj_new_int_from_ll(lhs_val);
+ goto generic_binary_op;
+ } else {
+ // use standard precision
+ return MP_OBJ_NEW_SMALL_INT(lhs_val * rhs_val);
+ }
+ break;
+ }
+ case MP_BINARY_OP_FLOOR_DIVIDE:
+ case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE:
+ if (rhs_val == 0) {
+ goto zero_division;
+ }
+ lhs_val = mp_small_int_floor_divide(lhs_val, rhs_val);
+ break;
+
+ #if MICROPY_PY_BUILTINS_FLOAT
+ case MP_BINARY_OP_TRUE_DIVIDE:
+ case MP_BINARY_OP_INPLACE_TRUE_DIVIDE:
+ if (rhs_val == 0) {
+ goto zero_division;
+ }
+ return mp_obj_new_float((mp_float_t)lhs_val / (mp_float_t)rhs_val);
+ #endif
+
+ case MP_BINARY_OP_MODULO:
+ case MP_BINARY_OP_INPLACE_MODULO: {
+ if (rhs_val == 0) {
+ goto zero_division;
+ }
+ lhs_val = mp_small_int_modulo(lhs_val, rhs_val);
+ break;
+ }
+
+ case MP_BINARY_OP_POWER:
+ case MP_BINARY_OP_INPLACE_POWER:
+ if (rhs_val < 0) {
+ #if MICROPY_PY_BUILTINS_FLOAT
+ lhs = mp_obj_new_float(lhs_val);
+ goto generic_binary_op;
+ #else
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "negative power with no float support"));
+ #endif
+ } else {
+ // binary exponentiation (square-and-multiply), with overflow
+ // checks before each multiply
+ mp_int_t ans = 1;
+ while (rhs_val > 0) {
+ if (rhs_val & 1) {
+ if (mp_small_int_mul_overflow(ans, lhs_val)) {
+ goto power_overflow;
+ }
+ ans *= lhs_val;
+ }
+ if (rhs_val == 1) {
+ break;
+ }
+ rhs_val /= 2;
+ if (mp_small_int_mul_overflow(lhs_val, lhs_val)) {
+ goto power_overflow;
+ }
+ lhs_val *= lhs_val;
+ }
+ lhs_val = ans;
+ }
+ break;
+
+ power_overflow:
+ // use higher precision
+ lhs = mp_obj_new_int_from_ll(MP_OBJ_SMALL_INT_VALUE(lhs));
+ goto generic_binary_op;
+
+ case MP_BINARY_OP_DIVMOD: {
+ if (rhs_val == 0) {
+ goto zero_division;
+ }
+ // to reduce stack usage we don't pass a temp array of the 2 items
+ mp_obj_tuple_t *tuple = MP_OBJ_TO_PTR(mp_obj_new_tuple(2, NULL));
+ tuple->items[0] = MP_OBJ_NEW_SMALL_INT(mp_small_int_floor_divide(lhs_val, rhs_val));
+ tuple->items[1] = MP_OBJ_NEW_SMALL_INT(mp_small_int_modulo(lhs_val, rhs_val));
+ return MP_OBJ_FROM_PTR(tuple);
+ }
+
+ // NOTE(review): the break after each return below is unreachable
+ case MP_BINARY_OP_LESS: return mp_obj_new_bool(lhs_val < rhs_val); break;
+ case MP_BINARY_OP_MORE: return mp_obj_new_bool(lhs_val > rhs_val); break;
+ case MP_BINARY_OP_LESS_EQUAL: return mp_obj_new_bool(lhs_val <= rhs_val); break;
+ case MP_BINARY_OP_MORE_EQUAL: return mp_obj_new_bool(lhs_val >= rhs_val); break;
+
+ default:
+ goto unsupported_op;
+ }
+ // TODO: We just should make mp_obj_new_int() inline and use that
+ if (MP_SMALL_INT_FITS(lhs_val)) {
+ return MP_OBJ_NEW_SMALL_INT(lhs_val);
+ } else {
+ return mp_obj_new_int(lhs_val);
+ }
+#if MICROPY_PY_BUILTINS_FLOAT
+ } else if (mp_obj_is_float(rhs)) {
+ // small-int op float: delegate to the float implementation
+ mp_obj_t res = mp_obj_float_binary_op(op, lhs_val, rhs);
+ if (res == MP_OBJ_NULL) {
+ goto unsupported_op;
+ } else {
+ return res;
+ }
+#if MICROPY_PY_BUILTINS_COMPLEX
+ } else if (MP_OBJ_IS_TYPE(rhs, &mp_type_complex)) {
+ // small-int op complex: lhs becomes (lhs_val + 0j)
+ mp_obj_t res = mp_obj_complex_binary_op(op, lhs_val, 0, rhs);
+ if (res == MP_OBJ_NULL) {
+ goto unsupported_op;
+ } else {
+ return res;
+ }
+#endif
+#endif
+ }
+ }
+
+ /* deal with `in`
+ *
+ * NOTE `a in b` is `b.__contains__(a)`, hence why the generic dispatch
+ * needs to go below with swapped arguments
+ */
+ if (op == MP_BINARY_OP_IN) {
+ mp_obj_type_t *type = mp_obj_get_type(rhs);
+ if (type->binary_op != NULL) {
+ mp_obj_t res = type->binary_op(op, rhs, lhs);
+ if (res != MP_OBJ_NULL) {
+ return res;
+ }
+ }
+ if (type->getiter != NULL) {
+ /* second attempt, walk the iterator */
+ mp_obj_t iter = mp_getiter(rhs);
+ mp_obj_t next;
+ while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+ if (mp_obj_equal(next, lhs)) {
+ return mp_const_true;
+ }
+ }
+ return mp_const_false;
+ }
+
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+ "object not iterable"));
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+ "'%s' object is not iterable", mp_obj_get_type_str(rhs)));
+ }
+ }
+
+ // generic binary_op supplied by type
+ mp_obj_type_t *type;
+generic_binary_op:
+ type = mp_obj_get_type(lhs);
+ if (type->binary_op != NULL) {
+ mp_obj_t result = type->binary_op(op, lhs, rhs);
+ if (result != MP_OBJ_NULL) {
+ return result;
+ }
+ }
+
+ // TODO implement dispatch for reverse binary ops
+
+unsupported_op:
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+ "unsupported type for operator"));
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+ "unsupported types for %q: '%s', '%s'",
+ mp_binary_op_method_name[op], mp_obj_get_type_str(lhs), mp_obj_get_type_str(rhs)));
+ }
+
+zero_division:
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ZeroDivisionError, "division by zero"));
+}
+
+// Call `fun` with no arguments.
+mp_obj_t mp_call_function_0(mp_obj_t fun) {
+ return mp_call_function_n_kw(fun, 0, 0, NULL);
+}
+
+// Call `fun` with a single positional argument.
+mp_obj_t mp_call_function_1(mp_obj_t fun, mp_obj_t arg) {
+ return mp_call_function_n_kw(fun, 1, 0, &arg);
+}
+
+// Call `fun` with two positional arguments.
+mp_obj_t mp_call_function_2(mp_obj_t fun, mp_obj_t arg1, mp_obj_t arg2) {
+ mp_obj_t args[2];
+ args[0] = arg1;
+ args[1] = arg2;
+ return mp_call_function_n_kw(fun, 2, 0, args);
+}
+
+// args contains, eg: arg0 arg1 key0 value0 key1 value1
+// Generic call: dispatches to the callee type's `call` slot; raises
+// TypeError when the object is not callable.
+mp_obj_t mp_call_function_n_kw(mp_obj_t fun_in, mp_uint_t n_args, mp_uint_t n_kw, const mp_obj_t *args) {
+ // TODO improve this: fun object can specify its type and we parse here the arguments,
+ // passing to the function arrays of fixed and keyword arguments
+
+ DEBUG_OP_printf("calling function %p(n_args=" UINT_FMT ", n_kw=" UINT_FMT ", args=%p)\n", fun_in, n_args, n_kw, args);
+
+ // get the type
+ mp_obj_type_t *type = mp_obj_get_type(fun_in);
+
+ // do the call
+ if (type->call != NULL) {
+ return type->call(fun_in, n_args, n_kw, args);
+ }
+
+ if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+ "object not callable"));
+ } else {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+ "'%s' object is not callable", mp_obj_get_type_str(fun_in)));
+ }
+}
+
+// args contains: fun self/NULL arg(0) ... arg(n_args-2) arg(n_args-1) kw_key(0) kw_val(0) ... kw_key(n_kw-1) kw_val(n_kw-1)
+// if n_args==0 and n_kw==0 then there are only fun and self/NULL
+mp_obj_t mp_call_method_n_kw(mp_uint_t n_args, mp_uint_t n_kw, const mp_obj_t *args) {
+    DEBUG_OP_printf("call method (fun=%p, self=%p, n_args=" UINT_FMT ", n_kw=" UINT_FMT ", args=%p)\n", args[0], args[1], n_args, n_kw, args);
+    // When a self object is present it becomes an extra leading positional
+    // argument; when absent, the self slot is skipped entirely.
+    int self_adjust;
+    if (args[1] == MP_OBJ_NULL) {
+        self_adjust = 0;
+    } else {
+        self_adjust = 1;
+    }
+    return mp_call_function_n_kw(args[0], n_args + self_adjust, n_kw, args + 2 - self_adjust);
+}
+
+// This function only needs to be exposed externally when in stackless mode.
+#if !MICROPY_STACKLESS
+STATIC
+#endif
+// Build, in *out_args, a flat argument array suitable for passing to
+// mp_call_function_n_kw, from the most general mix of Python arguments:
+// fixed positional args, fixed keyword pairs, an optional *pos_seq and an
+// optional **kw_dict (either may be MP_OBJ_NULL).  The resulting
+// out_args->args array is allocated with m_new; the caller must free it
+// with m_del using out_args->n_alloc as the size.
+void mp_call_prepare_args_n_kw_var(bool have_self, mp_uint_t n_args_n_kw, const mp_obj_t *args, mp_call_args_t *out_args) {
+    mp_obj_t fun = *args++;
+    mp_obj_t self = MP_OBJ_NULL;
+    if (have_self) {
+        self = *args++; // may be MP_OBJ_NULL
+    }
+    // n_args_n_kw packs both counts into one word: low byte is the number
+    // of positional args, next byte is the number of keyword pairs.
+    uint n_args = n_args_n_kw & 0xff;
+    uint n_kw = (n_args_n_kw >> 8) & 0xff;
+    mp_obj_t pos_seq = args[n_args + 2 * n_kw]; // may be MP_OBJ_NULL
+    mp_obj_t kw_dict = args[n_args + 2 * n_kw + 1]; // may be MP_OBJ_NULL
+
+    DEBUG_OP_printf("call method var (fun=%p, self=%p, n_args=%u, n_kw=%u, args=%p, seq=%p, dict=%p)\n", fun, self, n_args, n_kw, args, pos_seq, kw_dict);
+
+    // We need to create the following array of objects:
+    // args[0 .. n_args] unpacked(pos_seq) args[n_args .. n_args + 2 * n_kw] unpacked(kw_dict)
+    // TODO: optimize one day to avoid constructing new arg array? Will be hard.
+
+    // The new args array
+    mp_obj_t *args2;
+    uint args2_alloc;
+    uint args2_len = 0;
+
+    // Try to get a hint for the size of the kw_dict
+    uint kw_dict_len = 0;
+    if (kw_dict != MP_OBJ_NULL && MP_OBJ_IS_TYPE(kw_dict, &mp_type_dict)) {
+        kw_dict_len = mp_obj_dict_len(kw_dict);
+    }
+
+    // Extract the pos_seq sequence to the new args array.
+    // Note that it can be arbitrary iterator.
+    if (pos_seq == MP_OBJ_NULL) {
+        // no sequence
+
+        // allocate memory for the new array of args
+        args2_alloc = 1 + n_args + 2 * (n_kw + kw_dict_len);
+        args2 = m_new(mp_obj_t, args2_alloc);
+
+        // copy the self
+        if (self != MP_OBJ_NULL) {
+            args2[args2_len++] = self;
+        }
+
+        // copy the fixed pos args
+        mp_seq_copy(args2 + args2_len, args, n_args, mp_obj_t);
+        args2_len += n_args;
+
+    } else if (MP_OBJ_IS_TYPE(pos_seq, &mp_type_tuple) || MP_OBJ_IS_TYPE(pos_seq, &mp_type_list)) {
+        // optimise the case of a tuple and list
+
+        // get the items
+        mp_uint_t len;
+        mp_obj_t *items;
+        mp_obj_get_array(pos_seq, &len, &items);
+
+        // allocate memory for the new array of args
+        // (exact size is known up front since the sequence length is known)
+        args2_alloc = 1 + n_args + len + 2 * (n_kw + kw_dict_len);
+        args2 = m_new(mp_obj_t, args2_alloc);
+
+        // copy the self
+        if (self != MP_OBJ_NULL) {
+            args2[args2_len++] = self;
+        }
+
+        // copy the fixed and variable position args
+        mp_seq_cat(args2 + args2_len, args, n_args, items, len, mp_obj_t);
+        args2_len += n_args + len;
+
+    } else {
+        // generic iterator
+
+        // allocate memory for the new array of args
+        // (+3 is a small initial headroom; the array grows on demand below)
+        args2_alloc = 1 + n_args + 2 * (n_kw + kw_dict_len) + 3;
+        args2 = m_new(mp_obj_t, args2_alloc);
+
+        // copy the self
+        if (self != MP_OBJ_NULL) {
+            args2[args2_len++] = self;
+        }
+
+        // copy the fixed position args
+        mp_seq_copy(args2 + args2_len, args, n_args, mp_obj_t);
+        args2_len += n_args;
+
+        // extract the variable position args from the iterator,
+        // doubling the array whenever it fills up
+        mp_obj_t iterable = mp_getiter(pos_seq);
+        mp_obj_t item;
+        while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+            if (args2_len >= args2_alloc) {
+                args2 = m_renew(mp_obj_t, args2, args2_alloc, args2_alloc * 2);
+                args2_alloc *= 2;
+            }
+            args2[args2_len++] = item;
+        }
+    }
+
+    // The size of the args2 array now is the number of positional args.
+    uint pos_args_len = args2_len;
+
+    // Copy the fixed kw args.
+    mp_seq_copy(args2 + args2_len, args + n_args, 2 * n_kw, mp_obj_t);
+    args2_len += 2 * n_kw;
+
+    // Extract (key,value) pairs from kw_dict dictionary and append to args2.
+    // Note that it can be arbitrary iterator.
+    if (kw_dict == MP_OBJ_NULL) {
+        // pass
+    } else if (MP_OBJ_IS_TYPE(kw_dict, &mp_type_dict)) {
+        // dictionary
+        mp_map_t *map = mp_obj_dict_get_map(kw_dict);
+        assert(args2_len + 2 * map->used <= args2_alloc); // should have enough, since kw_dict_len is in this case hinted correctly above
+        for (mp_uint_t i = 0; i < map->alloc; i++) {
+            if (MP_MAP_SLOT_IS_FILLED(map, i)) {
+                args2[args2_len++] = map->table[i].key;
+                args2[args2_len++] = map->table[i].value;
+            }
+        }
+    } else {
+        // generic mapping: iterate over its .items() pairs
+        // TODO is calling 'items' on the mapping the correct thing to do here?
+        mp_obj_t dest[2];
+        mp_load_method(kw_dict, MP_QSTR_items, dest);
+        mp_obj_t iterable = mp_getiter(mp_call_method_n_kw(0, 0, dest));
+        mp_obj_t item;
+        while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+            // need room for two more slots (key and value) per pair
+            if (args2_len + 1 >= args2_alloc) {
+                uint new_alloc = args2_alloc * 2;
+                if (new_alloc < 4) {
+                    new_alloc = 4;
+                }
+                args2 = m_renew(mp_obj_t, args2, args2_alloc, new_alloc);
+                args2_alloc = new_alloc;
+            }
+            mp_obj_t *items;
+            mp_obj_get_array_fixed_n(item, 2, &items);
+            args2[args2_len++] = items[0];
+            args2[args2_len++] = items[1];
+        }
+    }
+
+    // fill in the output structure; keyword count is derived from the
+    // number of slots appended after the positional args (2 per pair)
+    out_args->fun = fun;
+    out_args->args = args2;
+    out_args->n_args = pos_args_len;
+    out_args->n_kw = (args2_len - pos_args_len) / 2;
+    out_args->n_alloc = args2_alloc;
+}
+
+// Call a function/method given the most general mix of Python argument
+// types (fixed args plus optional *seq and **dict); builds a temporary
+// flat argument array, performs the call, then frees the array.
+mp_obj_t mp_call_method_n_kw_var(bool have_self, mp_uint_t n_args_n_kw, const mp_obj_t *args) {
+    mp_call_args_t prepared;
+    mp_call_prepare_args_n_kw_var(have_self, n_args_n_kw, args, &prepared);
+
+    mp_obj_t ret = mp_call_function_n_kw(prepared.fun, prepared.n_args, prepared.n_kw, prepared.args);
+
+    // release the temporary argument array built above
+    m_del(mp_obj_t, prepared.args, prepared.n_alloc);
+    return ret;
+}
+
+// unpacked items are stored in reverse order into the array pointed to by items
+// (reversed so the VM can push them onto its stack in the natural order).
+// Raises ValueError if seq_in yields fewer or more than 'num' items.
+void mp_unpack_sequence(mp_obj_t seq_in, mp_uint_t num, mp_obj_t *items) {
+    mp_uint_t seq_len;
+    if (MP_OBJ_IS_TYPE(seq_in, &mp_type_tuple) || MP_OBJ_IS_TYPE(seq_in, &mp_type_list)) {
+        // fast path: tuple/list expose their items array directly
+        mp_obj_t *seq_items;
+        if (MP_OBJ_IS_TYPE(seq_in, &mp_type_tuple)) {
+            mp_obj_tuple_get(seq_in, &seq_len, &seq_items);
+        } else {
+            mp_obj_list_get(seq_in, &seq_len, &seq_items);
+        }
+        if (seq_len < num) {
+            goto too_short;
+        } else if (seq_len > num) {
+            goto too_long;
+        }
+        for (mp_uint_t i = 0; i < num; i++) {
+            items[i] = seq_items[num - 1 - i];
+        }
+    } else {
+        // generic path: any iterable
+        mp_obj_t iterable = mp_getiter(seq_in);
+
+        for (seq_len = 0; seq_len < num; seq_len++) {
+            mp_obj_t el = mp_iternext(iterable);
+            if (el == MP_OBJ_STOP_ITERATION) {
+                goto too_short;
+            }
+            items[num - 1 - seq_len] = el;
+        }
+        // the iterator must now be exhausted, else there were too many items
+        if (mp_iternext(iterable) != MP_OBJ_STOP_ITERATION) {
+            goto too_long;
+        }
+    }
+    return;
+
+// nlr_raise does not return, so too_short never falls through to too_long
+too_short:
+    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+            "wrong number of values to unpack"));
+    } else {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError,
+            "need more than %d values to unpack", (int)seq_len));
+    }
+too_long:
+    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+            "wrong number of values to unpack"));
+    } else {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError,
+            "too many values to unpack (expected %d)", (int)num));
+    }
+}
+
+// unpacked items are stored in reverse order into the array pointed to by items
+// Extended unpack for "a, *b, c = seq": num_in packs the counts — low byte
+// is the number of targets before the starred one, next byte the number
+// after it.  The starred target receives a new list of the leftover items.
+void mp_unpack_ex(mp_obj_t seq_in, mp_uint_t num_in, mp_obj_t *items) {
+    mp_uint_t num_left = num_in & 0xff;
+    mp_uint_t num_right = (num_in >> 8) & 0xff;
+    DEBUG_OP_printf("unpack ex " UINT_FMT " " UINT_FMT "\n", num_left, num_right);
+    mp_uint_t seq_len;
+    if (MP_OBJ_IS_TYPE(seq_in, &mp_type_tuple) || MP_OBJ_IS_TYPE(seq_in, &mp_type_list)) {
+        // fast path: tuple/list expose their items array directly
+        mp_obj_t *seq_items;
+        if (MP_OBJ_IS_TYPE(seq_in, &mp_type_tuple)) {
+            mp_obj_tuple_get(seq_in, &seq_len, &seq_items);
+        } else {
+            if (num_left == 0 && num_right == 0) {
+                // *a, = b # sets a to b if b is a list
+                items[0] = seq_in;
+                return;
+            }
+            mp_obj_list_get(seq_in, &seq_len, &seq_items);
+        }
+        if (seq_len < num_left + num_right) {
+            goto too_short;
+        }
+        // items[] is filled in reverse: right targets first, then the
+        // middle list, then the left targets
+        for (mp_uint_t i = 0; i < num_right; i++) {
+            items[i] = seq_items[seq_len - 1 - i];
+        }
+        items[num_right] = mp_obj_new_list(seq_len - num_left - num_right, seq_items + num_left);
+        for (mp_uint_t i = 0; i < num_left; i++) {
+            items[num_right + 1 + i] = seq_items[num_left - 1 - i];
+        }
+    } else {
+        // Generic iterable; this gets a bit messy: we unpack known left length to the
+        // items destination array, then the rest to a dynamically created list. Once the
+        // iterable is exhausted, we take from this list for the right part of the items.
+        // TODO Improve to waste less memory in the dynamically created list.
+        mp_obj_t iterable = mp_getiter(seq_in);
+        mp_obj_t item;
+        for (seq_len = 0; seq_len < num_left; seq_len++) {
+            item = mp_iternext(iterable);
+            if (item == MP_OBJ_STOP_ITERATION) {
+                goto too_short;
+            }
+            items[num_left + num_right + 1 - 1 - seq_len] = item;
+        }
+        // drain the remaining items into a scratch list
+        mp_obj_list_t *rest = MP_OBJ_TO_PTR(mp_obj_new_list(0, NULL));
+        while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+            mp_obj_list_append(MP_OBJ_FROM_PTR(rest), item);
+        }
+        if (rest->len < num_right) {
+            goto too_short;
+        }
+        // move the trailing num_right items out of the scratch list into
+        // the right-hand targets, then shrink the list to be the starred value
+        items[num_right] = MP_OBJ_FROM_PTR(rest);
+        for (mp_uint_t i = 0; i < num_right; i++) {
+            items[num_right - 1 - i] = rest->items[rest->len - num_right + i];
+        }
+        mp_obj_list_set_len(MP_OBJ_FROM_PTR(rest), rest->len - num_right);
+    }
+    return;
+
+too_short:
+    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+            "wrong number of values to unpack"));
+    } else {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError,
+            "need more than %d values to unpack", (int)seq_len));
+    }
+}
+
+// Load attribute 'attr' from 'base'; plain attributes are returned as-is,
+// methods are wrapped into a bound-method object.
+mp_obj_t mp_load_attr(mp_obj_t base, qstr attr) {
+    DEBUG_OP_printf("load attr %p.%s\n", base, qstr_str(attr));
+    // use load_method
+    mp_obj_t dest[2];
+    mp_load_method(base, attr, dest);
+    if (dest[1] != MP_OBJ_NULL) {
+        // load_method returned a method, so build a bound method object
+        return mp_obj_new_bound_meth(dest[0], dest[1]);
+    }
+    // load_method returned just a normal attribute
+    return dest[0];
+}
+
+#if MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG
+
+// The following "checked fun" type is local to the mp_convert_member_lookup
+// function, and serves to check that the first argument to a builtin function
+// has the correct type.
+
+typedef struct _mp_obj_checked_fun_t {
+    mp_obj_base_t base;
+    const mp_obj_type_t *type; // required type of the first argument
+    mp_obj_t fun;              // the wrapped builtin function
+} mp_obj_checked_fun_t;
+
+// Call slot for the checked-fun wrapper: verify the first positional
+// argument has the expected type, then forward the call to the wrapped
+// function.  Raises TypeError on a type mismatch.
+STATIC mp_obj_t checked_fun_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_obj_checked_fun_t *self = MP_OBJ_TO_PTR(self_in);
+    if (n_args > 0) {
+        const mp_obj_type_t *arg0_type = mp_obj_get_type(args[0]);
+        if (arg0_type != self->type) {
+            if (MICROPY_ERROR_REPORTING != MICROPY_ERROR_REPORTING_DETAILED) {
+                nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+                    "argument has wrong type"));
+            } else {
+                nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+                    "argument should be a '%q' not a '%q'", self->type->name, arg0_type->name));
+            }
+        }
+    }
+    return mp_call_function_n_kw(self->fun, n_args, n_kw, args);
+}
+
+STATIC const mp_obj_type_t mp_type_checked_fun = {
+    { &mp_type_type },
+    .name = MP_QSTR_function,
+    .call = checked_fun_call,
+};
+
+// Allocate a new checked-fun wrapper binding 'fun' to required type 'type'.
+STATIC mp_obj_t mp_obj_new_checked_fun(const mp_obj_type_t *type, mp_obj_t fun) {
+    mp_obj_checked_fun_t *o = m_new_obj(mp_obj_checked_fun_t);
+    o->base.type = &mp_type_checked_fun;
+    o->type = type;
+    o->fun = fun;
+    return MP_OBJ_FROM_PTR(o);
+}
+
+#endif // MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG
+
+// Given a member that was extracted from an instance, convert it correctly
+// and put the result in the dest[] array for a possible method call.
+// Conversion means dealing with static/class methods, callables, and values.
+// see http://docs.python.org/3/howto/descriptor.html
+// On return: dest[0] is the attribute/function, and dest[1] is the bound
+// self (or class for classmethods), or MP_OBJ_NULL for a plain value.
+void mp_convert_member_lookup(mp_obj_t self, const mp_obj_type_t *type, mp_obj_t member, mp_obj_t *dest) {
+    if (MP_OBJ_IS_TYPE(member, &mp_type_staticmethod)) {
+        // return just the function
+        dest[0] = ((mp_obj_static_class_method_t*)MP_OBJ_TO_PTR(member))->fun;
+    } else if (MP_OBJ_IS_TYPE(member, &mp_type_classmethod)) {
+        // return a bound method, with self being the type of this object
+        // this type should be the type of the original instance, not the base
+        // type (which is what is passed in the 'type' argument to this function)
+        if (self != MP_OBJ_NULL) {
+            type = mp_obj_get_type(self);
+        }
+        dest[0] = ((mp_obj_static_class_method_t*)MP_OBJ_TO_PTR(member))->fun;
+        dest[1] = MP_OBJ_FROM_PTR(type);
+    } else if (MP_OBJ_IS_TYPE(member, &mp_type_type)) {
+        // Don't try to bind types (even though they're callable)
+        dest[0] = member;
+    } else if (MP_OBJ_IS_FUN(member)
+        || (MP_OBJ_IS_OBJ(member)
+            && (((mp_obj_base_t*)MP_OBJ_TO_PTR(member))->type->name == MP_QSTR_closure
+                || ((mp_obj_base_t*)MP_OBJ_TO_PTR(member))->type->name == MP_QSTR_generator))) {
+        // only functions, closures and generators objects can be bound to self
+        #if MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG
+        if (self == MP_OBJ_NULL && mp_obj_get_type(member) == &mp_type_fun_builtin) {
+            // we extracted a builtin method without a first argument, so we must
+            // wrap this function in a type checker
+            dest[0] = mp_obj_new_checked_fun(type, member);
+        } else
+        #endif
+        {
+            // return a bound method, with self being this object
+            dest[0] = member;
+            dest[1] = self;
+        }
+    } else {
+        // class member is a value, so just return that value
+        dest[0] = member;
+    }
+}
+
+// no attribute found, returns: dest[0] == MP_OBJ_NULL, dest[1] == MP_OBJ_NULL
+// normal attribute found, returns: dest[0] == <attribute>, dest[1] == MP_OBJ_NULL
+// method attribute found, returns: dest[0] == <method>, dest[1] == <self>
+// Lookup order: built-in names (__class__, __next__), then the type's own
+// 'attr' slot, then a generic search of the type's locals_dict.
+void mp_load_method_maybe(mp_obj_t obj, qstr attr, mp_obj_t *dest) {
+    // clear output to indicate no attribute/method found yet
+    dest[0] = MP_OBJ_NULL;
+    dest[1] = MP_OBJ_NULL;
+
+    // get the type
+    mp_obj_type_t *type = mp_obj_get_type(obj);
+
+    // look for built-in names
+    if (0) {
+#if MICROPY_CPYTHON_COMPAT
+    } else if (attr == MP_QSTR___class__) {
+        // a.__class__ is equivalent to type(a)
+        dest[0] = MP_OBJ_FROM_PTR(type);
+#endif
+
+    } else if (attr == MP_QSTR___next__ && type->iternext != NULL) {
+        // synthesize a __next__ method via the builtin next()
+        dest[0] = MP_OBJ_FROM_PTR(&mp_builtin_next_obj);
+        dest[1] = obj;
+
+    } else if (type->attr != NULL) {
+        // this type can do its own load, so call it
+        type->attr(obj, attr, dest);
+
+    } else if (type->locals_dict != NULL) {
+        // generic method lookup
+        // this is a lookup in the object (ie not class or type)
+        assert(type->locals_dict->base.type == &mp_type_dict); // Micro Python restriction, for now
+        mp_map_t *locals_map = &type->locals_dict->map;
+        mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP);
+        if (elem != NULL) {
+            // bind the found member appropriately (method vs plain value)
+            mp_convert_member_lookup(obj, type, elem->value, dest);
+        }
+    }
+}
+
+// Like mp_load_method_maybe, but raises AttributeError when the attribute
+// is not found instead of returning MP_OBJ_NULL in dest[0].
+void mp_load_method(mp_obj_t base, qstr attr, mp_obj_t *dest) {
+    DEBUG_OP_printf("load method %p.%s\n", base, qstr_str(attr));
+
+    mp_load_method_maybe(base, attr, dest);
+
+    if (dest[0] == MP_OBJ_NULL) {
+        // no attribute/method called attr
+        if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+            nlr_raise(mp_obj_new_exception_msg(&mp_type_AttributeError,
+                "no such attribute"));
+        } else {
+            // following CPython, we give a more detailed error message for type objects
+            if (MP_OBJ_IS_TYPE(base, &mp_type_type)) {
+                nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_AttributeError,
+                    "type object '%q' has no attribute '%q'",
+                    ((mp_obj_type_t*)MP_OBJ_TO_PTR(base))->name, attr));
+            } else {
+                nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_AttributeError,
+                    "'%s' object has no attribute '%q'",
+                    mp_obj_get_type_str(base), attr));
+            }
+        }
+    }
+}
+
+// Store 'value' into attribute 'attr' of 'base' via the type's attr slot;
+// raises AttributeError if the type does not support the store.
+void mp_store_attr(mp_obj_t base, qstr attr, mp_obj_t value) {
+    DEBUG_OP_printf("store attr %p.%s <- %p\n", base, qstr_str(attr), value);
+    mp_obj_type_t *type = mp_obj_get_type(base);
+    if (type->attr != NULL) {
+        // dest[0] == MP_OBJ_SENTINEL signals a store operation to the
+        // attr handler; the handler clears it to MP_OBJ_NULL on success
+        mp_obj_t dest[2] = {MP_OBJ_SENTINEL, value};
+        type->attr(base, attr, dest);
+        if (dest[0] == MP_OBJ_NULL) {
+            // success
+            return;
+        }
+    }
+    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_AttributeError,
+            "no such attribute"));
+    } else {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_AttributeError,
+            "'%s' object has no attribute '%q'",
+            mp_obj_get_type_str(base), attr));
+    }
+}
+
+// Get an iterator for o_in: first via the type's native getiter slot
+// (corresponds to __iter__), then by falling back to a __getitem__-based
+// iterator.  Raises TypeError if neither exists.
+mp_obj_t mp_getiter(mp_obj_t o_in) {
+    assert(o_in);
+
+    // check for native getiter (corresponds to __iter__)
+    mp_obj_type_t *type = mp_obj_get_type(o_in);
+    if (type->getiter != NULL) {
+        mp_obj_t iter = type->getiter(o_in);
+        if (iter != MP_OBJ_NULL) {
+            return iter;
+        }
+    }
+
+    // check for __getitem__
+    mp_obj_t dest[2];
+    mp_load_method_maybe(o_in, MP_QSTR___getitem__, dest);
+    if (dest[0] != MP_OBJ_NULL) {
+        // __getitem__ exists, create and return an iterator
+        return mp_obj_new_getitem_iter(dest);
+    }
+
+    // object not iterable
+    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+            "object not iterable"));
+    } else {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+            "'%s' object is not iterable", mp_obj_get_type_str(o_in)));
+    }
+}
+
+// may return MP_OBJ_STOP_ITERATION as an optimisation instead of raise StopIteration()
+// may also raise StopIteration()
+mp_obj_t mp_iternext_allow_raise(mp_obj_t o_in) {
+    mp_obj_type_t *type = mp_obj_get_type(o_in);
+
+    // Fast path: the type implements the native iternext slot.
+    if (type->iternext != NULL) {
+        return type->iternext(o_in);
+    }
+
+    // Slow path: look for a __next__ method on the object.
+    mp_obj_t dest[2];
+    mp_load_method_maybe(o_in, MP_QSTR___next__, dest);
+    if (dest[0] != MP_OBJ_NULL) {
+        // __next__ exists, call it and return its result
+        return mp_call_method_n_kw(0, 0, dest);
+    }
+
+    // Neither slot nor method: the object is not an iterator.
+    if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+            "object not an iterator"));
+    } else {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+            "'%s' object is not an iterator", mp_obj_get_type_str(o_in)));
+    }
+}
+
+// will always return MP_OBJ_STOP_ITERATION instead of raising StopIteration() (or any subclass thereof)
+// may raise other exceptions
+mp_obj_t mp_iternext(mp_obj_t o_in) {
+    MP_STACK_CHECK(); // enumerate, filter, map and zip can recursively call mp_iternext
+    mp_obj_type_t *type = mp_obj_get_type(o_in);
+    if (type->iternext != NULL) {
+        // native iternext already uses the MP_OBJ_STOP_ITERATION convention
+        return type->iternext(o_in);
+    } else {
+        // check for __next__ method
+        mp_obj_t dest[2];
+        mp_load_method_maybe(o_in, MP_QSTR___next__, dest);
+        if (dest[0] != MP_OBJ_NULL) {
+            // __next__ exists, call it and return its result;
+            // catch StopIteration (and subclasses) via an nlr frame and
+            // translate it to the MP_OBJ_STOP_ITERATION sentinel
+            nlr_buf_t nlr;
+            if (nlr_push(&nlr) == 0) {
+                mp_obj_t ret = mp_call_method_n_kw(0, 0, dest);
+                nlr_pop();
+                return ret;
+            } else {
+                if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(((mp_obj_base_t*)nlr.ret_val)->type), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
+                    return MP_OBJ_STOP_ITERATION;
+                } else {
+                    // not StopIteration: re-raise to the outer handler
+                    nlr_jump(nlr.ret_val);
+                }
+            }
+        } else {
+            if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
+                nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
+                    "object not an iterator"));
+            } else {
+                nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
+                    "'%s' object is not an iterator", mp_obj_get_type_str(o_in)));
+            }
+        }
+    }
+}
+
+// TODO: Unclear what to do with StopIterarion exception here.
+// Resume a generator-like object: either send it a value (send_value !=
+// MP_OBJ_NULL) or throw an exception into it (throw_value != MP_OBJ_NULL);
+// exactly one of the two must be given.  *ret_val receives the
+// yielded/returned value and the return kind tells the caller whether the
+// object yielded, finished normally, or propagates an exception.
+mp_vm_return_kind_t mp_resume(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t throw_value, mp_obj_t *ret_val) {
+    assert((send_value != MP_OBJ_NULL) ^ (throw_value != MP_OBJ_NULL));
+    mp_obj_type_t *type = mp_obj_get_type(self_in);
+
+    // native generators have a dedicated resume implementation
+    if (type == &mp_type_gen_instance) {
+        return mp_obj_gen_resume(self_in, send_value, throw_value, ret_val);
+    }
+
+    // a plain iterator "resumed" with None: just fetch its next item
+    if (type->iternext != NULL && send_value == mp_const_none) {
+        mp_obj_t ret = type->iternext(self_in);
+        if (ret != MP_OBJ_STOP_ITERATION) {
+            *ret_val = ret;
+            return MP_VM_RETURN_YIELD;
+        } else {
+            // Emulate raise StopIteration()
+            // Special case, handled in vm.c
+            *ret_val = MP_OBJ_NULL;
+            return MP_VM_RETURN_NORMAL;
+        }
+    }
+
+    mp_obj_t dest[3]; // Reserve slot for send() arg
+
+    if (send_value == mp_const_none) {
+        mp_load_method_maybe(self_in, MP_QSTR___next__, dest);
+        if (dest[0] != MP_OBJ_NULL) {
+            *ret_val = mp_call_method_n_kw(0, 0, dest);
+            return MP_VM_RETURN_YIELD;
+        }
+    }
+
+    if (send_value != MP_OBJ_NULL) {
+        mp_load_method(self_in, MP_QSTR_send, dest);
+        dest[2] = send_value;
+        *ret_val = mp_call_method_n_kw(1, 0, dest);
+        return MP_VM_RETURN_YIELD;
+    }
+
+    if (throw_value != MP_OBJ_NULL) {
+        if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(throw_value)), MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) {
+            mp_load_method_maybe(self_in, MP_QSTR_close, dest);
+            if (dest[0] != MP_OBJ_NULL) {
+                // TODO: Exceptions raised in close() are not propagated,
+                // printed to sys.stderr
+                *ret_val = mp_call_method_n_kw(0, 0, dest);
+                // We assume one can't "yield" from close()
+                return MP_VM_RETURN_NORMAL;
+            }
+        }
+        mp_load_method_maybe(self_in, MP_QSTR_throw, dest);
+        if (dest[0] != MP_OBJ_NULL) {
+            // BUGFIX: mp_call_method_n_kw expects {method, self, arg};
+            // previously &throw_value was passed, which treated the thrown
+            // object itself as the callable and read past its storage.
+            // Pass the thrown value via the reserved dest[2] slot, matching
+            // the send() path above.
+            dest[2] = throw_value;
+            *ret_val = mp_call_method_n_kw(1, 0, dest);
+            // If .throw() method returned, we assume it's value to yield
+            // - any exception would be thrown with nlr_raise().
+            return MP_VM_RETURN_YIELD;
+        }
+        // If there's nowhere to throw exception into, then we assume that object
+        // is just incapable to handle it, so any exception thrown into it
+        // will be propagated up. This behavior is approved by test_pep380.py
+        // test_delegation_of_close_to_non_generator(),
+        // test_delegating_throw_to_non_generator()
+        *ret_val = throw_value;
+        return MP_VM_RETURN_EXCEPTION;
+    }
+
+    assert(0);
+    return MP_VM_RETURN_NORMAL; // Should be unreachable
+}
+
+// Given the operand of a 'raise' statement, produce the exception instance
+// that the caller will actually raise.
+mp_obj_t mp_make_raise_obj(mp_obj_t o) {
+    DEBUG_printf("raise %p\n", o);
+    if (mp_obj_is_exception_type(o)) {
+        // o is an exception type (it is derived from BaseException (or is BaseException))
+        // create and return a new exception instance by calling o
+        // TODO could have an option to disable traceback, then builtin exceptions (eg TypeError)
+        // could have const instances in ROM which we return here instead
+        return mp_call_function_n_kw(o, 0, 0, NULL);
+    }
+    if (mp_obj_is_exception_instance(o)) {
+        // o is an instance of an exception, so use it as the exception
+        return o;
+    }
+    // o cannot be used as an exception, so return a type error (which will be raised by the caller)
+    return mp_obj_new_exception_msg(&mp_type_TypeError, "exceptions must derive from BaseException");
+}
+
+// Import module 'name', forwarding to the builtin __import__ implementation.
+mp_obj_t mp_import_name(qstr name, mp_obj_t fromlist, mp_obj_t level) {
+    DEBUG_printf("import name '%s' level=%d\n", qstr_str(name), MP_OBJ_SMALL_INT_VALUE(level));
+
+    // build args array
+    mp_obj_t args[5] = {
+        MP_OBJ_NEW_QSTR(name),
+        mp_const_none, // TODO should be globals
+        mp_const_none, // TODO should be locals
+        fromlist,
+        level, // must be 0; we don't yet support other values
+    };
+
+    // TODO lookup __import__ and call that instead of going straight to builtin implementation
+    return mp_builtin___import__(5, args);
+}
+
+// Implement "from module import name": look the name up on the module, and
+// if not found there, try importing it as a submodule of a package.
+// Raises ImportError on failure.
+mp_obj_t mp_import_from(mp_obj_t module, qstr name) {
+    DEBUG_printf("import from %p %s\n", module, qstr_str(name));
+
+    mp_obj_t dest[2];
+
+    mp_load_method_maybe(module, name, dest);
+
+    if (dest[1] != MP_OBJ_NULL) {
+        // Hopefully we can't import bound method from an object
+// NOTE: this label sits inside the if-body and is also reached by the
+// goto below when the module is not a package
+import_error:
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ImportError, "cannot import name %q", name));
+    }
+
+    if (dest[0] != MP_OBJ_NULL) {
+        return dest[0];
+    }
+
+    // See if it's a package, then can try FS import
+    if (!mp_obj_is_package(module)) {
+        goto import_error;
+    }
+
+    mp_load_method_maybe(module, MP_QSTR___name__, dest);
+    mp_uint_t pkg_name_len;
+    const char *pkg_name = mp_obj_str_get_data(dest[0], &pkg_name_len);
+
+    // build the dotted name "<pkg>.<name>" on the stack (alloca) and
+    // intern it as a qstr for the import call
+    const uint dot_name_len = pkg_name_len + 1 + qstr_len(name);
+    char *dot_name = alloca(dot_name_len);
+    memcpy(dot_name, pkg_name, pkg_name_len);
+    dot_name[pkg_name_len] = '.';
+    memcpy(dot_name + pkg_name_len + 1, qstr_str(name), qstr_len(name));
+    qstr dot_name_q = qstr_from_strn(dot_name, dot_name_len);
+
+    mp_obj_t args[5];
+    args[0] = MP_OBJ_NEW_QSTR(dot_name_q);
+    args[1] = mp_const_none; // TODO should be globals
+    args[2] = mp_const_none; // TODO should be locals
+    args[3] = mp_const_true; // Pass sentinel "non empty" value to force returning of leaf module
+    args[4] = MP_OBJ_NEW_SMALL_INT(0);
+
+    // TODO lookup __import__ and call that instead of going straight to builtin implementation
+    return mp_builtin___import__(5, args);
+}
+
+// Implement "from module import *": copy the module's public globals
+// (names not starting with an underscore) into the current scope.
+void mp_import_all(mp_obj_t module) {
+    DEBUG_printf("import all %p\n", module);
+
+    // TODO: Support __all__
+    mp_map_t *globals_map = mp_obj_dict_get_map(MP_OBJ_FROM_PTR(mp_obj_module_get_globals(module)));
+    for (mp_uint_t slot = 0; slot < globals_map->alloc; slot++) {
+        if (!MP_MAP_SLOT_IS_FILLED(globals_map, slot)) {
+            continue;
+        }
+        qstr name = MP_OBJ_QSTR_VALUE(globals_map->table[slot].key);
+        // underscore-prefixed names are private and not star-imported
+        if (*qstr_str(name) != '_') {
+            mp_store_name(name, globals_map->table[slot].value);
+        }
+    }
+}
+
+#if MICROPY_ENABLE_COMPILER
+
+// this is implemented in this file so it can optimise access to locals/globals
+// Parse and compile the source in 'lex', then execute it with the given
+// globals/locals dicts active; if compiling only (globals == NULL with
+// MICROPY_PY_BUILTINS_COMPILE), return the module function instead.
+// The previous globals/locals context is restored on both the success and
+// the exception path.
+mp_obj_t mp_parse_compile_execute(mp_lexer_t *lex, mp_parse_input_kind_t parse_input_kind, mp_obj_dict_t *globals, mp_obj_dict_t *locals) {
+    // save context
+    // (volatile: these locals must survive the nlr longjmp on the
+    // exception path below)
+    mp_obj_dict_t *volatile old_globals = mp_globals_get();
+    mp_obj_dict_t *volatile old_locals = mp_locals_get();
+
+    // set new context
+    mp_globals_set(globals);
+    mp_locals_set(locals);
+
+    nlr_buf_t nlr;
+    if (nlr_push(&nlr) == 0) {
+        qstr source_name = lex->source_name;
+        mp_parse_tree_t parse_tree = mp_parse(lex, parse_input_kind);
+        mp_obj_t module_fun = mp_compile(&parse_tree, source_name, MP_EMIT_OPT_NONE, false);
+
+        mp_obj_t ret;
+        if (MICROPY_PY_BUILTINS_COMPILE && globals == NULL) {
+            // for compile only, return value is the module function
+            ret = module_fun;
+        } else {
+            // execute module function and get return value
+            ret = mp_call_function_0(module_fun);
+        }
+
+        // finish nlr block, restore context and return value
+        nlr_pop();
+        mp_globals_set(old_globals);
+        mp_locals_set(old_locals);
+        return ret;
+    } else {
+        // exception; restore context and re-raise same exception
+        mp_globals_set(old_globals);
+        mp_locals_set(old_locals);
+        nlr_jump(nlr.ret_val);
+    }
+}
+
+#endif // MICROPY_ENABLE_COMPILER
+
+// Called when a heap allocation of num_bytes fails; always raises
+// MemoryError via nlr_raise and so never actually returns a value.
+// Note: the #if/#endif deliberately splice an extra 'else if' branch into
+// the chain when the garbage collector is enabled — do not reformat.
+void *m_malloc_fail(size_t num_bytes) {
+    DEBUG_printf("memory allocation failed, allocating %u bytes\n", (uint)num_bytes);
+    if (0) {
+        // dummy
+        #if MICROPY_ENABLE_GC
+    } else if (gc_is_locked()) {
+        // allocation attempted while the GC is locked (eg in an
+        // interrupt handler): report the specific reason
+        nlr_raise(mp_obj_new_exception_msg(&mp_type_MemoryError,
+            "memory allocation failed, heap is locked"));
+        #endif
+    } else {
+        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_MemoryError,
+            "memory allocation failed, allocating %u bytes", (uint)num_bytes));
+    }
+}
+
+// Raise NotImplementedError with the given message; never returns.
+NORETURN void mp_not_implemented(const char *msg) {
+    nlr_raise(mp_obj_new_exception_msg(&mp_type_NotImplementedError, msg));
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/runtime.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,152 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_RUNTIME_H__
+#define __MICROPY_INCLUDED_PY_RUNTIME_H__
+
+#include "py/mpstate.h"
+#include "py/obj.h"
+
+// How a VM/generator resume finished: fell off the end / returned,
+// yielded a value, or propagated an exception.
+typedef enum {
+    MP_VM_RETURN_NORMAL,
+    MP_VM_RETURN_YIELD,
+    MP_VM_RETURN_EXCEPTION,
+} mp_vm_return_kind_t;
+
+// Flags describing one entry of an mp_arg_t allowed-argument table:
+// low bits select the value kind, high bits the argument policy.
+typedef enum {
+    MP_ARG_BOOL = 0x001,
+    MP_ARG_INT = 0x002,
+    MP_ARG_OBJ = 0x003,
+    MP_ARG_KIND_MASK = 0x0ff,
+    MP_ARG_REQUIRED = 0x100,
+    MP_ARG_KW_ONLY = 0x200,
+} mp_arg_flag_t;
+
+// Parsed argument value; which member is valid depends on the
+// MP_ARG_* kind flag of the corresponding mp_arg_t entry.
+typedef union _mp_arg_val_t {
+    bool u_bool;
+    mp_int_t u_int;
+    mp_obj_t u_obj;
+    mp_rom_obj_t u_rom_obj;
+} mp_arg_val_t;
+
+// One allowed argument for mp_arg_parse_all: its name, kind/policy
+// flags, and the default used when the argument is not supplied.
+typedef struct _mp_arg_t {
+    qstr qst;
+    mp_uint_t flags;
+    mp_arg_val_t defval;
+} mp_arg_t;
+
+// defined in objtype.c
+extern const qstr mp_unary_op_method_name[];
+extern const qstr mp_binary_op_method_name[];
+
+void mp_init(void);
+void mp_deinit(void);
+
+// extra printing method specifically for mp_obj_t's which are integral type
+int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, int base, int base_char, int flags, char fill, int width, int prec);
+
+void mp_arg_check_num(size_t n_args, size_t n_kw, size_t n_args_min, size_t n_args_max, bool takes_kw);
+void mp_arg_parse_all(size_t n_pos, const mp_obj_t *pos, mp_map_t *kws, size_t n_allowed, const mp_arg_t *allowed, mp_arg_val_t *out_vals);
+void mp_arg_parse_all_kw_array(size_t n_pos, size_t n_kw, const mp_obj_t *args, size_t n_allowed, const mp_arg_t *allowed, mp_arg_val_t *out_vals);
+NORETURN void mp_arg_error_terse_mismatch(void);
+NORETURN void mp_arg_error_unimpl_kw(void);
+
+// Accessors for the interpreter's current locals/globals dicts, which are
+// kept in the per-context VM state (MP_STATE_CTX).
+static inline mp_obj_dict_t *mp_locals_get(void) { return MP_STATE_CTX(dict_locals); }
+static inline void mp_locals_set(mp_obj_dict_t *d) { MP_STATE_CTX(dict_locals) = d; }
+static inline mp_obj_dict_t *mp_globals_get(void) { return MP_STATE_CTX(dict_globals); }
+static inline void mp_globals_set(mp_obj_dict_t *d) { MP_STATE_CTX(dict_globals) = d; }
+
+mp_obj_t mp_load_name(qstr qst);
+mp_obj_t mp_load_global(qstr qst);
+mp_obj_t mp_load_build_class(void);
+void mp_store_name(qstr qst, mp_obj_t obj);
+void mp_store_global(qstr qst, mp_obj_t obj);
+void mp_delete_name(qstr qst);
+void mp_delete_global(qstr qst);
+
+mp_obj_t mp_unary_op(mp_uint_t op, mp_obj_t arg);
+mp_obj_t mp_binary_op(mp_uint_t op, mp_obj_t lhs, mp_obj_t rhs);
+
+mp_obj_t mp_call_function_0(mp_obj_t fun);
+mp_obj_t mp_call_function_1(mp_obj_t fun, mp_obj_t arg);
+mp_obj_t mp_call_function_2(mp_obj_t fun, mp_obj_t arg1, mp_obj_t arg2);
+mp_obj_t mp_call_function_n_kw(mp_obj_t fun, mp_uint_t n_args, mp_uint_t n_kw, const mp_obj_t *args);
+mp_obj_t mp_call_method_n_kw(mp_uint_t n_args, mp_uint_t n_kw, const mp_obj_t *args);
+mp_obj_t mp_call_method_n_kw_var(bool have_self, mp_uint_t n_args_n_kw, const mp_obj_t *args);
+
+// Result of mp_call_prepare_args_n_kw_var: the callable, a flat argument
+// array (n_args positional values then n_kw key/value pairs), and the
+// allocation size needed to free the array with m_del.
+typedef struct _mp_call_args_t {
+    mp_obj_t fun;
+    mp_uint_t n_args, n_kw, n_alloc;
+    mp_obj_t *args;
+} mp_call_args_t;
+
+#if MICROPY_STACKLESS
+// Takes arguments which are the most general mix of Python arg types, and
+// prepares argument array suitable for passing to ->call() method of a
+// function object (and mp_call_function_n_kw()).
+// (Only needed in stackless mode.)
+void mp_call_prepare_args_n_kw_var(bool have_self, mp_uint_t n_args_n_kw, const mp_obj_t *args, mp_call_args_t *out_args);
+#endif
+
+void mp_unpack_sequence(mp_obj_t seq, mp_uint_t num, mp_obj_t *items);
+void mp_unpack_ex(mp_obj_t seq, mp_uint_t num, mp_obj_t *items);
+mp_obj_t mp_store_map(mp_obj_t map, mp_obj_t key, mp_obj_t value);
+mp_obj_t mp_load_attr(mp_obj_t base, qstr attr);
+void mp_convert_member_lookup(mp_obj_t obj, const mp_obj_type_t *type, mp_obj_t member, mp_obj_t *dest);
+void mp_load_method(mp_obj_t base, qstr attr, mp_obj_t *dest);
+void mp_load_method_maybe(mp_obj_t base, qstr attr, mp_obj_t *dest);
+void mp_store_attr(mp_obj_t base, qstr attr, mp_obj_t val);
+
+mp_obj_t mp_getiter(mp_obj_t o);
+mp_obj_t mp_iternext_allow_raise(mp_obj_t o); // may return MP_OBJ_STOP_ITERATION instead of raising StopIteration()
+mp_obj_t mp_iternext(mp_obj_t o); // will always return MP_OBJ_STOP_ITERATION instead of raising StopIteration(...)
+mp_vm_return_kind_t mp_resume(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t throw_value, mp_obj_t *ret_val);
+
+mp_obj_t mp_make_raise_obj(mp_obj_t o);
+
+mp_obj_t mp_import_name(qstr name, mp_obj_t fromlist, mp_obj_t level);
+mp_obj_t mp_import_from(mp_obj_t module, qstr name);
+void mp_import_all(mp_obj_t module);
+
+// Raise NotImplementedError with given message
+NORETURN void mp_not_implemented(const char *msg);
+NORETURN void mp_exc_recursion_depth(void);
+
+// helper functions for native/viper code
+mp_uint_t mp_convert_obj_to_native(mp_obj_t obj, mp_uint_t type);
+mp_obj_t mp_convert_native_to_obj(mp_uint_t val, mp_uint_t type);
+mp_obj_t mp_native_call_function_n_kw(mp_obj_t fun_in, mp_uint_t n_args_kw, const mp_obj_t *args);
+void mp_native_raise(mp_obj_t o);
+
+#define mp_sys_path (MP_OBJ_FROM_PTR(&MP_STATE_VM(mp_sys_path_obj)))
+#define mp_sys_argv (MP_OBJ_FROM_PTR(&MP_STATE_VM(mp_sys_argv_obj)))
+
+#if MICROPY_WARNINGS
+void mp_warning(const char *msg, ...);
+#else
+#define mp_warning(msg, ...)
+#endif
+
+#endif // __MICROPY_INCLUDED_PY_RUNTIME_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/runtime0.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,153 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
#ifndef __MICROPY_INCLUDED_PY_RUNTIME0_H__
#define __MICROPY_INCLUDED_PY_RUNTIME0_H__

// These must fit in 8 bits; see scope.h
#define MP_SCOPE_FLAG_VARARGS (0x01)
#define MP_SCOPE_FLAG_VARKEYWORDS (0x02)
#define MP_SCOPE_FLAG_GENERATOR (0x04)
#define MP_SCOPE_FLAG_DEFKWARGS (0x08)

// types for native (viper) function signature
#define MP_NATIVE_TYPE_OBJ (0x00)
#define MP_NATIVE_TYPE_BOOL (0x01)
#define MP_NATIVE_TYPE_INT (0x02)
#define MP_NATIVE_TYPE_UINT (0x03)
#define MP_NATIVE_TYPE_PTR (0x04)
#define MP_NATIVE_TYPE_PTR8 (0x05)
#define MP_NATIVE_TYPE_PTR16 (0x06)
#define MP_NATIVE_TYPE_PTR32 (0x07)

// Unary operations the runtime dispatches on.
typedef enum {
    MP_UNARY_OP_BOOL, // __bool__
    MP_UNARY_OP_LEN, // __len__
    MP_UNARY_OP_HASH, // __hash__; must return a small int
    MP_UNARY_OP_POSITIVE,
    MP_UNARY_OP_NEGATIVE,
    MP_UNARY_OP_INVERT,
    MP_UNARY_OP_NOT,
} mp_unary_op_t;

// Binary operations the runtime dispatches on.
// NOTE: the order here is part of the bytecode encoding — showbc.c decodes a
// binary op as (opcode - MP_BC_BINARY_OP_MULTI) and indexes
// mp_binary_op_method_name with it — so do not reorder these entries.
typedef enum {
    MP_BINARY_OP_OR,
    MP_BINARY_OP_XOR,
    MP_BINARY_OP_AND,
    MP_BINARY_OP_LSHIFT,
    MP_BINARY_OP_RSHIFT,

    MP_BINARY_OP_ADD,
    MP_BINARY_OP_SUBTRACT,
    MP_BINARY_OP_MULTIPLY,
    MP_BINARY_OP_FLOOR_DIVIDE,
    MP_BINARY_OP_TRUE_DIVIDE,

    MP_BINARY_OP_MODULO,
    MP_BINARY_OP_POWER,
    MP_BINARY_OP_DIVMOD, // not emitted by the compiler but supported by the runtime
    MP_BINARY_OP_INPLACE_OR,
    MP_BINARY_OP_INPLACE_XOR,

    MP_BINARY_OP_INPLACE_AND,
    MP_BINARY_OP_INPLACE_LSHIFT,
    MP_BINARY_OP_INPLACE_RSHIFT,
    MP_BINARY_OP_INPLACE_ADD,
    MP_BINARY_OP_INPLACE_SUBTRACT,

    MP_BINARY_OP_INPLACE_MULTIPLY,
    MP_BINARY_OP_INPLACE_FLOOR_DIVIDE,
    MP_BINARY_OP_INPLACE_TRUE_DIVIDE,
    MP_BINARY_OP_INPLACE_MODULO,
    MP_BINARY_OP_INPLACE_POWER,

    // these should return a bool
    MP_BINARY_OP_LESS,
    MP_BINARY_OP_MORE,
    MP_BINARY_OP_EQUAL,
    MP_BINARY_OP_LESS_EQUAL,
    MP_BINARY_OP_MORE_EQUAL,

    MP_BINARY_OP_NOT_EQUAL,
    MP_BINARY_OP_IN,
    MP_BINARY_OP_IS,
    MP_BINARY_OP_EXCEPTION_MATCH,
    // these are not supported by the runtime and must be synthesised by the emitter
    MP_BINARY_OP_NOT_IN,
    MP_BINARY_OP_IS_NOT,
} mp_binary_op_t;

// Indices into mp_fun_table, the table of runtime entry points that native
// (viper/emitter-generated) code calls through.
// NOTE: the order is part of the native-code ABI; do not reorder.
typedef enum {
    MP_F_CONVERT_OBJ_TO_NATIVE = 0,
    MP_F_CONVERT_NATIVE_TO_OBJ,
    MP_F_LOAD_NAME,
    MP_F_LOAD_GLOBAL,
    MP_F_LOAD_BUILD_CLASS,
    MP_F_LOAD_ATTR,
    MP_F_LOAD_METHOD,
    MP_F_STORE_NAME,
    MP_F_STORE_GLOBAL,
    MP_F_STORE_ATTR,
    MP_F_OBJ_SUBSCR,
    MP_F_OBJ_IS_TRUE,
    MP_F_UNARY_OP,
    MP_F_BINARY_OP,
    MP_F_BUILD_TUPLE,
    MP_F_BUILD_LIST,
    MP_F_LIST_APPEND,
    MP_F_BUILD_MAP,
    MP_F_STORE_MAP,
#if MICROPY_PY_BUILTINS_SET
    MP_F_BUILD_SET,
    MP_F_STORE_SET,
#endif
    MP_F_MAKE_FUNCTION_FROM_RAW_CODE,
    MP_F_NATIVE_CALL_FUNCTION_N_KW,
    MP_F_CALL_METHOD_N_KW,
    MP_F_CALL_METHOD_N_KW_VAR,
    MP_F_GETITER,
    MP_F_ITERNEXT,
    MP_F_NLR_PUSH,
    MP_F_NLR_POP,
    MP_F_NATIVE_RAISE,
    MP_F_IMPORT_NAME,
    MP_F_IMPORT_FROM,
    MP_F_IMPORT_ALL,
#if MICROPY_PY_BUILTINS_SLICE
    MP_F_NEW_SLICE,
#endif
    MP_F_UNPACK_SEQUENCE,
    MP_F_UNPACK_EX,
    MP_F_DELETE_NAME,
    MP_F_DELETE_GLOBAL,
    MP_F_NEW_CELL,
    MP_F_MAKE_CLOSURE_FROM_RAW_CODE,
    MP_F_SETUP_CODE_STATE,
    MP_F_NUMBER_OF, // sentinel: number of entries, not a real function
} mp_fun_kind_t;

// Table of runtime function pointers, indexed by mp_fun_kind_t.
extern void *const mp_fun_table[MP_F_NUMBER_OF];

#endif // __MICROPY_INCLUDED_PY_RUNTIME0_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/scope.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,155 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "py/scope.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+scope_t *scope_new(scope_kind_t kind, mp_parse_node_t pn, qstr source_file, mp_uint_t emit_options) {
+ scope_t *scope = m_new0(scope_t, 1);
+ scope->kind = kind;
+ scope->pn = pn;
+ scope->source_file = source_file;
+ switch (kind) {
+ case SCOPE_MODULE:
+ scope->simple_name = MP_QSTR__lt_module_gt_;
+ break;
+ case SCOPE_FUNCTION:
+ case SCOPE_CLASS:
+ assert(MP_PARSE_NODE_IS_STRUCT(pn));
+ scope->simple_name = MP_PARSE_NODE_LEAF_ARG(((mp_parse_node_struct_t*)pn)->nodes[0]);
+ break;
+ case SCOPE_LAMBDA:
+ scope->simple_name = MP_QSTR__lt_lambda_gt_;
+ break;
+ case SCOPE_LIST_COMP:
+ scope->simple_name = MP_QSTR__lt_listcomp_gt_;
+ break;
+ case SCOPE_DICT_COMP:
+ scope->simple_name = MP_QSTR__lt_dictcomp_gt_;
+ break;
+ case SCOPE_SET_COMP:
+ scope->simple_name = MP_QSTR__lt_setcomp_gt_;
+ break;
+ case SCOPE_GEN_EXPR:
+ scope->simple_name = MP_QSTR__lt_genexpr_gt_;
+ break;
+ default:
+ assert(0);
+ }
+ scope->raw_code = mp_emit_glue_new_raw_code();
+ scope->emit_options = emit_options;
+ scope->id_info_alloc = MICROPY_ALLOC_SCOPE_ID_INIT;
+ scope->id_info = m_new(id_info_t, scope->id_info_alloc);
+
+ return scope;
+}
+
// Free a scope and its identifier table (both allocated in scope_new).
// Does not touch parent/next scopes.
void scope_free(scope_t *scope) {
    m_del(id_info_t, scope->id_info, scope->id_info_alloc);
    m_del(scope_t, scope, 1);
}
+
+id_info_t *scope_find_or_add_id(scope_t *scope, qstr qst, bool *added) {
+ id_info_t *id_info = scope_find(scope, qst);
+ if (id_info != NULL) {
+ *added = false;
+ return id_info;
+ }
+
+ // make sure we have enough memory
+ if (scope->id_info_len >= scope->id_info_alloc) {
+ scope->id_info = m_renew(id_info_t, scope->id_info, scope->id_info_alloc, scope->id_info_alloc + MICROPY_ALLOC_SCOPE_ID_INC);
+ scope->id_info_alloc += MICROPY_ALLOC_SCOPE_ID_INC;
+ }
+
+ // add new id to end of array of all ids; this seems to match CPython
+ // important thing is that function arguments are first, but that is
+ // handled by the compiler because it adds arguments before compiling the body
+ id_info = &scope->id_info[scope->id_info_len++];
+
+ id_info->kind = 0;
+ id_info->flags = 0;
+ id_info->local_num = 0;
+ id_info->qst = qst;
+ *added = true;
+ return id_info;
+}
+
+id_info_t *scope_find(scope_t *scope, qstr qst) {
+ for (mp_uint_t i = 0; i < scope->id_info_len; i++) {
+ if (scope->id_info[i].qst == qst) {
+ return &scope->id_info[i];
+ }
+ }
+ return NULL;
+}
+
+id_info_t *scope_find_global(scope_t *scope, qstr qst) {
+ while (scope->parent != NULL) {
+ scope = scope->parent;
+ }
+ return scope_find(scope, qst);
+}
+
+id_info_t *scope_find_local_in_parent(scope_t *scope, qstr qst) {
+ if (scope->parent == NULL) {
+ return NULL;
+ }
+ for (scope_t *s = scope->parent; s->parent != NULL; s = s->parent) {
+ id_info_t *id = scope_find(s, qst);
+ if (id != NULL) {
+ return id;
+ }
+ }
+ return NULL;
+}
+
// Close the variable qst over in the enclosing scopes: walk up the parent
// chain (excluding the module scope); every intermediate scope that does not
// declare the variable gets a FREE entry, and the first scope that does
// declare it has its LOCAL entry promoted to CELL (or is left as-is if it is
// already CELL/FREE), at which point the walk stops.
void scope_close_over_in_parents(scope_t *scope, qstr qst) {
    assert(scope->parent != NULL); // we should have at least 1 parent
    for (scope_t *s = scope->parent; s->parent != NULL; s = s->parent) {
        bool added;
        id_info_t *id = scope_find_or_add_id(s, qst, &added);
        if (added) {
            // variable not previously declared in this scope, so declare it as free and keep searching parents
            id->kind = ID_INFO_KIND_FREE;
        } else {
            // variable is declared in this scope, so finish
            switch (id->kind) {
                case ID_INFO_KIND_LOCAL: id->kind = ID_INFO_KIND_CELL; break; // variable local to this scope, close it over
                case ID_INFO_KIND_FREE: break; // variable already closed over in a parent scope
                case ID_INFO_KIND_CELL: break; // variable already closed over in this scope
                default: assert(0); // TODO
            }
            return;
        }
    }
    assert(0); // we should have found the variable in one of the parents
}
+
+#endif // MICROPY_ENABLE_COMPILER
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/scope.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,86 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
#ifndef __MICROPY_INCLUDED_PY_SCOPE_H__
#define __MICROPY_INCLUDED_PY_SCOPE_H__

#include "py/parse.h"
#include "py/emitglue.h"

// Kind of an identifier within a scope; stored in id_info_t::kind.
enum {
    ID_INFO_KIND_GLOBAL_IMPLICIT,
    ID_INFO_KIND_GLOBAL_EXPLICIT,
    ID_INFO_KIND_LOCAL, // in a function f, written and only referenced by f
    ID_INFO_KIND_CELL, // in a function f, read/written by children of f
    ID_INFO_KIND_FREE, // in a function f, belongs to the parent of f
};

// Flags describing how an identifier was declared as a parameter;
// stored in id_info_t::flags.
enum {
    ID_FLAG_IS_PARAM = 0x01,
    ID_FLAG_IS_STAR_PARAM = 0x02,
    ID_FLAG_IS_DBL_STAR_PARAM = 0x04,
};

// Per-identifier bookkeeping within a scope.
typedef struct _id_info_t {
    uint8_t kind;
    uint8_t flags;
    // when it's an ID_INFO_KIND_LOCAL this is the unique number of the local
    // when it's an ID_INFO_KIND_CELL/FREE this is the unique number of the closed over variable
    uint16_t local_num;
    qstr qst;
} id_info_t;

// scope is a "block" in Python parlance
typedef enum { SCOPE_MODULE, SCOPE_FUNCTION, SCOPE_LAMBDA, SCOPE_LIST_COMP, SCOPE_DICT_COMP, SCOPE_SET_COMP, SCOPE_GEN_EXPR, SCOPE_CLASS } scope_kind_t;
typedef struct _scope_t {
    scope_kind_t kind;
    struct _scope_t *parent; // lexically enclosing scope; NULL for the module scope
    struct _scope_t *next;
    mp_parse_node_t pn; // parse node this scope was created from
    qstr source_file;
    qstr simple_name;
    mp_raw_code_t *raw_code;
    uint8_t scope_flags; // see runtime0.h
    uint8_t emit_options; // see compile.h
    uint16_t num_pos_args;
    uint16_t num_kwonly_args;
    uint16_t num_def_pos_args;
    uint16_t num_locals;
    uint16_t stack_size; // maximum size of the locals stack
    uint16_t exc_stack_size; // maximum size of the exception stack
    uint16_t id_info_alloc; // allocated entries in id_info
    uint16_t id_info_len; // used entries in id_info
    id_info_t *id_info;
} scope_t;

// Create a new scope; name is derived from the kind/parse node (see scope.c).
scope_t *scope_new(scope_kind_t kind, mp_parse_node_t pn, qstr source_file, mp_uint_t emit_options);
void scope_free(scope_t *scope);
// Find qstr in scope, appending a new zeroed entry if absent; *added says which.
id_info_t *scope_find_or_add_id(scope_t *scope, qstr qstr, bool *added);
id_info_t *scope_find(scope_t *scope, qstr qstr);
// Search only the outermost (module) scope.
id_info_t *scope_find_global(scope_t *scope, qstr qstr);
// Search enclosing scopes, excluding this one and the module scope.
id_info_t *scope_find_local_in_parent(scope_t *scope, qstr qstr);
// Close qstr over in parents: FREE in intermediate scopes, LOCAL->CELL where declared.
void scope_close_over_in_parents(scope_t *scope, qstr qstr);

#endif // __MICROPY_INCLUDED_PY_SCOPE_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/sequence.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,251 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+
+#include "py/nlr.h"
+#include "py/obj.h"
+#include "py/runtime0.h"
+#include "py/runtime.h"
+
+// Helpers for sequence types
+
+#define SWAP(type, var1, var2) { type t = var2; var2 = var1; var1 = t; }
+
+// Implements backend of sequence * integer operation. Assumes elements are
+// memory-adjacent in sequence.
+void mp_seq_multiply(const void *items, mp_uint_t item_sz, mp_uint_t len, mp_uint_t times, void *dest) {
+ for (mp_uint_t i = 0; i < times; i++) {
+ uint copy_sz = item_sz * len;
+ memcpy(dest, items, copy_sz);
+ dest = (char*)dest + copy_sz;
+ }
+}
+
+#if MICROPY_PY_BUILTINS_SLICE
+
// Normalise a slice object against a sequence of length len, writing the
// resulting bounds into *indexes. Returns true when the step is 1 (or
// unspecified), i.e. the "fast" case fully described by start/stop; returns
// false otherwise, with indexes->step set to the slice's step.
// Assumes the slice bounds are small ints (MP_OBJ_SMALL_INT_VALUE is applied
// without a type check).
bool mp_seq_get_fast_slice_indexes(mp_uint_t len, mp_obj_t slice, mp_bound_slice_t *indexes) {
    mp_obj_t ostart, ostop, ostep;
    mp_int_t start, stop;
    mp_obj_slice_get(slice, &ostart, &ostop, &ostep);

    // missing bounds default to the full range [0, len)
    if (ostart == mp_const_none) {
        start = 0;
    } else {
        start = MP_OBJ_SMALL_INT_VALUE(ostart);
    }
    if (ostop == mp_const_none) {
        stop = len;
    } else {
        stop = MP_OBJ_SMALL_INT_VALUE(ostop);
    }

    // Unlike subscription, out-of-bounds slice indexes are never error
    if (start < 0) {
        start = len + start; // negative index counts from the end
        if (start < 0) {
            start = 0;
        }
    } else if ((mp_uint_t)start > len) {
        start = len;
    }
    if (stop < 0) {
        stop = len + stop;
    } else if ((mp_uint_t)stop > len) {
        stop = len;
    }

    // CPython returns empty sequence in such case, or point for assignment is at start
    if (start > stop) {
        stop = start;
    }

    indexes->start = start;
    indexes->stop = stop;

    if (ostep != mp_const_none && ostep != MP_OBJ_NEW_SMALL_INT(1)) {
        // NOTE(review): for a negative step the bounds above were still
        // clamped as for a forward slice — confirm callers renormalise
        // before using them (see mp_seq_extract_slice).
        indexes->step = MP_OBJ_SMALL_INT_VALUE(ostep);
        return false;
    }
    indexes->step = 1;
    return true;
}
+
+#endif
+
+mp_obj_t mp_seq_extract_slice(mp_uint_t len, const mp_obj_t *seq, mp_bound_slice_t *indexes) {
+ (void)len; // TODO can we remove len from the arg list?
+
+ mp_int_t start = indexes->start, stop = indexes->stop;
+ mp_int_t step = indexes->step;
+
+ mp_obj_t res = mp_obj_new_list(0, NULL);
+
+ if (step < 0) {
+ stop--;
+ while (start <= stop) {
+ mp_obj_list_append(res, seq[stop]);
+ stop += step;
+ }
+ } else {
+ while (start < stop) {
+ mp_obj_list_append(res, seq[start]);
+ start += step;
+ }
+ }
+ return res;
+}
+
// Special-case comparison function for sequences of bytes
// Returns the boolean result of "data1 op data2" where op is one of
// MP_BINARY_OP_{EQUAL,LESS,LESS_EQUAL,MORE,MORE_EQUAL}.
// Don't pass MP_BINARY_OP_NOT_EQUAL here
bool mp_seq_cmp_bytes(mp_uint_t op, const byte *data1, mp_uint_t len1, const byte *data2, mp_uint_t len2) {
    // equal-length is a prerequisite for equality
    if (op == MP_BINARY_OP_EQUAL && len1 != len2) {
        return false;
    }

    // Let's deal only with > & >= : swap operands and flip the operator
    if (op == MP_BINARY_OP_LESS || op == MP_BINARY_OP_LESS_EQUAL) {
        SWAP(const byte*, data1, data2);
        SWAP(uint, len1, len2);
        if (op == MP_BINARY_OP_LESS) {
            op = MP_BINARY_OP_MORE;
        } else {
            op = MP_BINARY_OP_MORE_EQUAL;
        }
    }
    // compare the common prefix
    uint min_len = len1 < len2 ? len1 : len2;
    int res = memcmp(data1, data2, min_len);
    if (op == MP_BINARY_OP_EQUAL) {
        // If we are checking for equality, here's the answer
        // (lengths already known equal, so prefix equality decides)
        return res == 0;
    }
    if (res < 0) {
        return false;
    }
    if (res > 0) {
        return true;
    }

    // If we had a tie over the common prefix...
    // ... and we have sequences of different lengths...
    if (len1 != len2) {
        if (len1 < len2) {
            // ... then longer sequence wins (we deal only with >)
            return false;
        }
    } else if (op == MP_BINARY_OP_MORE) {
        // Otherwise, if we have strict relation, equality means failure
        return false;
    }
    return true;
}
+
// Special-case comparison function for sequences of mp_obj_t
// Returns the boolean result of "items1 op items2" where op is one of
// MP_BINARY_OP_{EQUAL,LESS,LESS_EQUAL,MORE,MORE_EQUAL}, comparing
// element-wise like Python sequence comparison.
// Don't pass MP_BINARY_OP_NOT_EQUAL here
bool mp_seq_cmp_objs(mp_uint_t op, const mp_obj_t *items1, mp_uint_t len1, const mp_obj_t *items2, mp_uint_t len2) {
    // equal-length is a prerequisite for equality
    if (op == MP_BINARY_OP_EQUAL && len1 != len2) {
        return false;
    }

    // Let's deal only with > & >= : swap operands and flip the operator
    if (op == MP_BINARY_OP_LESS || op == MP_BINARY_OP_LESS_EQUAL) {
        SWAP(const mp_obj_t *, items1, items2);
        SWAP(uint, len1, len2);
        if (op == MP_BINARY_OP_LESS) {
            op = MP_BINARY_OP_MORE;
        } else {
            op = MP_BINARY_OP_MORE_EQUAL;
        }
    }

    mp_uint_t len = len1 < len2 ? len1 : len2;
    for (mp_uint_t i = 0; i < len; i++) {
        // If current elements equal, can't decide anything - go on
        if (mp_obj_equal(items1[i], items2[i])) {
            continue;
        }

        // Otherwise, if they are not equal, we can have final decision based on them
        if (op == MP_BINARY_OP_EQUAL) {
            // In particular, if we are checking for equality, here's the answer
            return false;
        }

        // Otherwise, application of relation op gives the answer
        return (mp_binary_op(op, items1[i], items2[i]) == mp_const_true);
    }

    // If we had a tie over the common prefix...
    // ... and we have sequences of different lengths...
    if (len1 != len2) {
        if (len1 < len2) {
            // ... then longer sequence wins (we deal only with >)
            return false;
        }
    } else if (op == MP_BINARY_OP_MORE) {
        // Otherwise, if we have strict relation, sequence equality means failure
        return false;
    }

    return true;
}
+
// Special-case of index() which searches for mp_obj_t
// args[0] is the sequence object (only its type is used, for index
// normalisation), args[1] is the value to find; optional args[2]/args[3]
// are start/stop bounds. Returns the index of the first match as a small
// int, or raises ValueError if the value is not found.
mp_obj_t mp_seq_index_obj(const mp_obj_t *items, mp_uint_t len, mp_uint_t n_args, const mp_obj_t *args) {
    mp_obj_type_t *type = mp_obj_get_type(args[0]);
    mp_obj_t value = args[1];
    uint start = 0;
    uint stop = len;

    // normalise optional start/stop (negative values allowed per mp_get_index)
    if (n_args >= 3) {
        start = mp_get_index(type, len, args[2], true);
        if (n_args >= 4) {
            stop = mp_get_index(type, len, args[3], true);
        }
    }

    for (mp_uint_t i = start; i < stop; i++) {
        if (mp_obj_equal(items[i], value)) {
            // Common sense says this cannot overflow small int
            return MP_OBJ_NEW_SMALL_INT(i);
        }
    }

    nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, "object not in sequence"));
}
+
+mp_obj_t mp_seq_count_obj(const mp_obj_t *items, mp_uint_t len, mp_obj_t value) {
+ mp_uint_t count = 0;
+ for (uint i = 0; i < len; i++) {
+ if (mp_obj_equal(items[i], value)) {
+ count++;
+ }
+ }
+
+ // Common sense says this cannot overflow small int
+ return MP_OBJ_NEW_SMALL_INT(count);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/showbc.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,565 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+
+#include "py/bc0.h"
+#include "py/bc.h"
+
+#if MICROPY_DEBUG_PRINTERS
+
// Decoders for the in-line operands of the bytecode being printed. Each
// macro advances ip past the operand, leaving the decoded value in unum
// (or qst for qstrs). All are wrapped in do { } while (0) so each expands
// to exactly one statement and composes safely with if/else (the original
// bare { } / multi-statement forms would break there; CERT PRE10-C).

// variable-length unsigned int: 7 bits per byte, high bit = continue
#define DECODE_UINT do { \
    unum = 0; \
    do { \
        unum = (unum << 7) + (*ip & 0x7f); \
    } while ((*ip++ & 0x80) != 0); \
} while (0)
// jump labels are fixed 16-bit little-endian; SLABEL is biased by 0x8000
#define DECODE_ULABEL do { unum = (ip[0] | (ip[1] << 8)); ip += 2; } while (0)
#define DECODE_SLABEL do { unum = (ip[0] | (ip[1] << 8)) - 0x8000; ip += 2; } while (0)

#if MICROPY_PERSISTENT_CODE

// persistent code: qstrs are fixed 16-bit values; ptrs/objs are stored as
// indices into the constant table
#define DECODE_QSTR do { \
    qst = ip[0] | ip[1] << 8; \
    ip += 2; \
} while (0)
#define DECODE_PTR do { \
    DECODE_UINT; \
    unum = mp_showbc_const_table[unum]; \
} while (0)
#define DECODE_OBJ do { \
    DECODE_UINT; \
    unum = mp_showbc_const_table[unum]; \
} while (0)

#else

// non-persistent code: qstrs are variable-length like uints; ptrs/objs are
// stored in-line, aligned to the pointer size
#define DECODE_QSTR do { \
    qst = 0; \
    do { \
        qst = (qst << 7) + (*ip & 0x7f); \
    } while ((*ip++ & 0x80) != 0); \
} while (0)
#define DECODE_PTR do { \
    ip = (byte*)MP_ALIGN(ip, sizeof(void*)); \
    unum = (uintptr_t)*(void**)ip; \
    ip += sizeof(void*); \
} while (0)
#define DECODE_OBJ do { \
    ip = (byte*)MP_ALIGN(ip, sizeof(mp_obj_t)); \
    unum = (mp_uint_t)*(mp_obj_t*)ip; \
    ip += sizeof(mp_obj_t); \
} while (0)

#endif
+
// Start of the bytecode currently being printed; used to turn ip into
// printable offsets (set by mp_bytecode_print/mp_bytecode_print2).
const byte *mp_showbc_code_start;
// Constant table of the bytecode currently being printed.
const mp_uint_t *mp_showbc_const_table;
+
// Print a complete bytecode block to stdout: header parameters, a raw hex
// dump, argument names, the source line-number table, then a disassembly of
// every instruction (via mp_bytecode_print2). descr is only echoed as an
// identifying pointer.
void mp_bytecode_print(const void *descr, const byte *ip, mp_uint_t len, const mp_uint_t *const_table) {
    mp_showbc_code_start = ip;
    mp_showbc_const_table = const_table;

    // get bytecode parameters (prelude layout: n_state, n_exc_stack,
    // scope_flags, n_pos_args, n_kwonly_args, n_def_pos_args)
    mp_uint_t n_state = mp_decode_uint(&ip);
    mp_uint_t n_exc_stack = mp_decode_uint(&ip);
    /*mp_uint_t scope_flags =*/ ip++;
    mp_uint_t n_pos_args = *ip++;
    mp_uint_t n_kwonly_args = *ip++;
    /*mp_uint_t n_def_pos_args =*/ ip++;

    // skip over the code-info section; it is re-read below for names/lines
    const byte *code_info = ip;
    mp_uint_t code_info_size = mp_decode_uint(&code_info);
    ip += code_info_size;

    #if MICROPY_PERSISTENT_CODE
    // persistent code stores the two qstrs as fixed 16-bit values
    // NOTE(review): code_info is not advanced past these 4 bytes before the
    // line-number loop below — confirm against the code-info encoding
    qstr block_name = code_info[0] | (code_info[1] << 8);
    qstr source_file = code_info[2] | (code_info[3] << 8);
    #else
    qstr block_name = mp_decode_uint(&code_info);
    qstr source_file = mp_decode_uint(&code_info);
    #endif
    printf("File %s, code block '%s' (descriptor: %p, bytecode @%p " UINT_FMT " bytes)\n",
        qstr_str(source_file), qstr_str(block_name), descr, mp_showbc_code_start, len);

    // raw bytecode dump, 16 bytes per row
    printf("Raw bytecode (code_info_size=" UINT_FMT ", bytecode_size=" UINT_FMT "):\n", code_info_size, len - code_info_size);
    for (mp_uint_t i = 0; i < len; i++) {
        if (i > 0 && i % 16 == 0) {
            printf("\n");
        }
        printf(" %02x", mp_showbc_code_start[i]);
    }
    printf("\n");

    // bytecode prelude: arg names (as qstr objects)
    // (the first n_pos_args + n_kwonly_args entries of the const table)
    printf("arg names:");
    for (mp_uint_t i = 0; i < n_pos_args + n_kwonly_args; i++) {
        printf(" %s", qstr_str(MP_OBJ_QSTR_VALUE(const_table[i])));
    }
    printf("\n");

    printf("(N_STATE " UINT_FMT ")\n", n_state);
    printf("(N_EXC_STACK " UINT_FMT ")\n", n_exc_stack);

    // for printing line number info
    const byte *bytecode_start = ip;

    // bytecode prelude: initialise closed over variables
    // (a 255-terminated list of local numbers)
    {
        uint local_num;
        while ((local_num = *ip++) != 255) {
            printf("(INIT_CELL %u)\n", local_num);
        }
        len -= ip - mp_showbc_code_start;
    }

    // print out line number info, decoding the two variable-length
    // (bytecode-delta, line-delta) encodings
    {
        mp_int_t bc = bytecode_start - ip; // relative to start of real bytecode
        mp_uint_t source_line = 1;
        printf("  bc=" INT_FMT " line=" UINT_FMT "\n", bc, source_line);
        for (const byte* ci = code_info; *ci;) {
            if ((ci[0] & 0x80) == 0) {
                // 0b0LLBBBBB encoding
                bc += ci[0] & 0x1f;
                source_line += ci[0] >> 5;
                ci += 1;
            } else {
                // 0b1LLLBBBB 0bLLLLLLLL encoding (l's LSB in second byte)
                bc += ci[0] & 0xf;
                source_line += ((ci[0] << 4) & 0x700) | ci[1];
                ci += 2;
            }
            printf("  bc=" INT_FMT " line=" UINT_FMT "\n", bc, source_line);
        }
    }
    mp_bytecode_print2(ip, len - 0);
}
+
// Decode and print a single bytecode instruction starting at ip (the opcode
// byte plus any in-line operands), without a trailing newline (the caller,
// mp_bytecode_print2, adds it). Returns a pointer to the next instruction.
const byte *mp_bytecode_print_str(const byte *ip) {
    mp_uint_t unum;
    qstr qst;

    switch (*ip++) {
        case MP_BC_LOAD_CONST_FALSE:
            printf("LOAD_CONST_FALSE");
            break;

        case MP_BC_LOAD_CONST_NONE:
            printf("LOAD_CONST_NONE");
            break;

        case MP_BC_LOAD_CONST_TRUE:
            printf("LOAD_CONST_TRUE");
            break;

        case MP_BC_LOAD_CONST_SMALL_INT: {
            // variable-length signed int: 7 bits per byte, sign-extended
            // from bit 6 of the first byte, high bit = continue
            mp_int_t num = 0;
            if ((ip[0] & 0x40) != 0) {
                // Number is negative
                num--;
            }
            do {
                num = (num << 7) | (*ip & 0x7f);
            } while ((*ip++ & 0x80) != 0);
            printf("LOAD_CONST_SMALL_INT " INT_FMT, num);
            break;
        }

        case MP_BC_LOAD_CONST_STRING:
            DECODE_QSTR;
            printf("LOAD_CONST_STRING '%s'", qstr_str(qst));
            break;

        case MP_BC_LOAD_CONST_OBJ:
            DECODE_OBJ;
            printf("LOAD_CONST_OBJ %p=", MP_OBJ_TO_PTR(unum));
            mp_obj_print_helper(&mp_plat_print, (mp_obj_t)unum, PRINT_REPR);
            break;

        case MP_BC_LOAD_NULL:
            printf("LOAD_NULL");
            break;

        case MP_BC_LOAD_FAST_N:
            DECODE_UINT;
            printf("LOAD_FAST_N " UINT_FMT, unum);
            break;

        case MP_BC_LOAD_DEREF:
            DECODE_UINT;
            printf("LOAD_DEREF " UINT_FMT, unum);
            break;

        // name/global/attr loads may carry an extra map-lookup cache byte
        case MP_BC_LOAD_NAME:
            DECODE_QSTR;
            printf("LOAD_NAME %s", qstr_str(qst));
            if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) {
                printf(" (cache=%u)", *ip++);
            }
            break;

        case MP_BC_LOAD_GLOBAL:
            DECODE_QSTR;
            printf("LOAD_GLOBAL %s", qstr_str(qst));
            if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) {
                printf(" (cache=%u)", *ip++);
            }
            break;

        case MP_BC_LOAD_ATTR:
            DECODE_QSTR;
            printf("LOAD_ATTR %s", qstr_str(qst));
            if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) {
                printf(" (cache=%u)", *ip++);
            }
            break;

        case MP_BC_LOAD_METHOD:
            DECODE_QSTR;
            printf("LOAD_METHOD %s", qstr_str(qst));
            break;

        case MP_BC_LOAD_BUILD_CLASS:
            printf("LOAD_BUILD_CLASS");
            break;

        case MP_BC_LOAD_SUBSCR:
            printf("LOAD_SUBSCR");
            break;

        case MP_BC_STORE_FAST_N:
            DECODE_UINT;
            printf("STORE_FAST_N " UINT_FMT, unum);
            break;

        case MP_BC_STORE_DEREF:
            DECODE_UINT;
            printf("STORE_DEREF " UINT_FMT, unum);
            break;

        case MP_BC_STORE_NAME:
            DECODE_QSTR;
            printf("STORE_NAME %s", qstr_str(qst));
            break;

        case MP_BC_STORE_GLOBAL:
            DECODE_QSTR;
            printf("STORE_GLOBAL %s", qstr_str(qst));
            break;

        case MP_BC_STORE_ATTR:
            DECODE_QSTR;
            printf("STORE_ATTR %s", qstr_str(qst));
            if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) {
                printf(" (cache=%u)", *ip++);
            }
            break;

        case MP_BC_STORE_SUBSCR:
            printf("STORE_SUBSCR");
            break;

        case MP_BC_DELETE_FAST:
            DECODE_UINT;
            printf("DELETE_FAST " UINT_FMT, unum);
            break;

        case MP_BC_DELETE_DEREF:
            DECODE_UINT;
            printf("DELETE_DEREF " UINT_FMT, unum);
            break;

        case MP_BC_DELETE_NAME:
            DECODE_QSTR;
            printf("DELETE_NAME %s", qstr_str(qst));
            break;

        case MP_BC_DELETE_GLOBAL:
            DECODE_QSTR;
            printf("DELETE_GLOBAL %s", qstr_str(qst));
            break;

        case MP_BC_DUP_TOP:
            printf("DUP_TOP");
            break;

        case MP_BC_DUP_TOP_TWO:
            printf("DUP_TOP_TWO");
            break;

        case MP_BC_POP_TOP:
            printf("POP_TOP");
            break;

        case MP_BC_ROT_TWO:
            printf("ROT_TWO");
            break;

        case MP_BC_ROT_THREE:
            printf("ROT_THREE");
            break;

        // jump targets are printed as absolute offsets from the start of
        // the bytecode (ip + decoded-offset - mp_showbc_code_start)
        case MP_BC_JUMP:
            DECODE_SLABEL;
            printf("JUMP " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
            break;

        case MP_BC_POP_JUMP_IF_TRUE:
            DECODE_SLABEL;
            printf("POP_JUMP_IF_TRUE " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
            break;

        case MP_BC_POP_JUMP_IF_FALSE:
            DECODE_SLABEL;
            printf("POP_JUMP_IF_FALSE " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
            break;

        case MP_BC_JUMP_IF_TRUE_OR_POP:
            DECODE_SLABEL;
            printf("JUMP_IF_TRUE_OR_POP " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
            break;

        case MP_BC_JUMP_IF_FALSE_OR_POP:
            DECODE_SLABEL;
            printf("JUMP_IF_FALSE_OR_POP " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
            break;

        case MP_BC_SETUP_WITH:
            DECODE_ULABEL; // loop-like labels are always forward
            printf("SETUP_WITH " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
            break;

        case MP_BC_WITH_CLEANUP:
            printf("WITH_CLEANUP");
            break;

        case MP_BC_UNWIND_JUMP:
            DECODE_SLABEL;
            // the extra byte is the number of exception handlers to unwind
            printf("UNWIND_JUMP " UINT_FMT " %d", (mp_uint_t)(ip + unum - mp_showbc_code_start), *ip);
            ip += 1;
            break;

        case MP_BC_SETUP_EXCEPT:
            DECODE_ULABEL; // except labels are always forward
            printf("SETUP_EXCEPT " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
            break;

        case MP_BC_SETUP_FINALLY:
            DECODE_ULABEL; // except labels are always forward
            printf("SETUP_FINALLY " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
            break;

        case MP_BC_END_FINALLY:
            // if TOS is an exception, reraises the exception (3 values on TOS)
            // if TOS is an integer, does something else
            // if TOS is None, just pops it and continues
            // else error
            printf("END_FINALLY");
            break;

        case MP_BC_GET_ITER:
            printf("GET_ITER");
            break;

        case MP_BC_FOR_ITER:
            DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
            printf("FOR_ITER " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
            break;

        case MP_BC_POP_BLOCK:
            // pops block and restores the stack
            printf("POP_BLOCK");
            break;

        case MP_BC_POP_EXCEPT:
            // pops block, checks it's an exception block, and restores the stack, saving the 3 exception values to local threadstate
            printf("POP_EXCEPT");
            break;

        case MP_BC_BUILD_TUPLE:
            DECODE_UINT;
            printf("BUILD_TUPLE " UINT_FMT, unum);
            break;

        case MP_BC_BUILD_LIST:
            DECODE_UINT;
            printf("BUILD_LIST " UINT_FMT, unum);
            break;

        case MP_BC_LIST_APPEND:
            DECODE_UINT;
            printf("LIST_APPEND " UINT_FMT, unum);
            break;

        case MP_BC_BUILD_MAP:
            DECODE_UINT;
            printf("BUILD_MAP " UINT_FMT, unum);
            break;

        case MP_BC_STORE_MAP:
            printf("STORE_MAP");
            break;

        case MP_BC_MAP_ADD:
            DECODE_UINT;
            printf("MAP_ADD " UINT_FMT, unum);
            break;

        case MP_BC_BUILD_SET:
            DECODE_UINT;
            printf("BUILD_SET " UINT_FMT, unum);
            break;

        case MP_BC_SET_ADD:
            DECODE_UINT;
            printf("SET_ADD " UINT_FMT, unum);
            break;

#if MICROPY_PY_BUILTINS_SLICE
        case MP_BC_BUILD_SLICE:
            DECODE_UINT;
            printf("BUILD_SLICE " UINT_FMT, unum);
            break;
#endif

        case MP_BC_UNPACK_SEQUENCE:
            DECODE_UINT;
            printf("UNPACK_SEQUENCE " UINT_FMT, unum);
            break;

        case MP_BC_UNPACK_EX:
            DECODE_UINT;
            printf("UNPACK_EX " UINT_FMT, unum);
            break;

        case MP_BC_MAKE_FUNCTION:
            DECODE_PTR;
            printf("MAKE_FUNCTION %p", (void*)(uintptr_t)unum);
            break;

        case MP_BC_MAKE_FUNCTION_DEFARGS:
            DECODE_PTR;
            printf("MAKE_FUNCTION_DEFARGS %p", (void*)(uintptr_t)unum);
            break;

        case MP_BC_MAKE_CLOSURE: {
            DECODE_PTR;
            mp_uint_t n_closed_over = *ip++;
            printf("MAKE_CLOSURE %p " UINT_FMT, (void*)(uintptr_t)unum, n_closed_over);
            break;
        }

        case MP_BC_MAKE_CLOSURE_DEFARGS: {
            DECODE_PTR;
            mp_uint_t n_closed_over = *ip++;
            printf("MAKE_CLOSURE_DEFARGS %p " UINT_FMT, (void*)(uintptr_t)unum, n_closed_over);
            break;
        }

        // call opcodes pack positional count in the low byte and keyword
        // count in the next byte of the operand
        case MP_BC_CALL_FUNCTION:
            DECODE_UINT;
            printf("CALL_FUNCTION n=" UINT_FMT " nkw=" UINT_FMT, unum & 0xff, (unum >> 8) & 0xff);
            break;

        case MP_BC_CALL_FUNCTION_VAR_KW:
            DECODE_UINT;
            printf("CALL_FUNCTION_VAR_KW n=" UINT_FMT " nkw=" UINT_FMT, unum & 0xff, (unum >> 8) & 0xff);
            break;

        case MP_BC_CALL_METHOD:
            DECODE_UINT;
            printf("CALL_METHOD n=" UINT_FMT " nkw=" UINT_FMT, unum & 0xff, (unum >> 8) & 0xff);
            break;

        case MP_BC_CALL_METHOD_VAR_KW:
            DECODE_UINT;
            printf("CALL_METHOD_VAR_KW n=" UINT_FMT " nkw=" UINT_FMT, unum & 0xff, (unum >> 8) & 0xff);
            break;

        case MP_BC_RETURN_VALUE:
            printf("RETURN_VALUE");
            break;

        case MP_BC_RAISE_VARARGS:
            unum = *ip++;
            printf("RAISE_VARARGS " UINT_FMT, unum);
            break;

        case MP_BC_YIELD_VALUE:
            printf("YIELD_VALUE");
            break;

        case MP_BC_YIELD_FROM:
            printf("YIELD_FROM");
            break;

        case MP_BC_IMPORT_NAME:
            DECODE_QSTR;
            printf("IMPORT_NAME '%s'", qstr_str(qst));
            break;

        case MP_BC_IMPORT_FROM:
            DECODE_QSTR;
            printf("IMPORT_FROM '%s'", qstr_str(qst));
            break;

        case MP_BC_IMPORT_STAR:
            printf("IMPORT_STAR");
            break;

        default:
            // multi-opcodes encode their operand in the opcode byte itself:
            // ranges of 64 small ints (-16..47), 16 fast locals, 16 fast
            // stores, 7 unary ops and 36 binary ops
            if (ip[-1] < MP_BC_LOAD_CONST_SMALL_INT_MULTI + 64) {
                printf("LOAD_CONST_SMALL_INT " INT_FMT, (mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - 16);
            } else if (ip[-1] < MP_BC_LOAD_FAST_MULTI + 16) {
                printf("LOAD_FAST " UINT_FMT, (mp_uint_t)ip[-1] - MP_BC_LOAD_FAST_MULTI);
            } else if (ip[-1] < MP_BC_STORE_FAST_MULTI + 16) {
                printf("STORE_FAST " UINT_FMT, (mp_uint_t)ip[-1] - MP_BC_STORE_FAST_MULTI);
            } else if (ip[-1] < MP_BC_UNARY_OP_MULTI + 7) {
                printf("UNARY_OP " UINT_FMT, (mp_uint_t)ip[-1] - MP_BC_UNARY_OP_MULTI);
            } else if (ip[-1] < MP_BC_BINARY_OP_MULTI + 36) {
                // op indexes mp_binary_op_t (see runtime0.h)
                mp_uint_t op = ip[-1] - MP_BC_BINARY_OP_MULTI;
                printf("BINARY_OP " UINT_FMT " %s", op, qstr_str(mp_binary_op_method_name[op]));
            } else {
                printf("code %p, byte code 0x%02x not implemented\n", ip, ip[-1]);
                assert(0);
                return ip;
            }
            break;
    }

    return ip;
}
+
+void mp_bytecode_print2(const byte *ip, mp_uint_t len) {
+ mp_showbc_code_start = ip;
+ while (ip < len + mp_showbc_code_start) {
+ printf("%02u ", (uint)(ip - mp_showbc_code_start));
+ ip = mp_bytecode_print_str(ip);
+ printf("\n");
+ }
+}
+
+#endif // MICROPY_DEBUG_PRINTERS
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/smallint.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,75 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/smallint.h"
+
+bool mp_small_int_mul_overflow(mp_int_t x, mp_int_t y) {
+ // Check for multiply overflow; see CERT INT32-C
+ if (x > 0) { // x is positive
+ if (y > 0) { // x and y are positive
+ if (x > (MP_SMALL_INT_MAX / y)) {
+ return true;
+ }
+ } else { // x positive, y nonpositive
+ if (y < (MP_SMALL_INT_MIN / x)) {
+ return true;
+ }
+ } // x positive, y nonpositive
+ } else { // x is nonpositive
+ if (y > 0) { // x is nonpositive, y is positive
+ if (x < (MP_SMALL_INT_MIN / y)) {
+ return true;
+ }
+ } else { // x and y are nonpositive
+ if (x != 0 && y < (MP_SMALL_INT_MAX / x)) {
+ return true;
+ }
+ } // End if x and y are nonpositive
+ } // End if x is nonpositive
+ return false;
+}
+
+mp_int_t mp_small_int_modulo(mp_int_t dividend, mp_int_t divisor) {
+ // Python specs require that mod has same sign as second operand
+ dividend %= divisor;
+ if ((dividend < 0 && divisor > 0) || (dividend > 0 && divisor < 0)) {
+ dividend += divisor;
+ }
+ return dividend;
+}
+
+mp_int_t mp_small_int_floor_divide(mp_int_t num, mp_int_t denom) {
+ if (num >= 0) {
+ if (denom < 0) {
+ num += -denom - 1;
+ }
+ } else {
+ if (denom >= 0) {
+ num += -denom + 1;
+ }
+ }
+ return num / denom;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/smallint.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,68 @@ +/* + * This file is part of the Micro Python project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013, 2014 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef __MICROPY_INCLUDED_PY_SMALLINT_H__ +#define __MICROPY_INCLUDED_PY_SMALLINT_H__ + +#include "py/mpconfig.h" +#include "py/misc.h" + +// Functions for small integer arithmetic + +#ifndef MP_SMALL_INT_MIN + +// In SMALL_INT, next-to-highest bits is used as sign, so both must match for value in range +#if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_A || MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C + +#define MP_SMALL_INT_MIN ((mp_int_t)(((mp_int_t)WORD_MSBIT_HIGH) >> 1)) +#define MP_SMALL_INT_FITS(n) ((((n) ^ ((n) << 1)) & WORD_MSBIT_HIGH) == 0) +// Mask to truncate mp_int_t to positive value +#define MP_SMALL_INT_POSITIVE_MASK ~(WORD_MSBIT_HIGH | (WORD_MSBIT_HIGH >> 1)) + +#elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_B + +#define MP_SMALL_INT_MIN ((mp_int_t)(((mp_int_t)WORD_MSBIT_HIGH) >> 2)) +#define MP_SMALL_INT_FITS(n) ((((n) & MP_SMALL_INT_MIN) == 0) || (((n) & MP_SMALL_INT_MIN) == MP_SMALL_INT_MIN)) +// Mask to truncate mp_int_t to positive value +#define MP_SMALL_INT_POSITIVE_MASK ~(WORD_MSBIT_HIGH | (WORD_MSBIT_HIGH >> 1) | (WORD_MSBIT_HIGH >> 2)) + +#elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D + +#define MP_SMALL_INT_MIN ((mp_int_t)(((mp_int_t)0xffffffff80000000) >> 1)) +#define MP_SMALL_INT_FITS(n) ((((n) ^ ((n) << 1)) & 0xffffffff80000000) == 0) +// Mask to truncate mp_int_t to positive value +#define MP_SMALL_INT_POSITIVE_MASK ~(0xffffffff80000000 | (0xffffffff80000000 >> 1)) + +#endif + +#endif + +#define MP_SMALL_INT_MAX ((mp_int_t)(~(MP_SMALL_INT_MIN))) + +bool mp_small_int_mul_overflow(mp_int_t x, mp_int_t y); +mp_int_t mp_small_int_modulo(mp_int_t dividend, mp_int_t divisor); +mp_int_t mp_small_int_floor_divide(mp_int_t num, mp_int_t denom); + +#endif // __MICROPY_INCLUDED_PY_SMALLINT_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/stackctrl.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,65 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+#include "py/nlr.h"
+#include "py/obj.h"
+#include "py/runtime.h"
+#include "py/stackctrl.h"
+
+void mp_stack_ctrl_init(void) {
+ volatile int stack_dummy;
+ MP_STATE_VM(stack_top) = (char*)&stack_dummy;
+}
+
// Explicitly set the recorded top of the C stack, overriding whatever
// mp_stack_ctrl_init() captured.
void mp_stack_set_top(void *top) {
    MP_STATE_VM(stack_top) = top;
}
+
+mp_uint_t mp_stack_usage(void) {
+ // Assumes descending stack
+ volatile int stack_dummy;
+ return MP_STATE_VM(stack_top) - (char*)&stack_dummy;
+}
+
+#if MICROPY_STACK_CHECK
+
// Set the maximum allowed C stack usage, in bytes, enforced by mp_stack_check().
void mp_stack_set_limit(mp_uint_t limit) {
    MP_STATE_VM(stack_limit) = limit;
}
+
// Raise RuntimeError("maximum recursion depth exceeded") via nlr.
// (The message is qstr-encoded: "_space_" in the identifier stands for ' '.)
void mp_exc_recursion_depth(void) {
    nlr_raise(mp_obj_new_exception_arg1(&mp_type_RuntimeError,
        MP_OBJ_NEW_QSTR(MP_QSTR_maximum_space_recursion_space_depth_space_exceeded)));
}
+
// Raise RuntimeError if C stack usage has reached the configured limit.
// Invoked at recursion points via the MP_STACK_CHECK() macro.
void mp_stack_check(void) {
    if (mp_stack_usage() >= MP_STATE_VM(stack_limit)) {
        mp_exc_recursion_depth();
    }
}
+
+#endif // MICROPY_STACK_CHECK
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/stackctrl.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,48 @@ +/* + * This file is part of the Micro Python project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2014 Paul Sokolovsky + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef __MICROPY_INCLUDED_PY_STACKCTRL_H__ +#define __MICROPY_INCLUDED_PY_STACKCTRL_H__ + +#include "py/mpconfig.h" + +void mp_stack_ctrl_init(void); +void mp_stack_set_top(void *top); +mp_uint_t mp_stack_usage(void); + +#if MICROPY_STACK_CHECK + +void mp_stack_set_limit(mp_uint_t limit); +void mp_stack_check(void); +#define MP_STACK_CHECK() mp_stack_check() + +#else + +#define mp_stack_set_limit(limit) +#define MP_STACK_CHECK() + +#endif + +#endif // __MICROPY_INCLUDED_PY_STACKCTRL_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/stream.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,439 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <unistd.h>
+
+#include "py/nlr.h"
+#include "py/objstr.h"
+#include "py/stream.h"
+
+#if MICROPY_STREAMS_NON_BLOCK
+#include <errno.h>
+#if defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR)
+#define EWOULDBLOCK 140
+#endif
+#endif
+
+// This file defines generic Python stream read/write methods which
+// dispatch to the underlying stream interface of an object.
+
+// TODO: should be in mpconfig.h
+#define DEFAULT_BUFFER_SIZE 256
+
+STATIC mp_obj_t stream_readall(mp_obj_t self_in);
+
+#define STREAM_CONTENT_TYPE(stream) (((stream)->is_text) ? &mp_type_str : &mp_type_bytes)
+
+const mp_stream_p_t *mp_get_stream_raise(mp_obj_t self_in, int flags) {
+ mp_obj_base_t *o = (mp_obj_base_t*)MP_OBJ_TO_PTR(self_in);
+ const mp_stream_p_t *stream_p = o->type->stream_p;
+ if (stream_p == NULL
+ || ((flags & MP_STREAM_OP_READ) && stream_p->read == NULL)
+ || ((flags & MP_STREAM_OP_WRITE) && stream_p->write == NULL)
+ || ((flags & MP_STREAM_OP_IOCTL) && stream_p->ioctl == NULL)) {
+ // CPython: io.UnsupportedOperation, OSError subclass
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_OSError, "stream operation not supported"));
+ }
+ return stream_p;
+}
+
// Python method read([sz]): read up to sz bytes (or, for text-mode streams
// with unicode enabled, sz characters).  sz omitted or -1 reads to EOF.
// Returns None if the stream is non-blocking and no data is available.
STATIC mp_obj_t stream_read(size_t n_args, const mp_obj_t *args) {
    const mp_stream_p_t *stream_p = mp_get_stream_raise(args[0], MP_STREAM_OP_READ);

    // What to do if sz < -1? Python docs don't specify this case.
    // CPython does a readall, but here we silently let negatives through,
    // and they will cause a MemoryError.
    mp_int_t sz;
    if (n_args == 1 || ((sz = mp_obj_get_int(args[1])) == -1)) {
        return stream_readall(args[0]);
    }

    #if MICROPY_PY_BUILTINS_STR_UNICODE
    if (stream_p->is_text) {
        // We need to read sz number of unicode characters. Because we don't have any
        // buffering, and because the stream API can only read bytes, we must read here
        // in units of bytes and must never over read. If we want sz chars, then reading
        // sz bytes will never over-read, so we follow this approach, in a loop to keep
        // reading until we have exactly enough chars. This will be 1 read for text
        // with ASCII-only chars, and about 2 reads for text with a couple of non-ASCII
        // chars. For text with lots of non-ASCII chars, it'll be pretty inefficient
        // in time and memory.

        vstr_t vstr;
        vstr_init(&vstr, sz);
        mp_uint_t more_bytes = sz;      // bytes still to request from the stream
        mp_uint_t last_buf_offset = 0;  // first byte not yet counted as a char
        while (more_bytes > 0) {
            char *p = vstr_add_len(&vstr, more_bytes);
            if (p == NULL) {
                nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_MemoryError, "out of memory"));
            }
            int error;
            mp_uint_t out_sz = stream_p->read(args[0], p, more_bytes, &error);
            if (out_sz == MP_STREAM_ERROR) {
                // Drop the bytes we reserved but did not fill.
                vstr_cut_tail_bytes(&vstr, more_bytes);
                if (mp_is_nonblocking_error(error)) {
                    // With non-blocking streams, we read as much as we can.
                    // If we read nothing, return None, just like read().
                    // Otherwise, return data read so far.
                    // TODO what if we have read only half a non-ASCII char?
                    if (vstr.len == 0) {
                        vstr_clear(&vstr);
                        return mp_const_none;
                    }
                    break;
                }
                nlr_raise(mp_obj_new_exception_arg1(&mp_type_OSError, MP_OBJ_NEW_SMALL_INT(error)));
            }

            if (out_sz < more_bytes) {
                // Finish reading.
                // TODO what if we have read only half a non-ASCII char?
                vstr_cut_tail_bytes(&vstr, more_bytes - out_sz);
                if (out_sz == 0) {
                    break;
                }
            }

            // count chars from bytes just read
            // (classify each lead byte by its UTF-8 length marker, then step
            // over the whole character; a trailing partial char adjusts
            // more_bytes so the next read completes it)
            for (mp_uint_t off = last_buf_offset;;) {
                byte b = vstr.buf[off];
                int n;
                if (!UTF8_IS_NONASCII(b)) {
                    // 1-byte ASCII char
                    n = 1;
                } else if ((b & 0xe0) == 0xc0) {
                    // 2-byte char
                    n = 2;
                } else if ((b & 0xf0) == 0xe0) {
                    // 3-byte char
                    n = 3;
                } else if ((b & 0xf8) == 0xf0) {
                    // 4-byte char
                    n = 4;
                } else {
                    // TODO
                    n = 5;
                }
                if (off + n <= vstr.len) {
                    // got a whole char in n bytes
                    off += n;
                    sz -= 1;
                    last_buf_offset = off;
                    if (off >= vstr.len) {
                        more_bytes = sz;
                        break;
                    }
                } else {
                    // didn't get a whole char, so work out how many extra bytes are needed for
                    // this partial char, plus bytes for additional chars that we want
                    more_bytes = (off + n - vstr.len) + (sz - 1);
                    break;
                }
            }
        }

        return mp_obj_new_str_from_vstr(&mp_type_str, &vstr);
    }
    #endif

    // Byte-mode (or unicode-disabled) path: a single raw read of sz bytes.
    vstr_t vstr;
    vstr_init_len(&vstr, sz);
    int error;
    mp_uint_t out_sz = stream_p->read(args[0], vstr.buf, sz, &error);
    if (out_sz == MP_STREAM_ERROR) {
        vstr_clear(&vstr);
        if (mp_is_nonblocking_error(error)) {
            // https://docs.python.org/3.4/library/io.html#io.RawIOBase.read
            // "If the object is in non-blocking mode and no bytes are available,
            // None is returned."
            // This is actually very weird, as naive truth check will treat
            // this as EOF.
            return mp_const_none;
        }
        nlr_raise(mp_obj_new_exception_arg1(&mp_type_OSError, MP_OBJ_NEW_SMALL_INT(error)));
    } else {
        vstr.len = out_sz;
        return mp_obj_new_str_from_vstr(STREAM_CONTENT_TYPE(stream_p), &vstr);
    }
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_read_obj, 1, 2, stream_read);
+
+mp_obj_t mp_stream_write(mp_obj_t self_in, const void *buf, size_t len) {
+ const mp_stream_p_t *stream_p = mp_get_stream_raise(self_in, MP_STREAM_OP_WRITE);
+
+ int error;
+ mp_uint_t out_sz = stream_p->write(self_in, buf, len, &error);
+ if (out_sz == MP_STREAM_ERROR) {
+ if (mp_is_nonblocking_error(error)) {
+ // http://docs.python.org/3/library/io.html#io.RawIOBase.write
+ // "None is returned if the raw stream is set not to block and
+ // no single byte could be readily written to it."
+ // This is for consistency with read() behavior, still weird,
+ // see abobe.
+ return mp_const_none;
+ }
+ nlr_raise(mp_obj_new_exception_arg1(&mp_type_OSError, MP_OBJ_NEW_SMALL_INT(error)));
+ } else {
+ return MP_OBJ_NEW_SMALL_INT(out_sz);
+ }
+}
+
// XXX hack
// Thin adaptor giving mp_stream_write a plain-C signature (void *self
// instead of mp_obj_t); the write result is discarded.
void mp_stream_write_adaptor(void *self, const char *buf, size_t len) {
    mp_stream_write(MP_OBJ_FROM_PTR(self), buf, len);
}
+
+// Works only with blocking streams
+mp_uint_t mp_stream_writeall(mp_obj_t stream, const byte *buf, mp_uint_t size, int *errcode) {
+ mp_obj_base_t* s = (mp_obj_base_t*)MP_OBJ_TO_PTR(stream);
+ mp_uint_t org_size = size;
+ while (size > 0) {
+ mp_uint_t out_sz = s->type->stream_p->write(stream, buf, size, errcode);
+ if (out_sz == MP_STREAM_ERROR) {
+ return MP_STREAM_ERROR;
+ }
+ buf += out_sz;
+ size -= out_sz;
+ }
+ return org_size;
+}
+
+STATIC mp_obj_t stream_write_method(mp_obj_t self_in, mp_obj_t arg) {
+ mp_buffer_info_t bufinfo;
+ mp_get_buffer_raise(arg, &bufinfo, MP_BUFFER_READ);
+ return mp_stream_write(self_in, bufinfo.buf, bufinfo.len);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_stream_write_obj, stream_write_method);
+
+STATIC mp_obj_t stream_readinto(size_t n_args, const mp_obj_t *args) {
+ const mp_stream_p_t *stream_p = mp_get_stream_raise(args[0], MP_STREAM_OP_READ);
+ mp_buffer_info_t bufinfo;
+ mp_get_buffer_raise(args[1], &bufinfo, MP_BUFFER_WRITE);
+
+ // CPython extension: if 2nd arg is provided, that's max len to read,
+ // instead of full buffer. Similar to
+ // https://docs.python.org/3/library/socket.html#socket.socket.recv_into
+ mp_uint_t len = bufinfo.len;
+ if (n_args > 2) {
+ len = mp_obj_get_int(args[2]);
+ if (len > bufinfo.len) {
+ len = bufinfo.len;
+ }
+ }
+
+ int error;
+ mp_uint_t out_sz = stream_p->read(args[0], bufinfo.buf, len, &error);
+ if (out_sz == MP_STREAM_ERROR) {
+ if (mp_is_nonblocking_error(error)) {
+ return mp_const_none;
+ }
+ nlr_raise(mp_obj_new_exception_arg1(&mp_type_OSError, MP_OBJ_NEW_SMALL_INT(error)));
+ } else {
+ return MP_OBJ_NEW_SMALL_INT(out_sz);
+ }
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_readinto_obj, 2, 3, stream_readinto);
+
// Read from the stream until EOF, growing the result buffer in
// DEFAULT_BUFFER_SIZE increments.  Returns str or bytes depending on the
// stream's text flag; returns None when the stream is non-blocking and
// nothing at all could be read.
STATIC mp_obj_t stream_readall(mp_obj_t self_in) {
    const mp_stream_p_t *stream_p = mp_get_stream_raise(self_in, MP_STREAM_OP_READ);

    mp_uint_t total_size = 0;
    vstr_t vstr;
    vstr_init(&vstr, DEFAULT_BUFFER_SIZE);
    char *p = vstr.buf;
    // current_read tracks how much free space remains at p in the current chunk.
    mp_uint_t current_read = DEFAULT_BUFFER_SIZE;
    while (true) {
        int error;
        mp_uint_t out_sz = stream_p->read(self_in, p, current_read, &error);
        if (out_sz == MP_STREAM_ERROR) {
            if (mp_is_nonblocking_error(error)) {
                // With non-blocking streams, we read as much as we can.
                // If we read nothing, return None, just like read().
                // Otherwise, return data read so far.
                if (total_size == 0) {
                    return mp_const_none;
                }
                break;
            }
            nlr_raise(mp_obj_new_exception_arg1(&mp_type_OSError, MP_OBJ_NEW_SMALL_INT(error)));
        }
        if (out_sz == 0) {
            // EOF
            break;
        }
        total_size += out_sz;
        if (out_sz < current_read) {
            // Short read: keep filling the remainder of the current chunk.
            current_read -= out_sz;
            p += out_sz;
        } else {
            // Chunk filled: extend the buffer by another chunk and continue there.
            p = vstr_extend(&vstr, DEFAULT_BUFFER_SIZE);
            current_read = DEFAULT_BUFFER_SIZE;
            if (p == NULL) {
                // TODO
                nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_OSError/*&mp_type_RuntimeError*/, "Out of memory"));
            }
        }
    }

    vstr.len = total_size;
    return mp_obj_new_str_from_vstr(STREAM_CONTENT_TYPE(stream_p), &vstr);
}
MP_DEFINE_CONST_FUN_OBJ_1(mp_stream_readall_obj, stream_readall);
+
+// Unbuffered, inefficient implementation of readline() for raw I/O files.
+STATIC mp_obj_t stream_unbuffered_readline(size_t n_args, const mp_obj_t *args) {
+ const mp_stream_p_t *stream_p = mp_get_stream_raise(args[0], MP_STREAM_OP_READ);
+
+ mp_int_t max_size = -1;
+ if (n_args > 1) {
+ max_size = MP_OBJ_SMALL_INT_VALUE(args[1]);
+ }
+
+ vstr_t vstr;
+ if (max_size != -1) {
+ vstr_init(&vstr, max_size);
+ } else {
+ vstr_init(&vstr, 16);
+ }
+
+ while (max_size == -1 || max_size-- != 0) {
+ char *p = vstr_add_len(&vstr, 1);
+ if (p == NULL) {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_MemoryError, "out of memory"));
+ }
+
+ int error;
+ mp_uint_t out_sz = stream_p->read(args[0], p, 1, &error);
+ if (out_sz == MP_STREAM_ERROR) {
+ if (mp_is_nonblocking_error(error)) {
+ if (vstr.len == 1) {
+ // We just incremented it, but otherwise we read nothing
+ // and immediately got EAGAIN. This is case is not well
+ // specified in
+ // https://docs.python.org/3/library/io.html#io.IOBase.readline
+ // unlike similar case for read(). But we follow the latter's
+ // behavior - return None.
+ vstr_clear(&vstr);
+ return mp_const_none;
+ } else {
+ goto done;
+ }
+ }
+ nlr_raise(mp_obj_new_exception_arg1(&mp_type_OSError, MP_OBJ_NEW_SMALL_INT(error)));
+ }
+ if (out_sz == 0) {
+done:
+ // Back out previously added byte
+ // Consider, what's better - read a char and get OutOfMemory (so read
+ // char is lost), or allocate first as we do.
+ vstr_cut_tail_bytes(&vstr, 1);
+ break;
+ }
+ if (*p == '\n') {
+ break;
+ }
+ }
+
+ return mp_obj_new_str_from_vstr(STREAM_CONTENT_TYPE(stream_p), &vstr);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_unbuffered_readline_obj, 1, 2, stream_unbuffered_readline);
+
+// TODO take an optional extra argument (what does it do exactly?)
+STATIC mp_obj_t stream_unbuffered_readlines(mp_obj_t self) {
+ mp_obj_t lines = mp_obj_new_list(0, NULL);
+ for (;;) {
+ mp_obj_t line = stream_unbuffered_readline(1, &self);
+ if (!mp_obj_is_true(line)) {
+ break;
+ }
+ mp_obj_list_append(lines, line);
+ }
+ return lines;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_stream_unbuffered_readlines_obj, stream_unbuffered_readlines);
+
+mp_obj_t mp_stream_unbuffered_iter(mp_obj_t self) {
+ mp_obj_t l_in = stream_unbuffered_readline(1, &self);
+ if (mp_obj_is_true(l_in)) {
+ return l_in;
+ }
+ return MP_OBJ_STOP_ITERATION;
+}
+
+STATIC mp_obj_t stream_seek(size_t n_args, const mp_obj_t *args) {
+ const mp_stream_p_t *stream_p = mp_get_stream_raise(args[0], MP_STREAM_OP_IOCTL);
+
+ struct mp_stream_seek_t seek_s;
+ // TODO: Could be uint64
+ seek_s.offset = mp_obj_get_int(args[1]);
+ seek_s.whence = 0;
+ if (n_args == 3) {
+ seek_s.whence = mp_obj_get_int(args[2]);
+ }
+
+ int error;
+ mp_uint_t res = stream_p->ioctl(args[0], MP_STREAM_SEEK, (mp_uint_t)(uintptr_t)&seek_s, &error);
+ if (res == MP_STREAM_ERROR) {
+ nlr_raise(mp_obj_new_exception_arg1(&mp_type_OSError, MP_OBJ_NEW_SMALL_INT(error)));
+ }
+
+ // TODO: Could be uint64
+ return mp_obj_new_int_from_uint(seek_s.offset);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_seek_obj, 2, 3, stream_seek);
+
+STATIC mp_obj_t stream_tell(mp_obj_t self) {
+ mp_obj_t offset = MP_OBJ_NEW_SMALL_INT(0);
+ mp_obj_t whence = MP_OBJ_NEW_SMALL_INT(SEEK_CUR);
+ const mp_obj_t args[3] = {self, offset, whence};
+ return stream_seek(3, args);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_stream_tell_obj, stream_tell);
+
+STATIC mp_obj_t stream_ioctl(size_t n_args, const mp_obj_t *args) {
+ const mp_stream_p_t *stream_p = mp_get_stream_raise(args[0], MP_STREAM_OP_IOCTL);
+
+ mp_buffer_info_t bufinfo;
+ uintptr_t val = 0;
+ if (n_args > 2) {
+ if (mp_get_buffer(args[2], &bufinfo, MP_BUFFER_WRITE)) {
+ val = (uintptr_t)bufinfo.buf;
+ } else {
+ val = mp_obj_get_int_truncated(args[2]);
+ }
+ }
+
+ int error;
+ mp_uint_t res = stream_p->ioctl(args[0], mp_obj_get_int(args[1]), val, &error);
+ if (res == MP_STREAM_ERROR) {
+ nlr_raise(mp_obj_new_exception_arg1(&mp_type_OSError, MP_OBJ_NEW_SMALL_INT(error)));
+ }
+
+ return mp_obj_new_int(res);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_ioctl_obj, 2, 3, stream_ioctl);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/stream.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,84 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_STREAM_H__
+#define __MICROPY_INCLUDED_PY_STREAM_H__
+
+#include "py/obj.h"
+
+#define MP_STREAM_ERROR ((mp_uint_t)-1)
+
+// Stream ioctl request codes
+#define MP_STREAM_FLUSH (1)
+#define MP_STREAM_SEEK (2)
+#define MP_STREAM_POLL (3)
+//#define MP_STREAM_CLOSE (4) // Not yet implemented
+#define MP_STREAM_TIMEOUT (5) // Get/set timeout (single op)
+#define MP_STREAM_GET_OPTS (6) // Get stream options
+#define MP_STREAM_SET_OPTS (7) // Set stream options
+#define MP_STREAM_GET_DATA_OPTS (8) // Get data/message options
+#define MP_STREAM_SET_DATA_OPTS (9) // Set data/message options
+
+// Argument structure for MP_STREAM_SEEK
+struct mp_stream_seek_t {
+ mp_off_t offset;
+ int whence;
+};
+
+MP_DECLARE_CONST_FUN_OBJ(mp_stream_read_obj);
+MP_DECLARE_CONST_FUN_OBJ(mp_stream_readinto_obj);
+MP_DECLARE_CONST_FUN_OBJ(mp_stream_readall_obj);
+MP_DECLARE_CONST_FUN_OBJ(mp_stream_unbuffered_readline_obj);
+MP_DECLARE_CONST_FUN_OBJ(mp_stream_unbuffered_readlines_obj);
+MP_DECLARE_CONST_FUN_OBJ(mp_stream_write_obj);
+MP_DECLARE_CONST_FUN_OBJ(mp_stream_seek_obj);
+MP_DECLARE_CONST_FUN_OBJ(mp_stream_tell_obj);
+MP_DECLARE_CONST_FUN_OBJ(mp_stream_ioctl_obj);
+
+// these are for mp_get_stream_raise and can be or'd together
+#define MP_STREAM_OP_READ (1)
+#define MP_STREAM_OP_WRITE (2)
+#define MP_STREAM_OP_IOCTL (4)
+
+const mp_stream_p_t *mp_get_stream_raise(mp_obj_t self_in, int flags);
+
+// Iterator which uses mp_stream_unbuffered_readline_obj
+mp_obj_t mp_stream_unbuffered_iter(mp_obj_t self);
+
+mp_obj_t mp_stream_write(mp_obj_t self_in, const void *buf, size_t len);
+
+// Helper function to write entire buf to *blocking* stream
+mp_uint_t mp_stream_writeall(mp_obj_t stream, const byte *buf, mp_uint_t size, int *errcode);
+
+#if MICROPY_STREAMS_NON_BLOCK
+// TODO: This is POSIX-specific (but then POSIX is the only real thing,
+// and anything else just emulates it, right?)
+#define mp_is_nonblocking_error(errno) ((errno) == EAGAIN || (errno) == EWOULDBLOCK)
+#else
+#define mp_is_nonblocking_error(errno) (0)
+#endif
+
+#endif // __MICROPY_INCLUDED_PY_STREAM_H__
+void mp_stream_write_adaptor(void *self, const char *buf, size_t len);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/unicode.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,182 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+
+#include "py/unicode.h"
+
+// attribute flags
+#define FL_PRINT (0x01)
+#define FL_SPACE (0x02)
+#define FL_DIGIT (0x04)
+#define FL_ALPHA (0x08)
+#define FL_UPPER (0x10)
+#define FL_LOWER (0x20)
+#define FL_XDIGIT (0x40)
+
+// shorthand character attributes
+#define AT_PR (FL_PRINT)
+#define AT_SP (FL_SPACE | FL_PRINT)
+#define AT_DI (FL_DIGIT | FL_PRINT | FL_XDIGIT)
+#define AT_AL (FL_ALPHA | FL_PRINT)
+#define AT_UP (FL_UPPER | FL_ALPHA | FL_PRINT)
+#define AT_LO (FL_LOWER | FL_ALPHA | FL_PRINT)
+#define AT_UX (FL_UPPER | FL_ALPHA | FL_PRINT | FL_XDIGIT)
+#define AT_LX (FL_LOWER | FL_ALPHA | FL_PRINT | FL_XDIGIT)
+
// table of attributes for ascii characters
// Indexed by code point 0..127, eight entries per row; code points >= 128
// are never looked up (all unichar_is* callers guard with c < 128).
STATIC const uint8_t attr[] = {
    0, 0, 0, 0, 0, 0, 0, 0,
    0, AT_SP, AT_SP, AT_SP, AT_SP, AT_SP, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    AT_SP, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR,
    AT_PR, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR,
    AT_DI, AT_DI, AT_DI, AT_DI, AT_DI, AT_DI, AT_DI, AT_DI,
    AT_DI, AT_DI, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR,
    AT_PR, AT_UX, AT_UX, AT_UX, AT_UX, AT_UX, AT_UX, AT_UP,
    AT_UP, AT_UP, AT_UP, AT_UP, AT_UP, AT_UP, AT_UP, AT_UP,
    AT_UP, AT_UP, AT_UP, AT_UP, AT_UP, AT_UP, AT_UP, AT_UP,
    AT_UP, AT_UP, AT_UP, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR,
    AT_PR, AT_LX, AT_LX, AT_LX, AT_LX, AT_LX, AT_LX, AT_LO,
    AT_LO, AT_LO, AT_LO, AT_LO, AT_LO, AT_LO, AT_LO, AT_LO,
    AT_LO, AT_LO, AT_LO, AT_LO, AT_LO, AT_LO, AT_LO, AT_LO,
    AT_LO, AT_LO, AT_LO, AT_PR, AT_PR, AT_PR, AT_PR, 0
};
+
// TODO: Rename to str_get_char
// Decode the (UTF-8) character starting at s and return its code point.
// With unicode disabled, simply returns the byte at s.
unichar utf8_get_char(const byte *s) {
#if MICROPY_PY_BUILTINS_STR_UNICODE
    unichar ord = *s++;
    if (!UTF8_IS_NONASCII(ord)) return ord;
    // Multi-byte sequence: strip the run of length-marker bits from the
    // lead byte (e.g. 0b110xxxxx keeps 5 payload bits, 0b1110xxxx keeps 4).
    ord &= 0x7F;
    for (unichar mask = 0x40; ord & mask; mask >>= 1) {
        ord &= ~mask;
    }
    // Accumulate 6 payload bits from each continuation byte (0b10xxxxxx).
    while (UTF8_IS_CONT(*s)) {
        ord = (ord << 6) | (*s++ & 0x3F);
    }
    return ord;
#else
    return *s;
#endif
}
+
+// TODO: Rename to str_next_char
+const byte *utf8_next_char(const byte *s) {
+#if MICROPY_PY_BUILTINS_STR_UNICODE
+ ++s;
+ while (UTF8_IS_CONT(*s)) {
+ ++s;
+ }
+ return s;
+#else
+ return s + 1;
+#endif
+}
+
+mp_uint_t utf8_ptr_to_index(const byte *s, const byte *ptr) {
+ mp_uint_t i = 0;
+ while (ptr > s) {
+ if (!UTF8_IS_CONT(*--ptr)) {
+ i++;
+ }
+ }
+
+ return i;
+}
+
+// TODO: Rename to str_charlen
+mp_uint_t unichar_charlen(const char *str, mp_uint_t len) {
+#if MICROPY_PY_BUILTINS_STR_UNICODE
+ mp_uint_t charlen = 0;
+ for (const char *top = str + len; str < top; ++str) {
+ if (!UTF8_IS_CONT(*str)) {
+ ++charlen;
+ }
+ }
+ return charlen;
+#else
+ return len;
+#endif
+}
+
// Be aware: These unichar_is* functions are actually ASCII-only!
// True for ASCII whitespace: HT, LF, VT, FF, CR (codes 9-13) and space.
bool unichar_isspace(unichar c) {
    return c < 128 && (attr[c] & FL_SPACE) != 0;
}
+
// True for ASCII letters 'A'-'Z' and 'a'-'z'.
bool unichar_isalpha(unichar c) {
    return c < 128 && (attr[c] & FL_ALPHA) != 0;
}
+
// True for entries carrying FL_PRINT in attr; note the AT_SP whitespace
// entries (HT..CR) also carry FL_PRINT, so they count as printable here.
bool unichar_isprint(unichar c) {
    return c < 128 && (attr[c] & FL_PRINT) != 0;
}
+
// True for decimal digits '0'-'9'.
bool unichar_isdigit(unichar c) {
    return c < 128 && (attr[c] & FL_DIGIT) != 0;
}
+
// True for hexadecimal digits: '0'-'9', 'A'-'F', 'a'-'f'.
bool unichar_isxdigit(unichar c) {
    return c < 128 && (attr[c] & FL_XDIGIT) != 0;
}
+
// True for characters valid inside an identifier: letters, digits, '_'.
bool unichar_isident(unichar c) {
    return c < 128 && ((attr[c] & (FL_ALPHA | FL_DIGIT)) != 0 || c == '_');
}
+
// True for ASCII upper-case letters 'A'-'Z'.
bool unichar_isupper(unichar c) {
    return c < 128 && (attr[c] & FL_UPPER) != 0;
}
+
// True for ASCII lower-case letters 'a'-'z'.
bool unichar_islower(unichar c) {
    return c < 128 && (attr[c] & FL_LOWER) != 0;
}
+
+unichar unichar_tolower(unichar c) {
+ if (unichar_isupper(c)) {
+ return c + 0x20;
+ }
+ return c;
+}
+
+unichar unichar_toupper(unichar c) {
+ if (unichar_islower(c)) {
+ return c - 0x20;
+ }
+ return c;
+}
+
// Numeric value (0-15) of a hex digit.
mp_uint_t unichar_xdigit_value(unichar c) {
    // c is assumed to be hex digit
    mp_uint_t n = c - '0';
    if (n > 9) {
        // Letter: clear the case bit ('a'-'A' == 0x20) to fold to upper
        // case, then rebase so that 'A' maps to 10.
        n &= ~('a' - 'A');
        n -= ('A' - ('9' + 1));
    }
    return n;
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/py/unicode.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,34 @@ +/* + * This file is part of the Micro Python project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2014 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef __MICROPY_INCLUDED_PY_UNICODE_H__ +#define __MICROPY_INCLUDED_PY_UNICODE_H__ + +#include "py/mpconfig.h" +#include "py/misc.h" + +mp_uint_t utf8_ptr_to_index(const byte *s, const byte *ptr); + +#endif // __MICROPY_INCLUDED_PY_UNICODE_H__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/vm.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,1391 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/mpstate.h"
+#include "py/nlr.h"
+#include "py/emitglue.h"
+#include "py/objtype.h"
+#include "py/runtime.h"
+#include "py/bc0.h"
+#include "py/bc.h"
+
+// Per-opcode tracing hook: when the `#if 0` below is flipped to 1, every
+// dispatch prints the current stack depth and disassembles the opcode via
+// mp_bytecode_print2. Compiled out (empty macro) by default.
+#if 0
+//#define TRACE(ip) printf("sp=" INT_FMT " ", sp - code_state->sp); mp_bytecode_print2(ip, 1);
+#define TRACE(ip) printf("sp=%d ", sp - code_state->sp); mp_bytecode_print2(ip, 1);
+#else
+#define TRACE(ip)
+#endif
+
+// Value stack grows up (this makes it incompatible with native C stack, but
+// makes sure that arguments to functions are in natural order arg1..argN
+// (Python semantics mandates left-to-right evaluation order, including for
+// function arguments). Stack pointer is pre-incremented and points at the
+// top element.
+// Exception stack also grows up, top element is also pointed at.
+
+// Exception stack unwind reasons (WHY_* in CPython-speak)
+// TODO perhaps compress this to RETURN=0, JUMP>0, with number of unwinds
+// left to do encoded in the JUMP number
+// Values start at 1 so that, when boxed as a small int and pushed on the
+// value stack as a sentinel, they are distinguishable from other markers.
+typedef enum {
+ UNWIND_RETURN = 1,
+ UNWIND_JUMP,
+} mp_unwind_reason_t;
+
+// Decode a variable-length unsigned integer from the bytecode stream into
+// `unum`: 7 payload bits per byte, high bit set means another byte follows.
+#define DECODE_UINT \
+ mp_uint_t unum = 0; \
+ do { \
+ unum = (unum << 7) + (*ip & 0x7f); \
+ } while ((*ip++ & 0x80) != 0)
+// Decode a 16-bit little-endian jump label into `ulab` (unsigned, always a
+// forward offset) or `slab` (signed: stored with a 0x8000 bias).
+#define DECODE_ULABEL mp_uint_t ulab = (ip[0] | (ip[1] << 8)); ip += 2
+#define DECODE_SLABEL mp_uint_t slab = (ip[0] | (ip[1] << 8)) - 0x8000; ip += 2
+
+#if MICROPY_PERSISTENT_CODE
+
+// Persistent-code layout: qstrs are fixed-width 16-bit little-endian values,
+// and pointers/objects are stored as indices into code_state->const_table so
+// the bytecode itself contains no raw addresses.
+#define DECODE_QSTR \
+ qstr qst = ip[0] | ip[1] << 8; \
+ ip += 2;
+#define DECODE_PTR \
+ DECODE_UINT; \
+ void *ptr = (void*)(uintptr_t)code_state->const_table[unum]
+#define DECODE_OBJ \
+ DECODE_UINT; \
+ mp_obj_t obj = (mp_obj_t)code_state->const_table[unum]
+
+#else
+
+// Non-persistent layout: qstrs use the same 7-bits-per-byte varint encoding
+// as DECODE_UINT, and pointers/objects are embedded directly in the bytecode,
+// aligned to their natural size (hence the MP_ALIGN of ip before the read).
+#define DECODE_QSTR qstr qst = 0; \
+ do { \
+ qst = (qst << 7) + (*ip & 0x7f); \
+ } while ((*ip++ & 0x80) != 0)
+#define DECODE_PTR \
+ ip = (byte*)MP_ALIGN(ip, sizeof(void*)); \
+ void *ptr = *(void**)ip; \
+ ip += sizeof(void*)
+#define DECODE_OBJ \
+ ip = (byte*)MP_ALIGN(ip, sizeof(mp_obj_t)); \
+ mp_obj_t obj = *(mp_obj_t*)ip; \
+ ip += sizeof(mp_obj_t)
+
+#endif
+
+// Value-stack primitives. The stack grows upward and sp points at the top
+// element, so PUSH pre-increments and POP post-decrements (see the comment
+// above about argument ordering).
+#define PUSH(val) *++sp = (val)
+#define POP() (*sp--)
+#define TOP() (*sp)
+#define SET_TOP(val) *sp = (val)
+
+// Reset the exception recorded for sys.exc_info(); compiles to nothing when
+// that feature is disabled.
+#if MICROPY_PY_SYS_EXC_INFO
+#define CLEAR_SYS_EXC_INFO() MP_STATE_VM(cur_exception) = NULL;
+#else
+#define CLEAR_SYS_EXC_INFO()
+#endif
+
+// Push a new entry on the exception stack for SETUP_EXCEPT/FINALLY/WITH.
+// The saved value-stack pointer is a tagged pointer: tag bit 1 records
+// whether this is a with/finally block, tag bit 0 saves the previous
+// currently_in_except_block state so POP_EXC_BLOCK can restore it.
+#define PUSH_EXC_BLOCK(with_or_finally) do { \
+ DECODE_ULABEL; /* except labels are always forward */ \
+ ++exc_sp; \
+ exc_sp->handler = ip + ulab; \
+ exc_sp->val_sp = MP_TAGPTR_MAKE(sp, ((with_or_finally) << 1) | currently_in_except_block); \
+ exc_sp->prev_exc = NULL; \
+ currently_in_except_block = 0; /* in a try block now */ \
+} while (0)
+
+// Pop the top exception-stack entry and restore the saved in-except-block
+// flag. Note: not wrapped in do/while, so it expands to two statements.
+#define POP_EXC_BLOCK() \
+ currently_in_except_block = MP_TAGPTR_TAG0(exc_sp->val_sp); /* restore previous state */ \
+ exc_sp--; /* pop back to previous exception handler */ \
+ CLEAR_SYS_EXC_INFO() /* just clear sys.exc_info(), not compliant, but it shouldn't be used in 1st place */
+
+// fastn has items in reverse order (fastn[0] is local[0], fastn[-1] is local[1], etc)
+// sp points to bottom of stack which grows up
+// returns:
+// MP_VM_RETURN_NORMAL, sp valid, return value in *sp
+// MP_VM_RETURN_YIELD, ip, sp valid, yielded value in *sp
+// MP_VM_RETURN_EXCEPTION, exception in fastn[0]
+mp_vm_return_kind_t mp_execute_bytecode(mp_code_state *code_state, volatile mp_obj_t inject_exc) {
+#define SELECTIVE_EXC_IP (0)
+#if SELECTIVE_EXC_IP
+#define MARK_EXC_IP_SELECTIVE() { code_state->ip = ip; } /* stores ip 1 byte past last opcode */
+#define MARK_EXC_IP_GLOBAL()
+#else
+#define MARK_EXC_IP_SELECTIVE()
+#define MARK_EXC_IP_GLOBAL() { code_state->ip = ip; } /* stores ip pointing to last opcode */
+#endif
+#if MICROPY_OPT_COMPUTED_GOTO
+ #include "py/vmentrytable.h"
+ #define DISPATCH() do { \
+ TRACE(ip); \
+ MARK_EXC_IP_GLOBAL(); \
+ goto *entry_table[*ip++]; \
+ } while (0)
+ #define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check
+ #define ENTRY(op) entry_##op
+ #define ENTRY_DEFAULT entry_default
+#else
+ #define DISPATCH() break
+ #define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check
+ #define ENTRY(op) case op
+ #define ENTRY_DEFAULT default
+#endif
+
+ // nlr_raise needs to be implemented as a goto, so that the C compiler's flow analyser
+ // sees that it's possible for us to jump from the dispatch loop to the exception
+ // handler. Without this, the code may have a different stack layout in the dispatch
+ // loop and the exception handler, leading to very obscure bugs.
+ #define RAISE(o) do { nlr_pop(); nlr.ret_val = MP_OBJ_TO_PTR(o); goto exception_handler; } while (0)
+
+#if MICROPY_STACKLESS
+run_code_state: ;
+#endif
+ // Pointers which are constant for particular invocation of mp_execute_bytecode()
+ mp_obj_t * /*const*/ fastn = &code_state->state[code_state->n_state - 1];
+ mp_exc_stack_t * /*const*/ exc_stack = (mp_exc_stack_t*)(code_state->state + code_state->n_state);
+
+ // variables that are visible to the exception handler (declared volatile)
+ volatile bool currently_in_except_block = MP_TAGPTR_TAG0(code_state->exc_sp); // 0 or 1, to detect nested exceptions
+ mp_exc_stack_t *volatile exc_sp = MP_TAGPTR_PTR(code_state->exc_sp); // stack grows up, exc_sp points to top of stack
+
+ // outer exception handling loop
+ for (;;) {
+ nlr_buf_t nlr;
+outer_dispatch_loop:
+ if (nlr_push(&nlr) == 0) {
+ // local variables that are not visible to the exception handler
+ const byte *ip = code_state->ip;
+ mp_obj_t *sp = code_state->sp;
+ mp_obj_t obj_shared;
+ MICROPY_VM_HOOK_INIT
+
+ // If we have exception to inject, now that we finish setting up
+ // execution context, raise it. This works as if RAISE_VARARGS
+ // bytecode was executed.
+ // Injecting exc into yield from generator is a special case,
+ // handled by MP_BC_YIELD_FROM itself
+ if (inject_exc != MP_OBJ_NULL && *ip != MP_BC_YIELD_FROM) {
+ mp_obj_t exc = inject_exc;
+ inject_exc = MP_OBJ_NULL;
+ exc = mp_make_raise_obj(exc);
+ RAISE(exc);
+ }
+
+ // loop to execute byte code
+ for (;;) {
+dispatch_loop:
+#if MICROPY_OPT_COMPUTED_GOTO
+ DISPATCH();
+#else
+ TRACE(ip);
+ MARK_EXC_IP_GLOBAL();
+ switch (*ip++) {
+#endif
+
+ ENTRY(MP_BC_LOAD_CONST_FALSE):
+ PUSH(mp_const_false);
+ DISPATCH();
+
+ ENTRY(MP_BC_LOAD_CONST_NONE):
+ PUSH(mp_const_none);
+ DISPATCH();
+
+ ENTRY(MP_BC_LOAD_CONST_TRUE):
+ PUSH(mp_const_true);
+ DISPATCH();
+
+ ENTRY(MP_BC_LOAD_CONST_SMALL_INT): {
+ mp_int_t num = 0;
+ if ((ip[0] & 0x40) != 0) {
+ // Number is negative
+ num--;
+ }
+ do {
+ num = (num << 7) | (*ip & 0x7f);
+ } while ((*ip++ & 0x80) != 0);
+ PUSH(MP_OBJ_NEW_SMALL_INT(num));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LOAD_CONST_STRING): {
+ DECODE_QSTR;
+ PUSH(MP_OBJ_NEW_QSTR(qst));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LOAD_CONST_OBJ): {
+ DECODE_OBJ;
+ PUSH(obj);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LOAD_NULL):
+ PUSH(MP_OBJ_NULL);
+ DISPATCH();
+
+ ENTRY(MP_BC_LOAD_FAST_N): {
+ DECODE_UINT;
+ obj_shared = fastn[-unum];
+ load_check:
+ if (obj_shared == MP_OBJ_NULL) {
+ local_name_error: {
+ MARK_EXC_IP_SELECTIVE();
+ mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NameError, "local variable referenced before assignment");
+ RAISE(obj);
+ }
+ }
+ PUSH(obj_shared);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LOAD_DEREF): {
+ DECODE_UINT;
+ obj_shared = mp_obj_cell_get(fastn[-unum]);
+ goto load_check;
+ }
+
+ #if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
+ ENTRY(MP_BC_LOAD_NAME): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ PUSH(mp_load_name(qst));
+ DISPATCH();
+ }
+ #else
+ ENTRY(MP_BC_LOAD_NAME): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_obj_t key = MP_OBJ_NEW_QSTR(qst);
+ mp_uint_t x = *ip;
+ if (x < MP_STATE_CTX(dict_locals)->map.alloc && MP_STATE_CTX(dict_locals)->map.table[x].key == key) {
+ PUSH(MP_STATE_CTX(dict_locals)->map.table[x].value);
+ } else {
+ mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_CTX(dict_locals)->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ *(byte*)ip = (elem - &MP_STATE_CTX(dict_locals)->map.table[0]) & 0xff;
+ PUSH(elem->value);
+ } else {
+ PUSH(mp_load_name(MP_OBJ_QSTR_VALUE(key)));
+ }
+ }
+ ip++;
+ DISPATCH();
+ }
+ #endif
+
+ #if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
+ ENTRY(MP_BC_LOAD_GLOBAL): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ PUSH(mp_load_global(qst));
+ DISPATCH();
+ }
+ #else
+ ENTRY(MP_BC_LOAD_GLOBAL): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_obj_t key = MP_OBJ_NEW_QSTR(qst);
+ mp_uint_t x = *ip;
+ if (x < MP_STATE_CTX(dict_globals)->map.alloc && MP_STATE_CTX(dict_globals)->map.table[x].key == key) {
+ PUSH(MP_STATE_CTX(dict_globals)->map.table[x].value);
+ } else {
+ mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_CTX(dict_globals)->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ *(byte*)ip = (elem - &MP_STATE_CTX(dict_globals)->map.table[0]) & 0xff;
+ PUSH(elem->value);
+ } else {
+ PUSH(mp_load_global(MP_OBJ_QSTR_VALUE(key)));
+ }
+ }
+ ip++;
+ DISPATCH();
+ }
+ #endif
+
+ #if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
+ ENTRY(MP_BC_LOAD_ATTR): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ SET_TOP(mp_load_attr(TOP(), qst));
+ DISPATCH();
+ }
+ #else
+ ENTRY(MP_BC_LOAD_ATTR): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_obj_t top = TOP();
+ if (mp_obj_get_type(top)->attr == mp_obj_instance_attr) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(top);
+ mp_uint_t x = *ip;
+ mp_obj_t key = MP_OBJ_NEW_QSTR(qst);
+ mp_map_elem_t *elem;
+ if (x < self->members.alloc && self->members.table[x].key == key) {
+ elem = &self->members.table[x];
+ } else {
+ elem = mp_map_lookup(&self->members, key, MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ *(byte*)ip = elem - &self->members.table[0];
+ } else {
+ goto load_attr_cache_fail;
+ }
+ }
+ SET_TOP(elem->value);
+ ip++;
+ DISPATCH();
+ }
+ load_attr_cache_fail:
+ SET_TOP(mp_load_attr(top, qst));
+ ip++;
+ DISPATCH();
+ }
+ #endif
+
+ ENTRY(MP_BC_LOAD_METHOD): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_load_method(*sp, qst, sp);
+ sp += 1;
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LOAD_BUILD_CLASS):
+ MARK_EXC_IP_SELECTIVE();
+ PUSH(mp_load_build_class());
+ DISPATCH();
+
+ ENTRY(MP_BC_LOAD_SUBSCR): {
+ MARK_EXC_IP_SELECTIVE();
+ mp_obj_t index = POP();
+ SET_TOP(mp_obj_subscr(TOP(), index, MP_OBJ_SENTINEL));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_STORE_FAST_N): {
+ DECODE_UINT;
+ fastn[-unum] = POP();
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_STORE_DEREF): {
+ DECODE_UINT;
+ mp_obj_cell_set(fastn[-unum], POP());
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_STORE_NAME): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_store_name(qst, POP());
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_STORE_GLOBAL): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_store_global(qst, POP());
+ DISPATCH();
+ }
+
+ #if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
+ ENTRY(MP_BC_STORE_ATTR): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_store_attr(sp[0], qst, sp[-1]);
+ sp -= 2;
+ DISPATCH();
+ }
+ #else
+ // This caching code works with MICROPY_PY_BUILTINS_PROPERTY and/or
+ // MICROPY_PY_DESCRIPTORS enabled because if the attr exists in
+ // self->members then it can't be a property or have descriptors. A
+ // consequence of this is that we can't use MP_MAP_LOOKUP_ADD_IF_NOT_FOUND
+ // in the fast-path below, because that store could override a property.
+ ENTRY(MP_BC_STORE_ATTR): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_obj_t top = TOP();
+ if (mp_obj_get_type(top)->attr == mp_obj_instance_attr && sp[-1] != MP_OBJ_NULL) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(top);
+ mp_uint_t x = *ip;
+ mp_obj_t key = MP_OBJ_NEW_QSTR(qst);
+ mp_map_elem_t *elem;
+ if (x < self->members.alloc && self->members.table[x].key == key) {
+ elem = &self->members.table[x];
+ } else {
+ elem = mp_map_lookup(&self->members, key, MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ *(byte*)ip = elem - &self->members.table[0];
+ } else {
+ goto store_attr_cache_fail;
+ }
+ }
+ elem->value = sp[-1];
+ sp -= 2;
+ ip++;
+ DISPATCH();
+ }
+ store_attr_cache_fail:
+ mp_store_attr(sp[0], qst, sp[-1]);
+ sp -= 2;
+ ip++;
+ DISPATCH();
+ }
+ #endif
+
+ ENTRY(MP_BC_STORE_SUBSCR):
+ MARK_EXC_IP_SELECTIVE();
+ mp_obj_subscr(sp[-1], sp[0], sp[-2]);
+ sp -= 3;
+ DISPATCH();
+
+ ENTRY(MP_BC_DELETE_FAST): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ if (fastn[-unum] == MP_OBJ_NULL) {
+ goto local_name_error;
+ }
+ fastn[-unum] = MP_OBJ_NULL;
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_DELETE_DEREF): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ if (mp_obj_cell_get(fastn[-unum]) == MP_OBJ_NULL) {
+ goto local_name_error;
+ }
+ mp_obj_cell_set(fastn[-unum], MP_OBJ_NULL);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_DELETE_NAME): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_delete_name(qst);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_DELETE_GLOBAL): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_delete_global(qst);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_DUP_TOP): {
+ mp_obj_t top = TOP();
+ PUSH(top);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_DUP_TOP_TWO):
+ sp += 2;
+ sp[0] = sp[-2];
+ sp[-1] = sp[-3];
+ DISPATCH();
+
+ ENTRY(MP_BC_POP_TOP):
+ sp -= 1;
+ DISPATCH();
+
+ ENTRY(MP_BC_ROT_TWO): {
+ mp_obj_t top = sp[0];
+ sp[0] = sp[-1];
+ sp[-1] = top;
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_ROT_THREE): {
+ mp_obj_t top = sp[0];
+ sp[0] = sp[-1];
+ sp[-1] = sp[-2];
+ sp[-2] = top;
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_JUMP): {
+ DECODE_SLABEL;
+ ip += slab;
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ ENTRY(MP_BC_POP_JUMP_IF_TRUE): {
+ DECODE_SLABEL;
+ if (mp_obj_is_true(POP())) {
+ ip += slab;
+ }
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ ENTRY(MP_BC_POP_JUMP_IF_FALSE): {
+ DECODE_SLABEL;
+ if (!mp_obj_is_true(POP())) {
+ ip += slab;
+ }
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ ENTRY(MP_BC_JUMP_IF_TRUE_OR_POP): {
+ DECODE_SLABEL;
+ if (mp_obj_is_true(TOP())) {
+ ip += slab;
+ } else {
+ sp--;
+ }
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ ENTRY(MP_BC_JUMP_IF_FALSE_OR_POP): {
+ DECODE_SLABEL;
+ if (mp_obj_is_true(TOP())) {
+ sp--;
+ } else {
+ ip += slab;
+ }
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ ENTRY(MP_BC_SETUP_WITH): {
+ MARK_EXC_IP_SELECTIVE();
+ // stack: (..., ctx_mgr)
+ mp_obj_t obj = TOP();
+ mp_load_method(obj, MP_QSTR___exit__, sp);
+ mp_load_method(obj, MP_QSTR___enter__, sp + 2);
+ mp_obj_t ret = mp_call_method_n_kw(0, 0, sp + 2);
+ sp += 1;
+ PUSH_EXC_BLOCK(1);
+ PUSH(ret);
+ // stack: (..., __exit__, ctx_mgr, as_value)
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_WITH_CLEANUP): {
+ MARK_EXC_IP_SELECTIVE();
+ // Arriving here, there's "exception control block" on top of stack,
+ // and __exit__ method (with self) underneath it. Bytecode calls __exit__,
+ // and "deletes" it off stack, shifting "exception control block"
+ // to its place.
+ if (TOP() == mp_const_none) {
+ // stack: (..., __exit__, ctx_mgr, None)
+ sp[1] = mp_const_none;
+ sp[2] = mp_const_none;
+ sp -= 2;
+ mp_call_method_n_kw(3, 0, sp);
+ SET_TOP(mp_const_none);
+ } else if (MP_OBJ_IS_SMALL_INT(TOP())) {
+ mp_int_t cause_val = MP_OBJ_SMALL_INT_VALUE(TOP());
+ if (cause_val == UNWIND_RETURN) {
+ // stack: (..., __exit__, ctx_mgr, ret_val, UNWIND_RETURN)
+ mp_obj_t ret_val = sp[-1];
+ sp[-1] = mp_const_none;
+ sp[0] = mp_const_none;
+ sp[1] = mp_const_none;
+ mp_call_method_n_kw(3, 0, sp - 3);
+ sp[-3] = ret_val;
+ sp[-2] = MP_OBJ_NEW_SMALL_INT(UNWIND_RETURN);
+ } else {
+ assert(cause_val == UNWIND_JUMP);
+ // stack: (..., __exit__, ctx_mgr, dest_ip, num_exc, UNWIND_JUMP)
+ mp_obj_t dest_ip = sp[-2];
+ mp_obj_t num_exc = sp[-1];
+ sp[-2] = mp_const_none;
+ sp[-1] = mp_const_none;
+ sp[0] = mp_const_none;
+ mp_call_method_n_kw(3, 0, sp - 4);
+ sp[-4] = dest_ip;
+ sp[-3] = num_exc;
+ sp[-2] = MP_OBJ_NEW_SMALL_INT(UNWIND_JUMP);
+ }
+ sp -= 2; // we removed (__exit__, ctx_mgr)
+ } else {
+ assert(mp_obj_is_exception_type(TOP()));
+ // stack: (..., __exit__, ctx_mgr, traceback, exc_val, exc_type)
+ // Need to pass (sp[0], sp[-1], sp[-2]) as arguments so must reverse the
+ // order of these on the value stack (don't want to create a temporary
+ // array because it increases stack footprint of the VM).
+ mp_obj_t obj = sp[-2];
+ sp[-2] = sp[0];
+ sp[0] = obj;
+ mp_obj_t ret_value = mp_call_method_n_kw(3, 0, sp - 4);
+ if (mp_obj_is_true(ret_value)) {
+ // We need to silence/swallow the exception. This is done
+ // by popping the exception and the __exit__ handler and
+ // replacing it with None, which signals END_FINALLY to just
+ // execute the finally handler normally.
+ sp -= 4;
+ SET_TOP(mp_const_none);
+ assert(exc_sp >= exc_stack);
+ POP_EXC_BLOCK();
+ } else {
+ // We need to re-raise the exception. We pop __exit__ handler
+ // and copy the 3 exception values down (remembering that they
+ // are reversed due to above code).
+ sp[-4] = sp[0];
+ sp[-3] = sp[-1];
+ sp -= 2;
+ }
+ }
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_UNWIND_JUMP): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_SLABEL;
+ PUSH((mp_obj_t)(mp_uint_t)(uintptr_t)(ip + slab)); // push destination ip for jump
+ PUSH((mp_obj_t)(mp_uint_t)(*ip)); // push number of exception handlers to unwind (0x80 bit set if we also need to pop stack)
+unwind_jump:;
+ mp_uint_t unum = (mp_uint_t)POP(); // get number of exception handlers to unwind
+ while ((unum & 0x7f) > 0) {
+ unum -= 1;
+ assert(exc_sp >= exc_stack);
+ if (MP_TAGPTR_TAG1(exc_sp->val_sp)) {
+ // Getting here the stack looks like:
+ // (..., X, dest_ip)
+ // where X is pointed to by exc_sp->val_sp and in the case
+ // of a "with" block contains the context manager info.
+ // We're going to run "finally" code as a coroutine
+ // (not calling it recursively). Set up a sentinel
+ // on a stack so it can return back to us when it is
+ // done (when WITH_CLEANUP or END_FINALLY reached).
+ PUSH((mp_obj_t)unum); // push number of exception handlers left to unwind
+ PUSH(MP_OBJ_NEW_SMALL_INT(UNWIND_JUMP)); // push sentinel
+ ip = exc_sp->handler; // get exception handler byte code address
+ exc_sp--; // pop exception handler
+ goto dispatch_loop; // run the exception handler
+ }
+ POP_EXC_BLOCK();
+ }
+ ip = (const byte*)MP_OBJ_TO_PTR(POP()); // pop destination ip for jump
+ if (unum != 0) {
+ sp--;
+ }
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ // matched against: POP_BLOCK or POP_EXCEPT (anything else?)
+ ENTRY(MP_BC_SETUP_EXCEPT):
+ ENTRY(MP_BC_SETUP_FINALLY): {
+ MARK_EXC_IP_SELECTIVE();
+ #if SELECTIVE_EXC_IP
+ PUSH_EXC_BLOCK((code_state->ip[-1] == MP_BC_SETUP_FINALLY) ? 1 : 0);
+ #else
+ PUSH_EXC_BLOCK((code_state->ip[0] == MP_BC_SETUP_FINALLY) ? 1 : 0);
+ #endif
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_END_FINALLY):
+ MARK_EXC_IP_SELECTIVE();
+ // not fully implemented
+ // if TOS is an exception, reraises the exception (3 values on TOS)
+ // if TOS is None, just pops it and continues
+ // if TOS is an integer, does something else
+ // else error
+ if (mp_obj_is_exception_type(TOP())) {
+ RAISE(sp[-1]);
+ }
+ if (TOP() == mp_const_none) {
+ sp--;
+ } else {
+ assert(MP_OBJ_IS_SMALL_INT(TOP()));
+ // We finished "finally" coroutine and now dispatch back
+ // to our caller, based on TOS value
+ mp_unwind_reason_t reason = MP_OBJ_SMALL_INT_VALUE(POP());
+ if (reason == UNWIND_RETURN) {
+ goto unwind_return;
+ } else {
+ assert(reason == UNWIND_JUMP);
+ goto unwind_jump;
+ }
+ }
+ DISPATCH();
+
+ ENTRY(MP_BC_GET_ITER):
+ MARK_EXC_IP_SELECTIVE();
+ SET_TOP(mp_getiter(TOP()));
+ DISPATCH();
+
+ ENTRY(MP_BC_FOR_ITER): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
+ code_state->sp = sp;
+ assert(TOP());
+ mp_obj_t value = mp_iternext_allow_raise(TOP());
+ if (value == MP_OBJ_STOP_ITERATION) {
+ --sp; // pop the exhausted iterator
+ ip += ulab; // jump to after for-block
+ } else {
+ PUSH(value); // push the next iteration value
+ }
+ DISPATCH();
+ }
+
+ // matched against: SETUP_EXCEPT, SETUP_FINALLY, SETUP_WITH
+ ENTRY(MP_BC_POP_BLOCK):
+ // we are exiting an exception handler, so pop the last one of the exception-stack
+ assert(exc_sp >= exc_stack);
+ POP_EXC_BLOCK();
+ DISPATCH();
+
+ // matched against: SETUP_EXCEPT
+ ENTRY(MP_BC_POP_EXCEPT):
+ // TODO need to work out how blocks work etc
+ // pops block, checks it's an exception block, and restores the stack, saving the 3 exception values to local threadstate
+ assert(exc_sp >= exc_stack);
+ assert(currently_in_except_block);
+ //sp = (mp_obj_t*)(*exc_sp--);
+ //exc_sp--; // discard ip
+ POP_EXC_BLOCK();
+ //sp -= 3; // pop 3 exception values
+ DISPATCH();
+
+ ENTRY(MP_BC_BUILD_TUPLE): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ sp -= unum - 1;
+ SET_TOP(mp_obj_new_tuple(unum, sp));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_BUILD_LIST): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ sp -= unum - 1;
+ SET_TOP(mp_obj_new_list(unum, sp));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LIST_APPEND): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ // I think it's guaranteed by the compiler that sp[unum] is a list
+ mp_obj_list_append(sp[-unum], sp[0]);
+ sp--;
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_BUILD_MAP): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ PUSH(mp_obj_new_dict(unum));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_STORE_MAP):
+ MARK_EXC_IP_SELECTIVE();
+ sp -= 2;
+ mp_obj_dict_store(sp[0], sp[2], sp[1]);
+ DISPATCH();
+
+ ENTRY(MP_BC_MAP_ADD): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ // I think it's guaranteed by the compiler that sp[-unum - 1] is a map
+ mp_obj_dict_store(sp[-unum - 1], sp[0], sp[-1]);
+ sp -= 2;
+ DISPATCH();
+ }
+
+#if MICROPY_PY_BUILTINS_SET
+ ENTRY(MP_BC_BUILD_SET): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ sp -= unum - 1;
+ SET_TOP(mp_obj_new_set(unum, sp));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_SET_ADD): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ // I think it's guaranteed by the compiler that sp[-unum] is a set
+ mp_obj_set_store(sp[-unum], sp[0]);
+ sp--;
+ DISPATCH();
+ }
+#endif
+
+#if MICROPY_PY_BUILTINS_SLICE
+ ENTRY(MP_BC_BUILD_SLICE): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ if (unum == 2) {
+ mp_obj_t stop = POP();
+ mp_obj_t start = TOP();
+ SET_TOP(mp_obj_new_slice(start, stop, mp_const_none));
+ } else {
+ mp_obj_t step = POP();
+ mp_obj_t stop = POP();
+ mp_obj_t start = TOP();
+ SET_TOP(mp_obj_new_slice(start, stop, step));
+ }
+ DISPATCH();
+ }
+#endif
+
+ ENTRY(MP_BC_UNPACK_SEQUENCE): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ mp_unpack_sequence(sp[0], unum, sp);
+ sp += unum - 1;
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_UNPACK_EX): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ mp_unpack_ex(sp[0], unum, sp);
+ sp += (unum & 0xff) + ((unum >> 8) & 0xff);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_MAKE_FUNCTION): {
+ DECODE_PTR;
+ PUSH(mp_make_function_from_raw_code(ptr, MP_OBJ_NULL, MP_OBJ_NULL));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_MAKE_FUNCTION_DEFARGS): {
+ DECODE_PTR;
+ // Stack layout: def_tuple def_dict <- TOS
+ mp_obj_t def_dict = POP();
+ SET_TOP(mp_make_function_from_raw_code(ptr, TOP(), def_dict));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_MAKE_CLOSURE): {
+ DECODE_PTR;
+ mp_uint_t n_closed_over = *ip++;
+ // Stack layout: closed_overs <- TOS
+ sp -= n_closed_over - 1;
+ SET_TOP(mp_make_closure_from_raw_code(ptr, n_closed_over, sp));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_MAKE_CLOSURE_DEFARGS): {
+ DECODE_PTR;
+ mp_uint_t n_closed_over = *ip++;
+ // Stack layout: def_tuple def_dict closed_overs <- TOS
+ sp -= 2 + n_closed_over - 1;
+ SET_TOP(mp_make_closure_from_raw_code(ptr, 0x100 | n_closed_over, sp));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_CALL_FUNCTION): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ // unum & 0xff == n_positional
+ // (unum >> 8) & 0xff == n_keyword
+ sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe);
+ #if MICROPY_STACKLESS
+ if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
+ code_state->ip = ip;
+ code_state->sp = sp;
+ code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);
+ mp_code_state *new_state = mp_obj_fun_bc_prepare_codestate(*sp, unum & 0xff, (unum >> 8) & 0xff, sp + 1);
+ if (new_state) {
+ new_state->prev = code_state;
+ code_state = new_state;
+ nlr_pop();
+ goto run_code_state;
+ }
+ #if MICROPY_STACKLESS_STRICT
+ else {
+ deep_recursion_error:
+ mp_exc_recursion_depth();
+ }
+ #endif
+ }
+ #endif
+ SET_TOP(mp_call_function_n_kw(*sp, unum & 0xff, (unum >> 8) & 0xff, sp + 1));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_CALL_FUNCTION_VAR_KW): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ // unum & 0xff == n_positional
+ // (unum >> 8) & 0xff == n_keyword
+ // We have folowing stack layout here:
+ // fun arg0 arg1 ... kw0 val0 kw1 val1 ... seq dict <- TOS
+ sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 2;
+ #if MICROPY_STACKLESS
+ if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
+ code_state->ip = ip;
+ code_state->sp = sp;
+ code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);
+
+ mp_call_args_t out_args;
+ mp_call_prepare_args_n_kw_var(false, unum, sp, &out_args);
+
+ mp_code_state *new_state = mp_obj_fun_bc_prepare_codestate(out_args.fun,
+ out_args.n_args, out_args.n_kw, out_args.args);
+ m_del(mp_obj_t, out_args.args, out_args.n_alloc);
+ if (new_state) {
+ new_state->prev = code_state;
+ code_state = new_state;
+ nlr_pop();
+ goto run_code_state;
+ }
+ #if MICROPY_STACKLESS_STRICT
+ else {
+ goto deep_recursion_error;
+ }
+ #endif
+ }
+ #endif
+ SET_TOP(mp_call_method_n_kw_var(false, unum, sp));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_CALL_METHOD): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ // unum & 0xff == n_positional
+ // (unum >> 8) & 0xff == n_keyword
+ sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 1;
+ #if MICROPY_STACKLESS
+ if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
+ code_state->ip = ip;
+ code_state->sp = sp;
+ code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);
+
+ mp_uint_t n_args = unum & 0xff;
+ mp_uint_t n_kw = (unum >> 8) & 0xff;
+ int adjust = (sp[1] == MP_OBJ_NULL) ? 0 : 1;
+
+ mp_code_state *new_state = mp_obj_fun_bc_prepare_codestate(*sp, n_args + adjust, n_kw, sp + 2 - adjust);
+ if (new_state) {
+ new_state->prev = code_state;
+ code_state = new_state;
+ nlr_pop();
+ goto run_code_state;
+ }
+ #if MICROPY_STACKLESS_STRICT
+ else {
+ goto deep_recursion_error;
+ }
+ #endif
+ }
+ #endif
+ SET_TOP(mp_call_method_n_kw(unum & 0xff, (unum >> 8) & 0xff, sp));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_CALL_METHOD_VAR_KW): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ // unum & 0xff == n_positional
+ // (unum >> 8) & 0xff == n_keyword
+ // We have folowing stack layout here:
+ // fun self arg0 arg1 ... kw0 val0 kw1 val1 ... seq dict <- TOS
+ sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 3;
+ #if MICROPY_STACKLESS
+ if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
+ code_state->ip = ip;
+ code_state->sp = sp;
+ code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);
+
+ mp_call_args_t out_args;
+ mp_call_prepare_args_n_kw_var(true, unum, sp, &out_args);
+
+ mp_code_state *new_state = mp_obj_fun_bc_prepare_codestate(out_args.fun,
+ out_args.n_args, out_args.n_kw, out_args.args);
+ m_del(mp_obj_t, out_args.args, out_args.n_alloc);
+ if (new_state) {
+ new_state->prev = code_state;
+ code_state = new_state;
+ nlr_pop();
+ goto run_code_state;
+ }
+ #if MICROPY_STACKLESS_STRICT
+ else {
+ goto deep_recursion_error;
+ }
+ #endif
+ }
+ #endif
+ SET_TOP(mp_call_method_n_kw_var(true, unum, sp));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_RETURN_VALUE):
+ MARK_EXC_IP_SELECTIVE();
+ // These next 3 lines pop a try-finally exception handler, if one
+ // is there on the exception stack. Without this the finally block
+ // is executed a second time when the return is executed, because
+ // the try-finally exception handler is still on the stack.
+ // TODO Possibly find a better way to handle this case.
+ if (currently_in_except_block) {
+ POP_EXC_BLOCK();
+ }
+unwind_return:
+ while (exc_sp >= exc_stack) {
+ if (MP_TAGPTR_TAG1(exc_sp->val_sp)) {
+ // Getting here the stack looks like:
+ // (..., X, [iter0, iter1, ...,] ret_val)
+ // where X is pointed to by exc_sp->val_sp and in the case
+ // of a "with" block contains the context manager info.
+ // There may be 0 or more for-iterators between X and the
+ // return value, and these must be removed before control can
+ // pass to the finally code. We simply copy the ret_value down
+ // over these iterators, if they exist. If they don't then the
+ // following is a null operation.
+ mp_obj_t *finally_sp = MP_TAGPTR_PTR(exc_sp->val_sp);
+ finally_sp[1] = sp[0];
+ sp = &finally_sp[1];
+ // We're going to run "finally" code as a coroutine
+ // (not calling it recursively). Set up a sentinel
+ // on a stack so it can return back to us when it is
+ // done (when WITH_CLEANUP or END_FINALLY reached).
+ PUSH(MP_OBJ_NEW_SMALL_INT(UNWIND_RETURN));
+ ip = exc_sp->handler;
+ exc_sp--;
+ goto dispatch_loop;
+ }
+ exc_sp--;
+ }
+ nlr_pop();
+ code_state->sp = sp;
+ assert(exc_sp == exc_stack - 1);
+ MICROPY_VM_HOOK_RETURN
+ #if MICROPY_STACKLESS
+ if (code_state->prev != NULL) {
+ mp_obj_t res = *sp;
+ mp_globals_set(code_state->old_globals);
+ code_state = code_state->prev;
+ *code_state->sp = res;
+ goto run_code_state;
+ }
+ #endif
+ return MP_VM_RETURN_NORMAL;
+
+ ENTRY(MP_BC_RAISE_VARARGS): {
+ MARK_EXC_IP_SELECTIVE();
+ mp_uint_t unum = *ip++;
+ mp_obj_t obj;
+ if (unum == 2) {
+ mp_warning("exception chaining not supported");
+ // ignore (pop) "from" argument
+ sp--;
+ }
+ if (unum == 0) {
+ // search for the inner-most previous exception, to reraise it
+ obj = MP_OBJ_NULL;
+ for (mp_exc_stack_t *e = exc_sp; e >= exc_stack; e--) {
+ if (e->prev_exc != NULL) {
+ obj = MP_OBJ_FROM_PTR(e->prev_exc);
+ break;
+ }
+ }
+ if (obj == MP_OBJ_NULL) {
+ obj = mp_obj_new_exception_msg(&mp_type_RuntimeError, "No active exception to reraise");
+ RAISE(obj);
+ }
+ } else {
+ obj = POP();
+ }
+ obj = mp_make_raise_obj(obj);
+ RAISE(obj);
+ }
+
+ ENTRY(MP_BC_YIELD_VALUE):
+yield:
+ nlr_pop();
+ code_state->ip = ip;
+ code_state->sp = sp;
+ code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);
+ return MP_VM_RETURN_YIELD;
+
+ ENTRY(MP_BC_YIELD_FROM): {
+ MARK_EXC_IP_SELECTIVE();
+//#define EXC_MATCH(exc, type) MP_OBJ_IS_TYPE(exc, type)
+#define EXC_MATCH(exc, type) mp_obj_exception_match(exc, type)
+#define GENERATOR_EXIT_IF_NEEDED(t) if (t != MP_OBJ_NULL && EXC_MATCH(t, MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) { RAISE(t); }
+ mp_vm_return_kind_t ret_kind;
+ mp_obj_t send_value = POP();
+ mp_obj_t t_exc = MP_OBJ_NULL;
+ mp_obj_t ret_value;
+ if (inject_exc != MP_OBJ_NULL) {
+ t_exc = inject_exc;
+ inject_exc = MP_OBJ_NULL;
+ ret_kind = mp_resume(TOP(), MP_OBJ_NULL, t_exc, &ret_value);
+ } else {
+ ret_kind = mp_resume(TOP(), send_value, MP_OBJ_NULL, &ret_value);
+ }
+
+ if (ret_kind == MP_VM_RETURN_YIELD) {
+ ip--;
+ PUSH(ret_value);
+ goto yield;
+ }
+ if (ret_kind == MP_VM_RETURN_NORMAL) {
+ // Pop exhausted gen
+ sp--;
+ if (ret_value == MP_OBJ_NULL) {
+ // Optimize StopIteration
+ // TODO: get StopIteration's value
+ PUSH(mp_const_none);
+ } else {
+ PUSH(ret_value);
+ }
+
+ // If we injected GeneratorExit downstream, then even
+ // if it was swallowed, we re-raise GeneratorExit
+ GENERATOR_EXIT_IF_NEEDED(t_exc);
+ DISPATCH();
+ }
+ if (ret_kind == MP_VM_RETURN_EXCEPTION) {
+ // Pop exhausted gen
+ sp--;
+ if (EXC_MATCH(ret_value, MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
+ PUSH(mp_obj_exception_get_value(ret_value));
+ // If we injected GeneratorExit downstream, then even
+ // if it was swallowed, we re-raise GeneratorExit
+ GENERATOR_EXIT_IF_NEEDED(t_exc);
+ DISPATCH();
+ } else {
+ RAISE(ret_value);
+ }
+ }
+ }
+
+ ENTRY(MP_BC_IMPORT_NAME): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_obj_t obj = POP();
+ SET_TOP(mp_import_name(qst, obj, TOP()));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_IMPORT_FROM): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_obj_t obj = mp_import_from(TOP(), qst);
+ PUSH(obj);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_IMPORT_STAR):
+ MARK_EXC_IP_SELECTIVE();
+ mp_import_all(POP());
+ DISPATCH();
+
+#if MICROPY_OPT_COMPUTED_GOTO
+ ENTRY(MP_BC_LOAD_CONST_SMALL_INT_MULTI):
+ PUSH(MP_OBJ_NEW_SMALL_INT((mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - 16));
+ DISPATCH();
+
+ ENTRY(MP_BC_LOAD_FAST_MULTI):
+ obj_shared = fastn[MP_BC_LOAD_FAST_MULTI - (mp_int_t)ip[-1]];
+ goto load_check;
+
+ ENTRY(MP_BC_STORE_FAST_MULTI):
+ fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP();
+ DISPATCH();
+
+ ENTRY(MP_BC_UNARY_OP_MULTI):
+ MARK_EXC_IP_SELECTIVE();
+ SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP()));
+ DISPATCH();
+
+ ENTRY(MP_BC_BINARY_OP_MULTI): {
+ MARK_EXC_IP_SELECTIVE();
+ mp_obj_t rhs = POP();
+ mp_obj_t lhs = TOP();
+ SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs));
+ DISPATCH();
+ }
+
+ ENTRY_DEFAULT:
+ MARK_EXC_IP_SELECTIVE();
+#else
+ ENTRY_DEFAULT:
+ if (ip[-1] < MP_BC_LOAD_CONST_SMALL_INT_MULTI + 64) {
+ PUSH(MP_OBJ_NEW_SMALL_INT((mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - 16));
+ DISPATCH();
+ } else if (ip[-1] < MP_BC_LOAD_FAST_MULTI + 16) {
+ obj_shared = fastn[MP_BC_LOAD_FAST_MULTI - (mp_int_t)ip[-1]];
+ goto load_check;
+ } else if (ip[-1] < MP_BC_STORE_FAST_MULTI + 16) {
+ fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP();
+ DISPATCH();
+ } else if (ip[-1] < MP_BC_UNARY_OP_MULTI + 7) {
+ SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP()));
+ DISPATCH();
+ } else if (ip[-1] < MP_BC_BINARY_OP_MULTI + 36) {
+ mp_obj_t rhs = POP();
+ mp_obj_t lhs = TOP();
+ SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs));
+ DISPATCH();
+ } else
+#endif
+ {
+ mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NotImplementedError, "byte code not implemented");
+ nlr_pop();
+ fastn[0] = obj;
+ return MP_VM_RETURN_EXCEPTION;
+ }
+
+#if !MICROPY_OPT_COMPUTED_GOTO
+ } // switch
+#endif
+
+pending_exception_check:
+ MICROPY_VM_HOOK_LOOP
+ if (MP_STATE_VM(mp_pending_exception) != MP_OBJ_NULL) {
+ MARK_EXC_IP_SELECTIVE();
+ mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
+ MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
+ RAISE(obj);
+ }
+
+ } // for loop
+
+ } else {
+exception_handler:
+ // exception occurred
+
+ #if MICROPY_PY_SYS_EXC_INFO
+ MP_STATE_VM(cur_exception) = nlr.ret_val;
+ #endif
+
+ #if SELECTIVE_EXC_IP
+ // with selective ip, we store the ip 1 byte past the opcode, so move ptr back
+ code_state->ip -= 1;
+ #endif
+
+ if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(((mp_obj_base_t*)nlr.ret_val)->type), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
+ if (code_state->ip) {
+ // check if it's a StopIteration within a for block
+ if (*code_state->ip == MP_BC_FOR_ITER) {
+ const byte *ip = code_state->ip + 1;
+ DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
+ code_state->ip = ip + ulab; // jump to after for-block
+ code_state->sp -= 1; // pop the exhausted iterator
+ goto outer_dispatch_loop; // continue with dispatch loop
+ } else if (*code_state->ip == MP_BC_YIELD_FROM) {
+ // StopIteration inside yield from call means return a value of
+ // yield from, so inject exception's value as yield from's result
+ *++code_state->sp = mp_obj_exception_get_value(MP_OBJ_FROM_PTR(nlr.ret_val));
+ code_state->ip++; // yield from is over, move to next instruction
+ goto outer_dispatch_loop; // continue with dispatch loop
+ }
+ }
+ }
+
+#if MICROPY_STACKLESS
+unwind_loop:
+#endif
+ // set file and line number that the exception occurred at
+ // TODO: don't set traceback for exceptions re-raised by END_FINALLY.
+ // But consider how to handle nested exceptions.
+ // TODO need a better way of not adding traceback to constant objects (right now, just GeneratorExit_obj and MemoryError_obj)
+ if (nlr.ret_val != &mp_const_GeneratorExit_obj && nlr.ret_val != &mp_const_MemoryError_obj) {
+ const byte *ip = code_state->code_info;
+ mp_uint_t code_info_size = mp_decode_uint(&ip);
+ #if MICROPY_PERSISTENT_CODE
+ qstr block_name = ip[0] | (ip[1] << 8);
+ qstr source_file = ip[2] | (ip[3] << 8);
+ ip += 4;
+ #else
+ qstr block_name = mp_decode_uint(&ip);
+ qstr source_file = mp_decode_uint(&ip);
+ #endif
+ size_t bc = code_state->ip - code_state->code_info - code_info_size;
+ size_t source_line = 1;
+ size_t c;
+ while ((c = *ip)) {
+ mp_uint_t b, l;
+ if ((c & 0x80) == 0) {
+ // 0b0LLBBBBB encoding
+ b = c & 0x1f;
+ l = c >> 5;
+ ip += 1;
+ } else {
+ // 0b1LLLBBBB 0bLLLLLLLL encoding (l's LSB in second byte)
+ b = c & 0xf;
+ l = ((c << 4) & 0x700) | ip[1];
+ ip += 2;
+ }
+ if (bc >= b) {
+ bc -= b;
+ source_line += l;
+ } else {
+ // found source line corresponding to bytecode offset
+ break;
+ }
+ }
+ mp_obj_exception_add_traceback(MP_OBJ_FROM_PTR(nlr.ret_val), source_file, source_line, block_name);
+ }
+
+ while (currently_in_except_block) {
+ // nested exception
+
+ assert(exc_sp >= exc_stack);
+
+ // TODO make a proper message for nested exception
+ // at the moment we are just raising the very last exception (the one that caused the nested exception)
+
+ // move up to previous exception handler
+ POP_EXC_BLOCK();
+ }
+
+ if (exc_sp >= exc_stack) {
+ // set flag to indicate that we are now handling an exception
+ currently_in_except_block = 1;
+
+ // catch exception and pass to byte code
+ code_state->ip = exc_sp->handler;
+ mp_obj_t *sp = MP_TAGPTR_PTR(exc_sp->val_sp);
+ // save this exception in the stack so it can be used in a reraise, if needed
+ exc_sp->prev_exc = nlr.ret_val;
+ // push(traceback, exc-val, exc-type)
+ PUSH(mp_const_none);
+ PUSH(MP_OBJ_FROM_PTR(nlr.ret_val));
+ PUSH(MP_OBJ_FROM_PTR(((mp_obj_base_t*)nlr.ret_val)->type));
+ code_state->sp = sp;
+
+ #if MICROPY_STACKLESS
+ } else if (code_state->prev != NULL) {
+ mp_globals_set(code_state->old_globals);
+ code_state = code_state->prev;
+ fastn = &code_state->state[code_state->n_state - 1];
+ exc_stack = (mp_exc_stack_t*)(code_state->state + code_state->n_state);
+ // variables that are visible to the exception handler (declared volatile)
+ currently_in_except_block = MP_TAGPTR_TAG0(code_state->exc_sp); // 0 or 1, to detect nested exceptions
+ exc_sp = MP_TAGPTR_PTR(code_state->exc_sp); // stack grows up, exc_sp points to top of stack
+ goto unwind_loop;
+
+ #endif
+ } else {
+ // propagate exception to higher level
+ // TODO what to do about ip and sp? they don't really make sense at this point
+ fastn[0] = MP_OBJ_FROM_PTR(nlr.ret_val); // must put exception here because sp is invalid
+ return MP_VM_RETURN_EXCEPTION;
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/vmentrytable.h Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,118 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Winitializer-overrides"
+#endif // __clang__
+
// Computed-goto dispatch table for the VM: maps each bytecode value (0-255)
// to the address of its handler label inside mp_execute_bytecode.
// The [0 ... 255] catch-all entry MUST come first so that the specific
// entries below deliberately override it (clang warns about initializer
// overrides; see the pragma above).  Range designators (GCC extension)
// cover the *_MULTI opcodes, which encode an argument in the opcode byte.
static void* entry_table[256] = {
    [0 ... 255] = &&entry_default,
    [MP_BC_LOAD_CONST_FALSE] = &&entry_MP_BC_LOAD_CONST_FALSE,
    [MP_BC_LOAD_CONST_NONE] = &&entry_MP_BC_LOAD_CONST_NONE,
    [MP_BC_LOAD_CONST_TRUE] = &&entry_MP_BC_LOAD_CONST_TRUE,
    [MP_BC_LOAD_CONST_SMALL_INT] = &&entry_MP_BC_LOAD_CONST_SMALL_INT,
    [MP_BC_LOAD_CONST_STRING] = &&entry_MP_BC_LOAD_CONST_STRING,
    [MP_BC_LOAD_CONST_OBJ] = &&entry_MP_BC_LOAD_CONST_OBJ,
    [MP_BC_LOAD_NULL] = &&entry_MP_BC_LOAD_NULL,
    [MP_BC_LOAD_FAST_N] = &&entry_MP_BC_LOAD_FAST_N,
    [MP_BC_LOAD_DEREF] = &&entry_MP_BC_LOAD_DEREF,
    [MP_BC_LOAD_NAME] = &&entry_MP_BC_LOAD_NAME,
    [MP_BC_LOAD_GLOBAL] = &&entry_MP_BC_LOAD_GLOBAL,
    [MP_BC_LOAD_ATTR] = &&entry_MP_BC_LOAD_ATTR,
    [MP_BC_LOAD_METHOD] = &&entry_MP_BC_LOAD_METHOD,
    [MP_BC_LOAD_BUILD_CLASS] = &&entry_MP_BC_LOAD_BUILD_CLASS,
    [MP_BC_LOAD_SUBSCR] = &&entry_MP_BC_LOAD_SUBSCR,
    [MP_BC_STORE_FAST_N] = &&entry_MP_BC_STORE_FAST_N,
    [MP_BC_STORE_DEREF] = &&entry_MP_BC_STORE_DEREF,
    [MP_BC_STORE_NAME] = &&entry_MP_BC_STORE_NAME,
    [MP_BC_STORE_GLOBAL] = &&entry_MP_BC_STORE_GLOBAL,
    [MP_BC_STORE_ATTR] = &&entry_MP_BC_STORE_ATTR,
    [MP_BC_STORE_SUBSCR] = &&entry_MP_BC_STORE_SUBSCR,
    [MP_BC_DELETE_FAST] = &&entry_MP_BC_DELETE_FAST,
    [MP_BC_DELETE_DEREF] = &&entry_MP_BC_DELETE_DEREF,
    [MP_BC_DELETE_NAME] = &&entry_MP_BC_DELETE_NAME,
    [MP_BC_DELETE_GLOBAL] = &&entry_MP_BC_DELETE_GLOBAL,
    [MP_BC_DUP_TOP] = &&entry_MP_BC_DUP_TOP,
    [MP_BC_DUP_TOP_TWO] = &&entry_MP_BC_DUP_TOP_TWO,
    [MP_BC_POP_TOP] = &&entry_MP_BC_POP_TOP,
    [MP_BC_ROT_TWO] = &&entry_MP_BC_ROT_TWO,
    [MP_BC_ROT_THREE] = &&entry_MP_BC_ROT_THREE,
    [MP_BC_JUMP] = &&entry_MP_BC_JUMP,
    [MP_BC_POP_JUMP_IF_TRUE] = &&entry_MP_BC_POP_JUMP_IF_TRUE,
    [MP_BC_POP_JUMP_IF_FALSE] = &&entry_MP_BC_POP_JUMP_IF_FALSE,
    [MP_BC_JUMP_IF_TRUE_OR_POP] = &&entry_MP_BC_JUMP_IF_TRUE_OR_POP,
    [MP_BC_JUMP_IF_FALSE_OR_POP] = &&entry_MP_BC_JUMP_IF_FALSE_OR_POP,
    [MP_BC_SETUP_WITH] = &&entry_MP_BC_SETUP_WITH,
    [MP_BC_WITH_CLEANUP] = &&entry_MP_BC_WITH_CLEANUP,
    [MP_BC_UNWIND_JUMP] = &&entry_MP_BC_UNWIND_JUMP,
    [MP_BC_SETUP_EXCEPT] = &&entry_MP_BC_SETUP_EXCEPT,
    [MP_BC_SETUP_FINALLY] = &&entry_MP_BC_SETUP_FINALLY,
    [MP_BC_END_FINALLY] = &&entry_MP_BC_END_FINALLY,
    [MP_BC_GET_ITER] = &&entry_MP_BC_GET_ITER,
    [MP_BC_FOR_ITER] = &&entry_MP_BC_FOR_ITER,
    [MP_BC_POP_BLOCK] = &&entry_MP_BC_POP_BLOCK,
    [MP_BC_POP_EXCEPT] = &&entry_MP_BC_POP_EXCEPT,
    [MP_BC_BUILD_TUPLE] = &&entry_MP_BC_BUILD_TUPLE,
    [MP_BC_BUILD_LIST] = &&entry_MP_BC_BUILD_LIST,
    [MP_BC_LIST_APPEND] = &&entry_MP_BC_LIST_APPEND,
    [MP_BC_BUILD_MAP] = &&entry_MP_BC_BUILD_MAP,
    [MP_BC_STORE_MAP] = &&entry_MP_BC_STORE_MAP,
    [MP_BC_MAP_ADD] = &&entry_MP_BC_MAP_ADD,
    // set and slice opcodes exist only when the corresponding builtins are enabled
    #if MICROPY_PY_BUILTINS_SET
    [MP_BC_BUILD_SET] = &&entry_MP_BC_BUILD_SET,
    [MP_BC_SET_ADD] = &&entry_MP_BC_SET_ADD,
    #endif
    #if MICROPY_PY_BUILTINS_SLICE
    [MP_BC_BUILD_SLICE] = &&entry_MP_BC_BUILD_SLICE,
    #endif
    [MP_BC_UNPACK_SEQUENCE] = &&entry_MP_BC_UNPACK_SEQUENCE,
    [MP_BC_UNPACK_EX] = &&entry_MP_BC_UNPACK_EX,
    [MP_BC_MAKE_FUNCTION] = &&entry_MP_BC_MAKE_FUNCTION,
    [MP_BC_MAKE_FUNCTION_DEFARGS] = &&entry_MP_BC_MAKE_FUNCTION_DEFARGS,
    [MP_BC_MAKE_CLOSURE] = &&entry_MP_BC_MAKE_CLOSURE,
    [MP_BC_MAKE_CLOSURE_DEFARGS] = &&entry_MP_BC_MAKE_CLOSURE_DEFARGS,
    [MP_BC_CALL_FUNCTION] = &&entry_MP_BC_CALL_FUNCTION,
    [MP_BC_CALL_FUNCTION_VAR_KW] = &&entry_MP_BC_CALL_FUNCTION_VAR_KW,
    [MP_BC_CALL_METHOD] = &&entry_MP_BC_CALL_METHOD,
    [MP_BC_CALL_METHOD_VAR_KW] = &&entry_MP_BC_CALL_METHOD_VAR_KW,
    [MP_BC_RETURN_VALUE] = &&entry_MP_BC_RETURN_VALUE,
    [MP_BC_RAISE_VARARGS] = &&entry_MP_BC_RAISE_VARARGS,
    [MP_BC_YIELD_VALUE] = &&entry_MP_BC_YIELD_VALUE,
    [MP_BC_YIELD_FROM] = &&entry_MP_BC_YIELD_FROM,
    [MP_BC_IMPORT_NAME] = &&entry_MP_BC_IMPORT_NAME,
    [MP_BC_IMPORT_FROM] = &&entry_MP_BC_IMPORT_FROM,
    [MP_BC_IMPORT_STAR] = &&entry_MP_BC_IMPORT_STAR,
    // ranges: opcode value encodes the argument (small int, register, op number)
    [MP_BC_LOAD_CONST_SMALL_INT_MULTI ... MP_BC_LOAD_CONST_SMALL_INT_MULTI + 63] = &&entry_MP_BC_LOAD_CONST_SMALL_INT_MULTI,
    [MP_BC_LOAD_FAST_MULTI ... MP_BC_LOAD_FAST_MULTI + 15] = &&entry_MP_BC_LOAD_FAST_MULTI,
    [MP_BC_STORE_FAST_MULTI ... MP_BC_STORE_FAST_MULTI + 15] = &&entry_MP_BC_STORE_FAST_MULTI,
    [MP_BC_UNARY_OP_MULTI ... MP_BC_UNARY_OP_MULTI + 6] = &&entry_MP_BC_UNARY_OP_MULTI,
    [MP_BC_BINARY_OP_MULTI ... MP_BC_BINARY_OP_MULTI + 35] = &&entry_MP_BC_BINARY_OP_MULTI,
};
+
+#if __clang__
+#pragma clang diagnostic pop
+#endif // __clang__
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/vstr.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,331 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/mpprint.h"
+
// Round an allocation size up to the next multiple of 8.
// The returned value is always at least 1 greater than the argument
// (e.g. 8 -> 16), so a terminating byte can always be accommodated.
#define ROUND_ALLOC(a) (((a) & ((~0) - 7)) + 8)
+
+// Init the vstr so it allocs exactly given number of bytes. Set length to zero.
+void vstr_init(vstr_t *vstr, size_t alloc) {
+ if (alloc < 1) {
+ alloc = 1;
+ }
+ vstr->alloc = alloc;
+ vstr->len = 0;
+ vstr->buf = m_new(char, vstr->alloc);
+ if (vstr->buf == NULL) {
+ vstr->had_error = true;
+ return;
+ }
+ vstr->had_error = false;
+ vstr->fixed_buf = false;
+}
+
// Init the vstr so it allocs exactly enough ram to hold a null-terminated
// string of the given length, and set the length.
// Note: the stored length does not count the byte reserved for '\0'.
void vstr_init_len(vstr_t *vstr, size_t len) {
    vstr_init(vstr, len + 1);
    vstr->len = len;
}
+
// Init the vstr to wrap a caller-provided fixed-size buffer of `alloc` bytes.
// The vstr never reallocates or frees this buffer; operations that would
// grow past `alloc` fail (or truncate, in the case of vstr_add_strn).
void vstr_init_fixed_buf(vstr_t *vstr, size_t alloc, char *buf) {
    vstr->alloc = alloc;
    vstr->len = 0;
    vstr->buf = buf;
    vstr->had_error = false;
    vstr->fixed_buf = true;
}
+
// Init a vstr and wire it up as the sink of an mp_print_t, so that output
// sent through `print` accumulates into the vstr.
void vstr_init_print(vstr_t *vstr, size_t alloc, mp_print_t *print) {
    vstr_init(vstr, alloc);
    print->data = vstr;
    // cast relies on vstr_add_strn(vstr_t*, const char*, size_t) matching
    // the mp_print_strn_t(void*, const char*, size_t) calling shape
    print->print_strn = (mp_print_strn_t)vstr_add_strn;
}
+
+void vstr_clear(vstr_t *vstr) {
+ if (!vstr->fixed_buf) {
+ m_del(char, vstr->buf, vstr->alloc);
+ }
+ vstr->buf = NULL;
+}
+
+vstr_t *vstr_new(void) {
+ vstr_t *vstr = m_new_obj(vstr_t);
+ if (vstr == NULL) {
+ return NULL;
+ }
+ vstr_init(vstr, 16);
+ return vstr;
+}
+
+vstr_t *vstr_new_size(size_t alloc) {
+ vstr_t *vstr = m_new_obj(vstr_t);
+ if (vstr == NULL) {
+ return NULL;
+ }
+ vstr_init(vstr, alloc);
+ return vstr;
+}
+
+void vstr_free(vstr_t *vstr) {
+ if (vstr != NULL) {
+ if (!vstr->fixed_buf) {
+ m_del(char, vstr->buf, vstr->alloc);
+ }
+ m_del_obj(vstr_t, vstr);
+ }
+}
+
// Reset to an empty string (length 0) and clear the error flag,
// keeping the existing buffer and capacity.
void vstr_reset(vstr_t *vstr) {
    vstr->len = 0;
    vstr->had_error = false;
}
+
// Return true if any previous operation on this vstr failed (e.g. OOM).
bool vstr_had_error(vstr_t *vstr) {
    return vstr->had_error;
}
+
// Return the raw character buffer, or NULL if the vstr is in the error state.
// The buffer is not necessarily null-terminated; see vstr_null_terminated_str.
char *vstr_str(vstr_t *vstr) {
    if (vstr->had_error) {
        return NULL;
    }
    return vstr->buf;
}
+
// Return the current length in bytes, or 0 if the vstr is in the error state.
size_t vstr_len(vstr_t *vstr) {
    if (vstr->had_error) {
        return 0;
    }
    return vstr->len;
}
+
// Extend vstr strictly by requested size, return pointer to newly added chunk.
// Returns NULL for a fixed buffer (error flag untouched) or on realloc
// failure (error flag set).  Does not change the string length.
char *vstr_extend(vstr_t *vstr, size_t size) {
    if (vstr->fixed_buf) {
        return NULL;
    }
    char *new_buf = m_renew(char, vstr->buf, vstr->alloc, vstr->alloc + size);
    if (new_buf == NULL) {
        vstr->had_error = true;
        return NULL;
    }
    // first byte of the newly added region (old alloc marks the boundary)
    char *p = new_buf + vstr->alloc;
    vstr->alloc += size;
    vstr->buf = new_buf;
    return p;
}
+
// Ensure there is room for at least `size` more bytes past the current length.
// Grows geometrically (double the required size, rounded up via ROUND_ALLOC).
// Returns false if the vstr wraps a too-small fixed buffer, or if
// reallocation fails (which also sets the error flag).
STATIC bool vstr_ensure_extra(vstr_t *vstr, size_t size) {
    // NOTE(review): vstr->len + size could wrap for a huge `size`; assumed
    // unreachable with realistic string lengths -- confirm.
    if (vstr->len + size > vstr->alloc) {
        if (vstr->fixed_buf) {
            return false;
        }
        size_t new_alloc = ROUND_ALLOC((vstr->len + size) * 2);
        char *new_buf = m_renew(char, vstr->buf, vstr->alloc, new_alloc);
        if (new_buf == NULL) {
            vstr->had_error = true;
            return false;
        }
        vstr->alloc = new_alloc;
        vstr->buf = new_buf;
    }
    return true;
}
+
// Pre-grow the buffer as a pure optimization hint.
void vstr_hint_size(vstr_t *vstr, size_t size) {
    // it's not an error if we fail to allocate for the size hint,
    // so save and restore the error flag around the attempt
    bool er = vstr->had_error;
    vstr_ensure_extra(vstr, size);
    vstr->had_error = er;
}
+
+char *vstr_add_len(vstr_t *vstr, size_t len) {
+ if (vstr->had_error || !vstr_ensure_extra(vstr, len)) {
+ return NULL;
+ }
+ char *buf = vstr->buf + vstr->len;
+ vstr->len += len;
+ return buf;
+}
+
// Doesn't increase len, just makes sure there is a null byte at the end.
// Returns the buffer, or NULL on error (including a completely full fixed
// buffer with no spare byte for the terminator).
char *vstr_null_terminated_str(vstr_t *vstr) {
    if (vstr->had_error || !vstr_ensure_extra(vstr, 1)) {
        return NULL;
    }
    vstr->buf[vstr->len] = '\0';
    return vstr->buf;
}
+
+void vstr_add_byte(vstr_t *vstr, byte b) {
+ byte *buf = (byte*)vstr_add_len(vstr, 1);
+ if (buf == NULL) {
+ return;
+ }
+ buf[0] = b;
+}
+
// Append a character.  With unicode enabled the code point is UTF-8 encoded
// (1-4 bytes); otherwise it is appended as a single raw byte.
void vstr_add_char(vstr_t *vstr, unichar c) {
#if MICROPY_PY_BUILTINS_STR_UNICODE
    // TODO: Can this be simplified and deduplicated?
    // Is it worth just calling vstr_add_len(vstr, 4)?
    if (c < 0x80) {
        // 1-byte sequence: plain ASCII
        byte *buf = (byte*)vstr_add_len(vstr, 1);
        if (buf == NULL) {
            return;
        }
        *buf = (byte)c;
    } else if (c < 0x800) {
        // 2-byte sequence: 110xxxxx 10xxxxxx
        byte *buf = (byte*)vstr_add_len(vstr, 2);
        if (buf == NULL) {
            return;
        }
        buf[0] = (c >> 6) | 0xC0;
        buf[1] = (c & 0x3F) | 0x80;
    } else if (c < 0x10000) {
        // 3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx
        byte *buf = (byte*)vstr_add_len(vstr, 3);
        if (buf == NULL) {
            return;
        }
        buf[0] = (c >> 12) | 0xE0;
        buf[1] = ((c >> 6) & 0x3F) | 0x80;
        buf[2] = (c & 0x3F) | 0x80;
    } else {
        // 4-byte sequence: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
        assert(c < 0x110000); // beyond the Unicode code-point range
        byte *buf = (byte*)vstr_add_len(vstr, 4);
        if (buf == NULL) {
            return;
        }
        buf[0] = (c >> 18) | 0xF0;
        buf[1] = ((c >> 12) & 0x3F) | 0x80;
        buf[2] = ((c >> 6) & 0x3F) | 0x80;
        buf[3] = (c & 0x3F) | 0x80;
    }
#else
    vstr_add_byte(vstr, c);
#endif
}
+
// Append a null-terminated C string.
void vstr_add_str(vstr_t *vstr, const char *str) {
    vstr_add_strn(vstr, str, strlen(str));
}
+
// Append `len` bytes from `str` (which need not be null-terminated).
void vstr_add_strn(vstr_t *vstr, const char *str, size_t len) {
    if (vstr->had_error || !vstr_ensure_extra(vstr, len)) {
        // if buf is fixed, we got here because there isn't enough room left,
        // so just copy as much as fits.  NOTE(review): this fills the buffer
        // completely, leaving no spare byte, so a subsequent
        // vstr_null_terminated_str on the full fixed buffer will fail.
        if (vstr->fixed_buf && vstr->len < vstr->alloc) {
            len = vstr->alloc - vstr->len;
            goto copy;
        }
        return;
    }
copy:
    memmove(vstr->buf + vstr->len, str, len);
    vstr->len += len;
}
+
// Open a gap of `byte_len` uninitialized bytes at `byte_pos` (clamped to the
// current length) and return a pointer to the start of the gap, or NULL on
// error.  Bytes after the gap are shifted up to make room.
STATIC char *vstr_ins_blank_bytes(vstr_t *vstr, size_t byte_pos, size_t byte_len) {
    if (vstr->had_error) {
        return NULL;
    }
    size_t l = vstr->len;
    if (byte_pos > l) {
        byte_pos = l; // clamp: inserting past the end appends instead
    }
    if (byte_len > 0) {
        // ensure room for the new bytes
        if (!vstr_ensure_extra(vstr, byte_len)) {
            return NULL;
        }
        // copy up the string to make room for the new bytes
        memmove(vstr->buf + byte_pos + byte_len, vstr->buf + byte_pos, l - byte_pos);
        // increase the length
        vstr->len += byte_len;
    }
    return vstr->buf + byte_pos;
}
+
+void vstr_ins_byte(vstr_t *vstr, size_t byte_pos, byte b) {
+ char *s = vstr_ins_blank_bytes(vstr, byte_pos, 1);
+ if (s != NULL) {
+ *s = b;
+ }
+}
+
// Insert a character at the given position.
// TODO UNICODE: currently truncates `chr` to one byte; code points >= 0x100
// are not UTF-8 encoded here (unlike vstr_add_char).
void vstr_ins_char(vstr_t *vstr, size_t char_pos, unichar chr) {
    // TODO UNICODE
    char *s = vstr_ins_blank_bytes(vstr, char_pos, 1);
    if (s != NULL) {
        *s = chr;
    }
}
+
// Remove `bytes_to_cut` bytes from the front of the string.
void vstr_cut_head_bytes(vstr_t *vstr, size_t bytes_to_cut) {
    vstr_cut_out_bytes(vstr, 0, bytes_to_cut);
}
+
+void vstr_cut_tail_bytes(vstr_t *vstr, size_t len) {
+ if (vstr->had_error) {
+ return;
+ }
+ if (len > vstr->len) {
+ vstr->len = 0;
+ } else {
+ vstr->len -= len;
+ }
+}
+
// Remove `bytes_to_cut` bytes starting at `byte_pos`, closing the gap.
// A start position past the end is a no-op; a cut reaching past the end
// just truncates at `byte_pos`.
void vstr_cut_out_bytes(vstr_t *vstr, size_t byte_pos, size_t bytes_to_cut) {
    if (vstr->had_error || byte_pos >= vstr->len) {
        return;
    } else if (byte_pos + bytes_to_cut >= vstr->len) {
        vstr->len = byte_pos;
    } else {
        memmove(vstr->buf + byte_pos, vstr->buf + byte_pos + bytes_to_cut, vstr->len - byte_pos - bytes_to_cut);
        vstr->len -= bytes_to_cut;
    }
}
+
// printf-style formatted append (format support as per mp_vprintf).
void vstr_printf(vstr_t *vstr, const char *fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    vstr_vprintf(vstr, fmt, ap);
    va_end(ap);
}
+
// va_list variant of vstr_printf: builds a temporary mp_print_t that routes
// mp_vprintf output into this vstr via vstr_add_strn.
void vstr_vprintf(vstr_t *vstr, const char *fmt, va_list ap) {
    mp_print_t print = {vstr, (mp_print_strn_t)vstr_add_strn};
    mp_vprintf(&print, fmt, ap);
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/py/warning.c Sat Apr 16 17:11:56 2016 +0000
@@ -0,0 +1,49 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "py/emit.h"
+#include "py/runtime.h"
+
+#if MICROPY_WARNINGS
+
+void mp_warning(const char *msg, ...) {
+ va_list args;
+ va_start(args, msg);
+ mp_print_str(&mp_plat_print, "Warning: ");
+ mp_vprintf(&mp_plat_print, msg, args);
+ mp_print_str(&mp_plat_print, "\n");
+}
+
+void mp_emitter_warning(pass_kind_t pass, const char *msg) {
+ if (pass == MP_PASS_CODE_SIZE) {
+ mp_warning(msg, NULL);
+ }
+}
+
+#endif // MICROPY_WARNINGS
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/qstrdefsport.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,39 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2016 Colin Hogben + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/** \file qstrdefsport.h + * Extra qstr values needed by this port + */ + +// modmachine.c +#if MICROPY_PY_MACHINE +Q(machine) +Q(reset) +#endif + +// modmbed.cpp +#if MICROPY_PY_MBED +Q(mbed) +Q(DigitalOut) +#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/unistd.h Sat Apr 16 17:11:56 2016 +0000 @@ -0,0 +1,29 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2016 Colin Hogben + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +// Minimal definitions used by py/*.c +typedef int ssize_t; +#define SEEK_SET 0 +#define SEEK_CUR 1 +#define SEEK_END 2