diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml index 50aa742..d4c6330 100644 --- a/.github/workflows/build_wheels.yml +++ b/.github/workflows/build_wheels.yml @@ -44,6 +44,12 @@ jobs: with: python-version: "3.11" + - name: Vendor pelfy + run: | + git clone --depth 1 https://github.com/Nonannet/pelfy.git /tmp/pelfy + mkdir -p src/${{ github.event.repository.name }}/_vendor + cp -r /tmp/pelfy/src/pelfy src/${{ github.event.repository.name }}/_vendor/ + # Only needed for Linux ARM builds - name: Set up QEMU if: runner.os == 'Linux' @@ -69,6 +75,11 @@ jobs: if: contains(github.ref, '-beta') == false needs: [build_wheels] runs-on: ubuntu-latest + + environment: + name: pypi + url: https://pypi.org/project/${{ github.event.repository.name }}/ + steps: - name: Install Twine run: pip install --force-reinstall twine==6.2.0 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0f7177b..2847675 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,7 +2,7 @@ name: CI Pipeline on: push: - branches: [main, dev] + branches: [main] pull_request: branches: [main, dev] @@ -30,6 +30,42 @@ jobs: name: musl-object-files path: /object_files/musl_objects_*.*o + build-package-test: + needs: [build_stencils] + runs-on: ubuntu-latest + + strategy: + matrix: + python-version: ["3.12"] + + steps: + - name: Check out code + uses: actions/checkout@v4 + + - uses: actions/download-artifact@v4 + with: + name: stencil-object-files + path: src/copapy/obj + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Python dependencies + run: | + python -m pip install -e . 
+ python -m pip install pytest + + - name: Vendor pelfy + run: | + git clone --depth 1 https://github.com/Nonannet/pelfy.git /tmp/pelfy + mkdir -p src/${{ github.event.repository.name }}/_vendor + cp -r /tmp/pelfy/src/pelfy src/${{ github.event.repository.name }}/_vendor/ + + - name: Run tests with pytest + run: pytest -m "not runner" + build-ubuntu: needs: [build_stencils] runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index f04634f..5659106 100644 --- a/.gitignore +++ b/.gitignore @@ -20,7 +20,8 @@ build/* /*.obj /src/*.pyd vc140.pdb -benchmark_results* +benchmark_results*.json +benchmark_results*.png docs/build docs/source/api /libs/ diff --git a/README.md b/README.md index d7cd39c..62b4d51 100644 --- a/README.md +++ b/README.md @@ -1,39 +1,60 @@ # Copapy -Copapy is a python framework for deterministic low latency realtime computations, targeting hardware applications - for example in the field of robotics, aerospace, embedded systems and control systems in general. -GPU frameworks like PyTorch, JAX and TensorFlow jump started the development in the field of AI. With the right balance of flexibility and performance they allow for fast iterations of new ideas while being performant enough to test them or even use them in production. +Copapy is a Python framework for deterministic, low-latency realtime computation, targeting hardware applications - for example in the fields of robotics, aerospace, embedded systems and control systems in general. -This is exactly what Copapy is aiming for - but in the field of embedded realtime computation. While making use of the ergonomics of Python, the tooling and the general Python ecosystem, Copapy runs seamlessly optimized machine code. Despite being highly portable, the **copy and patch** compiler allows for effortless and fast deployment, without any dependencies beyond Python. It's designed to feel like writing python scripts, with a flat learning curve. 
But under the hood it produces high performance static typed and memory save code with a minimized set of possible runtime errors[^1]. To maximize productivity the framework provides detailed type hints to catch most errors even before compilation. +GPU frameworks like PyTorch, JAX and TensorFlow jump-started the development in the field of AI. With the right balance of flexibility and performance, they allow for fast iteration of new ideas while still being performant enough to test or even use them in production. -Embedded systems comes with a variety of CPU architectures. The **copy and patch** compiler already supports the most common ones [^3] and porting it to new architectures is effortless if a C compiler for the target architecture is available [^2]. The generated code depends only on the CPU architecture. The actual generated code does neither do system calls nor calling external libraries like libc. This allows Copapy for one to be highly deterministic and for the other it makes targeting different realtime operating systems or bare metal straight forward. +This is exactly what Copapy aims for - but in the field of embedded realtime computation. While making use of the ergonomics of Python, the tooling, and the general Python ecosystem, Copapy runs seamlessly optimized machine code. Despite being highly portable, the **copy-and-patch** compiler allows for effortless and fast deployment without any dependencies beyond Python. It's designed to feel like writing Python scripts with a shallow learning curve, but under the hood it produces high-performance, statically typed and memory-safe code with a minimized set of possible runtime errors[^1]. To maximize productivity, the framework provides detailed type hints to catch most errors even before compilation. -The summarized main features are: +Embedded systems come with a variety of CPU architectures. 
The **copy-and-patch** compiler already supports the most common ones[^3], and porting it to new architectures is straightforward if a C compiler for the target architecture is available[^2]. The generated code depends only on the CPU architecture. The generated binaries neither perform system calls nor rely on external libraries like libc. This makes Copapy both highly deterministic and easy to deploy on different realtime operating systems (RTOS) or bare metal. + +The main features can be summarized as: - Fast to write & easy to read -- Memory and type safety, minimal set of runtime errors -- deterministic execution -- Auto grad for efficient realtime optimizations -- Optimized machine code for the target architectures x68_64, Aarch64 and ARMv7 -- Very portable to new architectures -- Small python package, minimal dependencies, no cross compile toolchain required +- Memory and type safety with a minimal set of runtime errors +- Deterministic execution +- Autograd for efficient realtime optimization +- Optimized machine code for x86_64, AArch64 and ARMv7 +- Highly portable to new architectures +- Small Python package with minimal dependencies and no cross-compile toolchain required + +Execution of the compiled code is managed by a runner application. The runner is implemented in C and handles I/O and communication with the Copapy framework. The overall design emphasizes minimal complexity of the runner to simplify portability, since this part must be adapted for the individual hardware/application. Because patching of memory addresses is done by the runner, the different architecture-specific relocation types are unified to an architecture-independent format by Copapy before sending the patch instructions to the runner. This keeps the runner implementation as minimal as possible. 
+ +![Copapy architecture](docs/source/media/copapy.svg) + +The design targets either an architecture with a realtime-patched Linux kernel - where the runner uses the same CPU and memory as Linux but executes in a realtime thread - or a setup where even higher determinism is required. In such cases, the runner can be executed on a separate crossover MCU running on bare metal or an RTOS. + +The Copapy framework also includes a runner as a Python module built from the same C code. This allows frictionless testing of code and might be valuable for using Copapy in conventional application development. ## Current state -While obviously hardware IO is a core aspect, this is not yet available. Therefore this package is at the moment a proof of concept with limited direct use. However the computation part is fully working and available for testing and playing with it by simply installing the package. At this point the project is quite close to being ready for integration into the first demonstration hardware platform. -Currently worked on: -- Array stencils for handling very large arrays and generate SIMD optimized code - e.g. for machine vision and neural network applications. -- For targeting Crossover‑MCUs, support for Thumb instructions required by ARM*-M is on the way. -- Constant-regrouping for symbolic optimization of the computation graph. +While hardware I/O is obviously a core aspect of the project, it is not yet available. Therefore, this package is currently a proof of concept with limited direct use. However, the computation engine is fully functional and available for testing and experimentation simply by installing the package. The project is now close to being ready for integration into its first demonstration hardware platform. 
+ +Currently in development: +- Array stencils for handling very large arrays and generating SIMD-optimized code - e.g., for machine vision and neural network applications +- Support for Thumb instructions required by ARM*-M targets (for MCUs) +- Constant regrouping for further symbolic optimization of the computation graph + +Despite missing SIMD-optimization, benchmark performance shows promising numbers. The following chart plots the results in comparison to NumPy 2.3.5: + +![Benchmark results](docs/source/media/benchmark_results_001.svg) + +For the benchmark (`tests/benchmark.py`) the timing of 30000 iterations for calculating the term `sum((v1 + i) @ v2 for i in range(10))` was measured on a Ryzen 5 3400G. The vectors `v1` and `v2` both have a length of `v_size`, which was varied according to the chart from 10 to 600. For the NumPy case the "i in range(10)" loop was vectorized like this: `np.sum((v1 + i) @ v2)` with `i` here being an `NDArray` with a dimension of `[10, 1]`. The number of calculated scalar operations is the same for both contenders. Obviously Copapy profits from less overhead by calling a single function from Python per iteration, where the NumPy variant requires 3. Interestingly there is no indication visible in the chart that for increasing `v_size` the calling overhead for NumPy will be compensated by using faster SIMD instructions. + +Furthermore, for many applications Copapy will benefit from reducing the actual number of operations significantly compared to a NumPy implementation, by precomputing constant values known at compile time and benefiting from sparsity. Multiplying by zero (e.g. in a diagonal matrix) eliminates a whole branch in the computation graph. Operations without effect, like multiplications by 1 or additions with zero, get eliminated at compile time. ## Install -To install copapy, you can use pip. 
Precompiled wheels are available for Linux (x86_64, Aarch64 and ARMv7), Windows (x86_64) and Mac OS (x86_64, Aarch64): + +To install Copapy, you can use pip. Precompiled wheels are available for Linux (x86_64, AArch64, ARMv7), Windows (x86_64) and macOS (x86_64, AArch64): ```bash pip install copapy ``` ## Examples + ### Basic example -A very simple example program using copapy can look like this: + +A very simple example program using Copapy can look like this: ```python import copapy as cp @@ -59,9 +80,8 @@ print("Result e:", tg.read_value(e)) ``` ### Inverse kinematics -An other example using autograd in copapy. Here for for implementing -gradient descent to solve a reverse kinematic problem for -a two joint 2D arm: + +Another example using autograd in Copapy, here implementing gradient descent to solve an inverse kinematics problem for a two-joint 2D arm: ```python import copapy as cp @@ -79,13 +99,13 @@ def forward_kinematics(theta1, theta2): """Return positions of joint and end-effector.""" joint = cp.vector([l1 * cp.cos(theta1), l1 * cp.sin(theta1)]) end_effector = joint + cp.vector([l2 * cp.cos(theta1 + theta2), - l2 * cp.sin(theta1 + theta2)]) + l2 * cp.sin(theta1 + theta2)]) return joint, end_effector # Start values theta = cp.vector([cp.value(0.0), cp.value(0.0)]) -# Iterative inverse kinematic +# Iterative inverse kinematics for _ in range(48): joint, effector = forward_kinematics(theta[0], theta[1]) error = ((target - effector) ** 2).sum() @@ -101,31 +121,75 @@ print(f"Joint position: {tg.read_value(joint)}") print(f"End-effector position: {tg.read_value(effector)}") print(f"quadratic error = {tg.read_value(error)}") ``` + ``` Joint angles: [-0.7221821546554565, 2.6245293617248535] -Joint position: [1.3509329557418823, -1.189529299736023] +Joint position: [1.3509329557418823, -1.189529299736023] End-effector position: [0.6995794177055359, 0.7014330625534058] quadratic error = 2.2305819129542215e-06 ``` ## How it works -The **Compilation** step starts 
with tracing the python code to generate an acyclic directed graph (DAG) of variables and operations. The DAG can be optimized and gets than linearized to a sequence of operations. Each operation gets mapped to a pre-compiled stencil, which is a piece of machine code with placeholders for memory addresses. The compiler generates patch instructions to fill the placeholders with the correct memory addresses. The binary code build from the stencils, data for constants and the patch instructions are than passed to the runner for execution. The runner allocates memory for the code and data, applies the patch instructions to correct memory addresses and finally executes the code. + +The compilation step starts with tracing the Python code to generate an acyclic directed graph (DAG) of variables and operations. The code can contain functions, closures, branching, and so on, but conditional branching is only allowed when the condition is known at tracing time (a `cp.iif` function exists to work around this). In the next step, this DAG is optimized and linearized into a sequence of operations. Each operation is mapped to a precompiled stencil or a combination of several stencils. A stencil is a piece of machine code with placeholders for memory addresses pointing to other code or data. The compiler generates patch instructions that fill these placeholders with the correct memory addresses. + +After compilation, the binary code built from the stencils, the constant data, and the patch instructions is handed to the runner for execution. The runner allocates memory for code and data, copies both into place, applies the patch instructions, and finally executes the code. + +The C code for a very simple stencil can look like this: + +```c +add_float_float(float arg1, float arg2) { + result_float_float(arg1 + arg2, arg2); +} +``` + +The call to the dummy function `result_float_float` ensures that the compiler keeps the result and the second operand in registers for later use. 
The dummy function acts as a placeholder for the next stencil. Copapy uses two virtual registers, which map on most relevant architectures to actual hardware registers. Data that cannot be kept in a register is stored in statically allocated heap memory. Stack memory may be used inside some stencils, but its usage is essentially fixed and independent of the Copapy program, so total memory requirements are known at compile time. + +The machine code for the function above, compiled for x86_64, looks like this: + +```assembly +0000000000000000 : + 0: f3 0f 58 c1 addss %xmm1,%xmm0 + 4: e9 00 00 00 00 jmp 9 <.LC1+0x1> + 5: R_X86_64_PLT32 result_float_float-0x4 +``` + +Based on the relocation entry for the `jmp` to the symbol `result_float_float`, the `jmp` instruction is stripped when it is the last instruction in a stencil. Thus, a Copapy addition operation results in a single instruction. For stencils containing multiple branch exits, only the final `jmp` is removed; the others are patched to jump to the next stencil. + +For more complex operations - where inlining is less useful - stencils call a non-stencil function, such as in this example: + +```assembly +0000000000000000 : + 0: 48 83 ec 08 sub $0x8,%rsp + 4: e8 00 00 00 00 call 9 + 5: R_X86_64_PLT32 sinf-0x4 + 9: 48 83 c4 08 add $0x8,%rsp + d: e9 00 00 00 00 jmp 12 <.LC0+0x2> + e: R_X86_64_PLT32 result_float-0x4 +``` + +Unlike stencils, non-stencil functions are not stripped and do not need to be tail-call-optimizable. + +Non-stencil functions and constants are stored together with the stencils in an ELF object file for each supported CPU architecture. The required non-stencil functions and constants are bundled during compilation. The compiler includes only the data and code required for the specific program. + +The whole compilation process is independent of the actual instruction set. It relies purely on relocation entries and symbol metadata from the ELF file generated by the C compiler. 
## Developer Guide -Contributions are welcome, please open an issue or submit a pull request on GitHub. -To get started with developing the package, first clone the repository using Git: +Feedback and contributions are welcome - please open an issue or submit a pull request on GitHub. + +To get started with development, first clone the repository: ```bash git clone https://github.com/Nonannet/copapy.git cd copapy ``` -You may setup a venv: +You may set up a virtual environment: ```bash python -m venv .venv -source .venv/bin/activate # On Windows `.venv\Scripts\activate` +source .venv/bin/activate # On Windows: `.venv\Scripts\activate` ``` Build and install the package and dev dependencies: @@ -134,37 +198,40 @@ Build and install the package and dev dependencies: pip install -e .[dev] ``` -If the build fails because you have no suitable c compiler installed, you can either install a compiler (obviously) or use the binary from pypi: +If the build fails because no suitable C compiler is installed, you can either install one or use the binary package from PyPI: ```bash pip install copapy[dev] ``` -When running pytest it will use the binary part from pypi but all the python code gets executed from the local repo. +When running pytest, it will use the binary components from PyPI, but all Python code is executed from the local repository. -For running all tests you need the stencil object files and the compiled runner. You can download the stencils and binary runner from GitHub or build them with gcc yourself. +To run all tests, you need the stencil object files and the compiled runner. You can download them from GitHub or build them yourself with gcc. 
-For downloading the latest binaries from GitHub run: +Download the latest binaries from GitHub: ```bash python tools/get_binaries.py ``` -To build the binaries from source on Linux run: +Build the binaries from source on Linux: ```bash bash tools/build.sh ``` -Ensure that everything is set up correctly by running the tests: +Run the tests: ```bash pytest ``` ## License -This project is licensed under GPL - see the [LICENSE](LICENSE) file for details. -[^1]: Currently errors like divide by zero are possible. The feasibility of tacking value ranges in the type system is under investigation to be able to do checks at compile time. -[^2]: The compiler must support TCO (tail call optimization). Currently gcc as C compiler is supported. Porting to a new architecture requires to implement a subset of relocation types used by the architecture. -[^3]: Supported are x68_64, Aarch64, ARMv7 (non-Thumb); ARMv6/7-M (Thumb) is under development; code for x68 32 Bit is present but has open issues (low priority). \ No newline at end of file +This project is licensed under the MIT license - see the [LICENSE](LICENSE) file for details. + +[^1]: Errors like divide-by-zero are currently still possible. The feasibility of tracking value ranges in the type system is under investigation to enable compile-time checks. + +[^2]: The compiler must support tail-call optimization (TCO). Currently, GCC is supported. Porting to a new architecture requires implementing a subset of relocation types used by that architecture. + +[^3]: Supported architectures: x86_64, AArch64, ARMv7 (non-Thumb). ARMv6/7-M (Thumb) support is in development. Code for x86 32-bit exists but has unresolved issues and a low priority. 
diff --git a/docs/source/media/benchmark_results_001.svg b/docs/source/media/benchmark_results_001.svg new file mode 100644 index 0000000..437656d --- /dev/null +++ b/docs/source/media/benchmark_results_001.svg @@ -0,0 +1,306 @@ + + + + + + + + + + + 2025-12-16T11:37:29.841711 + image/svg+xml + + + Matplotlib v3.10.7, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 + + + + + + + + + + 100 + + + + + + + + + + 200 + + + + + + + + + + 300 + + + + + + + + + + 400 + + + + + + + + + + 500 + + + + Vector Size (v_size) + + + + + + + + + + + + + + 0.0 + + + + + + + + + + 0.1 + + + + + + + + + + 0.2 + + + + + + + + + + 0.3 + + + + + + + + + + 0.4 + + + + + + + + + + 0.5 + + + + Elapsed Time (seconds) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Copapy + + + + + + + + NumPy + + + + + + + + + + diff --git a/docs/source/media/copapy.svg b/docs/source/media/copapy.svg new file mode 100644 index 0000000..ba8f68f --- /dev/null +++ b/docs/source/media/copapy.svg @@ -0,0 +1,53 @@ + + + + + + + + + + + + + + + + Runner + + + Compiled code & + Patch instructions + Data + + Hardware + IO + Copapy framework & + copy & patch + cross compiler + + diff --git a/docs/source/media/copapy_src.svg b/docs/source/media/copapy_src.svg new file mode 100644 index 0000000..4cc596f --- /dev/null +++ b/docs/source/media/copapy_src.svg @@ -0,0 +1,272 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Runner + + + Compiled code & + Patch instructions + Data + + Hardware + IO + Copapy framework & + copy & patch + cross compiler + + diff --git a/pyproject.toml b/pyproject.toml index ac0d2b9..37612d2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,7 +13,6 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "pelfy>=1.0.7" ] [project.urls] @@ -34,13 +33,15 @@ copapy = ["obj/*.o", "py.typed"] dev = [ "ruff", "mypy", - "pytest" + "pytest", + "pelfy>=1.0.7" ] doc_build = [ "sphinx", 
"pydata_sphinx_theme", "sphinx-autodoc-typehints", - "myst-parser" + "myst-parser", + "pelfy>=1.0.7" ] [tool.mypy] diff --git a/src/copapy/_basic_types.py b/src/copapy/_basic_types.py index aa399dd..ec9b9ac 100644 --- a/src/copapy/_basic_types.py +++ b/src/copapy/_basic_types.py @@ -61,7 +61,6 @@ class Node: return hash(self.name) ^ hash(frozenset(a.source.node_hash for a in self.args)) return hash(self.name) ^ hash(tuple(a.source.node_hash for a in self.args)) - def __hash__(self) -> int: return self.node_hash @@ -77,6 +76,7 @@ class Net: def __init__(self, dtype: str, source: Node): self.dtype = dtype self.source = source + self.volatile = False def __repr__(self) -> str: names = get_var_name(self) @@ -93,7 +93,7 @@ class value(Generic[TNum], Net): Attributes: dtype (str): Data type of this value. """ - def __init__(self, source: TNum | Node, dtype: str | None = None): + def __init__(self, source: TNum | Node, dtype: str | None = None, volatile: bool = True): """Instance a value. Args: @@ -113,6 +113,7 @@ class value(Generic[TNum], Net): else: self.source = CPConstant(source) self.dtype = 'int' + self.volatile = volatile @overload def __add__(self: 'value[TNum]', other: 'value[TNum] | TNum') -> 'value[TNum]': ... 
@@ -220,33 +221,33 @@ class value(Generic[TNum], Net): return add_op('floordiv', [other, self]) def __neg__(self: TCPNum) -> TCPNum: - if self.dtype == 'int': - return cast(TCPNum, add_op('sub', [value(0), self])) - return cast(TCPNum, add_op('sub', [value(0.0), self])) + if self.dtype == 'float': + return cast(TCPNum, add_op('sub', [value(0.0, volatile=False), self])) + return cast(TCPNum, add_op('sub', [value(0, volatile=False), self])) def __gt__(self, other: TVarNumb) -> 'value[int]': ret = add_op('gt', [self, other]) - return value(ret.source, dtype='bool') + return value(ret.source, dtype='bool', volatile=False) def __lt__(self, other: TVarNumb) -> 'value[int]': ret = add_op('gt', [other, self]) - return value(ret.source, dtype='bool') + return value(ret.source, dtype='bool', volatile=False) def __ge__(self, other: TVarNumb) -> 'value[int]': ret = add_op('ge', [self, other]) - return value(ret.source, dtype='bool') + return value(ret.source, dtype='bool', volatile=False) def __le__(self, other: TVarNumb) -> 'value[int]': ret = add_op('ge', [other, self]) - return value(ret.source, dtype='bool') + return value(ret.source, dtype='bool', volatile=False) def __eq__(self, other: TVarNumb) -> 'value[int]': # type: ignore ret = add_op('eq', [self, other], True) - return value(ret.source, dtype='bool') + return value(ret.source, dtype='bool', volatile=False) def __ne__(self, other: TVarNumb) -> 'value[int]': # type: ignore ret = add_op('ne', [self, other], True) - return value(ret.source, dtype='bool') + return value(ret.source, dtype='bool', volatile=False) @overload def __mod__(self: 'value[TNum]', other: 'value[TNum] | TNum') -> 'value[TNum]': ... 
@@ -358,7 +359,7 @@ class Op(Node): def net_from_value(val: Any) -> value[Any]: vi = CPConstant(val) - return value(vi, vi.dtype) + return value(vi, vi.dtype, False) @overload diff --git a/src/copapy/_compiler.py b/src/copapy/_compiler.py index 61075ff..fbb9125 100644 --- a/src/copapy/_compiler.py +++ b/src/copapy/_compiler.py @@ -299,6 +299,17 @@ def get_aux_func_layout(function_names: Iterable[str], sdb: stencil_database, of return section_list, function_lookup, offset +def get_dag_stats(node_list: Iterable[Node | Net]) -> dict[str, int]: + edges = get_all_dag_edges(n.source if isinstance(n, Net) else n for n in node_list) + ops = {node for node, _ in edges} + + op_stat: dict[str, int] = {} + for op in ops: + op_stat[op.name] = op_stat.get(op.name, 0) + 1 + + return op_stat + + def compile_to_dag(node_list: Iterable[Node], sdb: stencil_database) -> tuple[binw.data_writer, dict[Net, tuple[int, int, str]]]: """Compiles a DAG identified by provided end nodes to binary code diff --git a/src/copapy/_matrices.py b/src/copapy/_matrices.py index a5505a7..45ab495 100644 --- a/src/copapy/_matrices.py +++ b/src/copapy/_matrices.py @@ -78,8 +78,14 @@ class matrix(Generic[TNum]): tuple(a + b for a, b in zip(row1, row2)) for row1, row2 in zip(self.values, other.values) ) + if isinstance(other, value): + return matrix( + tuple(a + other for a in row) + for row in self.values + ) + o = value(other, volatile=False) # Make sure a single constant is allocated return matrix( - tuple(a + other for a in row) + tuple(a + o if isinstance(a, value) else a + other for a in row) for row in self.values ) @@ -106,8 +112,14 @@ class matrix(Generic[TNum]): tuple(a - b for a, b in zip(row1, row2)) for row1, row2 in zip(self.values, other.values) ) + if isinstance(other, value): + return matrix( + tuple(a - other for a in row) + for row in self.values + ) + o = value(other, volatile=False) # Make sure a single constant is allocated return matrix( - tuple(a - other for a in row) + tuple(a - o if 
isinstance(a, value) else a - other for a in row) for row in self.values ) @@ -123,8 +135,14 @@ class matrix(Generic[TNum]): tuple(b - a for a, b in zip(row1, row2)) for row1, row2 in zip(self.values, other.values) ) + if isinstance(other, value): + return matrix( + tuple(other - a for a in row) + for row in self.values + ) + o = value(other, volatile=False) # Make sure a single constant is allocated return matrix( - tuple(other - a for a in row) + tuple(o - a if isinstance(a, value) else other - a for a in row) for row in self.values ) @@ -145,8 +163,14 @@ class matrix(Generic[TNum]): tuple(a * b for a, b in zip(row1, row2)) for row1, row2 in zip(self.values, other.values) ) + if isinstance(other, value): + return matrix( + tuple(a * other for a in row) + for row in self.values + ) + o = value(other, volatile=False) # Make sure a single constant is allocated return matrix( - tuple(a * other for a in row) + tuple(a * o if isinstance(a, value) else a * other for a in row) for row in self.values ) @@ -166,8 +190,14 @@ class matrix(Generic[TNum]): tuple(a / b for a, b in zip(row1, row2)) for row1, row2 in zip(self.values, other.values) ) + if isinstance(other, value): + return matrix( + tuple(a / other for a in row) + for row in self.values + ) + o = value(other, volatile=False) # Make sure a single constant is allocated return matrix( - tuple(a / other for a in row) + tuple(a / o if isinstance(a, value) else a / other for a in row) for row in self.values ) @@ -179,8 +209,14 @@ class matrix(Generic[TNum]): tuple(b / a for a, b in zip(row1, row2)) for row1, row2 in zip(self.values, other.values) ) + if isinstance(other, value): + return matrix( + tuple(other / a for a in row) + for row in self.values + ) + o = value(other, volatile=False) # Make sure a single constant is allocated return matrix( - tuple(other / a for a in row) + tuple(o / a if isinstance(a, value) else other / a for a in row) for row in self.values ) @@ -269,7 +305,7 @@ class matrix(Generic[TNum]): 
"""Convert all elements to copapy values if any element is a copapy value.""" if any(isinstance(val, value) for row in self.values for val in row): return matrix( - tuple(value(val) if not isinstance(val, value) else val for val in row) + tuple(value(val, volatile=False) if not isinstance(val, value) else val for val in row) for row in self.values ) else: diff --git a/src/copapy/_mixed.py b/src/copapy/_mixed.py index 5b84e93..f8624ed 100644 --- a/src/copapy/_mixed.py +++ b/src/copapy/_mixed.py @@ -19,6 +19,6 @@ def mixed_sum(scalars: Iterable[int | float | value[Any]]) -> Any: def mixed_homogenize(scalars: Iterable[T | value[T]]) -> Iterable[T] | Iterable[value[T]]: if any(isinstance(val, value) for val in scalars): - return (value(val) if not isinstance(val, value) else val for val in scalars) + return (value(val, volatile=False) if not isinstance(val, value) else val for val in scalars) else: return (val for val in scalars if not isinstance(val, value)) diff --git a/src/copapy/_stencils.py b/src/copapy/_stencils.py index 1c57374..3d78e83 100644 --- a/src/copapy/_stencils.py +++ b/src/copapy/_stencils.py @@ -1,10 +1,17 @@ from dataclasses import dataclass -from pelfy import open_elf_file, elf_file, elf_symbol -from typing import Generator, Literal, Iterable -import pelfy +from typing import Generator, Literal, Iterable, TYPE_CHECKING import struct import platform +if TYPE_CHECKING: + import pelfy +else: + try: + from ._vendor import pelfy + except ImportError: + import pelfy + + ByteOrder = Literal['little', 'big'] @@ -62,7 +69,7 @@ def detect_process_arch() -> str: return arch_family -def get_return_function_type(symbol: elf_symbol) -> str: +def get_return_function_type(symbol: pelfy.elf_symbol) -> str: if symbol.relocations: for reloc in reversed(symbol.relocations): func_name = reloc.symbol.name @@ -71,7 +78,7 @@ def get_return_function_type(symbol: elf_symbol) -> str: return 'void' -def get_stencil_position(func: elf_symbol) -> tuple[int, int]: +def 
get_stencil_position(func: pelfy.elf_symbol) -> tuple[int, int]: start_index = 0 # There must be no prolog # Find last relocation in function last_instr = get_last_call_in_function(func) @@ -84,7 +91,7 @@ def get_stencil_position(func: elf_symbol) -> tuple[int, int]: return start_index, end_index -def get_last_call_in_function(func: elf_symbol) -> int: +def get_last_call_in_function(func: pelfy.elf_symbol) -> int: # Find last relocation in function assert func.relocations, f'No call function in stencil function {func.name}.' reloc = func.relocations[-1] @@ -95,7 +102,7 @@ def get_last_call_in_function(func: elf_symbol) -> int: return reloc.fields['r_offset'] - func.fields['st_value'] + address_field_length - instruction_lengths -def get_op_after_last_call_in_function(func: elf_symbol) -> int: +def get_op_after_last_call_in_function(func: pelfy.elf_symbol) -> int: # Find last relocation in function assert func.relocations, f'No call function in stencil function {func.name}.' reloc = func.relocations[-1] @@ -120,9 +127,9 @@ class stencil_database(): obj_file: path to the ELF object file or bytes of the ELF object file """ if isinstance(obj_file, str): - self.elf = open_elf_file(obj_file) + self.elf = pelfy.open_elf_file(obj_file) else: - self.elf = elf_file(obj_file) + self.elf = pelfy.elf_file(obj_file) self.stencil_definitions = {s.name: get_return_function_type(s) for s in self.elf.symbols diff --git a/src/copapy/_vectors.py b/src/copapy/_vectors.py index ced1137..1295e1f 100644 --- a/src/copapy/_vectors.py +++ b/src/copapy/_vectors.py @@ -1,6 +1,6 @@ from . 
import value from ._mixed import mixed_sum, mixed_homogenize -from typing import TypeVar, Iterable, Any, overload, TypeAlias, Callable, Iterator, Generic +from typing import Sequence, TypeVar, Iterable, Any, overload, TypeAlias, Callable, Iterator, Generic import copapy as cp from ._helper_types import TNum @@ -57,7 +57,10 @@ class vector(Generic[TNum]): if isinstance(other, vector): assert len(self.values) == len(other.values) return vector(a + b for a, b in zip(self.values, other.values)) - return vector(a + other for a in self.values) + if isinstance(other, value): + return vector(a + other for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(a + o if isinstance(a, value) else a + other for a in self.values) @overload def __radd__(self: 'vector[float]', other: VecNumLike) -> 'vector[float]': ... @@ -80,7 +83,10 @@ class vector(Generic[TNum]): if isinstance(other, vector): assert len(self.values) == len(other.values) return vector(a - b for a, b in zip(self.values, other.values)) - return vector(a - other for a in self.values) + if isinstance(other, value): + return vector(a - other for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(a - o if isinstance(a, value) else a - other for a in self.values) @overload def __rsub__(self: 'vector[float]', other: VecNumLike) -> 'vector[float]': ... @@ -92,7 +98,10 @@ class vector(Generic[TNum]): if isinstance(other, vector): assert len(self.values) == len(other.values) return vector(b - a for a, b in zip(self.values, other.values)) - return vector(other - a for a in self.values) + if isinstance(other, value): + return vector(other - a for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(o - a if isinstance(a, value) else other - a for a in self.values) @overload def __mul__(self: 'vector[int]', other: VecFloatLike) -> 'vector[float]': ... 
@@ -106,7 +115,10 @@ class vector(Generic[TNum]): if isinstance(other, vector): assert len(self.values) == len(other.values) return vector(a * b for a, b in zip(self.values, other.values)) - return vector(a * other for a in self.values) + if isinstance(other, value): + return vector(a * other for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(a * o if isinstance(a, value) else a * other for a in self.values) @overload def __rmul__(self: 'vector[float]', other: VecNumLike) -> 'vector[float]': ... @@ -129,7 +141,10 @@ class vector(Generic[TNum]): if isinstance(other, vector): assert len(self.values) == len(other.values) return vector(a ** b for a, b in zip(self.values, other.values)) - return vector(a ** other for a in self.values) + if isinstance(other, value): + return vector(a ** other for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(a ** o if isinstance(a, value) else a ** other for a in self.values) @overload def __rpow__(self: 'vector[float]', other: VecNumLike) -> 'vector[float]': ... @@ -138,19 +153,31 @@ class vector(Generic[TNum]): @overload def __rpow__(self, other: VecNumLike) -> 'vector[Any]': ... 
def __rpow__(self, other: VecNumLike) -> Any: - return self ** other + if isinstance(other, vector): + assert len(self.values) == len(other.values) + return vector(b ** a for a, b in zip(self.values, other.values)) + if isinstance(other, value): + return vector(other ** a for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(o ** a if isinstance(a, value) else other ** a for a in self.values) def __truediv__(self, other: VecNumLike) -> 'vector[float]': if isinstance(other, vector): assert len(self.values) == len(other.values) return vector(a / b for a, b in zip(self.values, other.values)) - return vector(a / other for a in self.values) + if isinstance(other, value): + return vector(a / other for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(a / o if isinstance(a, value) else a / other for a in self.values) def __rtruediv__(self, other: VecNumLike) -> 'vector[float]': if isinstance(other, vector): assert len(self.values) == len(other.values) return vector(b / a for a, b in zip(self.values, other.values)) - return vector(other / a for a in self.values) + if isinstance(other, value): + return vector(other / a for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(o / a if isinstance(a, value) else other / a for a in self.values) @overload def dot(self: 'vector[int]', other: 'vector[int]') -> int | value[int]: ... 
@@ -191,37 +218,55 @@ class vector(Generic[TNum]): if isinstance(other, vector): assert len(self.values) == len(other.values) return vector(a > b for a, b in zip(self.values, other.values)) - return vector(a > other for a in self.values) + if isinstance(other, value): + return vector(a > other for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(a > o if isinstance(a, value) else a > other for a in self.values) def __lt__(self, other: VecNumLike) -> 'vector[int]': if isinstance(other, vector): assert len(self.values) == len(other.values) return vector(a < b for a, b in zip(self.values, other.values)) - return vector(a < other for a in self.values) + if isinstance(other, value): + return vector(a < other for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(a < o if isinstance(a, value) else a < other for a in self.values) def __ge__(self, other: VecNumLike) -> 'vector[int]': if isinstance(other, vector): assert len(self.values) == len(other.values) return vector(a >= b for a, b in zip(self.values, other.values)) - return vector(a >= other for a in self.values) + if isinstance(other, value): + return vector(a >= other for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(a >= o if isinstance(a, value) else a >= other for a in self.values) def __le__(self, other: VecNumLike) -> 'vector[int]': if isinstance(other, vector): assert len(self.values) == len(other.values) return vector(a <= b for a, b in zip(self.values, other.values)) - return vector(a <= other for a in self.values) + if isinstance(other, value): + return vector(a <= other for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(a <= o if isinstance(a, value) else a <= other for a in self.values) - def __eq__(self, other: VecNumLike) -> 'vector[int]': # type: 
ignore - if isinstance(other, vector): - assert len(self.values) == len(other.values) - return vector(a == b for a, b in zip(self.values, other.values)) - return vector(a == other for a in self.values) + def __eq__(self, other: VecNumLike | Sequence[int | float]) -> 'vector[int]': # type: ignore + if isinstance(other, vector | Sequence): + assert len(self) == len(other) + return vector(a == b for a, b in zip(self.values, other)) + if isinstance(other, value): + return vector(a == other for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(a == o if isinstance(a, value) else a == other for a in self.values) def __ne__(self, other: VecNumLike) -> 'vector[int]': # type: ignore if isinstance(other, vector): assert len(self.values) == len(other.values) return vector(a != b for a, b in zip(self.values, other.values)) - return vector(a != other for a in self.values) + if isinstance(other, value): + return vector(a != other for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(a != o if isinstance(a, value) else a != other for a in self.values) @property def shape(self) -> tuple[int]: @@ -255,6 +300,15 @@ class vector(Generic[TNum]): def map(self, func: Callable[[Any], value[U] | U]) -> 'vector[U]': """Applies a function to each element of the vector and returns a new vector.""" return vector(func(x) for x in self.values) + + def _map2(self, other: VecNumLike, func: Callable[[Any, Any], value[int] | value[float]]) -> 'vector[Any]': + if isinstance(other, vector): + assert len(self.values) == len(other.values) + return vector(func(a, b) for a, b in zip(self.values, other.values)) + if isinstance(other, value): + return vector(func(a, other) for a in self.values) + o = value(other, volatile=False) # Make sure a single constant is allocated + return vector(func(a, o) if isinstance(a, value) else func(a, other) for a in self.values) def cross_product(v1: 
vector[float], v2: vector[float]) -> vector[float]: diff --git a/src/copapy/backend.py b/src/copapy/backend.py index c03c76c..f494d1d 100644 --- a/src/copapy/backend.py +++ b/src/copapy/backend.py @@ -2,7 +2,7 @@ from ._target import add_read_command from ._basic_types import Net, Op, Node, CPConstant, Write, stencil_db_from_package from ._compiler import compile_to_dag, \ stable_toposort, get_const_nets, get_all_dag_edges, add_read_ops, get_all_dag_edges_between, \ - add_write_ops + add_write_ops, get_dag_stats __all__ = [ "add_read_command", @@ -18,5 +18,6 @@ __all__ = [ "get_all_dag_edges_between", "add_read_ops", "add_write_ops", - "stencil_db_from_package" + "stencil_db_from_package", + "get_dag_stats" ] diff --git a/tests/benchmark.py b/tests/benchmark.py index 647afd5..804500f 100644 --- a/tests/benchmark.py +++ b/tests/benchmark.py @@ -117,7 +117,6 @@ def cp_vs_python_sparse(path: str = 'benchmark_results_001_sparse.json'): results.append({'benchmark': 'Copapy', 'iter_size': iter_size, 'elapsed_time': elapsed_cp, 'sum_size': sum_size, 'v_size': v_size}) - v1 = cp.vector(float(v) for v in range(v_size)) v2 = cp.vector(float(v) for v in [5]*v_size) @@ -158,6 +157,7 @@ def plot_results(path: str): import matplotlib.pyplot as plt import numpy as np from collections import defaultdict + import matplotlib as mpl # Load the benchmark results with open(path, 'r') as f: @@ -185,30 +185,97 @@ def plot_results(path: str): v_sizes_set = sorted(set(v for benchmark_data in medians_by_benchmark.values() for v in benchmark_data.keys())) # Create the plot - plt.figure(figsize=(10, 6)) + plt.figure(figsize=(6, 4)) for benchmark in benchmarks: if benchmark != 'Python': v_sizes = sorted(medians_by_benchmark[benchmark].keys()) elapsed_times = [medians_by_benchmark[benchmark][v] for v in v_sizes] - plt.plot(v_sizes, elapsed_times, '.', label=benchmark) + plt.plot(v_sizes, elapsed_times, '.', label=benchmark, markersize=10) plt.xlabel('Vector Size (v_size)') plt.ylabel('Elapsed 
Time (seconds)') #plt.title('Benchmark Results: Elapsed Time vs Vector Size') - plt.legend() + plt.legend(frameon=False) #plt.grid(True, alpha=0.3) plt.ylim(bottom=0) plt.tight_layout() # Save to PNG - plt.savefig(path.replace('.json', '') + '.png', dpi=300) + mpl.rcParams['svg.fonttype'] = 'none' + save_svg_with_theme_styles(plt, path.replace('.json', '') + '.svg') print("Plot saved") +def save_svg_with_theme_styles(pyplot_obj, path): + import io + import re + """ + Takes a pyplot object (typically `plt`) or a figure, captures its SVG output, + injects theme-based CSS, and writes to disk. + """ + + # --- Step 1: Capture SVG to memory --- + buf = io.StringIO() + + # pyplot_obj can be a module (plt) or a Figure instance + if hasattr(pyplot_obj, "gcf"): + fig = pyplot_obj.gcf() + else: + fig = pyplot_obj + + fig.savefig(buf, format="svg", dpi=150, transparent=True) + svg_data = buf.getvalue() + buf.close() + + # --- Step 2: Theme CSS to inject --- + theme_css = """ + + """ + + # --- Step 3: Inject CSS right after the opening <svg> tag --- + # Find the first > after the opening <svg> tag + modified_svg = re.sub( + r"(<svg[^>]*>)", + r"\1\n" + theme_css, + svg_data, + count=1 + ) + + # --- Step 4: Write final output to disk --- + with open(path, "w", encoding="utf-8") as f: + f.write(modified_svg) + + if __name__ == "__main__": - path1 = 'benchmark_results_001.json' - path2 = 'benchmark_results_001_sparse.json' + path1 = 'docs/source/media/benchmark_results_001.json' + path2 = 'docs/source/media/benchmark_results_001_sparse.json' if 'no_simd' in sys.argv[1:]: os.environ["NPY_DISABLE_CPU_FEATURES"] = CPU_SIMD_FEATURES diff --git a/tests/test_dag_optimization.py b/tests/test_dag_optimization.py new file mode 100644 index 0000000..5d705ee --- /dev/null +++ b/tests/test_dag_optimization.py @@ -0,0 +1,24 @@ +import copapy as cp +from copapy._basic_types import value +from copapy.backend import get_dag_stats + +def test_get_dag_stats(): + + sum_size = 10 + v_size = 200 + + v1 = 
cp.vector(cp.value(float(v)) for v in range(v_size)) + v2 = cp.vector(cp.value(float(v)) for v in [5]*v_size) + + v3 = sum((v1 + i + 7) @ v2 for i in range(sum_size)) + + assert isinstance(v3, value) + stat = get_dag_stats([v3]) + print(stat) + + assert stat['const_float'] == 2 * v_size + assert stat['add_float_float'] == sum_size * v_size - 2 + + +if __name__ == "__main__": + test_get_dag_stats() \ No newline at end of file diff --git a/tests/test_matrix.py b/tests/test_matrix.py index 81ec534..f9e18c3 100644 --- a/tests/test_matrix.py +++ b/tests/test_matrix.py @@ -103,8 +103,8 @@ def test_matrix_scalar_division(): m1 = cp.matrix([[6.0, 8.0], [12.0, 16.0]]) m2 = m1 / 2.0 - assert m2[0] == pytest.approx((3.0, 4.0)) # pyright: ignore[reportUnknownMemberType] - assert m2[1] == pytest.approx((6.0, 8.0)) # pyright: ignore[reportUnknownMemberType] + assert list(m2[0]) == pytest.approx((3.0, 4.0)) # pyright: ignore[reportUnknownMemberType] + assert list(m2[1]) == pytest.approx((6.0, 8.0)) # pyright: ignore[reportUnknownMemberType] def test_matrix_vector_multiplication():