diff --git a/.github/workflows/build_docker_image.yml b/.github/workflows/build_docker_image.yml index b67c98e..a236d7d 100644 --- a/.github/workflows/build_docker_image.yml +++ b/.github/workflows/build_docker_image.yml @@ -63,6 +63,37 @@ jobs: - name: Build & Push Docker image run: docker buildx build --platform linux/arm64 --push -t $IMAGE_NAME tools/qemu_test/ + docker-build-armv6: + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + with: + platforms: arm + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Set image name + run: echo "IMAGE_NAME=ghcr.io/${GITHUB_REPOSITORY_OWNER,,}/armv6_test:1" >> $GITHUB_ENV + + - name: Build & Push Docker image + run: docker buildx build --platform linux/arm/v6 --push -t $IMAGE_NAME tools/qemu_test/ + + docker-build-armv7: runs-on: ubuntu-latest env: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2847675..8002fc6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -54,7 +54,7 @@ jobs: - name: Install Python dependencies run: | - python -m pip install -e . + python -m pip install . 
python -m pip install pytest - name: Vendor pelfy @@ -89,7 +89,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install Python dependencies - run: python -m pip install -e .[dev] + run: python -m pip install .[dev] - name: Compile coparun run: | @@ -153,6 +153,35 @@ jobs: name: runner-linux-arm64 path: build/runner/* + build-armv6: + needs: [build_stencils] + runs-on: ubuntu-latest + continue-on-error: true + steps: + - uses: actions/checkout@v4 + - uses: actions/download-artifact@v4 + with: + name: stencil-object-files + path: src/copapy/obj + - name: Set up QEMU for ARMv6 + uses: docker/setup-qemu-action@v3 + with: + platforms: linux/arm/v6 + - name: Use ARMv6 container + run: | + docker run --rm -v $PWD:/app -w /app --platform linux/arm/v6 ghcr.io/nonannet/armv6_test:1 \ + bash -lc "pip install . && \ + mkdir -p build/runner && \ + gcc -O3 -DENABLE_LOGGING -o build/runner/coparun src/coparun/runmem.c \ + src/coparun/coparun.c src/coparun/mem_man.c && \ + pytest && \ + bash tools/create_asm.sh" + + - uses: actions/upload-artifact@v4 + with: + name: runner-linux-armv6 + path: build/runner/* + build-armv7: needs: [build_stencils] runs-on: ubuntu-latest @@ -205,7 +234,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install Python dependencies - run: python -m pip install -e .[dev] + run: python -m pip install .[dev] - name: Set up MSVC environment uses: microsoft/setup-msbuild@v2 @@ -217,9 +246,9 @@ jobs: run: | mkdir build\runner call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\Common7\Tools\VsDevCmd.bat" -arch=amd64 - cl /DENABLE_BASIC_LOGGING /O2 src\coparun\runmem.c src\coparun\coparun.c src\coparun\mem_man.c /Fe:build\runner\coparun.exe + cl /DENABLE_BASIC_LOGGING /Od src\coparun\runmem.c src\coparun\coparun.c src\coparun\mem_man.c /Fe:build\runner\coparun.exe call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\Common7\Tools\VsDevCmd.bat" -arch=x86 - cl /DENABLE_BASIC_LOGGING /O2 src\coparun\runmem.c 
src\coparun\coparun.c src\coparun\mem_man.c /Fe:build\runner\coparun-x86.exe + cl /DENABLE_BASIC_LOGGING /Od src\coparun\runmem.c src\coparun\coparun.c src\coparun\mem_man.c /Fe:build\runner\coparun-x86.exe - name: Run tests with pytest run: pytest @@ -231,7 +260,7 @@ jobs: path: build/runner/* release-stencils: - needs: [build_stencils, build-ubuntu, build-windows, build-arm64, build-armv7] + needs: [build_stencils, build-ubuntu, build-windows, build-arm64, build-armv6, build-armv7] runs-on: ubuntu-latest if: github.ref == 'refs/heads/main' && github.event_name == 'push' permissions: @@ -263,6 +292,7 @@ jobs: cp tmp/musl-object-files/* release/ cp tmp/runner-linux/coparun release/ cp tmp/runner-linux-arm64/coparun release/coparun-aarch64 + cp tmp/runner-linux-armv6/coparun release/coparun-armv6 cp tmp/runner-linux-armv7/coparun release/coparun-armv7 cp tmp/runner-win/coparun*.exe release/ @@ -278,3 +308,49 @@ jobs: echo "Updating existing release for $TAG" gh release upload "$TAG" release/* --clobber fi + + build-docs: + needs: [build_stencils, build-ubuntu, build-windows, build-arm64, build-armv6, build-armv7] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/download-artifact@v4 + with: + name: stencil-object-files + path: src/copapy/obj + + - uses: actions/download-artifact@v4 + with: + path: build/tmp + + - uses: actions/setup-python@v3 + with: + python-version: "3.x" + + - name: Install package and dependencies + run: pip install .[doc_build] + + - name: Build Docs + run: | + mkdir -p build/stencils + python stencils/generate_stencils.py build/stencils/stencils.c + cd docs + make html + touch build/html/.nojekyll + + deploy-docs: + if: github.ref == 'refs/heads/main' && github.event_name == 'push' + needs: build-docs + runs-on: ubuntu-latest + permissions: + contents: read + pages: write + id-token: write + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + steps: + - name: Deploy to GitHub 
Pages + id: deployment + uses: actions/deploy-pages@v4 \ No newline at end of file diff --git a/.gitignore b/.gitignore index 5659106..cf6c9ff 100644 --- a/.gitignore +++ b/.gitignore @@ -27,4 +27,5 @@ docs/source/api /libs/ *.core core -*.log \ No newline at end of file +*.log +docs/source/start.md diff --git a/README.md b/README.md index 62b4d51..6a1c950 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Copapy -Copapy is a Python framework for deterministic, low-latency realtime computation, targeting hardware applications - for example in the fields of robotics, aerospace, embedded systems and control systems in general. +Copapy is a Python framework for deterministic, low-latency realtime computation with automatic differentiation support, targeting hardware applications - for example in the fields of robotics, aerospace, embedded systems and control systems in general. GPU frameworks like PyTorch, JAX and TensorFlow jump-started the development in the field of AI. With the right balance of flexibility and performance, they allow for fast iteration of new ideas while still being performant enough to test or even use them in production. 
@@ -12,7 +12,7 @@ The main features can be summarized as: - Fast to write & easy to read - Memory and type safety with a minimal set of runtime errors - Deterministic execution -- Autograd for efficient realtime optimization +- Automatic differentiation for efficient realtime optimization (reverse-mode) - Optimized machine code for x86_64, AArch64 and ARMv7 - Highly portable to new architectures - Small Python package with minimal dependencies and no cross-compile toolchain required @@ -147,7 +147,7 @@ The call to the dummy function `result_float_float` ensures that the compiler ke The machine code for the function above, compiled for x86_64, looks like this: -```assembly +```nasm 0000000000000000 : 0: f3 0f 58 c1 addss %xmm1,%xmm0 4: e9 00 00 00 00 jmp 9 <.LC1+0x1> @@ -158,7 +158,7 @@ Based on the relocation entry for the `jmp` to the symbol `result_float_float`, For more complex operations - where inlining is less useful - stencils call a non-stencil function, such as in this example: -```assembly +```nasm 0000000000000000 : 0: 48 83 ec 08 sub $0x8,%rsp 4: e8 00 00 00 00 call 9 diff --git a/docs/Makefile b/docs/Makefile index d0c3cbf..473fdf1 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -12,7 +12,18 @@ BUILDDIR = build help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -.PHONY: help Makefile +.PHONY: help prepare-docs html + +prepare-docs: + mkdir -p $(BUILDDIR) + python $(SOURCEDIR)/generate_class_list.py --api-dir $(SOURCEDIR)/api + python $(SOURCEDIR)/extract_section.py --readme $(SOURCEDIR)/../../README.md --build-dir $(BUILDDIR) + python $(SOURCEDIR)/stencil_doc.py --input $(SOURCEDIR)/../../build/stencils/stencils.c --asm-pattern "$(SOURCEDIR)/../../build/tmp/runner-linux-*/stencils.asm" --output $(BUILDDIR)/stencils.md + python $(SOURCEDIR)/example_asm.py --input $(SOURCEDIR)/../../tools/make_example.py --asm-pattern "$(SOURCEDIR)/../../build/tmp/runner-linux-*/example.asm" --output $(BUILDDIR)/compiled_example.md + +# Build 
documentation (generate API and extract sections first) +html: prepare-docs + @$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). diff --git a/docs/make.bat b/docs/make.bat index dc1312a..495ca16 100644 --- a/docs/make.bat +++ b/docs/make.bat @@ -25,6 +25,13 @@ if errorlevel 9009 ( if "%1" == "" goto help +md %BUILDDIR% + +python %SOURCEDIR%\generate_class_list.py --api-dir %SOURCEDIR%\api +python %SOURCEDIR%\extract_section.py --readme %SOURCEDIR%/../../README.md --build-dir %BUILDDIR% +python %SOURCEDIR%\stencil_doc.py --input "%SOURCEDIR%/../../build/stencils.c" --asm-pattern "%SOURCEDIR%/../../build/tmp/runner-linux-*/stencils.asm" --output %BUILDDIR%/stencils.md +python %SOURCEDIR%\example_asm.py --input "%SOURCEDIR%/../../tools/make_example.py" --asm-pattern "%SOURCEDIR%/../../build/tmp/runner-linux-*/example.asm" --output %BUILDDIR%/compiled_example.md + %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end diff --git a/docs/source/LICENSE.md b/docs/source/LICENSE.md new file mode 100644 index 0000000..e041895 --- /dev/null +++ b/docs/source/LICENSE.md @@ -0,0 +1,7 @@ +--- +orphan: true +--- + +# License +```{include} ../../LICENSE +``` \ No newline at end of file diff --git a/docs/source/_static/custom.css b/docs/source/_static/custom.css new file mode 100644 index 0000000..5fb9b3b --- /dev/null +++ b/docs/source/_static/custom.css @@ -0,0 +1,3 @@ +html[data-theme="dark"] .bd-content img { + background-color: transparent !important; +} \ No newline at end of file diff --git a/docs/source/compiler.md b/docs/source/compiler.md new file mode 100644 index 0000000..078a74e --- /dev/null +++ b/docs/source/compiler.md @@ -0,0 +1,12 @@ +# Compiler +```{toctree} +:maxdepth: 1 +:hidden: +stencil_doc +example_asm +``` + +```{include} ../build/compiler.md +``` + +A full listing of all stencils with 
machine code for all architectures from latest build is available here: [Stencil overview](stencil_doc.md). The compiler output for a full example program from latest compiler build is available here: [Example program](example_asm.md). \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index 55a33b6..c3defd1 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -28,6 +28,7 @@ exclude_patterns = [] # html_theme = 'alabaster' html_theme = 'pydata_sphinx_theme' html_static_path = ['_static'] +html_css_files = ['custom.css'] html_theme_options = { "secondary_sidebar_items": ["page-toc"], "footer_start": ["copyright"] @@ -35,3 +36,4 @@ html_theme_options = { html_theme_options["footer_end"] = [] autodoc_inherit_docstrings = True +autoclass_content = 'both' diff --git a/docs/source/example_asm.md b/docs/source/example_asm.md new file mode 100644 index 0000000..eb8f8b1 --- /dev/null +++ b/docs/source/example_asm.md @@ -0,0 +1,3 @@ +# Example program +```{include} ../build/compiled_example.md +``` \ No newline at end of file diff --git a/docs/source/example_asm.py b/docs/source/example_asm.py new file mode 100644 index 0000000..0533c56 --- /dev/null +++ b/docs/source/example_asm.py @@ -0,0 +1,67 @@ +from pathlib import Path +import glob +import argparse + + +def build_asm_code_dict(asm_glob_pattern: str) -> dict[str, str]: + """ + Build a dictionary of assembly code for all available architectures. 
+ + Args: + asm_glob_pattern: Glob pattern to find stencils.asm files + + Returns: + Dictionary mapping architecture names to their asm_code dictionaries + """ + asm_code: dict[str, str] = {} + + # Find all stencils.asm files matching the pattern + asm_files = glob.glob(asm_glob_pattern) + + for asm_file in asm_files: + arch_name = Path(asm_file).parent.name.replace('runner-linux-', '') + + try: + with open(asm_file) as f: + asm_code[arch_name] = f.read() + print(f"Loaded assembly for {arch_name}") + except FileNotFoundError: + print(f"Warning: Assembly file not found for {arch_name}: {asm_file}") + + return asm_code + + +# Example usage: +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Generate stencils documentation from C and assembly code") + parser.add_argument('--input', default='tools/make_example.py', help='Path to input C file') + parser.add_argument('--asm-pattern', default='build/tmp/runner-linux-*/example.asm', help='Glob pattern for assembly files') + parser.add_argument('--output', default='docs/build/compiled_example.md', help='Output markdown file path') + + args = parser.parse_args() + + # Build assembly code dictionary for all architectures + asm_code = build_asm_code_dict(args.asm_pattern) + + with open(args.input) as f: + python_code = f.read() + + md_code: str = f""" +Example program: +```python +{python_code} +``` +""" + + for arch in sorted(asm_code.keys()): + md_code += f""" +## {arch} +```nasm +{asm_code[arch]} +``` +""" + + with open(args.output, 'wt') as f: + f.write(md_code) + + print(f"Generated {args.output} for {len(asm_code)} architectures") diff --git a/docs/source/extract_section.py b/docs/source/extract_section.py index 1162405..6ca98ba 100644 --- a/docs/source/extract_section.py +++ b/docs/source/extract_section.py @@ -1,4 +1,6 @@ import re +import argparse +import os def extract_sections(md_text: str) -> dict[str, str]: """ @@ -17,13 +19,25 @@ def extract_sections(md_text: str) -> dict[str, str]: 
sections: dict[str, str] = {} for _, title, content in pattern.findall(md_text): - sections[title] = content.strip() + assert isinstance(content, str) + sections[title] = content.strip().replace('](docs/source/media/', '](media/') return sections if __name__ == '__main__': - with open('README.md', 'rt') as f: + parser = argparse.ArgumentParser(description='Extract sections from README.md and generate documentation files') + parser.add_argument('--readme', type=str, default='README.md', help='README.md path') + parser.add_argument('--build-dir', type=str, default='docs/source', help='Build directory for output files (default: docs/source)') + args = parser.parse_args() + + readme_path = args.readme + build_dir = args.build_dir + + with open(readme_path, 'rt') as f: readme = extract_sections(f.read()) - with open('docs/source/start.md', 'wt') as f: - f.write('\n'.join(readme[s] for s in ['Copapy', 'Current state'])) \ No newline at end of file + with open(os.path.join(build_dir, 'start.md'), 'wt') as f: + f.write('\n'.join(f"# {s}\n" + readme[s] for s in ['Copapy', 'Current state', 'Install', 'License'])) + + with open(os.path.join(build_dir, 'compiler.md'), 'wt') as f: + f.write('\n'.join(readme[s] for s in ['How it works'])) diff --git a/docs/source/generate_class_list.py b/docs/source/generate_class_list.py index e69df0f..e409f84 100644 --- a/docs/source/generate_class_list.py +++ b/docs/source/generate_class_list.py @@ -5,19 +5,20 @@ import inspect import fnmatch from io import TextIOWrapper import os +import argparse def write_manual(f: TextIOWrapper, doc_files: list[str], title: str) -> None: write_dochtree(f, title, doc_files) -def write_classes(f: TextIOWrapper, patterns: list[str], module_name: str, title: str, description: str = '', exclude: list[str] = []) -> None: +def write_classes(f: TextIOWrapper, patterns: list[str], module_name: str, title: str, description: str = '', exclude: list[str] = [], api_dir: str = 'api') -> None: """Write the classes to the 
file.""" module = importlib.import_module(module_name) classes = [ name for name, obj in inspect.getmembers(module, inspect.isclass) - if (any(fnmatch.fnmatch(name, pat) for pat in patterns if pat not in exclude) and + if (any(fnmatch.fnmatch(name, pat) for pat in patterns if name not in exclude) and obj.__doc__ and '(Automatic generated stub)' not in obj.__doc__) ] @@ -27,7 +28,7 @@ def write_classes(f: TextIOWrapper, patterns: list[str], module_name: str, title write_dochtree(f, title, classes) for cls in classes: - with open(f'docs/source/api/{cls}.md', 'w') as f2: + with open(f'{api_dir}/{cls}.md', 'w') as f2: f2.write(f'# {module_name}.{cls}\n') f2.write('```{eval-rst}\n') f2.write(f'.. autoclass:: {module_name}.{cls}\n') @@ -38,14 +39,17 @@ def write_classes(f: TextIOWrapper, patterns: list[str], module_name: str, title f2.write('```\n\n') -def write_functions(f: TextIOWrapper, patterns: list[str], module_name: str, title: str, description: str = '', exclude: list[str] = []) -> None: +def write_functions(f: TextIOWrapper, patterns: list[str], module_name: str, title: str, description: str = '', exclude: list[str] = [], path_patterns: list[str] = ['*'], api_dir: str = 'api') -> None: """Write the classes to the file.""" module = importlib.import_module(module_name) - functions = [ - name for name, _ in inspect.getmembers(module, inspect.isfunction) - if (any(fnmatch.fnmatch(name, pat) for pat in patterns if pat not in exclude)) - ] + functions: list[str] = [] + for name, fu in inspect.getmembers(module, inspect.isfunction): + if (any(fnmatch.fnmatch(name, pat) for pat in patterns if name not in exclude)): + path = inspect.getfile(fu) + if any(fnmatch.fnmatch(path, pat) for pat in path_patterns): + functions.append(name) + if description: f.write(f'{description}\n\n') @@ -54,7 +58,7 @@ def write_functions(f: TextIOWrapper, patterns: list[str], module_name: str, tit for func in functions: if not func.startswith('_'): - with open(f'docs/source/api/{func}.md', 
'w') as f2: + with open(f'{api_dir}/{func}.md', 'w') as f2: f2.write(f'# {module_name}.{func}\n') f2.write('```{eval-rst}\n') f2.write(f'.. autofunction:: {module_name}.{func}\n') @@ -73,14 +77,32 @@ def write_dochtree(f: TextIOWrapper, title: str, items: list[str]): if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Generate class and function documentation') + parser.add_argument('--api-dir', type=str, default='docs/source/api', help='Output directory for API documentation (default: api)') + args = parser.parse_args() + + api_dir = args.api_dir + # Ensure the output directory exists - os.makedirs('docs/source/api', exist_ok=True) + os.makedirs(api_dir, exist_ok=True) - with open('docs/source/api/index.md', 'w') as f: - f.write('# Classes and functions\n\n') + with open(f'{api_dir}/index.md', 'w') as f: + f.write('# User API\n\n') - write_classes(f, ['*'], 'copapy', title='Classes') + write_classes(f, ['*'], 'copapy', title='Classes', api_dir=api_dir) - write_functions(f, ['*'], 'copapy', title='Functions') + write_functions(f, ['*'], 'copapy', title='General functions', path_patterns=['*_autograd.py', '*_basic_types.py', '*_target.py'], api_dir=api_dir) - #write_manual(f, ['../ndfloat', '../floatarray'], title='Types') + write_functions(f, ['*'], 'copapy', title='Math functions', path_patterns=['*_math*'], exclude=['get_42'], api_dir=api_dir) + + write_functions(f, ['*'], 'copapy', title='Vector functions', path_patterns=['*_vectors*'], api_dir=api_dir) + + write_functions(f, ['*'], 'copapy', title='Matrix functions', path_patterns=['*_matrices*'], api_dir=api_dir) + + #write_manual(f, ['NumLike'], title='Types') + + with open(f'{api_dir}/backend.md', 'w') as f: + f.write('# Backend\n\n') + write_classes(f, ['*'], 'copapy.backend', title='Classes', api_dir=api_dir) + + write_functions(f, ['*'], 'copapy.backend', title='Functions', api_dir=api_dir) diff --git a/docs/source/index.md b/docs/source/index.md index b65f4c4..bc346d8 100644 --- 
a/docs/source/index.md +++ b/docs/source/index.md @@ -1,9 +1,11 @@ ```{toctree} :maxdepth: 1 :hidden: +compiler api/index +api/backend repo ``` -```{include} ../../README.md +```{include} ../build/start.md ``` \ No newline at end of file diff --git a/docs/source/media/benchmark_results_001.svg b/docs/source/media/benchmark_results_001.svg index 437656d..b4157f9 100644 --- a/docs/source/media/benchmark_results_001.svg +++ b/docs/source/media/benchmark_results_001.svg @@ -12,18 +12,18 @@ fill: #EEEEEE !important; } #patch_1 path { - fill: #444444 !important; + fill: #14141400 !important; } } @media (prefers-color-scheme: light) { path { - stroke: #444444 !important; + stroke: #141414 !important; } text { - fill: #444444 !important; + fill: #141414 !important; } #patch_1 path { - fill: #FFFFFF !important; + fill: #FFFFFF00 !important; } } #patch_1 path { diff --git a/docs/source/repo.md b/docs/source/repo.md index 110e578..d940e59 100644 --- a/docs/source/repo.md +++ b/docs/source/repo.md @@ -1,3 +1,55 @@ -# Code repository +# Code + +Primary code repository is on GitHub: [github.com/Nonannet/copapy](https://github.com/Nonannet/copapy). + +[Issues](https://github.com/Nonannet/copapy/issues) and [pull requests](https://github.com/Nonannet/copapy/pulls) can be created there. + +To get started with development, first clone the repository: + +```bash +git clone https://github.com/Nonannet/copapy.git +cd copapy +``` + +You may set up a virtual environment: + +```bash +python -m venv .venv +source .venv/bin/activate # On Windows: `.venv\Scripts\activate` +``` + +Build and install the package and dev dependencies: + +```bash +pip install -e .[dev] +``` + +If the build fails because no suitable C compiler is installed, you can either install one or use the binary package from PyPI: + +```bash +pip install copapy[dev] +``` + +When running pytest, it will use the binary components from PyPI, but all Python code is executed from the local repository. 
+ +To run all tests, you need the stencil object files and the compiled runner. You can download them from GitHub or build them yourself with gcc. + +Download the latest binaries from GitHub: + +```bash +python tools/get_binaries.py +``` + +Build the binaries from source on Linux: + +```bash +bash tools/build.sh +``` + +Run the tests: + +```bash +pytest +``` + -Code repository is on GitHub: [github.com/Nonannet/copapy](https://github.com/Nonannet/copapy). \ No newline at end of file diff --git a/docs/source/stencil_doc.md b/docs/source/stencil_doc.md new file mode 100644 index 0000000..f0d574d --- /dev/null +++ b/docs/source/stencil_doc.md @@ -0,0 +1,3 @@ +# Stencil overview +```{include} ../build/stencils.md +``` \ No newline at end of file diff --git a/docs/source/stencil_doc.py b/docs/source/stencil_doc.py new file mode 100644 index 0000000..9f6008e --- /dev/null +++ b/docs/source/stencil_doc.py @@ -0,0 +1,142 @@ +import re +from pathlib import Path +import glob +import argparse + +def extract_c_functions(stencils_path: str) -> dict[str, str]: + """ + Extract all C function names and their code from a stencils.c file. + + Args: + stencils_path: Path to the stencils.c file + + Returns: + Dictionary mapping function names to their complete code + """ + with open(stencils_path, 'r') as f: + content = f.read() + + # Regex pattern to match C functions + # Matches: return_type function_name(parameters) { ... 
} + pattern = r'((?:STENCIL\s+extern|void|int|float|double)\s+\w+\s*\([^)]*\)\s*\{(?:[^{}]|\{[^{}]*\})*\})' + + functions: dict[str, str] = {} + + # Find all function matches + for match in re.finditer(pattern, content, re.MULTILINE | re.DOTALL): + func_code = match.group(1).strip() + + # Extract function name using a simpler regex on the matched code + name_match = re.search(r'(?:STENCIL\s+extern)?\s*(?:void|int|float|double)?\s*(\w+)\s*\(', func_code) + + if name_match: + func_name = name_match.group(1) + functions[func_name] = func_code + + return functions + + +def extract_asm_section(asm_path: str) -> dict[str, str]: + """ + Extract assembly functions organized by section. + + Args: + asm_path: Path to the stencils.asm file + + Returns: + Dictionary with sections as keys, containing function dictionaries + """ + with open(asm_path, 'r') as f: + content = f.read() + + # Split by "Disassembly of section" + sections = re.split(r'^Disassembly of section (.+?):', content, flags=re.MULTILINE) + + result: dict[str, str] = {} + + # Process sections (skip first empty element) + for i in range(1, len(sections), 2): + section_name = sections[i].strip() + section_content = sections[i + 1] if i + 1 < len(sections) else "" + + if section_content: + result[section_name] = section_content.strip() + + return result + + +def build_asm_code_dict(asm_glob_pattern: str) -> dict[str, dict[str, str]]: + """ + Build a dictionary of assembly code for all available architectures. 
+ + Args: + asm_glob_pattern: Glob pattern to find stencils.asm files + + Returns: + Dictionary mapping architecture names to their asm_code dictionaries + """ + asm_code: dict[str, dict[str, str]] = {} + + asm_files = glob.glob(asm_glob_pattern) + + for asm_file in asm_files: + arch_name = Path(asm_file).parent.name.replace('runner-linux-', '') + + try: + asm_code[arch_name] = extract_asm_section(asm_file) + print(f"Loaded assembly for {arch_name}") + except FileNotFoundError: + print(f"Warning: Assembly file not found for {arch_name}: {asm_file}") + + return asm_code + + +# Example usage: +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Generate stencils documentation from C and assembly code") + parser.add_argument('--input', default='build/stencils.c', help='Path to input C file') + parser.add_argument('--asm-pattern', default='build/tmp/runner-*/stencils.asm', help='Glob pattern for assembly files') + parser.add_argument('--output', default='docs/build/stencils.md', help='Output markdown file path') + + args = parser.parse_args() + + # Get all C functions + functions = extract_c_functions(args.input) + + # Build assembly code dictionary for all architectures + asm_code = build_asm_code_dict(args.asm_pattern) + + #@norm_indent + def get_stencil_section(func_name: str) -> str: + c_code = functions[func_name] + section_name = '.text.' 
+ func_name + + arch_asm_code = '' + for arch in sorted(asm_code.keys()): + if section_name in asm_code[arch]: + arch_asm_code += f""" +### {arch} +```nasm +{asm_code[arch][section_name]} +``` +""" + else: + arch_asm_code += f"\n### {arch}\nNo assembly found for this architecture\n" + + return f""" +## {func_name} +```c +{c_code} +``` +{arch_asm_code} +""" + + md_code: str = '' + + for function_name, code in functions.items(): + md_code += get_stencil_section(function_name) + + with open(args.output, 'wt') as f: + f.write(md_code) + + print(f"Generated {args.output} with {len(functions)} stencil functions") diff --git a/pyproject.toml b/pyproject.toml index 37612d2..d08a40e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "copapy" -version = "0.0.2" +version = "0.0.3" authors = [ { name="Nicolas Kruse", email="nicolas.kruse@nonan.net" }, ] diff --git a/src/copapy/__init__.py b/src/copapy/__init__.py index 6e6ea03..4297d2d 100644 --- a/src/copapy/__init__.py +++ b/src/copapy/__init__.py @@ -1,4 +1,39 @@ -from ._target import Target +""" +Copapy is a Python framework for deterministic, low-latency +realtime computation with automatic differentiation, targeting +hardware applications - for example in the fields of robotics, +aerospace, embedded systems and control systems in general. 
+ +Main features: +- Automatic differentiation (reverse-mode) +- Generates optimized machine code +- Highly portable to new architectures +- Small Python package with minimal dependencies + +Example usage: + >>> import copapy as cp + + >>> # Define variables + >>> a = cp.value(0.25) + >>> b = cp.value(0.87) + + >>> # Define computations + >>> c = a + b * 2.0 + >>> d = c ** 2 + cp.sin(a) + >>> e = cp.sqrt(b) + + >>> # Create a target (default is local), compile and run + >>> tg = cp.Target() + >>> tg.compile(c, d, e) + >>> tg.run() + + >>> # Read the results + >>> print("Result c:", tg.read_value(c)) + >>> print("Result d:", tg.read_value(d)) + >>> print("Result e:", tg.read_value(e)) +""" + +from ._target import Target, jit from ._basic_types import NumLike, value, generic_sdb, iif from ._vectors import vector, distance, scalar_projection, angle_between, rotate_vector, vector_projection from ._matrices import matrix, identity, zeros, ones, diagonal, eye @@ -41,5 +76,6 @@ __all__ = [ "rotate_vector", "vector_projection", "grad", - "eye" + "eye", + "jit" ] diff --git a/src/copapy/_autograd.py b/src/copapy/_autograd.py index b09da2d..d17cedc 100644 --- a/src/copapy/_autograd.py +++ b/src/copapy/_autograd.py @@ -15,7 +15,8 @@ def grad(x: Any, y: Sequence[value[Any]]) -> list[unifloat]: ... def grad(x: Any, y: matrix[Any]) -> matrix[float]: ... def grad(x: Any, y: value[Any] | Sequence[value[Any]] | vector[Any] | matrix[Any]) -> Any: """Returns the partial derivative dx/dy where x needs to be a scalar - and y might be a scalar, a list of scalars, a vector or matrix. + and y might be a scalar, a list of scalars, a vector or matrix. It + uses automatic differentiation in reverse-mode. 
Arguments: x: Value to return derivative of @@ -34,23 +35,23 @@ def grad(x: Any, y: value[Any] | Sequence[value[Any]] | vector[Any] | matrix[Any assert isinstance(y, Sequence) or isinstance(y, vector) y_set = {v for v in y} - edges = cpb.get_all_dag_edges_between([x.source], (net.source for net in y_set if isinstance(net, Net))) + edges = cpb.get_all_dag_edges_between([x.net.source], (v.net.source for v in y_set if isinstance(v, value))) ordered_ops = cpb.stable_toposort(edges) net_lookup = {net.source: net for node in ordered_ops for net in node.args} grad_dict: dict[Net, unifloat] = dict() def add_grad(val: value[Any], gradient_value: unifloat) -> None: - grad_dict[val] = grad_dict.get(val, 0.0) + gradient_value + grad_dict[val.net] = grad_dict.get(val.net, 0.0) + gradient_value for node in reversed(ordered_ops): #print(f"--> {'x' if node in net_lookup else ' '}", node, f"{net_lookup.get(node)}") if node.args: - args: Sequence[Any] = list(node.args) - g = 1.0 if node is x.source else grad_dict[net_lookup[node]] + args: Sequence[Net] = list(node.args) + g = 1.0 if node is x.net.source else grad_dict[net_lookup[node]] opn = node.name.split('_')[0] - a: value[Any] = args[0] - b: value[Any] = args[1] if len(args) > 1 else a + a: value[float] = value(args[0]) + b: value[float] = value(args[1]) if len(args) > 1 else a if opn in ['ge', 'gt', 'eq', 'ne', 'floordiv', 'bwand', 'bwor', 'bwxor']: pass # Derivative is 0 for all ops returning integers @@ -118,9 +119,9 @@ def grad(x: Any, y: value[Any] | Sequence[value[Any]] | vector[Any] | matrix[Any raise ValueError(f"Operation {opn} not yet supported for auto diff.") if isinstance(y, value): - return grad_dict[y] + return grad_dict[y.net] if isinstance(y, vector): - return vector(grad_dict[yi] if isinstance(yi, value) else 0.0 for yi in y) + return vector(grad_dict[yi.net] if isinstance(yi, value) else 0.0 for yi in y) if isinstance(y, matrix): - return matrix((grad_dict[yi] if isinstance(yi, value) else 0.0 for yi in row) 
for row in y) - return [grad_dict[yi] for yi in y] + return matrix((grad_dict[yi.net] if isinstance(yi, value) else 0.0 for yi in row) for row in y) + return [grad_dict[yi.net] for yi in y] diff --git a/src/copapy/_basic_types.py b/src/copapy/_basic_types.py index ec9b9ac..fa5f0cf 100644 --- a/src/copapy/_basic_types.py +++ b/src/copapy/_basic_types.py @@ -56,14 +56,6 @@ class Node: def __repr__(self) -> str: return f"Node:{self.name}({', '.join(str(a) for a in self.args) if self.args else (self.value if isinstance(self, CPConstant) else '')})" - def get_node_hash(self, commutative: bool = False) -> int: - if commutative: - return hash(self.name) ^ hash(frozenset(a.source.node_hash for a in self.args)) - return hash(self.name) ^ hash(tuple(a.source.node_hash for a in self.args)) - - def __hash__(self) -> int: - return self.node_hash - class Net: """A Net represents a scalar type in the computation graph - or more generally it @@ -76,44 +68,62 @@ class Net: def __init__(self, dtype: str, source: Node): self.dtype = dtype self.source = source - self.volatile = False def __repr__(self) -> str: names = get_var_name(self) - return f"{'name:' + names[0] if names else 'id:' + str(hash(self))[-5:]}" + return f"{'name:' + names[0] if names else 'h:' + str(hash(self))[-5:]}" def __hash__(self) -> int: return self.source.node_hash + + def __eq__(self, other: object) -> bool: + return isinstance(other, Net) and self.source == other.source -class value(Generic[TNum], Net): +class value(Generic[TNum]): """A "value" represents a typed scalar variable. It supports arithmetic and comparison operations. Attributes: dtype (str): Data type of this value. """ - def __init__(self, source: TNum | Node, dtype: str | None = None, volatile: bool = True): + def __init__(self, source: TNum | Net, dtype: str | None = None): """Instance a value. - Args: - source: A numeric value or Node object. - dtype: Data type of this value. Required if source is a Node. 
+ Arguments: + dtype: Data type of this value. + net: Reference to the underlying Net in the graph """ - if isinstance(source, Node): - self.source = source - assert dtype, 'For source type Node a dtype argument is required.' + if isinstance(source, Net): + self.net: Net = source + if dtype: + assert transl_type(dtype) == source.dtype, f"Type of Net ({source.dtype}) does not match {dtype}" + self.dtype: str = dtype + else: + self.dtype = source.dtype + elif dtype == 'int' or dtype == 'bool': + new_node = CPConstant(int(source), False) + self.net = Net(new_node.dtype, new_node) self.dtype = dtype - elif isinstance(source, float): - self.source = CPConstant(source) - self.dtype = 'float' - elif isinstance(source, bool): - self.source = CPConstant(source) - self.dtype = 'bool' + elif dtype == 'float': + new_node = CPConstant(float(source), False) + self.net = Net(new_node.dtype, new_node) + self.dtype = dtype + elif dtype is None: + if isinstance(source, bool): + new_node = CPConstant(source, False) + self.net = Net(new_node.dtype, new_node) + self.dtype = 'bool' + else: + new_node = CPConstant(source, False) + self.net = Net(new_node.dtype, new_node) + self.dtype = new_node.dtype else: - self.source = CPConstant(source) - self.dtype = 'int' - self.volatile = volatile + raise ValueError('Unknown type: {dtype}') + + def __repr__(self) -> str: + names = get_var_name(self) + return f"{'name:' + names[0] if names else 'h:' + str(self.net.source.node_hash)[-5:]}" @overload def __add__(self: 'value[TNum]', other: 'value[TNum] | TNum') -> 'value[TNum]': ... 
@@ -220,34 +230,31 @@ class value(Generic[TNum], Net): def __rfloordiv__(self, other: NumLike) -> Any: return add_op('floordiv', [other, self]) + def __abs__(self: TCPNum) -> TCPNum: + return cp.abs(self) # type: ignore + def __neg__(self: TCPNum) -> TCPNum: if self.dtype == 'float': - return cast(TCPNum, add_op('sub', [value(0.0, volatile=False), self])) - return cast(TCPNum, add_op('sub', [value(0, volatile=False), self])) + return cast(TCPNum, add_op('sub', [value(0.0), self])) + return cast(TCPNum, add_op('sub', [value(0), self])) def __gt__(self, other: TVarNumb) -> 'value[int]': - ret = add_op('gt', [self, other]) - return value(ret.source, dtype='bool', volatile=False) + return add_op('gt', [self, other], dtype='bool') def __lt__(self, other: TVarNumb) -> 'value[int]': - ret = add_op('gt', [other, self]) - return value(ret.source, dtype='bool', volatile=False) + return add_op('gt', [other, self], dtype='bool') def __ge__(self, other: TVarNumb) -> 'value[int]': - ret = add_op('ge', [self, other]) - return value(ret.source, dtype='bool', volatile=False) + return add_op('ge', [self, other], dtype='bool') def __le__(self, other: TVarNumb) -> 'value[int]': - ret = add_op('ge', [other, self]) - return value(ret.source, dtype='bool', volatile=False) + return add_op('ge', [other, self], dtype='bool') def __eq__(self, other: TVarNumb) -> 'value[int]': # type: ignore - ret = add_op('eq', [self, other], True) - return value(ret.source, dtype='bool', volatile=False) + return add_op('eq', [self, other], True, dtype='bool') def __ne__(self, other: TVarNumb) -> 'value[int]': # type: ignore - ret = add_op('ne', [self, other], True) - return value(ret.source, dtype='bool', volatile=False) + return add_op('ne', [self, other], True, dtype='bool') @overload def __mod__(self: 'value[TNum]', other: 'value[TNum] | TNum') -> 'value[TNum]': ... 
@@ -294,7 +301,7 @@ class value(Generic[TNum], Net): return cp.pow(other, self) def __hash__(self) -> int: - return super().__hash__() + return id(self) # Bitwise and shift operations for cp[int] def __lshift__(self, other: uniint) -> 'value[int]': @@ -329,16 +336,37 @@ class value(Generic[TNum], Net): class CPConstant(Node): - def __init__(self, value: int | float): - self.dtype, self.value = _get_data_and_dtype(value) + def __init__(self, value: Any, anonymous: bool = True): + if isinstance(value, int): + self.value: int | float = value + self.dtype = 'int' + elif isinstance(value, float): + self.value = value + self.dtype = 'float' + else: + raise ValueError(f'Non supported data type: {type(value).__name__}') + self.name = 'const_' + self.dtype self.args = tuple() - self.node_hash = id(self) + self.node_hash = hash(value) ^ hash(self.dtype) if anonymous else id(self) + self.anonymous = anonymous + + def __eq__(self, other: object) -> bool: + return (self is other) or (self.anonymous and + isinstance(other, CPConstant) and + other.anonymous and + self.value == other.value and + self.dtype == other.dtype) + + def __hash__(self) -> int: + return self.node_hash class Write(Node): - def __init__(self, input: Net | int | float): - if isinstance(input, Net): + def __init__(self, input: value[Any] | Net | int | float): + if isinstance(input, value): + net = input.net + elif isinstance(input, Net): net = input else: node = CPConstant(input) @@ -351,15 +379,64 @@ class Write(Node): class Op(Node): def __init__(self, typed_op_name: str, args: Sequence[Net], commutative: bool = False): - assert not args or any(isinstance(t, Net) for t in args), 'args parameter must be of type list[Net]' self.name: str = typed_op_name self.args: tuple[Net, ...] 
= tuple(args) self.node_hash = self.get_node_hash(commutative) + self.commutative = commutative + + def get_node_hash(self, commutative: bool = False) -> int: + if commutative: + h = hash(self.name) ^ hash(frozenset(a.source.node_hash for a in self.args)) + else: + h = hash(self.name) ^ hash(tuple(a.source.node_hash for a in self.args)) + return h if h != -1 else -2 + + def __eq__(self, other: object) -> bool: + if self is other: + return True + if not isinstance(other, Op): + return NotImplemented + + # Traverse graph for both nodes. Return false on first difference. + # A false inequality result in rare cases is ok, whereas a false + # equality result leads to wrong computation results. + nodes: list[tuple[Node, Node]] = [(self, other)] + seen: set[tuple[int, int]] = set() + while(nodes): + s_node, o_node = nodes.pop() + + if s_node.node_hash != o_node.node_hash: + return False + key = (id(s_node), id(o_node)) + if key in seen: + continue + if isinstance(s_node, Op): + if (s_node.name.split('_')[0] != o_node.name.split('_')[0] or + len(o_node.args) != len(s_node.args)): + return False + if s_node.commutative: + for s_net, o_net in zip(sorted(s_node.args, key=hash), + sorted(o_node.args, key=hash)): + if s_net is not o_net: + nodes.append((s_net.source, o_net.source)) + else: + for s_net, o_net in zip(s_node.args, o_node.args): + if s_net is not o_net: + nodes.append((s_net.source, o_net.source)) + elif s_node != o_node: + return False + seen.add(key) + return True + + def __hash__(self) -> int: + return self.node_hash -def net_from_value(val: Any) -> value[Any]: - vi = CPConstant(val) - return value(vi, vi.dtype, False) +def value_from_number(val: Any) -> value[Any]: + # Create anonymous constant that can be removed during optimization + new_node = CPConstant(val) + new_net = Net(new_node.dtype, new_node) + return value(new_net) @overload @@ -375,34 +452,38 @@ def iif(expression: float | int, true_result: value[TNum], false_result: TNum | @overload def
iif(expression: float | int | value[Any], true_result: TNum | value[TNum], false_result: TNum | value[TNum]) -> value[TNum] | TNum: ... def iif(expression: Any, true_result: Any, false_result: Any) -> Any: + """Inline if-else operation. Returns true_result if expression is non-zero, + else returns false_result. + + Arguments: + expression: The condition to evaluate. + true_result: The result if expression is non-zero. + false_result: The result if expression is zero. + + Returns: + The selected result based on the evaluation of expression. + """ allowed_type = (value, int, float) assert isinstance(true_result, allowed_type) and isinstance(false_result, allowed_type), "Result type not supported" return (expression != 0) * true_result + (expression == 0) * false_result -def add_op(op: str, args: list[value[Any] | int | float], commutative: bool = False) -> value[Any]: - arg_nets = [a if isinstance(a, Net) else net_from_value(a) for a in args] +def add_op(op: str, args: list[value[Any] | int | float], commutative: bool = False, dtype: str | None = None) -> value[Any]: + arg_values = [a if isinstance(a, value) else value_from_number(a) for a in args] if commutative: - arg_nets = sorted(arg_nets, key=lambda a: a.dtype) # TODO: update the stencil generator to generate only sorted order + arg_values = sorted(arg_values, key=lambda a: a.dtype) # TODO: update the stencil generator to generate only sorted order - typed_op = '_'.join([op] + [transl_type(a.dtype) for a in arg_nets]) + typed_op = '_'.join([op] + [transl_type(a.dtype) for a in arg_values]) if typed_op not in generic_sdb.stencil_definitions: - raise NotImplementedError(f"Operation {op} not implemented for {' and '.join([a.dtype for a in arg_nets])}") + raise NotImplementedError(f"Operation {op} not implemented for {' and '.join([a.dtype for a in arg_values])}") result_type = generic_sdb.stencil_definitions[typed_op].split('_')[0] - if result_type == 'float': - return value[float](Op(typed_op, arg_nets, 
commutative), result_type) - else: - return value[int](Op(typed_op, arg_nets, commutative), result_type) + result_net = Net(result_type, Op(typed_op, [av.net for av in arg_values], commutative)) + if dtype: + result_type = dtype -def _get_data_and_dtype(value: Any) -> tuple[str, float | int]: - if isinstance(value, int): - return ('int', int(value)) - elif isinstance(value, float): - return ('float', float(value)) - else: - raise ValueError(f'Non supported data type: {type(value).__name__}') + return value(result_net, result_type) diff --git a/src/copapy/_compiler.py b/src/copapy/_compiler.py index fbb9125..16676c6 100644 --- a/src/copapy/_compiler.py +++ b/src/copapy/_compiler.py @@ -102,11 +102,19 @@ def get_all_dag_edges(nodes: Iterable[Node]) -> Generator[tuple[Node, Node], Non Tuples of (source_node, target_node) representing edges in the DAG """ emitted_edges: set[tuple[Node, Node]] = set() + used_nets: dict[Net, Net] = {} node_list: list[Node] = [n for n in nodes] while(node_list): node = node_list.pop() for net in node.args: + + # In case there is already net with equivalent value use this + if net in used_nets: + net = used_nets[net] + else: + used_nets[net] = net + edge = (net.source, node) if edge not in emitted_edges: yield edge @@ -213,6 +221,8 @@ def get_nets(*inputs: Iterable[Iterable[Any]]) -> list[Net]: for net in el: if isinstance(net, Net): nets.add(net) + else: + assert net is None or isinstance(net, Node), net return list(nets) @@ -300,6 +310,14 @@ def get_aux_func_layout(function_names: Iterable[str], sdb: stencil_database, of def get_dag_stats(node_list: Iterable[Node | Net]) -> dict[str, int]: + """Get operation statistics for the DAG identified by provided end nodes + + Arguments: + node_list: List of end nodes of the DAG + + Returns: + Dictionary of operation name to occurrence count + """ edges = get_all_dag_edges(n.source if isinstance(n, Net) else n for n in node_list) ops = {node for node, _ in edges} @@ -335,7 +353,7 @@ def 
compile_to_dag(node_list: Iterable[Node], sdb: stencil_database) -> tuple[bi dw.write_com(binw.Command.FREE_MEMORY) # Get all nets/variables associated with heap memory - variable_list = get_nets([[const_net_list]], extended_output_ops) + variable_list = get_nets([const_net_list], extended_output_ops) stencil_names = {node.name for _, node in extended_output_ops} aux_function_names = sdb.get_sub_functions(stencil_names) diff --git a/src/copapy/_math.py b/src/copapy/_math.py index 3303ce1..facd90c 100644 --- a/src/copapy/_math.py +++ b/src/copapy/_math.py @@ -79,10 +79,10 @@ def pow(x: VecNumLike, y: VecNumLike) -> Any: for _ in range(y - 1): m *= x return m - if y == -1: - return 1 / x if isinstance(x, value) or isinstance(y, value): return add_op('pow', [x, y]) + elif y == -1: + return 1 / x else: return float(x ** y) @@ -280,7 +280,6 @@ def get_42(x: NumLike) -> value[float] | float: return float((int(x) * 3.0 + 42.0) * 5.0 + 21.0) -#TODO: Add vector support @overload def abs(x: U) -> U: ... @overload @@ -296,9 +295,11 @@ def abs(x: U | value[U] | vector[U]) -> Any: Returns: Absolute value of x """ - #tt = -x * (x < 0) - ret = (x < 0) * -x + (x >= 0) * x - return ret # REMpyright: ignore[reportReturnType] + if isinstance(x, value): + return add_op('abs', [x]) + if isinstance(x, vector): + return x.map(abs) + return (x < 0) * -x + (x >= 0) * x @overload diff --git a/src/copapy/_matrices.py b/src/copapy/_matrices.py index 45ab495..480bf1c 100644 --- a/src/copapy/_matrices.py +++ b/src/copapy/_matrices.py @@ -16,7 +16,7 @@ class matrix(Generic[TNum]): def __init__(self, values: Iterable[Iterable[TNum | value[TNum]]] | vector[TNum]): """Create a matrix with given values. - Args: + Arguments: values: iterable of iterable of constant values """ if isinstance(values, vector): @@ -44,7 +44,7 @@ class matrix(Generic[TNum]): def __getitem__(self, key: tuple[int, int]) -> value[TNum] | TNum: ... 
def __getitem__(self, key: int | tuple[int, int]) -> Any: """Get a row as a vector or a specific element. - Args: + Arguments: key: row index or (row, col) tuple Returns: @@ -83,7 +83,7 @@ class matrix(Generic[TNum]): tuple(a + other for a in row) for row in self.values ) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return matrix( tuple(a + o if isinstance(a, value) else a + other for a in row) for row in self.values @@ -117,7 +117,7 @@ class matrix(Generic[TNum]): tuple(a - other for a in row) for row in self.values ) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return matrix( tuple(a - o if isinstance(a, value) else a - other for a in row) for row in self.values @@ -140,7 +140,7 @@ class matrix(Generic[TNum]): tuple(other - a for a in row) for row in self.values ) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return matrix( tuple(o - a if isinstance(a, value) else other - a for a in row) for row in self.values @@ -168,7 +168,7 @@ class matrix(Generic[TNum]): tuple(a * other for a in row) for row in self.values ) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return matrix( tuple(a * o if isinstance(a, value) else a * other for a in row) for row in self.values @@ -195,7 +195,7 @@ class matrix(Generic[TNum]): tuple(a / other for a in row) for row in self.values ) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return matrix( tuple(a / o if isinstance(a, value) else a / other for a in row) for row in self.values @@ -214,7 +214,7 @@ class matrix(Generic[TNum]): tuple(other / a for a in row) for row in 
self.values ) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return matrix( tuple(o / a if isinstance(a, value) else other / a for a in row) for row in self.values @@ -305,7 +305,7 @@ class matrix(Generic[TNum]): """Convert all elements to copapy values if any element is a copapy value.""" if any(isinstance(val, value) for row in self.values for val in row): return matrix( - tuple(value(val, volatile=False) if not isinstance(val, value) else val for val in row) + tuple(value(val) if not isinstance(val, value) else val for val in row) for row in self.values ) else: diff --git a/src/copapy/_mixed.py b/src/copapy/_mixed.py index f8624ed..205955b 100644 --- a/src/copapy/_mixed.py +++ b/src/copapy/_mixed.py @@ -18,7 +18,18 @@ def mixed_sum(scalars: Iterable[int | float | value[Any]]) -> Any: def mixed_homogenize(scalars: Iterable[T | value[T]]) -> Iterable[T] | Iterable[value[T]]: + """Convert all scalars to either python numbers if there are no value types, + or to value types if there is at least one value type. + + Arguments: + scalars: Iterable of scalars which can be either + python numbers or value types. + + Returns: + Iterable of scalars homogenized to either all plain values + or all value types. + """ if any(isinstance(val, value) for val in scalars): - return (value(val, volatile=False) if not isinstance(val, value) else val for val in scalars) + return (value(val) if not isinstance(val, value) else val for val in scalars) else: return (val for val in scalars if not isinstance(val, value)) diff --git a/src/copapy/_stencils.py b/src/copapy/_stencils.py index 3d78e83..b7864c0 100644 --- a/src/copapy/_stencils.py +++ b/src/copapy/_stencils.py @@ -95,11 +95,14 @@ def get_last_call_in_function(func: pelfy.elf_symbol) -> int: # Find last relocation in function assert func.relocations, f'No call function in stencil function {func.name}.' 
reloc = func.relocations[-1] - # Assume the call instruction is 4 bytes long for relocations with less than 32 bit and 5 bytes otherwise - instruction_lengths = 4 if reloc.bits < 32 else 5 - address_field_length = 4 - #print(f"-> {[r.fields['r_offset'] - func.fields['st_value'] for r in func.relocations]}") - return reloc.fields['r_offset'] - func.fields['st_value'] + address_field_length - instruction_lengths + if reloc.symbol.name.startswith('dummy_'): + return -0xFFFF # Last relocation is not a jump + else: + # Assume the call instruction is 4 bytes long for relocations with less than 32 bit and 5 bytes otherwise + instruction_lengths = 4 if reloc.bits < 32 else 5 + address_field_length = 4 + #print(f"-> {[r.fields['r_offset'] - func.fields['st_value'] for r in func.relocations]}") + return reloc.fields['r_offset'] - func.fields['st_value'] + address_field_length - instruction_lengths def get_op_after_last_call_in_function(func: pelfy.elf_symbol) -> int: @@ -123,7 +126,7 @@ class stencil_database(): def __init__(self, obj_file: str | bytes): """Load the stencil database from an ELF object file - Args: + Arguments: obj_file: path to the ELF object file or bytes of the ELF object file """ if isinstance(obj_file, str): @@ -201,7 +204,7 @@ class stencil_database(): def get_patch(self, relocation: relocation_entry, symbol_address: int, function_offset: int, symbol_type: int) -> patch_entry: """Return patch positions for a provided symbol (function or object) - Args: + Arguments: relocation: relocation entry symbol_address: absolute address of the target symbol function_offset: absolute address of the first byte of the @@ -305,6 +308,12 @@ class stencil_database(): symbol_type = symbol_type + 0x04 # Absolut value scale = 0x10000 + elif pr.type.endswith('_ABS32'): + # R_ARM_ABS32 + # S + A (replaces full 32 bit) + patch_value = symbol_address + pr.fields['r_addend'] + symbol_type = symbol_type + 0x03 # Relative to data section + else: raise 
NotImplementedError(f"Relocation type {pr.type} in {relocation.pelfy_reloc.target_section.name} pointing to {relocation.pelfy_reloc.symbol.name} not implemented") @@ -313,7 +322,7 @@ class stencil_database(): def get_stencil_code(self, name: str) -> bytes: """Return the striped function code for a provided function name - Args: + Arguments: name: function name Returns: @@ -333,7 +342,7 @@ class stencil_database(): def get_sub_functions(self, names: Iterable[str]) -> set[str]: """Return recursively all functions called by stencils or by other functions - Args: + Arguments: names: function or stencil names Returns: @@ -384,7 +393,7 @@ class stencil_database(): def get_function_code(self, name: str, part: Literal['full', 'start', 'end'] = 'full') -> bytes: """Returns machine code for a specified function name. - Args: + Arguments: name: function name part: part of the function to return ('full', 'start', 'end') diff --git a/src/copapy/_target.py b/src/copapy/_target.py index be0e6a1..8f1866f 100644 --- a/src/copapy/_target.py +++ b/src/copapy/_target.py @@ -1,12 +1,17 @@ -from typing import Iterable, overload, TypeVar, Any +from typing import Iterable, overload, TypeVar, Any, Callable, TypeAlias from . 
import _binwrite as binw -from coparun_module import coparun, read_data_mem +from coparun_module import coparun, read_data_mem, create_target, clear_target import struct from ._basic_types import stencil_db_from_package from ._basic_types import value, Net, Node, Write, NumLike from ._compiler import compile_to_dag T = TypeVar("T", int, float) +Values: TypeAlias = 'Iterable[NumLike] | NumLike' +ArgType: TypeAlias = int | float | Iterable[int | float] +TRet = TypeVar("TRet", Iterable[int | float], int, float) + +_jit_cache: dict[Any, tuple['Target', tuple[value[Any] | Iterable[value[Any]], ...], NumLike | Iterable[NumLike]]] = {} def add_read_command(dw: binw.data_writer, variables: dict[Net, tuple[int, int, str]], net: Net) -> None: @@ -17,6 +22,33 @@ def add_read_command(dw: binw.data_writer, variables: dict[Net, tuple[int, int, dw.write_int(lengths) +def jit(func: Callable[..., TRet]) -> Callable[..., TRet]: + """Just-in-time compile a function for the copapy target. + + Arguments: + func: Function to compile + + Returns: + A callable that runs the compiled function. + """ + def call_helper(*args: ArgType) -> TRet: + if func in _jit_cache: + tg, inputs, out = _jit_cache[func] + for input, arg in zip(inputs, args): + tg.write_value(input, arg) + else: + tg = Target() + inputs = tuple( + tuple(value(ai) for ai in a) if isinstance(a, Iterable) else value(a) for a in args) + out = func(*inputs) + tg.compile(out) + _jit_cache[func] = (tg, inputs, out) + tg.run() + return tg.read_value(out) # type: ignore + + return call_helper + + class Target(): """Target device for compiling for and running on copapy code. 
""" @@ -29,26 +61,30 @@ class Target(): """ self.sdb = stencil_db_from_package(arch, optimization) self._values: dict[Net, tuple[int, int, str]] = {} + self._context = create_target() - def compile(self, *values: int | float | value[int] | value[float] | Iterable[int | float | value[int] | value[float]]) -> None: + def __del__(self) -> None: + clear_target(self._context) + + def compile(self, *values: int | float | value[Any] | Iterable[int | float | value[Any]]) -> None: """Compiles the code to compute the given values. Arguments: values: Values to compute """ nodes: list[Node] = [] - for s in values: - if isinstance(s, Iterable): - for net in s: - if isinstance(net, Net): - nodes.append(Write(net)) + for input in values: + if isinstance(input, Iterable): + for v in input: + if isinstance(v, value): + nodes.append(Write(v)) else: - if isinstance(s, Net): - nodes.append(Write(s)) + if isinstance(input, value): + nodes.append(Write(input)) dw, self._values = compile_to_dag(nodes, self.sdb) dw.write_com(binw.Command.END_COM) - assert coparun(dw.get_data()) > 0 + assert coparun(self._context, dw.get_data()) > 0 def run(self) -> None: """Runs the compiled code on the target device. @@ -56,36 +92,36 @@ class Target(): dw = binw.data_writer(self.sdb.byteorder) dw.write_com(binw.Command.RUN_PROG) dw.write_com(binw.Command.END_COM) - assert coparun(dw.get_data()) > 0 + assert coparun(self._context, dw.get_data()) > 0 @overload - def read_value(self, net: value[T]) -> T: ... + def read_value(self, variables: value[T]) -> T: ... @overload - def read_value(self, net: NumLike) -> float | int | bool: ... + def read_value(self, variables: NumLike) -> float | int | bool: ... @overload - def read_value(self, net: Iterable[T | value[T]]) -> list[T]: ... - def read_value(self, net: NumLike | value[T] | Iterable[T | value[T]]) -> Any: + def read_value(self, variables: Iterable[T | value[T]]) -> list[T]: ... 
+ def read_value(self, variables: NumLike | value[T] | Iterable[T | value[T]]) -> Any: """Reads the numeric value of a copapy type. Arguments: - net: Values to read + variables: Variable or multiple variables to read Returns: - Numeric value + Numeric value or values """ - if isinstance(net, Iterable): - return [self.read_value(ni) if isinstance(ni, value) else ni for ni in net] + if isinstance(variables, Iterable): + return [self.read_value(ni) if isinstance(ni, value) else ni for ni in variables] - if isinstance(net, float | int): - print("Warning: value is not a copypy value") - return net + if isinstance(variables, float | int): + return variables - assert isinstance(net, Net), "Argument must be a copapy value" - assert net in self._values, f"Value {net} not found. It might not have been compiled for the target." - addr, lengths, var_type = self._values[net] + assert isinstance(variables, value), "Argument must be a copapy value" + assert variables.net in self._values, f"Value {variables} not found. It might not have been compiled for the target." + addr, lengths, _ = self._values[variables.net] + var_type = variables.dtype assert lengths > 0 - data = read_data_mem(addr, lengths) - assert data is not None and len(data) == lengths, f"Failed to read value {net}" + data = read_data_mem(self._context, addr, lengths) + assert data is not None and len(data) == lengths, f"Failed to read value {variables}" en = {'little': '<', 'big': '>'}[self.sdb.byteorder] if var_type == 'float': if lengths == 4: @@ -106,9 +142,44 @@ class Target(): return val else: raise ValueError(f"Unsupported value type: {var_type}") + + def write_value(self, variables: value[Any] | Iterable[value[Any]], data: int | float | Iterable[int | float]) -> None: + """Write to a copapy value on the target. 
- def read_value_remote(self, net: Net) -> None: + Arguments: + variables: Single variable or multiple variables to overwrite + data: Single value or multiple values to write + """ + if isinstance(variables, Iterable): + assert isinstance(data, Iterable), "If net is iterable, value must be iterable too" + for ni, vi in zip(variables, data): + self.write_value(ni, vi) + return + + assert not isinstance(data, Iterable), "If net is not iterable, value must not be iterable" + + assert isinstance(variables, value), "Argument must be a copapy value" + assert variables.net in self._values, f"Value {variables} not found. It might not have been compiled for the target." + addr, lengths, var_type = self._values[variables.net] + assert lengths > 0 + + dw = binw.data_writer(self.sdb.byteorder) + dw.write_com(binw.Command.COPY_DATA) + dw.write_int(addr) + dw.write_int(lengths) + + if var_type == 'float': + dw.write_value(float(data), lengths) + elif var_type == 'int' or var_type == 'bool': + dw.write_value(int(data), lengths) + else: + raise ValueError(f"Unsupported value type: {var_type}") + + dw.write_com(binw.Command.END_COM) + assert coparun(self._context, dw.get_data()) > 0 + + def read_value_remote(self, variable: value[Any]) -> None: """Reads the raw data of a value by the runner.""" dw = binw.data_writer(self.sdb.byteorder) - add_read_command(dw, self._values, net) - assert coparun(dw.get_data()) > 0 + add_read_command(dw, self._values, variable.net) + assert coparun(self._context, dw.get_data()) > 0 diff --git a/src/copapy/_vectors.py b/src/copapy/_vectors.py index 1295e1f..9ba1431 100644 --- a/src/copapy/_vectors.py +++ b/src/copapy/_vectors.py @@ -19,7 +19,7 @@ class vector(Generic[TNum]): def __init__(self, values: Iterable[TNum | value[TNum]]): """Create a vector with given values. - Args: + Arguments: values: iterable of constant values """ self.values: tuple[value[TNum] | TNum, ...]
= tuple(values) @@ -59,7 +59,7 @@ class vector(Generic[TNum]): return vector(a + b for a, b in zip(self.values, other.values)) if isinstance(other, value): return vector(a + other for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return vector(a + o if isinstance(a, value) else a + other for a in self.values) @overload @@ -85,7 +85,7 @@ class vector(Generic[TNum]): return vector(a - b for a, b in zip(self.values, other.values)) if isinstance(other, value): return vector(a - other for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return vector(a - o if isinstance(a, value) else a - other for a in self.values) @overload @@ -100,7 +100,7 @@ class vector(Generic[TNum]): return vector(b - a for a, b in zip(self.values, other.values)) if isinstance(other, value): return vector(other - a for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return vector(o - a if isinstance(a, value) else other - a for a in self.values) @overload @@ -117,7 +117,7 @@ class vector(Generic[TNum]): return vector(a * b for a, b in zip(self.values, other.values)) if isinstance(other, value): return vector(a * other for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return vector(a * o if isinstance(a, value) else a * other for a in self.values) @overload @@ -143,7 +143,7 @@ class vector(Generic[TNum]): return vector(a ** b for a, b in zip(self.values, other.values)) if isinstance(other, value): return vector(a ** other for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single 
constant is allocated return vector(a ** o if isinstance(a, value) else a ** other for a in self.values) @overload @@ -158,7 +158,7 @@ class vector(Generic[TNum]): return vector(b ** a for a, b in zip(self.values, other.values)) if isinstance(other, value): return vector(other ** a for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return vector(o ** a if isinstance(a, value) else other ** a for a in self.values) def __truediv__(self, other: VecNumLike) -> 'vector[float]': @@ -167,7 +167,7 @@ class vector(Generic[TNum]): return vector(a / b for a, b in zip(self.values, other.values)) if isinstance(other, value): return vector(a / other for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return vector(a / o if isinstance(a, value) else a / other for a in self.values) def __rtruediv__(self, other: VecNumLike) -> 'vector[float]': @@ -176,7 +176,7 @@ class vector(Generic[TNum]): return vector(b / a for a, b in zip(self.values, other.values)) if isinstance(other, value): return vector(other / a for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return vector(o / a if isinstance(a, value) else other / a for a in self.values) @overload @@ -220,7 +220,7 @@ class vector(Generic[TNum]): return vector(a > b for a, b in zip(self.values, other.values)) if isinstance(other, value): return vector(a > other for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return vector(a > o if isinstance(a, value) else a > other for a in self.values) def __lt__(self, other: VecNumLike) -> 'vector[int]': @@ -229,7 +229,7 @@ class vector(Generic[TNum]): return 
vector(a < b for a, b in zip(self.values, other.values)) if isinstance(other, value): return vector(a < other for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return vector(a < o if isinstance(a, value) else a < other for a in self.values) def __ge__(self, other: VecNumLike) -> 'vector[int]': @@ -238,7 +238,7 @@ class vector(Generic[TNum]): return vector(a >= b for a, b in zip(self.values, other.values)) if isinstance(other, value): return vector(a >= other for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return vector(a >= o if isinstance(a, value) else a >= other for a in self.values) def __le__(self, other: VecNumLike) -> 'vector[int]': @@ -247,7 +247,7 @@ class vector(Generic[TNum]): return vector(a <= b for a, b in zip(self.values, other.values)) if isinstance(other, value): return vector(a <= other for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return vector(a <= o if isinstance(a, value) else a <= other for a in self.values) def __eq__(self, other: VecNumLike | Sequence[int | float]) -> 'vector[int]': # type: ignore @@ -256,7 +256,7 @@ class vector(Generic[TNum]): return vector(a == b for a, b in zip(self.values, other)) if isinstance(other, value): return vector(a == other for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return vector(a == o if isinstance(a, value) else a == other for a in self.values) def __ne__(self, other: VecNumLike) -> 'vector[int]': # type: ignore @@ -265,7 +265,7 @@ class vector(Generic[TNum]): return vector(a != b for a, b in zip(self.values, other.values)) if isinstance(other, value): 
return vector(a != other for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return vector(a != o if isinstance(a, value) else a != other for a in self.values) @property @@ -298,7 +298,14 @@ class vector(Generic[TNum]): return self def map(self, func: Callable[[Any], value[U] | U]) -> 'vector[U]': - """Applies a function to each element of the vector and returns a new vector.""" + """Applies a function to each element of the vector and returns a new vector. + + Arguments: + func: A function that takes a single argument. + + Returns: + A new vector with the function applied to each element. + """ return vector(func(x) for x in self.values) def _map2(self, other: VecNumLike, func: Callable[[Any, Any], value[int] | value[float]]) -> 'vector[Any]': @@ -307,35 +314,75 @@ class vector(Generic[TNum]): return vector(func(a, b) for a, b in zip(self.values, other.values)) if isinstance(other, value): return vector(func(a, other) for a in self.values) - o = value(other, volatile=False) # Make sure a single constant is allocated + o = value(other) # Make sure a single constant is allocated return vector(func(a, o) if isinstance(a, value) else a + other for a in self.values) def cross_product(v1: vector[float], v2: vector[float]) -> vector[float]: - """Calculate the cross product of two 3D vectors.""" + """Calculate the cross product of two 3D vectors. + + Arguments: + v1: First 3D vector. + v2: Second 3D vector. + + Returns: + The cross product vector. + """ return v1.cross(v2) def dot_product(v1: vector[float], v2: vector[float]) -> 'float | value[float]': - """Calculate the dot product of two vectors.""" + """Calculate the dot product of two vectors. + + Arguments: + v1: First vector. + v2: Second vector. + + Returns: + The dot product. 
+ """ return v1.dot(v2) def distance(v1: vector[float], v2: vector[float]) -> 'float | value[float]': - """Calculate the Euclidean distance between two vectors.""" + """Calculate the Euclidean distance between two vectors. + + Arguments: + v1: First vector. + v2: Second vector. + + Returns: + The Euclidean distance. + """ diff = v1 - v2 return diff.magnitude() def scalar_projection(v1: vector[float], v2: vector[float]) -> 'float | value[float]': - """Calculate the scalar projection of v1 onto v2.""" + """Calculate the scalar projection of v1 onto v2. + + Arguments: + v1: First vector. + v2: Second vector. + + Returns: + The scalar projection. + """ dot_prod = v1.dot(v2) mag_v2 = v2.magnitude() + epsilon return dot_prod / mag_v2 def vector_projection(v1: vector[float], v2: vector[float]) -> vector[float]: - """Calculate the vector projection of v1 onto v2.""" + """Calculate the vector projection of v1 onto v2. + + Arguments: + v1: First vector. + v2: Second vector. + + Returns: + The projected vector. + """ dot_prod = v1.dot(v2) mag_v2_squared = v2.magnitude() ** 2 + epsilon scalar_proj = dot_prod / mag_v2_squared @@ -343,7 +390,15 @@ def vector_projection(v1: vector[float], v2: vector[float]) -> vector[float]: def angle_between(v1: vector[float], v2: vector[float]) -> 'float | value[float]': - """Calculate the angle in radians between two vectors.""" + """Calculate the angle in radians between two vectors. + + Arguments: + v1: First vector. + v2: Second vector. + + Returns: + The angle in radians. 
+ """ dot_prod = v1.dot(v2) mag_v1 = v1.magnitude() mag_v2 = v2.magnitude() @@ -352,7 +407,16 @@ def angle_between(v1: vector[float], v2: vector[float]) -> 'float | value[float] def rotate_vector(v: vector[float], axis: vector[float], angle: 'float | value[float]') -> vector[float]: - """Rotate vector v around a given axis by a specified angle using Rodrigues' rotation formula.""" + """Rotate vector v around a given axis by a specified angle using Rodrigues' rotation formula. + + Arguments: + v: The 3D vector to be rotated. + axis: A 3D vector defining the axis of rotation. + angle: The angle of rotation in radians. + + Returns: + The rotated vector. + """ k = axis.normalize() cos_angle = cp.cos(angle) sin_angle = cp.sin(angle) diff --git a/src/copapy/backend.py b/src/copapy/backend.py index f494d1d..4ff6109 100644 --- a/src/copapy/backend.py +++ b/src/copapy/backend.py @@ -1,3 +1,8 @@ +""" +Backend module for Copapy: contains internal data types +and give access to compiler internals and debugging tools. +""" + from ._target import add_read_command from ._basic_types import Net, Op, Node, CPConstant, Write, stencil_db_from_package from ._compiler import compile_to_dag, \ diff --git a/src/copapy/filters.py b/src/copapy/filters.py index 4cbdc40..1434019 100644 --- a/src/copapy/filters.py +++ b/src/copapy/filters.py @@ -22,7 +22,7 @@ def argsort(input_vector: vector[TNum]) -> vector[int]: Perform an indirect sort. It returns an array of indices that index data in sorted order. - Args: + Arguments: input_vector: The input vector containing numerical values. Returns: @@ -35,7 +35,7 @@ def median(input_vector: vector[TNum]) -> TNum | value[TNum]: """ Applies a median filter to the input vector and returns the median as a unifloat. - Args: + Arguments: input_vector: The input vector containing numerical values. Returns: @@ -56,7 +56,7 @@ def mean(input_vector: vector[Any]) -> unifloat: """ Applies a mean filter to the input vector and returns the mean as a unifloat. 
- Args: + Arguments: input_vector (vector): The input vector containing numerical values. Returns: diff --git a/src/coparun/coparun.c b/src/coparun/coparun.c index 9e77fe6..45da6d3 100644 --- a/src/coparun/coparun.c +++ b/src/coparun/coparun.c @@ -45,7 +45,15 @@ int main(int argc, char *argv[]) { return EXIT_FAILURE; } - int ret = parse_commands(file_buff); + runmem_t targ; + targ.executable_memory_len = 0; + targ.data_memory_len = 0; + targ.executable_memory = NULL; + targ.data_memory = NULL; + targ.entr_point = NULL; + targ.data_offs = 0; + + int ret = parse_commands(&targ, file_buff); if (ret == 2) { /* Dump code for debugging */ @@ -54,11 +62,11 @@ int main(int argc, char *argv[]) { return EXIT_FAILURE; } f = fopen(argv[2], "wb"); - fwrite(executable_memory, 1, (size_t)executable_memory_len, f); + fwrite(targ.executable_memory, 1, (size_t)targ.executable_memory_len, f); fclose(f); } - free_memory(); + free_memory(&targ); return ret < 0; } diff --git a/src/coparun/coparun_module.c b/src/coparun/coparun_module.c index 4f33679..bee0dfb 100644 --- a/src/coparun/coparun_module.c +++ b/src/coparun/coparun_module.c @@ -1,30 +1,41 @@ #define PY_SSIZE_T_CLEAN #include #include "runmem.h" +#include static PyObject* coparun(PyObject* self, PyObject* args) { + PyObject *handle_obj; const char *buf; Py_ssize_t buf_len; int result; - if (!PyArg_ParseTuple(args, "y#", &buf, &buf_len)) { + // Expect: handle, bytes + if (!PyArg_ParseTuple(args, "Oy#", &handle_obj, &buf, &buf_len)) { return NULL; /* TypeError set by PyArg_ParseTuple */ } + void *ptr = PyLong_AsVoidPtr(handle_obj); + if (!ptr) { + PyErr_SetString(PyExc_ValueError, "Invalid context handle"); + return NULL; + } + runmem_t *context = (runmem_t*)ptr; + /* If parse_commands may run for a long time, release the GIL. 
*/ Py_BEGIN_ALLOW_THREADS - result = parse_commands((uint8_t*)buf); + result = parse_commands(context, (uint8_t*)buf); Py_END_ALLOW_THREADS return PyLong_FromLong(result); } static PyObject* read_data_mem(PyObject* self, PyObject* args) { + PyObject *handle_obj; unsigned long rel_addr; unsigned long length; - // Parse arguments: unsigned long (relative address), Py_ssize_t (length) - if (!PyArg_ParseTuple(args, "nn", &rel_addr, &length)) { + // Expect: handle, rel_addr, length + if (!PyArg_ParseTuple(args, "Onn", &handle_obj, &rel_addr, &length)) { return NULL; } @@ -33,9 +44,21 @@ static PyObject* read_data_mem(PyObject* self, PyObject* args) { return NULL; } - const char *ptr = (const char *)(data_memory + rel_addr); + void *ptr = PyLong_AsVoidPtr(handle_obj); + if (!ptr) { + PyErr_SetString(PyExc_ValueError, "Invalid context handle"); + return NULL; + } + runmem_t *context = (runmem_t*)ptr; - PyObject *result = PyBytes_FromStringAndSize(ptr, length); + if (!context->data_memory || rel_addr + length > context->data_memory_len) { + PyErr_SetString(PyExc_ValueError, "Read out of bounds"); + return NULL; + } + + const char *data_ptr = (const char *)(context->data_memory + rel_addr); + + PyObject *result = PyBytes_FromStringAndSize(data_ptr, length); if (!result) { return PyErr_NoMemory(); } @@ -43,9 +66,36 @@ static PyObject* read_data_mem(PyObject* self, PyObject* args) { return result; } +static PyObject* create_target(PyObject* self, PyObject* args) { + runmem_t *context = (runmem_t*)calloc(1, sizeof(runmem_t)); + if (!context) { + return PyErr_NoMemory(); + } + // Return the pointer as a Python integer (handle) + return PyLong_FromVoidPtr((void*)context); +} + +static PyObject* clear_target(PyObject* self, PyObject* args) { + PyObject *handle_obj; + if (!PyArg_ParseTuple(args, "O", &handle_obj)) { + return NULL; + } + void *ptr = PyLong_AsVoidPtr(handle_obj); + if (!ptr) { + PyErr_SetString(PyExc_ValueError, "Invalid handle"); + return NULL; + } + runmem_t 
*context = (runmem_t*)ptr; + free_memory(context); + free(context); + Py_RETURN_NONE; +} + static PyMethodDef MyMethods[] = { {"coparun", coparun, METH_VARARGS, "Pass raw command data to coparun"}, {"read_data_mem", read_data_mem, METH_VARARGS, "Read memory and return as bytes"}, + {"create_target", create_target, METH_NOARGS, "Create and return a handle to a zero-initialized runmem_t struct"}, + {"clear_target", clear_target, METH_VARARGS, "Free all memory associated with the given target handle"}, {NULL, NULL, 0, NULL} }; diff --git a/src/coparun/runmem.c b/src/coparun/runmem.c index 7e0762e..1165956 100644 --- a/src/coparun/runmem.c +++ b/src/coparun/runmem.c @@ -5,14 +5,6 @@ #include "runmem.h" #include "mem_man.h" -/* Globals declared extern in runmem.h */ -uint8_t *data_memory = NULL; -uint32_t data_memory_len = 0; -uint8_t *executable_memory = NULL; -uint32_t executable_memory_len = 0; -entry_point_t entr_point = NULL; -int data_offs = 0; - void patch(uint8_t *patch_addr, uint32_t patch_mask, int32_t value) { uint32_t *val_ptr = (uint32_t*)patch_addr; uint32_t original = *val_ptr; @@ -58,23 +50,25 @@ void patch_arm32_abs(uint8_t *patch_addr, uint32_t imm16) *((uint32_t *)patch_addr) = instr; } -void free_memory() { - deallocate_memory(executable_memory, executable_memory_len); - deallocate_memory(data_memory, data_memory_len); - executable_memory_len = 0; - data_memory_len = 0; +void free_memory(runmem_t *context) { + deallocate_memory(context->executable_memory, context->executable_memory_len); + deallocate_memory(context->data_memory, context->data_memory_len); + context->executable_memory_len = 0; + context->data_memory_len = 0; + context->executable_memory = NULL; + context->data_memory = NULL; + context->entr_point = NULL; + context->data_offs = 0; } -int update_data_offs() { - if (data_memory && executable_memory && (data_memory - executable_memory > 0x7FFFFFFF || executable_memory - data_memory > 0x7FFFFFFF)) { +int update_data_offs(runmem_t *context) 
{ + if (context->data_memory && context->executable_memory && + (context->data_memory - context->executable_memory > 0x7FFFFFFF || + context->executable_memory - context->data_memory > 0x7FFFFFFF)) { perror("Error: code and data memory to far apart"); return 0; } - if (data_memory && executable_memory && (data_memory - executable_memory > 0x7FFFFFFF || executable_memory - data_memory > 0x7FFFFFFF)) { - perror("Error: code and data memory to far apart"); - return 0; - } - data_offs = (int)(data_memory - executable_memory); + context->data_offs = (int)(context->data_memory - context->executable_memory); return 1; } @@ -82,7 +76,7 @@ int floor_div(int a, int b) { return a / b - ((a % b != 0) && ((a < 0) != (b < 0))); } -int parse_commands(uint8_t *bytes) { +int parse_commands(runmem_t *context, uint8_t *bytes) { int32_t value; uint32_t command; uint32_t patch_mask; @@ -98,33 +92,32 @@ int parse_commands(uint8_t *bytes) { switch(command) { case ALLOCATE_DATA: size = *(uint32_t*)bytes; bytes += 4; - data_memory = allocate_data_memory(size); - data_memory_len = size; - LOG("ALLOCATE_DATA size=%i mem_addr=%p\n", size, (void*)data_memory); - if (!update_data_offs()) end_flag = -4; + context->data_memory = allocate_data_memory(size); + context->data_memory_len = size; + LOG("ALLOCATE_DATA size=%i mem_addr=%p\n", size, (void*)context->data_memory); + if (!update_data_offs(context)) end_flag = -4; break; case COPY_DATA: offs = *(uint32_t*)bytes; bytes += 4; size = *(uint32_t*)bytes; bytes += 4; LOG("COPY_DATA offs=%i size=%i\n", offs, size); - memcpy(data_memory + offs, bytes, size); bytes += size; + memcpy(context->data_memory + offs, bytes, size); bytes += size; break; case ALLOCATE_CODE: size = *(uint32_t*)bytes; bytes += 4; - executable_memory = allocate_executable_memory(size); - executable_memory_len = size; - LOG("ALLOCATE_CODE size=%i mem_addr=%p\n", size, (void*)executable_memory); - //LOG("# d %i c %i off %i\n", data_memory, executable_memory, data_offs); - if 
(!update_data_offs()) end_flag = -4; + context->executable_memory = allocate_executable_memory(size); + context->executable_memory_len = size; + LOG("ALLOCATE_CODE size=%i mem_addr=%p\n", size, (void*)context->executable_memory); + if (!update_data_offs(context)) end_flag = -4; break; case COPY_CODE: offs = *(uint32_t*)bytes; bytes += 4; size = *(uint32_t*)bytes; bytes += 4; LOG("COPY_CODE offs=%i size=%i\n", offs, size); - memcpy(executable_memory + offs, bytes, size); bytes += size; + memcpy(context->executable_memory + offs, bytes, size); bytes += size; break; case PATCH_FUNC: @@ -134,7 +127,7 @@ int parse_commands(uint8_t *bytes) { value = *(int32_t*)bytes; bytes += 4; LOG("PATCH_FUNC patch_offs=%i patch_mask=%#08x scale=%i value=%i\n", offs, patch_mask, patch_scale, value); - patch(executable_memory + offs, patch_mask, value / patch_scale); + patch(context->executable_memory + offs, patch_mask, value / patch_scale); break; case PATCH_OBJECT: @@ -144,7 +137,7 @@ int parse_commands(uint8_t *bytes) { value = *(int32_t*)bytes; bytes += 4; LOG("PATCH_OBJECT patch_offs=%i patch_mask=%#08x scale=%i value=%i\n", offs, patch_mask, patch_scale, value); - patch(executable_memory + offs, patch_mask, value / patch_scale + data_offs / patch_scale); + patch(context->executable_memory + offs, patch_mask, value / patch_scale + context->data_offs / patch_scale); break; case PATCH_OBJECT_ABS: @@ -154,7 +147,7 @@ int parse_commands(uint8_t *bytes) { value = *(int32_t*)bytes; bytes += 4; LOG("PATCH_OBJECT_ABS patch_offs=%i patch_mask=%#08x scale=%i value=%i\n", offs, patch_mask, patch_scale, value); - patch(executable_memory + offs, patch_mask, value / patch_scale); + patch(context->executable_memory + offs, patch_mask, value / patch_scale); break; case PATCH_OBJECT_REL: @@ -163,8 +156,8 @@ int parse_commands(uint8_t *bytes) { patch_scale = *(int32_t*)bytes; bytes += 4; value = *(int32_t*)bytes; bytes += 4; LOG("PATCH_OBJECT_REL patch_offs=%i patch_addr=%p scale=%i value=%i\n", - 
offs, (void*)(data_memory + value), patch_scale, value); - *(void **)(executable_memory + offs) = data_memory + value; // / patch_scale; + offs, (void*)(context->data_memory + value), patch_scale, value); + *(void **)(context->executable_memory + offs) = context->data_memory + value; break; case PATCH_OBJECT_HI21: @@ -173,8 +166,8 @@ int parse_commands(uint8_t *bytes) { patch_scale = *(int32_t*)bytes; bytes += 4; value = *(int32_t*)bytes; bytes += 4; LOG("PATCH_OBJECT_HI21 patch_offs=%i scale=%i value=%i res_value=%i\n", - offs, patch_scale, value, floor_div(data_offs + value, patch_scale) - (int32_t)offs / patch_scale); - patch_hi21(executable_memory + offs, floor_div(data_offs + value, patch_scale) - (int32_t)offs / patch_scale); + offs, patch_scale, value, floor_div(context->data_offs + value, patch_scale) - (int32_t)offs / patch_scale); + patch_hi21(context->executable_memory + offs, floor_div(context->data_offs + value, patch_scale) - (int32_t)offs / patch_scale); break; case PATCH_OBJECT_ARM32_ABS: @@ -183,21 +176,24 @@ int parse_commands(uint8_t *bytes) { patch_scale = *(int32_t*)bytes; bytes += 4; value = *(int32_t*)bytes; bytes += 4; LOG("PATCH_OBJECT_ARM32_ABS patch_offs=%i patch_mask=%#08x scale=%i value=%i imm16=%#04x\n", - offs, patch_mask, patch_scale, value, (uint32_t)((uintptr_t)(data_memory + value) & patch_mask) / (uint32_t)patch_scale); - patch_arm32_abs(executable_memory + offs, (uint32_t)((uintptr_t)(data_memory + value) & patch_mask) / (uint32_t)patch_scale); + offs, patch_mask, patch_scale, value, (uint32_t)((uintptr_t)(context->data_memory + value) & patch_mask) / (uint32_t)patch_scale); + patch_arm32_abs(context->executable_memory + offs, (uint32_t)((uintptr_t)(context->data_memory + value) & patch_mask) / (uint32_t)patch_scale); break; case ENTRY_POINT: rel_entr_point = *(uint32_t*)bytes; bytes += 4; - entr_point = (entry_point_t)(executable_memory + rel_entr_point); + context->entr_point = (entry_point_t)(context->executable_memory + 
rel_entr_point); LOG("ENTRY_POINT rel_entr_point=%i\n", rel_entr_point); - mark_mem_executable(executable_memory, executable_memory_len); + mark_mem_executable(context->executable_memory, context->executable_memory_len); break; case RUN_PROG: LOG("RUN_PROG\n"); - int ret = entr_point(); - BLOG("Return value: %i\n", ret); + { + int ret = context->entr_point(); + (void)ret; + BLOG("Return value: %i\n", ret); + } break; case READ_DATA: @@ -205,14 +201,14 @@ int parse_commands(uint8_t *bytes) { size = *(uint32_t*)bytes; bytes += 4; BLOG("READ_DATA offs=%i size=%i data=", offs, size); for (uint32_t i = 0; i < size; i++) { - printf("%02X ", data_memory[offs + i]); + printf("%02X ", context->data_memory[offs + i]); } printf("\n"); break; case FREE_MEMORY: LOG("FREE_MENORY\n"); - free_memory(); + free_memory(context); break; case DUMP_CODE: diff --git a/src/coparun/runmem.h b/src/coparun/runmem.h index 7f11dde..31e9905 100644 --- a/src/coparun/runmem.h +++ b/src/coparun/runmem.h @@ -32,23 +32,24 @@ #define FREE_MEMORY 257 #define DUMP_CODE 258 -/* Memory blobs accessible by other translation units */ -extern uint8_t *data_memory; -extern uint32_t data_memory_len; -extern uint8_t *executable_memory; -extern uint32_t executable_memory_len; -extern int data_offs; - -/* Entry point type and variable */ +/* Entry point type */ typedef int (*entry_point_t)(void); -extern entry_point_t entr_point; +/* Struct for run-time memory state */ +typedef struct runmem_s { + uint8_t *data_memory; // Pointer to data memory + uint32_t data_memory_len; // Length of data memory + uint8_t *executable_memory; // Pointer to executable memory + uint32_t executable_memory_len; // Length of executable memory + int data_offs; // Offset of data memory relative to executable memory + entry_point_t entr_point; // Entry point function pointer +} runmem_t; /* Command parser: takes a pointer to the command stream and returns an error flag (0 on success according to current code) */ -int 
parse_commands(uint8_t *bytes); +int parse_commands(runmem_t *context, uint8_t *bytes); /* Free program and data memory */ -void free_memory(); +void free_memory(runmem_t *context); #endif /* RUNMEM_H */ \ No newline at end of file diff --git a/src/coparun_module.pyi b/src/coparun_module.pyi index 5eea2be..5a1a5aa 100644 --- a/src/coparun_module.pyi +++ b/src/coparun_module.pyi @@ -1,2 +1,4 @@ -def coparun(data: bytes) -> int: ... -def read_data_mem(rel_addr: int, length: int) -> bytes: ... +def coparun(context: int, data: bytes) -> int: ... +def read_data_mem(context: int, rel_addr: int, length: int) -> bytes: ... +def create_target() -> int: ... +def clear_target(context: int) -> None: ... \ No newline at end of file diff --git a/stencils/aux_functions.c b/stencils/aux_functions.c index a7ab664..dd92c72 100644 --- a/stencils/aux_functions.c +++ b/stencils/aux_functions.c @@ -4,13 +4,6 @@ volatile extern int dummy_int; volatile extern float dummy_float; -int floor_div(float arg1, float arg2) { - float x = arg1 / arg2; - int i = (int)x; - if (x < 0 && x != (float)i) i -= 1; - return i; -} - NOINLINE float auxsub_get_42(int n) { return n * 5.0f + 21.0f; } diff --git a/stencils/generate_stencils.py b/stencils/generate_stencils.py index 0a039d4..80bd15a 100644 --- a/stencils/generate_stencils.py +++ b/stencils/generate_stencils.py @@ -84,10 +84,19 @@ def get_cast(type1: str, type2: str, type_out: str) -> str: @norm_indent -def get_func1(func_name: str, type1: str, type2: str) -> str: +def get_func1(func_name: str, type1: str) -> str: return f""" - STENCIL void {func_name}_{type1}_{type2}({type1} arg1, {type2} arg2) {{ - result_float_{type2}(aux_{func_name}((float)arg1), arg2); + STENCIL void {func_name}_{type1}({type1} arg1) {{ + result_float(aux_{func_name}((float)arg1)); + }} + """ + + +@norm_indent +def get_custom_stencil(stencil_signature: str, stencil_body: str) -> str: + return f""" + STENCIL void {stencil_signature} {{ + {stencil_body} }} """ @@ -102,10 +111,10 
@@ def get_func2(func_name: str, type1: str, type2: str) -> str: @norm_indent -def get_math_func1(func_name: str, type1: str) -> str: +def get_math_func1(func_name: str, type1: str, stencil_name: str) -> str: return f""" - STENCIL void {func_name}_{type1}({type1} arg1) {{ - result_float({func_name}f((float)arg1)); + STENCIL void {stencil_name}_{type1}({type1} arg1) {{ + result_float({func_name}((float)arg1)); }} """ @@ -149,7 +158,7 @@ def get_floordiv(op: str, type1: str, type2: str) -> str: else: return f""" STENCIL void {op}_{type1}_{type2}({type1} arg1, {type2} arg2) {{ - result_float_{type2}((float)floor_div((float)arg1, (float)arg2), arg2); + result_float_{type2}(floorf((float)arg1 / (float)arg2), arg2); }} """ @@ -238,11 +247,14 @@ if __name__ == "__main__": fnames = ['get_42'] for fn, t1 in permutate(fnames, types): - code += get_func1(fn, t1, t1) + code += get_func1(fn, t1) fnames = ['sqrt', 'exp', 'log', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan'] for fn, t1 in permutate(fnames, types): - code += get_math_func1(fn, t1) + code += get_math_func1(fn + 'f', t1, fn) + + code += get_math_func1('fabsf', 'float', 'abs') + code += get_custom_stencil('abs_int(int arg1)', 'result_int(__builtin_abs(arg1));') fnames = ['atan2', 'pow'] for fn, t1, t2 in permutate(fnames, types, types): diff --git a/stencils/test.c b/stencils/test.c index 43284cf..976d3bd 100644 --- a/stencils/test.c +++ b/stencils/test.c @@ -3,7 +3,6 @@ int main() { // Test aux functions float a = 16.0f; - float div_result = (float)floor_div(-7.0f, 3.0f); float g42 = aux_get_42(0.0f); return 0; } diff --git a/tests/benchmark.py b/tests/benchmark.py index 804500f..6eb2c12 100644 --- a/tests/benchmark.py +++ b/tests/benchmark.py @@ -239,18 +239,18 @@ def save_svg_with_theme_styles(pyplot_obj, path): fill: #EEEEEE !important; } #patch_1 path { - fill: #444444 !important; + fill: #14141400 !important; } } @media (prefers-color-scheme: light) { path { - stroke: #444444 !important; + stroke: #141414 
!important; } text { - fill: #444444 !important; + fill: #141414 !important; } #patch_1 path { - fill: #FFFFFF !important; + fill: #FFFFFF00 !important; } } #patch_1 path { diff --git a/tests/test_autograd.py b/tests/test_autograd.py index 841840f..f8b113d 100644 --- a/tests/test_autograd.py +++ b/tests/test_autograd.py @@ -4,7 +4,7 @@ import pytest def test_autograd(): - # Validate against micrograd results from Andrej Karpathy + # Validated against micrograd results from Andrej Karpathy # https://github.com/karpathy/micrograd/blob/master/test/test_engine.py a = value(-4.0) b = value(2.0) diff --git a/tests/test_branching_stencils.py b/tests/test_branching_stencils.py index 8e8c244..ae26f64 100644 --- a/tests/test_branching_stencils.py +++ b/tests/test_branching_stencils.py @@ -30,9 +30,9 @@ def test_compile(): il.write_com(_binwrite.Command.RUN_PROG) #il.write_com(_binwrite.Command.DUMP_CODE) - for net in ret_test: - assert isinstance(net, copapy.backend.Net) - add_read_command(il, variables, net) + for v in ret_test: + assert isinstance(v, value) + add_read_command(il, variables, v.net) il.write_com(_binwrite.Command.END_COM) diff --git a/tests/test_comp_timing.py b/tests/test_comp_timing.py index b848fb9..5a1fe81 100644 --- a/tests/test_comp_timing.py +++ b/tests/test_comp_timing.py @@ -70,7 +70,7 @@ def test_timing_compiler(): # Get all nets/variables associated with heap memory - variable_list = get_nets([[const_net_list]], extended_output_ops) + variable_list = get_nets([const_net_list], extended_output_ops) stencil_names = {node.name for _, node in extended_output_ops} print(f'-- get_sub_functions: {len(stencil_names)}') diff --git a/tests/test_compile.py b/tests/test_compile.py index f34eae4..8f88aac 100644 --- a/tests/test_compile.py +++ b/tests/test_compile.py @@ -65,9 +65,9 @@ def test_compile(): # run program command il.write_com(_binwrite.Command.RUN_PROG) - for net in ret: - assert isinstance(net, copapy.backend.Net) - add_read_command(il, variables, 
net) + for v in ret: + assert isinstance(v, cp.value) + add_read_command(il, variables, v.net) il.write_com(_binwrite.Command.END_COM) diff --git a/tests/test_compile_aarch64.py b/tests/test_compile_aarch64.py index 3235a9d..1cdeb14 100644 --- a/tests/test_compile_aarch64.py +++ b/tests/test_compile_aarch64.py @@ -60,9 +60,9 @@ def test_compile(): # run program command il.write_com(_binwrite.Command.RUN_PROG) - for net in ret: - assert isinstance(net, backend.Net) - add_read_command(il, variables, net) + for v in ret: + assert isinstance(v, cp.value) + add_read_command(il, variables, v.net) il.write_com(_binwrite.Command.END_COM) diff --git a/tests/test_compile_armv7.py b/tests/test_compile_armv7.py index 6d52845..79ea027 100644 --- a/tests/test_compile_armv7.py +++ b/tests/test_compile_armv7.py @@ -61,9 +61,9 @@ def test_compile(): il.write_com(_binwrite.Command.RUN_PROG) #il.write_com(_binwrite.Command.DUMP_CODE) - for net in ret: - assert isinstance(net, backend.Net) - add_read_command(il, variables, net) + for v in ret: + assert isinstance(v, cp.value) + add_read_command(il, variables, v.net) il.write_com(_binwrite.Command.END_COM) diff --git a/tests/test_compile_div.py b/tests/test_compile_div.py index ff702fa..8b355ad 100644 --- a/tests/test_compile_div.py +++ b/tests/test_compile_div.py @@ -1,5 +1,5 @@ from copapy import value, NumLike -from copapy.backend import Write, compile_to_dag +from copapy.backend import Write, compile_to_dag, add_read_command, Net import copapy import subprocess from copapy import _binwrite @@ -28,14 +28,14 @@ def test_compile(): out = [Write(r) for r in ret] - il, _ = compile_to_dag(out, copapy.generic_sdb) + il, vars = compile_to_dag(out, copapy.generic_sdb) # run program command il.write_com(_binwrite.Command.RUN_PROG) - il.write_com(_binwrite.Command.READ_DATA) - il.write_int(0) - il.write_int(36) + for v in ret: + assert isinstance(v, value) + add_read_command(il, vars, v.net) il.write_com(_binwrite.Command.END_COM) diff --git 
a/tests/test_compile_math.py b/tests/test_compile_math.py index d65f626..75731a5 100644 --- a/tests/test_compile_math.py +++ b/tests/test_compile_math.py @@ -28,9 +28,9 @@ def test_compile_sqrt(): # run program command il.write_com(_binwrite.Command.RUN_PROG) - for net in ret: - assert isinstance(net, copapy.backend.Net) - add_read_command(il, variables, net) + for v in ret: + assert isinstance(v, value) + add_read_command(il, variables, v.net) il.write_com(_binwrite.Command.END_COM) @@ -62,9 +62,9 @@ def test_compile_log(): # run program command il.write_com(_binwrite.Command.RUN_PROG) - for net in ret: - assert isinstance(net, copapy.backend.Net) - add_read_command(il, variables, net) + for v in ret: + assert isinstance(v, value) + add_read_command(il, variables, v.net) il.write_com(_binwrite.Command.END_COM) @@ -96,9 +96,9 @@ def test_compile_sin(): # run program command il.write_com(_binwrite.Command.RUN_PROG) - for net in ret: - assert isinstance(net, copapy.backend.Net) - add_read_command(il, variables, net) + for v in ret: + assert isinstance(v, copapy.value) + add_read_command(il, variables, v.net) il.write_com(_binwrite.Command.END_COM) diff --git a/tests/test_dag_optimization.py b/tests/test_dag_optimization.py index 5d705ee..a340e19 100644 --- a/tests/test_dag_optimization.py +++ b/tests/test_dag_optimization.py @@ -1,6 +1,41 @@ import copapy as cp -from copapy._basic_types import value -from copapy.backend import get_dag_stats +from copapy import value +from copapy.backend import get_dag_stats, Write +import copapy.backend as cpb +from typing import Any + + +def show_dag(val: value[Any]): + out = [Write(val.net)] + + print(out) + print('-- get_edges:') + + edges = list(cpb.get_all_dag_edges(out)) + for p in edges: + print('#', p) + + print('-- get_ordered_ops:') + ordered_ops = cpb.stable_toposort(edges) + for p in ordered_ops: + print('#', p) + + print('-- get_consts:') + const_list = cpb.get_const_nets(ordered_ops) + for p in const_list: + print('#', 
p) + + print('-- add_read_ops:') + output_ops = list(cpb.add_read_ops(ordered_ops)) + for p in output_ops: + print('#', p) + + print('-- add_write_ops:') + extended_output_ops = list(cpb.add_write_ops(output_ops, const_list)) + for p in extended_output_ops: + print('#', p) + print('--') + def test_get_dag_stats(): @@ -13,12 +48,26 @@ def test_get_dag_stats(): v3 = sum((v1 + i + 7) @ v2 for i in range(sum_size)) assert isinstance(v3, value) - stat = get_dag_stats([v3]) + stat = get_dag_stats([v3.net]) print(stat) assert stat['const_float'] == 2 * v_size assert stat['add_float_float'] == sum_size * v_size - 2 +def test_dag_reduction(): + + a = value(8) + + v3 = (a * 3 + 7 + 2) + (a * 3 + 7 + 2) + + show_dag(v3) + + assert isinstance(v3, value) + stat = get_dag_stats([v3.net]) + print(stat) + + if __name__ == "__main__": - test_get_dag_stats() \ No newline at end of file + test_get_dag_stats() + test_dag_reduction() \ No newline at end of file diff --git a/tests/test_jit_decorator.py b/tests/test_jit_decorator.py new file mode 100644 index 0000000..2f9f051 --- /dev/null +++ b/tests/test_jit_decorator.py @@ -0,0 +1,80 @@ +import copapy as cp + +@cp.jit +def calculation(x: float, y: float) -> float: + return sum(x ** 2 + y ** 2 + i for i in range(10)) + + +MASK = (1 << 31) - 1 # 0x7FFFFFFF + + +def rotl31(x: int, r: int) -> int: + r %= 31 + return ((x << r) | (x >> (31 - r))) & MASK + + +def slow_31bit_int_list_hash(data: list[int], rounds: int = 5)-> int: + """ + Intentionally slow hash using only 31-bit integer operations. 
+ Input: list[int] + Output: 31-bit integer + """ + + # 31-bit initial state (non-zero) + state = 0x1234567 & MASK + + # Normalize input into 31-bit space + data = [abs(x) & MASK for x in data] + + for r in range(rounds): + for i, x in enumerate(data): + # Mix index, round, and data + state ^= (x + i + r) & MASK + + # Nonlinear mixing (carefully kept 31-bit) + state = (state * 1103515245) & MASK + state ^= (state >> 13) + state = (state * 12345) & MASK + + # Data-dependent rotation (forces serial dependency) + rot = (x ^ state) % 31 + state = rotl31(state, rot) + + # Cross-round diffusion + state ^= (state >> 11) + state = (state * 1664525) & MASK + state ^= (state >> 17) + + return state + + +def test_hash_without_decorator(): + nums = [12, 99, 2024] + h_ref = slow_31bit_int_list_hash(nums) + h = slow_31bit_int_list_hash([cp.value(num) for num in nums]) + + tg = cp.Target() + tg.compile(h) + tg.run() + + assert isinstance(h, cp.value) + assert tg.read_value(h) == h_ref + print(tg.read_value(h), h_ref) + + +def test_decorator(): + sumv = 0 + y = 5.7 + for i in range(2000): + x = i * 2.5 + sumv = calculation(x, y) + sumv + + assert abs(sumv - 166542418649.28778) < 1e14, sumv + + +def test_hash(): + nums = [12, 99, 2024] + h_ref = slow_31bit_int_list_hash(nums) + h = cp.jit(slow_31bit_int_list_hash)(nums) + print(h, h_ref) + assert h == h_ref diff --git a/tests/test_math.py b/tests/test_math.py index 534d309..17efda1 100644 --- a/tests/test_math.py +++ b/tests/test_math.py @@ -9,7 +9,6 @@ def test_fine(): a_f = 2.5 c_i = value(a_i) c_f = value(a_f) - # c_b = variable(True) ret_test = (c_f ** 2, c_i ** -1, @@ -19,7 +18,9 @@ def test_fine(): cp.sqrt(c_f), cp.sin(c_f), cp.cos(c_f), - cp.tan(c_f)) # , c_i & 3) + cp.tan(c_f), + cp.abs(-c_i), + cp.abs(-c_f)) re2_test = (a_f ** 2, a_i ** -1, @@ -29,7 +30,9 @@ def test_fine(): cp.sqrt(a_f), cp.sin(a_f), cp.cos(a_f), - cp.tan(a_f)) # , a_i & 3) + cp.tan(a_f), + cp.abs(-a_i), + cp.abs(-a_f)) ret_refe = (a_f ** 2, a_i ** -1, @@ 
-39,7 +42,9 @@ def test_fine(): ma.sqrt(a_f), ma.sin(a_f), ma.cos(a_f), - ma.tan(a_f)) # , a_i & 3) + ma.tan(a_f), + cp.abs(-a_i), + cp.abs(-a_f)) tg = Target() print('* compile and copy ...') diff --git a/tests/test_multi_targets.py b/tests/test_multi_targets.py new file mode 100644 index 0000000..3ccc2ce --- /dev/null +++ b/tests/test_multi_targets.py @@ -0,0 +1,36 @@ +import copapy as cp +import pytest + +def test_multi_target(): + # Define variables + a = cp.value(0.25) + b = cp.value(0.87) + + # Define computations + c = a + b * 2.0 + d = c ** 2 + cp.sin(a) + e = d + cp.sqrt(b) + + # Create a target, compile and run + tg1 = cp.Target() + tg1.compile(e) + + # Patch constant value + a.net.source = cp._basic_types.CPConstant(1000.0) + + tg2 = cp.Target() + tg2.compile(e) + + tg1.run() + tg2.run() + + print("Result tg1:", tg1.read_value(e)) + print("Result tg2:", tg2.read_value(e)) + + # Assertions to verify correctness + assert tg1.read_value(e) == pytest.approx((0.25 + 0.87 * 2.0) ** 2 + cp.sin(0.25) + cp.sqrt(0.87), 0.005) # pyright: ignore[reportUnknownMemberType] + assert tg2.read_value(e) == pytest.approx((1000.0 + 0.87 * 2.0) ** 2 + cp.sin(1000.0) + cp.sqrt(0.87), 0.005) # pyright: ignore[reportUnknownMemberType] + + +if __name__ == "__main__": + test_multi_target() diff --git a/tests/test_ops_aarch64.py b/tests/test_ops_aarch64.py index 26f026c..2158494 100644 --- a/tests/test_ops_aarch64.py +++ b/tests/test_ops_aarch64.py @@ -107,9 +107,9 @@ def test_compile(): dw.write_com(_binwrite.Command.RUN_PROG) #dw.write_com(_binwrite.Command.DUMP_CODE) - for net in ret_test: - assert isinstance(net, backend.Net) - add_read_command(dw, variables, net) + for v in ret_test: + assert isinstance(v, value) + add_read_command(dw, variables, v.net) #dw.write_com(_binwrite.Command.READ_DATA) #dw.write_int(0) @@ -146,7 +146,7 @@ def test_compile(): for test, ref in zip(ret_test, ret_ref): assert isinstance(test, value) - address = variables[test][0] + address = 
variables[test.net][0] data = result_data[address] if test.dtype == 'int': val = int.from_bytes(data, sdb.byteorder, signed=True) diff --git a/tests/test_ops_armv6.py b/tests/test_ops_armv6.py new file mode 100644 index 0000000..1797c95 --- /dev/null +++ b/tests/test_ops_armv6.py @@ -0,0 +1,171 @@ +from copapy import NumLike, iif, value +from copapy.backend import Write, compile_to_dag, add_read_command +import subprocess +from copapy import _binwrite +import copapy.backend as backend +import os +import warnings +import re +import struct +import pytest +import copapy as cp + +if os.name == 'nt': + # On Windows wsl and qemu-user is required: + # sudo apt install qemu-user + qemu_command = ['wsl', 'qemu-arm'] +else: + qemu_command = ['qemu-arm'] + + +def parse_results(log_text: str) -> dict[int, bytes]: + regex = r"^READ_DATA offs=(\d*) size=(\d*) data=(.*)$" + matches = re.finditer(regex, log_text, re.MULTILINE) + var_dict: dict[int, bytes] = {} + + for match in matches: + value_str: list[str] = match.group(3).strip().split(' ') + #print('--', value_str) + value = bytes(int(v, base=16) for v in value_str) + if len(value) <= 8: + var_dict[int(match.group(1))] = value + + return var_dict + + +def run_command(command: list[str]) -> str: + result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8', check=False) + assert result.returncode != 11, f"SIGSEGV (segmentation fault)\n -Error occurred: {result.stderr}\n -Output: {result.stdout}" + assert result.returncode == 0, f"\n -Error occurred: {result.stderr}\n -Output: {result.stdout}" + return result.stdout + + +def check_for_qemu() -> bool: + command = qemu_command + ['--version'] + try: + result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False) + except Exception: + return False + return result.returncode == 0 + + +def function1(c1: NumLike) -> list[NumLike]: + return [c1 / 4, c1 / -4, c1 // 4, c1 // -4, (c1 * -1) // 4, + c1 * 4, c1 * -4, + c1 + 
4, c1 - 4, + c1 > 2, c1 > 100, c1 < 4, c1 < 100] + + +def function2(c1: NumLike) -> list[NumLike]: + return [c1 * 4.44, c1 * -4.44] + + +def function3(c1: NumLike) -> list[NumLike]: + return [c1 / 4] + + +def function4(c1: NumLike) -> list[NumLike]: + return [c1 == 9, c1 == 4, c1 != 9, c1 != 4] + + +def function5(c1: NumLike) -> list[NumLike]: + return [c1 == True, c1 == False, c1 != True, c1 != False, c1 / 2, c1 + 2] + + +def function6(c1: NumLike) -> list[NumLike]: + return [c1 == True] + + +def iiftests(c1: NumLike) -> list[NumLike]: + return [iif(c1 > 5, 8, 9), + iif(c1 < 5, 8.5, 9.5), + iif(1 > 5, 3.3, 8.8) + c1, + iif(1 < 5, c1 * 3.3, 8.8), + iif(c1 < 5, c1 * 3.3, 8.8)] + + +@pytest.mark.runner +def test_compile(): + c_i = value(9) + c_f = value(1.111) + c_b = value(True) + + ret_test = function1(c_i) + function1(c_f) + function2(c_i) + function2(c_f) + function3(c_i) + function4(c_i) + function5(c_b) + [value(9) % 2] + iiftests(c_i) + iiftests(c_f) + [cp.asin(c_i/10)] + ret_ref = function1(9) + function1(1.111) + function2(9) + function2(1.111) + function3(9) + function4(9) + function5(True) + [9 % 2] + iiftests(9) + iiftests(1.111) + [cp.asin(9/10)] + + #ret_test = (c_i * 100 // 5, c_f * 10 // 5) + #ret_ref = (9 * 100 // 5, 1.111 * 10 // 5) + + out = [Write(r) for r in ret_test] + + sdb = backend.stencil_db_from_package('armv6') + dw, variables = compile_to_dag(out, sdb) + + #dw.write_com(_binwrite.Command.READ_DATA) + #dw.write_int(0) + #dw.write_int(28) + + # run program command + dw.write_com(_binwrite.Command.RUN_PROG) + #dw.write_com(_binwrite.Command.DUMP_CODE) + + for v in ret_test: + assert isinstance(v, value) + add_read_command(dw, variables, v.net) + + #dw.write_com(_binwrite.Command.READ_DATA) + #dw.write_int(0) + #dw.write_int(28) + + dw.write_com(_binwrite.Command.END_COM) + + #print('* Data to runner:') + #dw.print() + + dw.to_file('build/runner/test-armv6.copapy') + + if not check_for_qemu(): + warnings.warn("qemu-armv6 not found, armv6 test 
skipped!", UserWarning)
+        return
+    if not os.path.isfile('build/runner/coparun-armv6'):
+        warnings.warn("armv6 runner not found, armv6 test skipped!", UserWarning)
+        return
+
+    command = qemu_command + ['build/runner/coparun-armv6', 'build/runner/test-armv6.copapy'] + ['build/runner/test-armv6.copapy.bin']
+    #try:
+    result = run_command(command)
+    #except FileNotFoundError:
+    #    warnings.warn(f"Test skipped, executable not found.", UserWarning)
+    #    return
+
+    #print('* Output from runner:\n--')
+    #print(result)
+    #print('--')
+
+    assert 'Return value: 1' in result
+
+    result_data = parse_results(result)
+
+    for test, ref in zip(ret_test, ret_ref):
+        assert isinstance(test, value)
+        address = variables[test.net][0]
+        data = result_data[address]
+        if test.dtype == 'int':
+            val = int.from_bytes(data, sdb.byteorder, signed=True)
+        elif test.dtype == 'bool':
+            val = bool.from_bytes(data, sdb.byteorder)
+        elif test.dtype == 'float':
+            en = {'little': '<', 'big': '>'}[sdb.byteorder]
+            val = struct.unpack(en + 'f', data)[0]
+            assert isinstance(val, float)
+        else:
+            raise Exception(f"Unknown type: {test.dtype}")
+        print('+', val, ref, test.dtype, f" addr={address}")
+        for t in (int, float, bool):
+            assert isinstance(val, t) == isinstance(ref, t), f"Result type does not match for {val} and {ref}"
+        assert val == pytest.approx(ref, 1e-5), f"Result does not match: {val} and reference: {ref}"  # pyright: ignore[reportUnknownMemberType]
+
+
+if __name__ == "__main__":
+    test_compile()
diff --git a/tests/test_ops_armv7.py b/tests/test_ops_armv7.py
index 63a22eb..31e8e05 100644
--- a/tests/test_ops_armv7.py
+++ b/tests/test_ops_armv7.py
@@ -109,9 +109,9 @@ def test_compile():
     dw.write_com(_binwrite.Command.RUN_PROG)
     #dw.write_com(_binwrite.Command.DUMP_CODE)
 
-    for net in ret_test:
-        assert isinstance(net, backend.Net)
-        add_read_command(dw, variables, net)
+    for v in ret_test:
+        assert isinstance(v, value)
+        add_read_command(dw, variables, v.net)
#dw.write_com(_binwrite.Command.READ_DATA)
     #dw.write_int(0)
@@ -148,7 +148,7 @@ def test_compile():
 
     for test, ref in zip(ret_test, ret_ref):
         assert isinstance(test, value)
-        address = variables[test][0]
+        address = variables[test.net][0]
         data = result_data[address]
         if test.dtype == 'int':
             val = int.from_bytes(data, sdb.byteorder, signed=True)
@@ -168,4 +168,4 @@ def test_compile():
 
 if __name__ == "__main__":
     #test_example()
-    test_compile()
+    test_compile()
diff --git a/tests/test_ops_x86.py b/tests/test_ops_x86.py
index 6ea427d..c7825d3 100644
--- a/tests/test_ops_x86.py
+++ b/tests/test_ops_x86.py
@@ -78,17 +78,17 @@ def test_compile():
     #t5 = ((t3 * t1) * 2).magnitude()
 
     c_i = value(9)
-    #c_f = variable(1.111)
-    #c_b = variable(True)
+    c_f = value(1.111)
+    c_b = value(True)
 
-    #ret_test = function1(c_i) + function1(c_f) + function2(c_i) + function2(c_f) + function3(c_i) + function4(c_i) + function5(c_b) + [c_i % 2, sin(c_f)] + iiftests(c_i) + iiftests(c_f)
-    #ret_ref = function1(9) + function1(1.111) + function2(9) + function2(1.111) + function3(9) + function4(9) + function5(True) + [9 % 2, sin(1.111)] + iiftests(9) + iiftests(1.111)
+    ret_test = function1(c_i) + function1(c_f) + function2(c_i) + function2(c_f) + function3(c_i) + function4(c_i) + function5(c_b) + [c_i % 2, cp.sin(c_f)] + iiftests(c_i) + iiftests(c_f)
+    ret_ref = function1(9) + function1(1.111) + function2(9) + function2(1.111) + function3(9) + function4(9) + function5(True) + [9 % 2, cp.sin(1.111)] + iiftests(9) + iiftests(1.111)
 
     #ret_test = [cp.sin(c_i), cp.asin(variable(0.0))]
     #ret_ref = [cp.sin(9), cp.asin(0.0)]
 
-    ret_test: list[value[float]] = []
-    ret_ref: list[float] = []
+    #ret_test: list[value[float]] = []
+    #ret_ref: list[float] = []
     #sval = variable(8.0)
     #tval = 8.0
     #for i in range(20):
@@ -101,8 +101,8 @@ def test_compile():
     #ret_test = [cp.sin(c_i)]
     #ret_ref = [cp.sin(9)]
 
-    ret_test = [cp.get_42(c_i)]
-    ret_ref
= [cp.get_42(9)] out = [Write(r) for r in ret_test] @@ -120,9 +120,9 @@ def test_compile(): dw.write_com(_binwrite.Command.RUN_PROG) #dw.write_com(_binwrite.Command.DUMP_CODE) - for net in ret_test: - assert isinstance(net, backend.Net) - add_read_command(dw, variables, net) + for v in ret_test: + assert isinstance(v, value) + add_read_command(dw, variables, v.net) #dw.write_com(_binwrite.Command.READ_DATA) #dw.write_int(0) @@ -156,7 +156,7 @@ def test_compile(): for test, ref in zip(ret_test, ret_ref): assert isinstance(test, value) - address = variables[test][0] + address = variables[test.net][0] data = result_data[address] if test.dtype == 'int': val = int.from_bytes(data, sdb.byteorder, signed=True) @@ -169,9 +169,145 @@ def test_compile(): else: raise Exception(f"Unknown type: {test.dtype}") print('+', val, ref, test.dtype, f" addr={address}") - #for t in (int, float, bool): - # assert isinstance(val, t) == isinstance(ref, t), f"Result type does not match for {val} and {ref}" - #assert val == pytest.approx(ref, 1e-5), f"Result does not match: {val} and reference: {ref}" # pyright: ignore[reportUnknownMemberType] + for t in (int, float, bool): + assert isinstance(val, t) == isinstance(ref, t), f"Result type does not match for {val} and {ref}" + assert val == pytest.approx(ref, 1e-5), f"Result does not match: {val} and reference: {ref}" # pyright: ignore[reportUnknownMemberType] + + +@pytest.mark.runner +def test_vector_compile(): + t1 = cp.vector([10, 11, 12]) + cp.vector(cp.value(v) for v in range(3)) + t2 = t1.sum() + + t3 = cp.vector(cp.value(1 / (v + 1)) for v in range(3)) + t4 = ((t3 * t1) * 2).sum() + t5 = ((t3 * t1) * 2).magnitude() + + ret = (t2, t4, t5) + + out = [Write(r) for r in ret] + + sdb = backend.stencil_db_from_package('x86') + il, variables = compile_to_dag(out, sdb) + + # run program command + il.write_com(_binwrite.Command.RUN_PROG) + #il.write_com(_binwrite.Command.DUMP_CODE) + + for v in ret: + assert isinstance(v, cp.value) + 
add_read_command(il, variables, v.net) + + il.write_com(_binwrite.Command.END_COM) + + #print('* Data to runner:') + #il.print() + + il.to_file('build/runner/test-x86.copapy') + + if platform.machine() != 'AMD64' and platform.machine() != 'x86_64': + warnings.warn(f"Test skipped, {platform.machine()} not supported for this test.", UserWarning) + else: + command = ['build/runner/coparun-x86', 'build/runner/test-x86.copapy', 'build/runner/test-x86.copapy.bin'] + try: + result = run_command(command) + except FileNotFoundError: + warnings.warn("Test skipped, executable not found.", UserWarning) + return + + print('* Output from runner:\n--') + print(result) + print('--') + + assert 'Return value: 1' in result + + # Compare to x86_64 reference results + assert " size=4 data=24 00 00 00" in result + assert " size=4 data=56 55 25 42" in result + assert " size=4 data=B4 F9 C8 41" in result + + +@pytest.mark.runner +def test_sinus(): + a_val = 1.25 # TODO: Error on x86: a > 2 PI --> Sin result > 1 + + a = cp.value(a_val) + b = cp.value(0.87) + + # Define computations + c = a + b * 2.0 + si = cp.sin(a) + d = c ** 2 + si + e = d + cp.sqrt(b) + + ret_test = [si, e] + ret_ref = [cp.sin(a_val), (a_val + 0.87 * 2.0) ** 2 + cp.sin(a_val) + cp.sqrt(0.87)] + + out = [Write(r) for r in ret_test] + + sdb = backend.stencil_db_from_package('x86') + dw, variables = compile_to_dag(out, sdb) + + #dw.write_com(_binwrite.Command.READ_DATA) + #dw.write_int(0) + #dw.write_int(28) + + # run program command + dw.write_com(_binwrite.Command.RUN_PROG) + #dw.write_com(_binwrite.Command.DUMP_CODE) + + for v in ret_test: + assert isinstance(v, value) + add_read_command(dw, variables, v.net) + + #dw.write_com(_binwrite.Command.READ_DATA) + #dw.write_int(0) + #dw.write_int(28) + + dw.write_com(_binwrite.Command.END_COM) + + #print('* Data to runner:') + #dw.print() + + dw.to_file('build/runner/test-x86.copapy') + + if platform.machine() != 'AMD64' and platform.machine() != 'x86_64': + 
warnings.warn(f"Test skipped, {platform.machine()} not supported for this test.", UserWarning) + else: + command = ['build/runner/coparun-x86', 'build/runner/test-x86.copapy', 'build/runner/test-x86.copapy.bin'] + + try: + result = run_command(command) + except FileNotFoundError: + warnings.warn("Test skipped, executable not found.", UserWarning) + return + + print('* Output from runner:\n--') + print(result) + print('--') + + assert 'Return value: 1' in result + + result_data = parse_results(result) + + for test, ref in zip(ret_test, ret_ref): + assert isinstance(test, value) + address = variables[test.net][0] + data = result_data[address] + if test.dtype == 'int': + val = int.from_bytes(data, sdb.byteorder, signed=True) + elif test.dtype == 'bool': + val = bool.from_bytes(data, sdb.byteorder) + elif test.dtype == 'float': + en = {'little': '<', 'big': '>'}[sdb.byteorder] + val = struct.unpack(en + 'f', data)[0] + assert isinstance(val, float) + else: + raise Exception(f"Unknown type: {test.dtype}") + print('+', val, ref, test.dtype, f" addr={address}") + for t in (int, float, bool): + assert isinstance(val, t) == isinstance(ref, t), f"Result type does not match for {val} and {ref}" + assert val == pytest.approx(ref, 1e-7), f"Result does not match: {val} and reference: {ref}" # pyright: ignore[reportUnknownMemberType] if __name__ == "__main__": diff --git a/tests/test_rev_kinematics.py b/tests/test_rev_kinematics.py index 9072490..8eeac3f 100644 --- a/tests/test_rev_kinematics.py +++ b/tests/test_rev_kinematics.py @@ -43,6 +43,8 @@ def test_two_arms(): print(f"End-effector position: {tg.read_value(effector)}") print(f"quadratic error = {tg.read_value(error)}") + assert tg.read_value(error) < 1e-6 + if __name__ == '__main__': test_two_arms() \ No newline at end of file diff --git a/tools/build.sh b/tools/build.sh index ae78afc..2ece1e8 100644 --- a/tools/build.sh +++ b/tools/build.sh @@ -21,6 +21,17 @@ gcc -Wall -Wextra -Wconversion -Wsign-conversion \ 
src/coparun/runmem.c src/coparun/coparun.c src/coparun/mem_man.c -o build/runner/coparun +echo "--------------arm-v6 32 bit----------------" +LIBGCC=$(arm-none-eabi-gcc -print-libgcc-file-name) +#LIBM=$(arm-none-eabi-gcc -print-file-name=libm.a) +#LIBC=$(arm-none-eabi-gcc -print-file-name=libc.a) + +arm-none-eabi-gcc -fno-pic -ffunction-sections -march=armv6 -mfpu=vfp -mfloat-abi=hard -marm -c $SRC -O3 -o build/stencils/stencils.o +arm-none-eabi-ld -r build/stencils/stencils.o build/musl/musl_objects_armv6.o $LIBGCC -o $DEST/stencils_armv6_O3.o +arm-none-eabi-objdump -d -x $DEST/stencils_armv6_O3.o > build/stencils/stencils_armv6_O3.asm +arm-linux-gnueabihf-gcc -march=armv6 -mfpu=vfp -mfloat-abi=hard -marm -static -Wall -Wextra -Wconversion -Wsign-conversion -Wshadow -Wstrict-overflow -O3 -DENABLE_LOGGING src/coparun/runmem.c src/coparun/coparun.c src/coparun/mem_man.c -o build/runner/coparun-armv6 + + echo "--------------arm-v7 32 bit----------------" LIBGCC=$(arm-none-eabi-gcc -print-libgcc-file-name) #LIBM=$(arm-none-eabi-gcc -print-file-name=libm.a) diff --git a/tools/cross_compiler_unix/packobjs.sh b/tools/cross_compiler_unix/packobjs.sh index af79b81..43bd0e9 100644 --- a/tools/cross_compiler_unix/packobjs.sh +++ b/tools/cross_compiler_unix/packobjs.sh @@ -24,14 +24,14 @@ cd ../build/stencil_objs ar x ../../musl/lib/libc.a sinf.o cosf.o tanf.o asinf.o acosf.o atanf.o atan2f.o ar x ../../musl/lib/libc.a sqrtf.o logf.o expf.o sqrt.o ar x ../../musl/lib/libc.a logf_data.o __tandf.o __cosdf.o __sindf.o -ar x ../../musl/lib/libc.a fabsf.o scalbn.o floor.o exp2f_data.o powf.o powf_data.o +ar x ../../musl/lib/libc.a fabsf.o scalbn.o floor.o floorf.o exp2f_data.o powf.o powf_data.o ar x ../../musl/lib/libc.a __rem_pio2f.o __math_invalidf.o __stack_chk_fail.o __math_divzerof.o __math_oflowf.o __rem_pio2_large.o __math_uflowf.o __math_xflowf.o # Check out .lo (PIC) ar x ../../musl/lib/libc.a sinf.lo cosf.lo tanf.lo asinf.lo acosf.lo atanf.lo atan2f.lo ar x 
../../musl/lib/libc.a sqrtf.lo logf.lo expf.lo sqrt.lo
 ar x ../../musl/lib/libc.a logf_data.lo __tandf.lo __cosdf.lo __sindf.lo
-ar x ../../musl/lib/libc.a fabsf.lo scalbn.lo floor.lo exp2f_data.lo powf.lo powf_data.lo
+ar x ../../musl/lib/libc.a fabsf.lo scalbn.lo floor.lo floorf.lo exp2f_data.lo powf.lo powf_data.lo
 ar x ../../musl/lib/libc.a __rem_pio2f.lo __math_invalidf.lo __stack_chk_fail.lo __math_divzerof.lo __math_oflowf.lo __rem_pio2_large.lo __math_uflowf.lo __math_xflowf.lo
 
 cd ../../musl
diff --git a/tools/make_example.py b/tools/make_example.py
index 6d9cebc..57c203c 100644
--- a/tools/make_example.py
+++ b/tools/make_example.py
@@ -1,31 +1,16 @@
 from copapy import value
 from copapy.backend import Write, compile_to_dag, stencil_db_from_package
 from copapy._binwrite import Command
-import copapy as cp
 
+input = value(9.0)
 
-def compile_example(arch: str = 'native') -> None:
-    """Test compilation of a simple program for x86_64."""
-    c1 = value(9.0)
+result = input ** 2 / 3.3 + 5
 
-    #ret = [c1 / 4, c1 / -4, c1 // 4, c1 // -4, (c1 * -1) // 4]
-    ret = [c1 // 3.3 + 5]
-    #ret = [cp.sqrt(c1)]
-    #c2 = cp._math.get_42()
-    #ret = [c2]
+arch = 'native'
+sdb = stencil_db_from_package(arch)
+dw, _ = compile_to_dag([Write(result)], sdb)
 
-    out = [Write(r) for r in ret]
+# Instruct runner to dump patched code to a file:
+dw.write_com(Command.DUMP_CODE)
 
-    sdb = stencil_db_from_package(arch)
-    dw, _ = compile_to_dag(out, sdb)
-
-    dw.write_com(Command.DUMP_CODE)
-
-    #print('* Data to runner:')
-    #dw.print()
-
-    dw.to_file('build/runner/test.copapy')
-
-
-if __name__ == "__main__":
-    compile_example()
+dw.to_file('build/runner/test.copapy')
\ No newline at end of file