From bb0251af66e163a27eb8262c80e159fb1a53d79e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 21:27:39 +0000 Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- python/taichi/ad/_ad.py | 1 + python/taichi/aot/__init__.py | 1 + .../aot/conventions/gfxruntime140/dr.py | 1 + .../aot/conventions/gfxruntime140/sr.py | 1 + .../circle-packing/circle_packing_image.py | 1 + .../examples/algorithm/marching_squares.py | 1 + .../algorithm/poisson_disk_sampling.py | 1 + .../real_func/algorithm/marching_squares.py | 1 + .../algorithm/poisson_disk_sampling.py | 1 + .../real_func/rendering/taichi_ngp.py | 4 +- .../taichi/examples/rendering/taichi_ngp.py | 4 +- .../examples/simulation/snow_phaseField.py | 1 + .../taichi/examples/simulation/waterwave.py | 4 +- python/taichi/lang/ast/symbol_resolver.py | 1 + python/taichi/lang/struct.py | 10 +-- python/taichi/linalg/__init__.py | 1 + python/taichi/math/__init__.py | 1 + python/taichi/tools/__init__.py | 1 + python/taichi/types/__init__.py | 1 + python/taichi/types/quant.py | 1 + python/taichi/ui/__init__.py | 1 + taichi/analysis/gen_offline_cache_key.cpp | 4 +- taichi/codegen/llvm/llvm_compiled_data.h | 2 +- taichi/codegen/spirv/kernel_utils.h | 4 +- taichi/common/dict.h | 4 +- taichi/common/exceptions.h | 4 +- taichi/common/interface.h | 10 ++- taichi/common/json_serde.h | 13 ++-- taichi/common/serialization.h | 7 +- taichi/ir/expression.h | 6 +- taichi/ir/expression_ops.h | 12 ++- taichi/ir/ir.cpp | 5 +- taichi/ir/ir.h | 6 +- taichi/ir/type.h | 16 ++-- taichi/program/field_info.h | 12 ++- taichi/program/kernel.cpp | 3 +- taichi/program/kernel_profiler.h | 9 ++- taichi/program/sparse_matrix.cpp | 14 ++-- taichi/program/sparse_matrix.h | 4 +- taichi/program/sparse_solver.cpp | 10 +-- taichi/rhi/amdgpu/amdgpu_device.h | 70 ++++++++++-------- taichi/rhi/cpu/cpu_device.h | 74 +++++++++++-------- taichi/rhi/cuda/cuda_device.h | 74 +++++++++++-------- taichi/rhi/public_device.h | 6 +- taichi/rhi/vulkan/vulkan_device.h | 4 +- taichi/runtime/llvm/runtime_module/atomic.h | 18 +++-- .../runtime/llvm/runtime_module/runtime.cpp | 24 ++++-- .../program_impls/vulkan/vulkan_program.cpp | 12 +-- taichi/system/benchmark.h | 4 +- taichi/ui/ggui/app_context.cpp | 16 ++-- taichi/util/bit.h | 10 ++- tests/generate_compat_test_modules.py | 1 + tests/python/test_ssa.py | 1 + tests/python/test_test.py | 1 + tests/run_c_api_compat_test.py | 1 + 55 files changed, 300 insertions(+), 200 deletions(-) diff --git a/python/taichi/ad/_ad.py b/python/taichi/ad/_ad.py index e988fcde502db..b4f098984b081 100644 --- a/python/taichi/ad/_ad.py +++ b/python/taichi/ad/_ad.py @@ -3,6 +3,7 @@ This module supplies two decorators for users to customize their gradient computation task. """ + import warnings from functools import reduce diff --git a/python/taichi/aot/__init__.py b/python/taichi/aot/__init__.py index 4bcd21a1a1e79..e6cbe43993ae1 100644 --- a/python/taichi/aot/__init__.py +++ b/python/taichi/aot/__init__.py @@ -3,6 +3,7 @@ Users can use Taichi as a GPU compute shader/kernel compiler by compiling their Taichi kernels into an AOT module. 
""" + import taichi.aot.conventions from taichi.aot._export import export, export_as from taichi.aot.conventions.gfxruntime140 import GfxRuntime140 diff --git a/python/taichi/aot/conventions/gfxruntime140/dr.py b/python/taichi/aot/conventions/gfxruntime140/dr.py index 7650d6a84861a..306ddd4bb0b93 100644 --- a/python/taichi/aot/conventions/gfxruntime140/dr.py +++ b/python/taichi/aot/conventions/gfxruntime140/dr.py @@ -2,6 +2,7 @@ Data representation of all JSON data structures following the GfxRuntime140 convention. """ + from typing import Any, Dict, List, Optional from taichi.aot.utils import dump_json_data_model, json_data_model diff --git a/python/taichi/aot/conventions/gfxruntime140/sr.py b/python/taichi/aot/conventions/gfxruntime140/sr.py index 79dc5d40b2d7c..9e0333d9790c3 100644 --- a/python/taichi/aot/conventions/gfxruntime140/sr.py +++ b/python/taichi/aot/conventions/gfxruntime140/sr.py @@ -2,6 +2,7 @@ Structured representation of all JSON data structures following the GfxRuntime140. """ + from abc import ABC from enum import Enum from typing import Any, Dict, List, Optional diff --git a/python/taichi/examples/algorithm/circle-packing/circle_packing_image.py b/python/taichi/examples/algorithm/circle-packing/circle_packing_image.py index 9bc0f8de59398..e047641d763c5 100644 --- a/python/taichi/examples/algorithm/circle-packing/circle_packing_image.py +++ b/python/taichi/examples/algorithm/circle-packing/circle_packing_image.py @@ -1,6 +1,7 @@ """ Given an input image, redraw it with circle packings. """ + try: import cairo import cv2 diff --git a/python/taichi/examples/algorithm/marching_squares.py b/python/taichi/examples/algorithm/marching_squares.py index 7acf7a107f3c9..52b2bd71de07c 100644 --- a/python/taichi/examples/algorithm/marching_squares.py +++ b/python/taichi/examples/algorithm/marching_squares.py @@ -2,6 +2,7 @@ Marching squares algorithm in Taichi. See "https://en.wikipedia.org/wiki/Marching_squares" """ + import time import numpy as np diff --git a/python/taichi/examples/algorithm/poisson_disk_sampling.py b/python/taichi/examples/algorithm/poisson_disk_sampling.py index 7c60ea606874a..1307aaf2a8402 100644 --- a/python/taichi/examples/algorithm/poisson_disk_sampling.py +++ b/python/taichi/examples/algorithm/poisson_disk_sampling.py @@ -7,6 +7,7 @@ 1. Click on the window to restart the animation. 2. Press `p` to save screenshot. """ + import taichi as ti import taichi.math as tm diff --git a/python/taichi/examples/real_func/algorithm/marching_squares.py b/python/taichi/examples/real_func/algorithm/marching_squares.py index d9a2f2ffb16e4..47c3fc2cc5351 100644 --- a/python/taichi/examples/real_func/algorithm/marching_squares.py +++ b/python/taichi/examples/real_func/algorithm/marching_squares.py @@ -2,6 +2,7 @@ Marching squares algorithm in Taichi. See "https://en.wikipedia.org/wiki/Marching_squares" """ + import time import numpy as np diff --git a/python/taichi/examples/real_func/algorithm/poisson_disk_sampling.py b/python/taichi/examples/real_func/algorithm/poisson_disk_sampling.py index a8e16fae3f2d8..86c256735ed84 100644 --- a/python/taichi/examples/real_func/algorithm/poisson_disk_sampling.py +++ b/python/taichi/examples/real_func/algorithm/poisson_disk_sampling.py @@ -7,6 +7,7 @@ 1. Click on the window to restart the animation. 2. Press `p` to save screenshot. 
""" + import taichi as ti import taichi.math as tm import typing diff --git a/python/taichi/examples/real_func/rendering/taichi_ngp.py b/python/taichi/examples/real_func/rendering/taichi_ngp.py index 0b816e3288d47..b12a9e7f3dd6b 100644 --- a/python/taichi/examples/real_func/rendering/taichi_ngp.py +++ b/python/taichi/examples/real_func/rendering/taichi_ngp.py @@ -277,9 +277,7 @@ def hash_table_init(self): for i in range(self.level): resolution = int(np.ceil(self.base_res * np.exp(i * np.log(self.per_level_scales)) - 1.0)) + 1 params_in_level = resolution**3 - params_in_level = ( - int(resolution**3) if params_in_level % 8 == 0 else int((params_in_level + 8 - 1) / 8) * 8 - ) + params_in_level = int(resolution**3) if params_in_level % 8 == 0 else int((params_in_level + 8 - 1) / 8) * 8 params_in_level = min(self.max_params, params_in_level) self.offsets[i] = offset self.hash_map_sizes[i] = params_in_level diff --git a/python/taichi/examples/rendering/taichi_ngp.py b/python/taichi/examples/rendering/taichi_ngp.py index f5f6416d8ff1a..b61df5d9cea9e 100644 --- a/python/taichi/examples/rendering/taichi_ngp.py +++ b/python/taichi/examples/rendering/taichi_ngp.py @@ -277,9 +277,7 @@ def hash_table_init(self): for i in range(self.level): resolution = int(np.ceil(self.base_res * np.exp(i * np.log(self.per_level_scales)) - 1.0)) + 1 params_in_level = resolution**3 - params_in_level = ( - int(resolution**3) if params_in_level % 8 == 0 else int((params_in_level + 8 - 1) / 8) * 8 - ) + params_in_level = int(resolution**3) if params_in_level % 8 == 0 else int((params_in_level + 8 - 1) / 8) * 8 params_in_level = min(self.max_params, params_in_level) self.offsets[i] = offset self.hash_map_sizes[i] = params_in_level diff --git a/python/taichi/examples/simulation/snow_phaseField.py b/python/taichi/examples/simulation/snow_phaseField.py index 59e492307d5a3..f248b0811a575 100644 --- a/python/taichi/examples/simulation/snow_phaseField.py +++ b/python/taichi/examples/simulation/snow_phaseField.py @@ -2,6 +2,7 @@ space discretization: finite difference method, time integration: Runge-Kutta method repo's link: https://github.com/mo-hanxuan/Snow-PhaseField more details about physical interpretation refer to [Physica D 63(3-4): 410-423]""" + import numpy as np import taichi as ti diff --git a/python/taichi/examples/simulation/waterwave.py b/python/taichi/examples/simulation/waterwave.py index 73a238ba38a8d..263f0207f16c7 100644 --- a/python/taichi/examples/simulation/waterwave.py +++ b/python/taichi/examples/simulation/waterwave.py @@ -31,9 +31,7 @@ def reset(): @ti.func def laplacian(i, j): - return (-4 * height[i, j] + height[i, j - 1] + height[i, j + 1] + height[i + 1, j] + height[i - 1, j]) / ( - 4 * dx**2 - ) + return (-4 * height[i, j] + height[i, j - 1] + height[i, j + 1] + height[i + 1, j] + height[i - 1, j]) / (4 * dx**2) @ti.func diff --git a/python/taichi/lang/ast/symbol_resolver.py b/python/taichi/lang/ast/symbol_resolver.py index c0f82d9b2d58e..96f6aca67b1fd 100644 --- a/python/taichi/lang/ast/symbol_resolver.py +++ b/python/taichi/lang/ast/symbol_resolver.py @@ -1,4 +1,5 @@ """Provides helpers to resolve AST nodes.""" + import ast diff --git a/python/taichi/lang/struct.py b/python/taichi/lang/struct.py index 25b5f3fd3ba1d..6d1f6f267a698 100644 --- a/python/taichi/lang/struct.py +++ b/python/taichi/lang/struct.py @@ -210,11 +210,11 @@ def to_dict(self, include_methods=False, include_ndim=False): Dict: The result dictionary. 
""" res_dict = { - k: v.to_dict(include_methods=include_methods, include_ndim=include_ndim) - if isinstance(v, Struct) - else v.to_list() - if isinstance(v, Matrix) - else v + k: ( + v.to_dict(include_methods=include_methods, include_ndim=include_ndim) + if isinstance(v, Struct) + else v.to_list() if isinstance(v, Matrix) else v + ) for k, v in self.__entries.items() } if include_methods: diff --git a/python/taichi/linalg/__init__.py b/python/taichi/linalg/__init__.py index c4e28878b1b59..a652119124713 100644 --- a/python/taichi/linalg/__init__.py +++ b/python/taichi/linalg/__init__.py @@ -1,5 +1,6 @@ """Taichi support module for sparse matrix operations. """ + from taichi.linalg.sparse_cg import SparseCG from taichi.linalg.sparse_matrix import * from taichi.linalg.sparse_solver import SparseSolver diff --git a/python/taichi/math/__init__.py b/python/taichi/math/__init__.py index b31f96b061de1..c924fc6c9dd45 100644 --- a/python/taichi/math/__init__.py +++ b/python/taichi/math/__init__.py @@ -2,6 +2,7 @@ The math module supports glsl-style vectors, matrices and functions. """ + from ._complex import * from .mathimpl import * # pylint: disable=W0622 diff --git a/python/taichi/tools/__init__.py b/python/taichi/tools/__init__.py index 6686b0f86f82c..a66c175074e5b 100644 --- a/python/taichi/tools/__init__.py +++ b/python/taichi/tools/__init__.py @@ -4,6 +4,7 @@ - `video` submodule for exporting results to video files. - `diagnose` submodule for printing system environment information. """ + from taichi.tools.diagnose import * from taichi.tools.image import * from taichi.tools.np2ply import * diff --git a/python/taichi/types/__init__.py b/python/taichi/types/__init__.py index 9590746233d09..4a11219cd2681 100644 --- a/python/taichi/types/__init__.py +++ b/python/taichi/types/__init__.py @@ -7,6 +7,7 @@ - ndarray: for arbitrary arrays. - quant: for quantized types, see "https://yuanming.taichi.graphics/publication/2021-quantaichi/quantaichi.pdf" """ + from taichi.types import quant from taichi.types.annotations import * from taichi.types.compound_types import * diff --git a/python/taichi/types/quant.py b/python/taichi/types/quant.py index 2c5646ceecc05..02c7a59ee0600 100644 --- a/python/taichi/types/quant.py +++ b/python/taichi/types/quant.py @@ -2,6 +2,7 @@ This module defines generators of quantized types. For more details, read https://yuanming.taichi.graphics/publication/2021-quantaichi/quantaichi.pdf. """ + from taichi._lib.utils import ti_python_core as _ti_python_core from taichi.lang import impl from taichi.types.primitive_types import i32 diff --git a/python/taichi/ui/__init__.py b/python/taichi/ui/__init__.py index af07ac9d84ee1..66bbd4014616f 100644 --- a/python/taichi/ui/__init__.py +++ b/python/taichi/ui/__init__.py @@ -4,5 +4,6 @@ This module contains a cpu based GUI system, a vulkan based GGUI system, and other helper utilities like adding widgets and exporting video files. 
""" + from .gui import * from .ui import * diff --git a/taichi/analysis/gen_offline_cache_key.cpp b/taichi/analysis/gen_offline_cache_key.cpp index 62eca26d378ba..f57f597f99707 100644 --- a/taichi/analysis/gen_offline_cache_key.cpp +++ b/taichi/analysis/gen_offline_cache_key.cpp @@ -630,7 +630,9 @@ class ASTSerializer : public IRVisitor, public ExpressionVisitor { } #define DEFINE_EMIT_ENUM(EnumType) \ - void emit(EnumType type) { emit_pod(type); } + void emit(EnumType type) { \ + emit_pod(type); \ + } DEFINE_EMIT_ENUM(ExprOpCode); DEFINE_EMIT_ENUM(StmtOpCode); diff --git a/taichi/codegen/llvm/llvm_compiled_data.h b/taichi/codegen/llvm/llvm_compiled_data.h index 4f27eaa632acf..be185b1b8297e 100644 --- a/taichi/codegen/llvm/llvm_compiled_data.h +++ b/taichi/codegen/llvm/llvm_compiled_data.h @@ -22,7 +22,7 @@ class OffloadedTask { : name(name), block_dim(block_dim), grid_dim(grid_dim), - dynamic_shared_array_bytes(dynamic_shared_array_bytes){}; + dynamic_shared_array_bytes(dynamic_shared_array_bytes) {}; TI_IO_DEF(name, block_dim, grid_dim, dynamic_shared_array_bytes); }; diff --git a/taichi/codegen/spirv/kernel_utils.h b/taichi/codegen/spirv/kernel_utils.h index e3b967bdf766e..6d6889efd9278 100644 --- a/taichi/codegen/spirv/kernel_utils.h +++ b/taichi/codegen/spirv/kernel_utils.h @@ -315,8 +315,8 @@ class KernelContextAttributes { /** * Get all argpacks. */ - inline const std::vector, const Type *>> - &argpack_types() const { + inline const std::vector, const Type *>> & + argpack_types() const { return argpack_types_; } diff --git a/taichi/common/dict.h b/taichi/common/dict.h index 1cf63d376224b..95634c6f14638 100644 --- a/taichi/common/dict.h +++ b/taichi/common/dict.h @@ -188,8 +188,8 @@ class Dict { } template - std::enable_if_t::value, std::remove_reference_t> - &get(std::string key) const { + std::enable_if_t::value, std::remove_reference_t> & + get(std::string key) const { return *get_ptr>(key); } diff --git a/taichi/common/exceptions.h b/taichi/common/exceptions.h index 2557cf18b8cc9..3940f4e3b9bed 100644 --- a/taichi/common/exceptions.h +++ b/taichi/common/exceptions.h @@ -139,8 +139,8 @@ struct ErrorEmitter { std::string>>> ErrorEmitter(E &&error, T p_dbg_info, std::string &&error_msg) { if constexpr ((std::is_same_v, DebugInfo *> || - std::is_same_v, const DebugInfo *>)&&std:: - is_base_of_v>) { + std::is_same_v, const DebugInfo *>) && + std::is_base_of_v>) { // Indicates a failed C++ API call from Python side, we should not print // tb here error.msg_ = error_msg; diff --git a/taichi/common/interface.h b/taichi/common/interface.h index 057eeacc8f246..164f8d5c229da 100644 --- a/taichi/common/interface.h +++ b/taichi/common/interface.h @@ -325,8 +325,12 @@ class InterfaceHolder { } \ } ImplementationInjector_##base_class_name##class_name##instance; -#define TI_NAME(alias) \ - virtual std::string get_name() const override { return get_name_static(); } \ - static std::string get_name_static() { return alias; } +#define TI_NAME(alias) \ + virtual std::string get_name() const override { \ + return get_name_static(); \ + } \ + static std::string get_name_static() { \ + return alias; \ + } } // namespace taichi diff --git a/taichi/common/json_serde.h b/taichi/common/json_serde.h index 0973b1fd24165..37d98f818a613 100644 --- a/taichi/common/json_serde.h +++ b/taichi/common/json_serde.h @@ -88,12 +88,13 @@ using remove_cvref_t = typename remove_cvref::type; template struct has_ptr_serde { template - static constexpr auto helper(T_ *) -> std::is_same< - 
decltype((T_::jsonserde_ptr_io(std::declval(), std::declval(), std::declval(), std::declval()))), void>; + static constexpr auto helper(T_ *) + -> std::is_same< + decltype((T_::jsonserde_ptr_io(std::declval(), + std::declval(), + std::declval(), + std::declval()))), + void>; template <typename> static constexpr auto helper(...) -> std::false_type; diff --git a/taichi/common/serialization.h b/taichi/common/serialization.h index 0e43a1c562ee8..a6fb8a28014e8 100644 --- a/taichi/common/serialization.h +++ b/taichi/common/serialization.h @@ -193,9 +193,10 @@ class Serializer { template <typename T> struct has_io { template <typename T_> - static constexpr auto helper(T_ *) -> std::is_same< - decltype((std::declval<T_>().io(std::declval<Serializer &>()))), - void>; + static constexpr auto helper(T_ *) + -> std::is_same< + decltype((std::declval<T_>().io(std::declval<Serializer &>()))), + void>; template <typename> static constexpr auto helper(...) -> std::false_type; diff --git a/taichi/ir/expression.h b/taichi/ir/expression.h index fb04b3e7fe338..4f21082b21d9a 100644 --- a/taichi/ir/expression.h +++ b/taichi/ir/expression.h @@ -179,7 +179,9 @@ class ExpressionVisitor { bool invoke_default_visitor_{false}; }; -#define TI_DEFINE_ACCEPT_FOR_EXPRESSION \ - void accept(ExpressionVisitor *visitor) override { visitor->visit(this); } +#define TI_DEFINE_ACCEPT_FOR_EXPRESSION \ + void accept(ExpressionVisitor *visitor) override { \ + visitor->visit(this); \ + } } // namespace taichi::lang diff --git a/taichi/ir/expression_ops.h b/taichi/ir/expression_ops.h index 48024b96d971d..120b423cdda26 100644 --- a/taichi/ir/expression_ops.h +++ b/taichi/ir/expression_ops.h @@ -12,19 +12,25 @@ Expr expr_##opname(const Expr &expr) { \ return Expr::make<UnaryOpExpression>(UnaryOpType::opname, expr); \ } \ - Expr operator op(const Expr &expr) { return expr_##opname(expr); } + Expr operator op(const Expr &expr) { \ + return expr_##opname(expr); \ + } #define DEFINE_EXPRESSION_FUNC_UNARY(opname) \ Expr opname(const Expr &expr) { \ return Expr::make<UnaryOpExpression>(UnaryOpType::opname, expr); \ } \ - Expr expr_##opname(const Expr &expr) { return opname(expr); } + Expr expr_##opname(const Expr &expr) { \ + return opname(expr); \ + } #define DEFINE_EXPRESSION_OP_BINARY(op, opname) \ Expr operator op(const Expr &lhs, const Expr &rhs) { \ return Expr::make<BinaryOpExpression>(BinaryOpType::opname, lhs, rhs); \ } \ - Expr expr_##opname(const Expr &lhs, const Expr &rhs) { return lhs op rhs; } + Expr expr_##opname(const Expr &lhs, const Expr &rhs) { \ + return lhs op rhs; \ + } #define DEFINE_EXPRESSION_FUNC_BINARY(opname) \ Expr opname(const Expr &lhs, const Expr &rhs) { \ diff --git a/taichi/ir/ir.cpp b/taichi/ir/ir.cpp index c8203778033f0..f5c7b6f56fc65 100644 --- a/taichi/ir/ir.cpp +++ b/taichi/ir/ir.cpp @@ -69,8 +69,9 @@ class StatementTypeNameVisitor : public IRVisitor { StatementTypeNameVisitor() { } -#define PER_STATEMENT(x) \ - void visit(x *stmt) override { type_name = #x; } +#define PER_STATEMENT(x) \ + void visit(x *stmt) override { \ + type_name = #x; \ + } #include "taichi/inc/statements.inc.h" #undef PER_STATEMENT diff --git a/taichi/ir/ir.h b/taichi/ir/ir.h index 6c5541101ff1d..ded58abf119be 100644 --- a/taichi/ir/ir.h +++ b/taichi/ir/ir.h @@ -269,8 +269,10 @@ class IRNode { std::unique_ptr<IRNode> clone(); }; -#define TI_DEFINE_ACCEPT \ - void accept(IRVisitor *visitor) override { visitor->visit(this); } +#define TI_DEFINE_ACCEPT \ + void accept(IRVisitor *visitor) override { \ + visitor->visit(this); \ + } #define TI_DEFINE_CLONE \ std::unique_ptr<Stmt> clone() const override { \ diff --git a/taichi/ir/type.h b/taichi/ir/type.h index 
936138a0922fb..47239eae8e8ee 100644 --- a/taichi/ir/type.h +++ b/taichi/ir/type.h @@ -176,7 +176,7 @@ class TI_DLL_EXPORT PrimitiveType : public Type { class TI_DLL_EXPORT PointerType : public Type { public: - PointerType() : Type(TypeKind::Pointer){}; + PointerType() : Type(TypeKind::Pointer) {}; PointerType(Type *pointee, bool is_bit_pointer) : Type(TypeKind::Pointer), @@ -210,7 +210,7 @@ class TI_DLL_EXPORT PointerType : public Type { class TI_DLL_EXPORT TensorType : public Type { public: - TensorType() : Type(TypeKind::Tensor){}; + TensorType() : Type(TypeKind::Tensor) {}; TensorType(std::vector<int> shape, Type *element) : Type(TypeKind::Tensor), shape_(std::move(shape)), element_(element) { } @@ -263,7 +263,7 @@ struct TI_DLL_EXPORT AbstractDictionaryMember { class TI_DLL_EXPORT AbstractDictionaryType : public Type { public: - explicit AbstractDictionaryType(TypeKind type_kind) : Type(type_kind){}; + explicit AbstractDictionaryType(TypeKind type_kind) : Type(type_kind) {}; explicit AbstractDictionaryType( TypeKind type_kind, const std::vector<AbstractDictionaryMember> &elements, @@ -294,7 +294,7 @@ class TI_DLL_EXPORT AbstractDictionaryType : public Type { class TI_DLL_EXPORT StructType : public AbstractDictionaryType { public: - StructType() : AbstractDictionaryType(TypeKind::Struct){}; + StructType() : AbstractDictionaryType(TypeKind::Struct) {}; explicit StructType(const std::vector<AbstractDictionaryMember> &elements, const std::string &layout = "none") : AbstractDictionaryType(TypeKind::Struct, elements, layout) { @@ -327,7 +327,7 @@ class TI_DLL_EXPORT StructType : public AbstractDictionaryType { class TI_DLL_EXPORT ArgPackType : public AbstractDictionaryType { public: - ArgPackType() : AbstractDictionaryType(TypeKind::ArgPack){}; + ArgPackType() : AbstractDictionaryType(TypeKind::ArgPack) {}; explicit ArgPackType(const std::vector<AbstractDictionaryMember> &elements, const std::string &layout = "none") : AbstractDictionaryType(TypeKind::ArgPack, elements, layout) { @@ -344,7 +344,7 @@ class TI_DLL_EXPORT ArgPackType : public AbstractDictionaryType { class TI_DLL_EXPORT QuantIntType : public Type { public: - QuantIntType() : Type(TypeKind::QuantInt){}; + QuantIntType() : Type(TypeKind::QuantInt) {}; QuantIntType(int num_bits, bool is_signed, Type *compute_type = nullptr); std::string to_string() const override; @@ -375,7 +375,7 @@ class TI_DLL_EXPORT QuantIntType : public Type { class TI_DLL_EXPORT QuantFixedType : public Type { public: - QuantFixedType() : Type(TypeKind::QuantFixed){}; + QuantFixedType() : Type(TypeKind::QuantFixed) {}; QuantFixedType(Type *digits_type, Type *compute_type, float64 scale); std::string to_string() const override; @@ -442,7 +442,7 @@ class TI_DLL_EXPORT QuantFloatType : public Type { class TI_DLL_EXPORT BitStructType : public Type { public: - BitStructType() : Type(TypeKind::BitStruct){}; + BitStructType() : Type(TypeKind::BitStruct) {}; BitStructType(PrimitiveType *physical_type, const std::vector<Type *> &member_types, const std::vector<int> &member_bit_offsets, diff --git a/taichi/program/field_info.h b/taichi/program/field_info.h index bdfa78e7e7370..a1206017ec84e 100644 --- a/taichi/program/field_info.h +++ b/taichi/program/field_info.h @@ -13,10 +13,14 @@ enum class FieldSource : int { HostMappedPtr = 1, }; -#define DEFINE_PROPERTY(Type, name) \ - Type name; \ - void set_##name(const Type &new_name) { name = new_name; } \ - Type get_##name() { return name; } +#define DEFINE_PROPERTY(Type, name) \ + Type name; \ + void set_##name(const Type &new_name) { \ + name = new_name; \ + } \ + Type get_##name() { \ + return name; \ + } struct 
FieldInfo { DEFINE_PROPERTY(bool, valid) diff --git a/taichi/program/kernel.cpp b/taichi/program/kernel.cpp index 57f52ecbb9e5d..46ddb3f2fc5d8 100644 --- a/taichi/program/kernel.cpp +++ b/taichi/program/kernel.cpp @@ -27,8 +27,7 @@ Kernel::Kernel(Program &program, const std::string &primal_name, AutodiffMode autodiff_mode) { // due to #6362, we cannot write [func, this] { return func(this); } - this->init( - program, [&] { return func(this); }, primal_name, autodiff_mode); + this->init(program, [&] { return func(this); }, primal_name, autodiff_mode); } Kernel::Kernel(Program &program, diff --git a/taichi/program/kernel_profiler.h b/taichi/program/kernel_profiler.h index f72b18de81729..87863e2ad4692 100644 --- a/taichi/program/kernel_profiler.h +++ b/taichi/program/kernel_profiler.h @@ -68,15 +68,16 @@ class KernelProfilerBase { } // TODO: remove start and always use start_with_handle - virtual void start(const std::string &kernel_name){TI_NOT_IMPLEMENTED}; + virtual void start(const std::string &kernel_name) { TI_NOT_IMPLEMENTED }; - virtual TaskHandle start_with_handle(const std::string &kernel_name){ - TI_NOT_IMPLEMENTED}; + virtual TaskHandle start_with_handle(const std::string &kernel_name) { + TI_NOT_IMPLEMENTED + }; static void profiler_start(KernelProfilerBase *profiler, const char *kernel_name); - virtual void stop(){TI_NOT_IMPLEMENTED}; + virtual void stop() { TI_NOT_IMPLEMENTED }; virtual void stop(TaskHandle){TI_NOT_IMPLEMENTED}; diff --git a/taichi/program/sparse_matrix.cpp b/taichi/program/sparse_matrix.cpp index 2432b56ba886a..389660f4e33b4 100644 --- a/taichi/program/sparse_matrix.cpp +++ b/taichi/program/sparse_matrix.cpp @@ -16,14 +16,12 @@ matrix_.setFromTriplets(triplets->begin(), triplets->end()); \ } -#define MAKE_MATRIX(TYPE, STORAGE) \ - { \ - Pair("f" #TYPE, #STORAGE), \ - [](int rows, int cols, DataType dt) -> std::unique_ptr<SparseMatrix> { \ - using FC = Eigen::SparseMatrix<float##TYPE, Eigen::STORAGE>; \ - return std::make_unique<EigenSparseMatrix<FC>>(rows, cols, dt); \ - } \ - } +#define MAKE_MATRIX(TYPE, STORAGE) \ + {Pair("f" #TYPE, #STORAGE), \ + [](int rows, int cols, DataType dt) -> std::unique_ptr<SparseMatrix> { \ + using FC = Eigen::SparseMatrix<float##TYPE, Eigen::STORAGE>; \ + return std::make_unique<EigenSparseMatrix<FC>>(rows, cols, dt); \ + }} #define INSTANTIATE_SPMV(type, storage) \ template void \ diff --git a/taichi/program/sparse_matrix.h b/taichi/program/sparse_matrix.h index 712d6fbba972a..ec50cfbabc5f6 100644 --- a/taichi/program/sparse_matrix.h +++ b/taichi/program/sparse_matrix.h @@ -58,9 +58,9 @@ class SparseMatrixBuilder { class SparseMatrix { public: - SparseMatrix() : rows_(0), cols_(0), dtype_(PrimitiveType::f32){}; + SparseMatrix() : rows_(0), cols_(0), dtype_(PrimitiveType::f32) {}; SparseMatrix(int rows, int cols, DataType dt = PrimitiveType::f32) - : rows_{rows}, cols_(cols), dtype_(dt){}; + : rows_{rows}, cols_(cols), dtype_(dt) {}; SparseMatrix(SparseMatrix &sm) : rows_(sm.rows_), cols_(sm.cols_), dtype_(sm.dtype_) { } diff --git a/taichi/program/sparse_solver.cpp b/taichi/program/sparse_solver.cpp index 71e34e6d6397a..96a87774cb377 100644 --- a/taichi/program/sparse_solver.cpp +++ b/taichi/program/sparse_solver.cpp @@ -72,12 +72,10 @@ EIGEN_LU_SOLVER_INSTANTIATION(float64, LU, COLAMD); #define MAKE_EIGEN_SOLVER(dt, type, order) \ std::make_unique<EigenSparseSolver##dt##type##order>() -#define MAKE_SOLVER(dt, type, order) \ - { \ - {#dt, #type, #order}, []() -> std::unique_ptr<SparseSolver> { \ - return MAKE_EIGEN_SOLVER(dt, type, order); \ - } \ - } +#define MAKE_SOLVER(dt, type, order) \ + {{#dt, #type, #order}, []() -> std::unique_ptr<SparseSolver> { \ + return MAKE_EIGEN_SOLVER(dt, type, order); \ + }} using 
Triplets = std::tuple; namespace { diff --git a/taichi/rhi/amdgpu/amdgpu_device.h b/taichi/rhi/amdgpu/amdgpu_device.h index b3b4d55e05baa..696aef7c52b04 100644 --- a/taichi/rhi/amdgpu/amdgpu_device.h +++ b/taichi/rhi/amdgpu/amdgpu_device.h @@ -18,41 +18,53 @@ class AmdgpuCommandList : public CommandList { ~AmdgpuCommandList() override { } - void bind_pipeline(Pipeline *p) noexcept final{TI_NOT_IMPLEMENTED}; + void bind_pipeline(Pipeline *p) noexcept final { TI_NOT_IMPLEMENTED }; RhiResult bind_shader_resources(ShaderResourceSet *res, - int set_index = 0) noexcept final{ - TI_NOT_IMPLEMENTED}; - RhiResult bind_raster_resources(RasterResources *res) noexcept final{ - TI_NOT_IMPLEMENTED}; - void buffer_barrier(DevicePtr ptr, - size_t size) noexcept final{TI_NOT_IMPLEMENTED}; - void buffer_barrier(DeviceAllocation alloc) noexcept final{ - TI_NOT_IMPLEMENTED}; - void memory_barrier() noexcept final{TI_NOT_IMPLEMENTED}; - void buffer_copy(DevicePtr dst, DevicePtr src, size_t size) noexcept final{ - TI_NOT_IMPLEMENTED}; - void buffer_fill(DevicePtr ptr, size_t size, uint32_t data) noexcept final{ - TI_NOT_IMPLEMENTED}; + int set_index = 0) noexcept final { + TI_NOT_IMPLEMENTED + }; + RhiResult bind_raster_resources(RasterResources *res) noexcept final { + TI_NOT_IMPLEMENTED + }; + void buffer_barrier(DevicePtr ptr, size_t size) noexcept final { + TI_NOT_IMPLEMENTED + }; + void buffer_barrier(DeviceAllocation alloc) noexcept final { + TI_NOT_IMPLEMENTED + }; + void memory_barrier() noexcept final { TI_NOT_IMPLEMENTED }; + void buffer_copy(DevicePtr dst, DevicePtr src, size_t size) noexcept final { + TI_NOT_IMPLEMENTED + }; + void buffer_fill(DevicePtr ptr, size_t size, uint32_t data) noexcept final { + TI_NOT_IMPLEMENTED + }; RhiResult dispatch(uint32_t x, uint32_t y = 1, - uint32_t z = 1) noexcept override{TI_NOT_IMPLEMENTED}; + uint32_t z = 1) noexcept override { + TI_NOT_IMPLEMENTED + }; }; class AmdgpuStream : public Stream { public: - ~AmdgpuStream() override{}; + ~AmdgpuStream() override {}; - RhiResult new_command_list(CommandList **out_cmdlist) noexcept final{ - TI_NOT_IMPLEMENTED}; - StreamSemaphore submit(CommandList *cmdlist, - const std::vector<StreamSemaphore> &wait_semaphores = - {}) override{TI_NOT_IMPLEMENTED}; + RhiResult new_command_list(CommandList **out_cmdlist) noexcept final { + TI_NOT_IMPLEMENTED + }; + StreamSemaphore submit( + CommandList *cmdlist, + const std::vector<StreamSemaphore> &wait_semaphores = {}) override { + TI_NOT_IMPLEMENTED + }; StreamSemaphore submit_synced( CommandList *cmdlist, - const std::vector<StreamSemaphore> &wait_semaphores = {}) override{ - TI_NOT_IMPLEMENTED}; + const std::vector<StreamSemaphore> &wait_semaphores = {}) override { + TI_NOT_IMPLEMENTED + }; - void command_sync() override{TI_NOT_IMPLEMENTED}; + void command_sync() override { TI_NOT_IMPLEMENTED }; }; class AmdgpuDevice : public LlvmDevice { @@ -69,7 +81,7 @@ class AmdgpuDevice : public LlvmDevice { AllocInfo get_alloc_info(const DeviceAllocation handle); AmdgpuDevice(); - ~AmdgpuDevice() override{}; + ~AmdgpuDevice() override {}; RhiResult allocate_memory(const AllocParams &params, DeviceAllocation *out_devalloc) override; @@ -80,7 +92,7 @@ uint64_t *allocate_llvm_runtime_memory_jit( const LlvmRuntimeAllocParams &params) override; - ShaderResourceSet *create_resource_set() final{TI_NOT_IMPLEMENTED}; + ShaderResourceSet *create_resource_set() final { TI_NOT_IMPLEMENTED }; RhiResult create_pipeline(Pipeline **out_pipeline, const PipelineSourceDesc &src, @@ -94,7 +106,7 @@ } 
RhiResult map(DeviceAllocation alloc, void **mapped_ptr) final; - void unmap(DevicePtr ptr) override{TI_NOT_IMPLEMENTED}; + void unmap(DevicePtr ptr) override { TI_NOT_IMPLEMENTED }; void unmap(DeviceAllocation alloc) override; void memcpy_internal(DevicePtr dst, DevicePtr src, uint64_t size) override; @@ -109,9 +121,9 @@ return AMDGPUContext::get_instance().get_total_memory(); } - Stream *get_compute_stream() override{TI_NOT_IMPLEMENTED}; + Stream *get_compute_stream() override { TI_NOT_IMPLEMENTED }; - void wait_idle() override{TI_NOT_IMPLEMENTED}; + void wait_idle() override { TI_NOT_IMPLEMENTED }; void clear() override { allocations_.clear(); diff --git a/taichi/rhi/cpu/cpu_device.h b/taichi/rhi/cpu/cpu_device.h index 778763d6f2d5a..d23775118e27d 100644 --- a/taichi/rhi/cpu/cpu_device.h +++ b/taichi/rhi/cpu/cpu_device.h @@ -21,41 +21,57 @@ class CpuCommandList : public CommandList { ~CpuCommandList() override { } - void bind_pipeline(Pipeline *p) noexcept override{TI_NOT_IMPLEMENTED}; + void bind_pipeline(Pipeline *p) noexcept override { TI_NOT_IMPLEMENTED }; RhiResult bind_shader_resources(ShaderResourceSet *res, - int set_index = 0) noexcept override{ - TI_NOT_IMPLEMENTED}; - RhiResult bind_raster_resources(RasterResources *res) noexcept override{ - TI_NOT_IMPLEMENTED}; - void buffer_barrier(DevicePtr ptr, - size_t size) noexcept override{TI_NOT_IMPLEMENTED}; - void buffer_barrier(DeviceAllocation alloc) noexcept override{ - TI_NOT_IMPLEMENTED}; - void memory_barrier() noexcept override{TI_NOT_IMPLEMENTED}; - void buffer_copy(DevicePtr dst, DevicePtr src, size_t size) noexcept override{ - TI_NOT_IMPLEMENTED}; - void buffer_fill(DevicePtr ptr, size_t size, uint32_t data) noexcept override{ - TI_NOT_IMPLEMENTED}; + int set_index = 0) noexcept override { + TI_NOT_IMPLEMENTED + }; + RhiResult bind_raster_resources(RasterResources *res) noexcept override { + TI_NOT_IMPLEMENTED + }; + void buffer_barrier(DevicePtr ptr, size_t size) noexcept override { + TI_NOT_IMPLEMENTED + }; + void buffer_barrier(DeviceAllocation alloc) noexcept override { + TI_NOT_IMPLEMENTED + }; + void memory_barrier() noexcept override { TI_NOT_IMPLEMENTED }; + void buffer_copy(DevicePtr dst, + DevicePtr src, + size_t size) noexcept override { + TI_NOT_IMPLEMENTED + }; + void buffer_fill(DevicePtr ptr, + size_t size, + uint32_t data) noexcept override { + TI_NOT_IMPLEMENTED + }; RhiResult dispatch(uint32_t x, uint32_t y = 1, - uint32_t z = 1) noexcept override{TI_NOT_IMPLEMENTED}; + uint32_t z = 1) noexcept override { + TI_NOT_IMPLEMENTED + }; }; class CpuStream : public Stream { public: - ~CpuStream() override{}; - RhiResult new_command_list(CommandList **out_cmdlist) noexcept override{ - TI_NOT_IMPLEMENTED}; - StreamSemaphore submit(CommandList *cmdlist, - const std::vector<StreamSemaphore> &wait_semaphores = - {}) override{TI_NOT_IMPLEMENTED}; + ~CpuStream() override {}; + RhiResult new_command_list(CommandList **out_cmdlist) noexcept override { + TI_NOT_IMPLEMENTED + }; + StreamSemaphore submit( + CommandList *cmdlist, + const std::vector<StreamSemaphore> &wait_semaphores = {}) override { + TI_NOT_IMPLEMENTED + }; StreamSemaphore submit_synced( CommandList *cmdlist, - const std::vector<StreamSemaphore> &wait_semaphores = {}) override{ - TI_NOT_IMPLEMENTED}; + const std::vector<StreamSemaphore> &wait_semaphores = {}) override { + TI_NOT_IMPLEMENTED + }; - void command_sync() override{TI_NOT_IMPLEMENTED}; + void command_sync() override { TI_NOT_IMPLEMENTED }; }; class CpuDevice : public LlvmDevice { @@ -69,7 +85,7 @@ class CpuDevice : public 
LlvmDevice { AllocInfo get_alloc_info(const DeviceAllocation handle); CpuDevice(); - ~CpuDevice() override{}; + ~CpuDevice() override {}; RhiResult allocate_memory(const AllocParams &params, DeviceAllocation *out_devalloc) override; @@ -92,7 +108,7 @@ class CpuDevice : public LlvmDevice { int num_alloc = 1, const std::vector<StreamSemaphore> &wait_sema = {}) noexcept override; - ShaderResourceSet *create_resource_set() override{TI_NOT_IMPLEMENTED}; + ShaderResourceSet *create_resource_set() override { TI_NOT_IMPLEMENTED }; RhiResult create_pipeline(Pipeline **out_pipeline, const PipelineSourceDesc &src, @@ -104,16 +120,16 @@ class CpuDevice : public LlvmDevice { RhiResult map_range(DevicePtr ptr, uint64_t size, void **mapped_ptr) final; RhiResult map(DeviceAllocation alloc, void **mapped_ptr) final; - void unmap(DevicePtr ptr) final{TI_NOT_IMPLEMENTED}; + void unmap(DevicePtr ptr) final { TI_NOT_IMPLEMENTED }; void unmap(DeviceAllocation alloc) final; DeviceAllocation import_memory(void *ptr, size_t size) override; void memcpy_internal(DevicePtr dst, DevicePtr src, uint64_t size) override; - Stream *get_compute_stream() override{TI_NOT_IMPLEMENTED}; + Stream *get_compute_stream() override { TI_NOT_IMPLEMENTED }; - void wait_idle() override{TI_NOT_IMPLEMENTED}; + void wait_idle() override { TI_NOT_IMPLEMENTED }; private: std::vector<AllocInfo> allocations_; diff --git a/taichi/rhi/cuda/cuda_device.h b/taichi/rhi/cuda/cuda_device.h index 3a94209084a13..4bd57ad837cf5 100644 --- a/taichi/rhi/cuda/cuda_device.h +++ b/taichi/rhi/cuda/cuda_device.h @@ -22,41 +22,57 @@ class CudaCommandList : public CommandList { ~CudaCommandList() override { } - void bind_pipeline(Pipeline *p) noexcept override{TI_NOT_IMPLEMENTED}; + void bind_pipeline(Pipeline *p) noexcept override { TI_NOT_IMPLEMENTED }; RhiResult bind_shader_resources(ShaderResourceSet *res, - int set_index = 0) noexcept final{ - TI_NOT_IMPLEMENTED}; - RhiResult bind_raster_resources(RasterResources *res) noexcept final{ - TI_NOT_IMPLEMENTED}; - void buffer_barrier(DevicePtr ptr, - size_t size) noexcept override{TI_NOT_IMPLEMENTED}; - void buffer_barrier(DeviceAllocation alloc) noexcept override{ - TI_NOT_IMPLEMENTED}; - void memory_barrier() noexcept override{TI_NOT_IMPLEMENTED}; - void buffer_copy(DevicePtr dst, DevicePtr src, size_t size) noexcept override{ - TI_NOT_IMPLEMENTED}; - void buffer_fill(DevicePtr ptr, size_t size, uint32_t data) noexcept override{ - TI_NOT_IMPLEMENTED}; + int set_index = 0) noexcept final { + TI_NOT_IMPLEMENTED + }; + RhiResult bind_raster_resources(RasterResources *res) noexcept final { + TI_NOT_IMPLEMENTED + }; + void buffer_barrier(DevicePtr ptr, size_t size) noexcept override { + TI_NOT_IMPLEMENTED + }; + void buffer_barrier(DeviceAllocation alloc) noexcept override { + TI_NOT_IMPLEMENTED + }; + void memory_barrier() noexcept override { TI_NOT_IMPLEMENTED }; + void buffer_copy(DevicePtr dst, + DevicePtr src, + size_t size) noexcept override { + TI_NOT_IMPLEMENTED + }; + void buffer_fill(DevicePtr ptr, + size_t size, + uint32_t data) noexcept override { + TI_NOT_IMPLEMENTED + }; RhiResult dispatch(uint32_t x, uint32_t y = 1, - uint32_t z = 1) noexcept override{TI_NOT_IMPLEMENTED}; + uint32_t z = 1) noexcept override { + TI_NOT_IMPLEMENTED + }; }; class CudaStream : public Stream { public: - ~CudaStream() override{}; + ~CudaStream() override {}; - RhiResult new_command_list(CommandList **out_cmdlist) noexcept final{ - TI_NOT_IMPLEMENTED}; - StreamSemaphore submit(CommandList *cmdlist, - const std::vector<StreamSemaphore> &wait_semaphores = - {}) 
override{TI_NOT_IMPLEMENTED}; + RhiResult new_command_list(CommandList **out_cmdlist) noexcept final { + TI_NOT_IMPLEMENTED + }; + StreamSemaphore submit( + CommandList *cmdlist, + const std::vector<StreamSemaphore> &wait_semaphores = {}) override { + TI_NOT_IMPLEMENTED + }; StreamSemaphore submit_synced( CommandList *cmdlist, - const std::vector<StreamSemaphore> &wait_semaphores = {}) override{ - TI_NOT_IMPLEMENTED}; + const std::vector<StreamSemaphore> &wait_semaphores = {}) override { + TI_NOT_IMPLEMENTED + }; - void command_sync() override{TI_NOT_IMPLEMENTED}; + void command_sync() override { TI_NOT_IMPLEMENTED }; }; class CudaDevice : public LlvmDevice { @@ -84,7 +100,7 @@ class CudaDevice : public LlvmDevice { AllocInfo get_alloc_info(const DeviceAllocation handle); CudaDevice(); - ~CudaDevice() override{}; + ~CudaDevice() override {}; RhiResult allocate_memory(const AllocParams &params, DeviceAllocation *out_devalloc) override; @@ -107,7 +123,7 @@ int num_alloc = 1, const std::vector<StreamSemaphore> &wait_sema = {}) noexcept override; - ShaderResourceSet *create_resource_set() final{TI_NOT_IMPLEMENTED}; + ShaderResourceSet *create_resource_set() final { TI_NOT_IMPLEMENTED }; RhiResult create_pipeline(Pipeline **out_pipeline, const PipelineSourceDesc &src, @@ -121,7 +137,7 @@ } RhiResult map(DeviceAllocation alloc, void **mapped_ptr) final; - void unmap(DevicePtr ptr) final{TI_NOT_IMPLEMENTED}; + void unmap(DevicePtr ptr) final { TI_NOT_IMPLEMENTED }; void unmap(DeviceAllocation alloc) final; void memcpy_internal(DevicePtr dst, DevicePtr src, uint64_t size) override; @@ -136,9 +152,9 @@ return CUDAContext::get_instance().get_total_memory(); } - Stream *get_compute_stream() override{TI_NOT_IMPLEMENTED}; + Stream *get_compute_stream() override { TI_NOT_IMPLEMENTED }; - void wait_idle() override{TI_NOT_IMPLEMENTED}; + void wait_idle() override { TI_NOT_IMPLEMENTED }; void clear() override { allocations_.clear(); diff --git a/taichi/rhi/public_device.h b/taichi/rhi/public_device.h index 5bd7c3f2e0919..3ca1993321060 100644 --- a/taichi/rhi/public_device.h +++ b/taichi/rhi/public_device.h @@ -57,7 +57,9 @@ constexpr size_t kBufferSizeEntireSize = std::numeric_limits<size_t>::max(); inline name operator&(name a, name b) { \ return static_cast<name>(int(a) & int(b)); \ } \ - inline bool operator&&(name a, name b) { return (int(a) & int(b)) != 0; } + inline bool operator&&(name a, name b) { \ + return (int(a) & int(b)) != 0; \ + } enum class BlendOp : uint32_t { add, subtract, reverse_subtract, min, max }; @@ -622,7 +624,7 @@ class RHI_DLL_EXPORT Device { DeviceCapabilityConfig caps_{}; public: - virtual ~Device(){}; + virtual ~Device() {}; struct AllocParams { uint64_t size{0}; diff --git a/taichi/rhi/vulkan/vulkan_device.h b/taichi/rhi/vulkan/vulkan_device.h index 4e65a9790ae3d..aef638189bea7 100644 --- a/taichi/rhi/vulkan/vulkan_device.h +++ b/taichi/rhi/vulkan/vulkan_device.h @@ -322,8 +322,8 @@ class VulkanPipeline : public Pipeline { return graphics_pipeline_template_ != nullptr; } - std::unordered_map - &get_resource_set_templates() { + std::unordered_map & + get_resource_set_templates() { return set_templates_; } diff --git a/taichi/runtime/llvm/runtime_module/atomic.h b/taichi/runtime/llvm/runtime_module/atomic.h index 8dcb813288ff9..08b3d4d76ccf1 100644 --- a/taichi/runtime/llvm/runtime_module/atomic.h +++ b/taichi/runtime/llvm/runtime_module/atomic.h @@ -36,14 +36,20 @@ DEFINE_ATOMIC_OP_INTRINSIC(xor, i64) DEFINE_ATOMIC_OP_INTRINSIC(xor, u32) 
DEFINE_ATOMIC_OP_INTRINSIC(xor, u64) -#define DEFINE_ADD(T) \ - T add_##T(T a, T b) { return a + b; } +#define DEFINE_ADD(T) \ + T add_##T(T a, T b) { \ + return a + b; \ + } -#define DEFINE_MIN(T) \ - T min_##T(T a, T b) { return b > a ? a : b; } +#define DEFINE_MIN(T) \ + T min_##T(T a, T b) { \ + return b > a ? a : b; \ + } -#define DEFINE_MAX(T) \ - T max_##T(T a, T b) { return b < a ? a : b; } +#define DEFINE_MAX(T) \ + T max_##T(T a, T b) { \ + return b < a ? a : b; \ + } #define DEFINE_ATOMIC_OP_COMP_EXCH(OP, T) \ T atomic_##OP##_##T(volatile T *dest, T inc) { \ diff --git a/taichi/runtime/llvm/runtime_module/runtime.cpp b/taichi/runtime/llvm/runtime_module/runtime.cpp index 1e63bbeaf83d7..50cffcfcd6896 100644 --- a/taichi/runtime/llvm/runtime_module/runtime.cpp +++ b/taichi/runtime/llvm/runtime_module/runtime.cpp @@ -55,10 +55,16 @@ __asm__(".symver expf,expf@GLIBC_2.2.5"); #endif // For accessing struct fields -#define STRUCT_FIELD(S, F) \ - extern "C" decltype(S::F) S##_get_##F(S *s) { return s->F; } \ - extern "C" decltype(S::F) *S##_get_ptr_##F(S *s) { return &(s->F); } \ - extern "C" void S##_set_##F(S *s, decltype(S::F) f) { s->F = f; } +#define STRUCT_FIELD(S, F) \ + extern "C" decltype(S::F) S##_get_##F(S *s) { \ + return s->F; \ + } \ + extern "C" decltype(S::F) *S##_get_ptr_##F(S *s) { \ + return &(s->F); \ + } \ + extern "C" void S##_set_##F(S *s, decltype(S::F) f) { \ + s->F = f; \ + } #define STRUCT_FIELD_ARRAY(S, F) \ extern "C" std::remove_all_extents_t<decltype(S::F)> S##_get_##F(S *s, \ @@ -159,9 +165,13 @@ std::size_t taichi_strlen(const char *str) { return len; } -#define DEFINE_UNARY_REAL_FUNC(F) \ - f32 F##_f32(f32 x) { return std::F(x); } \ - f64 F##_f64(f64 x) { return std::F(x); } +#define DEFINE_UNARY_REAL_FUNC(F) \ + f32 F##_f32(f32 x) { \ + return std::F(x); \ + } \ + f64 F##_f64(f64 x) { \ + return std::F(x); \ + } DEFINE_UNARY_REAL_FUNC(exp) DEFINE_UNARY_REAL_FUNC(log) diff --git a/taichi/runtime/program_impls/vulkan/vulkan_program.cpp b/taichi/runtime/program_impls/vulkan/vulkan_program.cpp index 10dd7ece4c6c5..7fce9f5212a48 100644 --- a/taichi/runtime/program_impls/vulkan/vulkan_program.cpp +++ b/taichi/runtime/program_impls/vulkan/vulkan_program.cpp @@ -49,15 +49,15 @@ std::vector get_required_instance_extensions() { } std::vector get_required_device_extensions() { - static std::vector extensions { - VK_KHR_SWAPCHAIN_EXTENSION_NAME, + static std::vector extensions{ + VK_KHR_SWAPCHAIN_EXTENSION_NAME, #if TI_WITH_CUDA - // so that we can do cuda-vk interop - VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME, + // so that we can do cuda-vk interop + VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME, #ifdef _WIN64 - VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME, + VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME, #else - VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME, + VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME, #endif #endif // TI_WITH_CUDA }; diff --git a/taichi/system/benchmark.h b/taichi/system/benchmark.h index 058df3657f182..63f5e7c299ee4 100644 --- a/taichi/system/benchmark.h +++ b/taichi/system/benchmark.h @@ -17,11 +17,11 @@ class Benchmark : public Unit { int64 workload; bool returns_time; - virtual void setup(){}; + virtual void setup() {}; virtual void iterate() = 0; - virtual void finalize(){}; + virtual void finalize() {}; public: void initialize(const Config &config) override { diff --git a/taichi/ui/ggui/app_context.cpp b/taichi/ui/ggui/app_context.cpp index b1cf07a8330d6..36b110b445c3c 100644 --- a/taichi/ui/ggui/app_context.cpp +++ b/taichi/ui/ggui/app_context.cpp @@ -45,17 +45,17 @@ 
std::vector get_required_instance_extensions() { } std::vector get_required_device_extensions() { - static std::vector extensions { - VK_KHR_SWAPCHAIN_EXTENSION_NAME, + static std::vector extensions{ + VK_KHR_SWAPCHAIN_EXTENSION_NAME, #if !defined(ANDROID) - VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME, - VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME, + VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME, + VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME, #ifdef _WIN64 - VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME, - VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME, + VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME, + VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME, #else - VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME, - VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, + VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME, + VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, #endif #endif }; diff --git a/taichi/util/bit.h b/taichi/util/bit.h index 6d98026136f52..77b8a548e2ae3 100644 --- a/taichi/util/bit.h +++ b/taichi/util/bit.h @@ -78,9 +78,13 @@ constexpr int bit_length() { return std::is_same<T, bool>() ? 1 : sizeof(T) * 8; } -#define TI_BIT_FIELD(T, name, start) \ - T get_##name() const { return (T)Base::get<start, bit_length<T>()>(); } \ - void set_##name(const T &val) { Base::set<start, bit_length<T>()>(val); } +#define TI_BIT_FIELD(T, name, start) \ + T get_##name() const { \ + return (T)Base::get<start, bit_length<T>()>(); \ + } \ + void set_##name(const T &val) { \ + Base::set<start, bit_length<T>()>(val); \ + } template <typename T, int N> TI_FORCE_INLINE constexpr T product(const std::array<T, N> arr) { diff --git a/tests/generate_compat_test_modules.py b/tests/generate_compat_test_modules.py index 51b265d5d7f64..c22590eb3a6d7 100644 --- a/tests/generate_compat_test_modules.py +++ b/tests/generate_compat_test_modules.py @@ -3,6 +3,7 @@ from this currently building one in development branch) for `run_c_api_compat_test.py` to consume. """ + import glob import os import pathlib diff --git a/tests/python/test_ssa.py b/tests/python/test_ssa.py index 8bfed0a2cd4a6..5be2d6daa3917 100644 --- a/tests/python/test_ssa.py +++ b/tests/python/test_ssa.py @@ -3,6 +3,7 @@ 1. Ensure working well when computation result is assigned to self. 2. Prevent duplicate-evaluation on expression with side-effect like random. """ + import math import numpy as np diff --git a/tests/python/test_test.py b/tests/python/test_test.py index d0b0fb4f50f0a..a81cb4c2e0906 100644 --- a/tests/python/test_test.py +++ b/tests/python/test_test.py @@ -3,6 +3,7 @@ TODO: Skips these tests after all tests are using @ti.test """ + import os import pytest diff --git a/tests/run_c_api_compat_test.py b/tests/run_c_api_compat_test.py index 725336b900701..32270a3d669b8 100644 --- a/tests/run_c_api_compat_test.py +++ b/tests/run_c_api_compat_test.py @@ -2,6 +2,7 @@ Ensure AOT modules compiled by old versions of Taichi is compatible with the latest Taichi Runtime. """ + import glob import os import subprocess