Commit

Merge branch 'rust-ml:main' into main

AndersonYin authored Oct 10, 2024
2 parents 8071fb1 + e1dfd46 commit 856e593
Showing 10 changed files with 33 additions and 24 deletions.
.github/workflows/checking.yml: 2 changes (1 addition, 1 deletion)

@@ -10,7 +10,7 @@ jobs:
       fail-fast: false
       matrix:
         toolchain:
-          - 1.54.0
+          - 1.65.0
           - stable
           - nightly
         os:
.github/workflows/codequality.yml: 2 changes (1 addition, 1 deletion)

@@ -10,7 +10,7 @@ jobs:
     strategy:
       matrix:
         toolchain:
-          - 1.54.0
+          - 1.65.0
           - stable

     steps:
.github/workflows/testing.yml: 6 changes (3 additions, 3 deletions)

@@ -11,11 +11,11 @@ jobs:
       fail-fast: false
       matrix:
         toolchain:
-          - 1.54.0
+          - 1.65.0
          - stable
         os:
-          - ubuntu-18.04
-          - windows-2019
+          - ubuntu-latest
+          - windows-latest

     steps:
       - name: Checkout sources
CHANGELOG.md: 7 changes (7 additions, 0 deletions)

@@ -0,0 +1,7 @@
+# Changelog
+
+## [0.2.0] - 2024-08-28
+
+### Changed
+
+- Update ndarray to 0.16 along with associated dependencies
Cargo.toml: 11 changes (6 additions, 5 deletions)

@@ -1,27 +1,28 @@
 [package]
 name = "linfa-linalg"
-version = "0.1.0"
+version = "0.2.0"
 edition = "2018"
 authors = ["Yuhan Lin <yuhanliin@protonmail.com>"]
 license = "MIT/Apache-2.0"
 readme = "README.md"
 description = "Pure-Rust implementation of linear algebra routines for ndarray"
 repository = "https://github.com/rust-ml/linfa-linalg"
+rust-version = "1.65"

 keywords = ["ndarray", "matrix", "linalg"]
 categories = ["algorithms", "mathematics", "science"]

 [dependencies]
-ndarray = { version = "0.15", features = ["approx"] }
+ndarray = { version = "0.16", features = ["approx"] }
 num-traits = "0.2.0"
 thiserror = "1"
 rand = { version = "0.8", optional=true }

 [dev-dependencies]
-approx = "0.4"
+approx = "0.5"
 proptest = "1.0"
-proptest-derive = "0.2.0"
-ndarray-rand = "0.14"
+proptest-derive = "0.5.0"
+ndarray-rand = "0.15"
 rand_xoshiro = { version = "0.6" }

 [features]
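As context for the paired approx and ndarray bumps above: ndarray's "approx" feature implements the approx comparison traits for arrays, so the two crates have to agree on the approx major version. A minimal sketch under that assumption (the exact feature wiring in ndarray 0.16 is inferred from the manifest pairing, not stated in this diff):

```rust
use approx::abs_diff_eq;
use ndarray::array;

fn main() {
    let a = array![1.0, 2.0, 3.0];
    let b = array![1.0, 2.0, 3.0 + 1e-12];
    // With ndarray's "approx" feature enabled, AbsDiffEq is implemented for
    // ArrayBase, so approx's macros work on whole arrays.
    // (Assumes ndarray 0.16's "approx" feature targets approx 0.5, as the
    // version pairing in the manifest above suggests.)
    assert!(abs_diff_eq!(a, b, epsilon = 1e-9));
}
```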
src/lib.rs: 1 change (1 addition, 0 deletions)

@@ -10,6 +10,7 @@
 //! linker errors if no BLAS backend is specified.

 #![allow(clippy::many_single_char_names)]
+#![allow(clippy::result_large_err)]

 pub mod bidiagonal;
 pub mod cholesky;
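For context on the new allow: clippy::result_large_err fires when a function returns a Result whose error type is large enough that passing it by value is considered wasteful. A minimal sketch of the kind of code that trips the lint (the error type here is hypothetical, not the crate's own):

```rust
use thiserror::Error;

// Hypothetical error with a bulky variant, well over clippy's size threshold.
#[derive(Debug, Error)]
enum SolveError {
    #[error("solver did not converge after {iterations} iterations")]
    NotConverged { residuals: [f64; 32], iterations: usize },
}

// Without an allow, clippy suggests boxing the large error variant here.
fn solve() -> Result<(), SolveError> {
    Ok(())
}

fn main() {
    assert!(solve().is_ok());
}
```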
src/lobpcg/algorithm.rs: 18 changes (9 additions, 9 deletions)

@@ -1,10 +1,10 @@
+//! Locally Optimal Block Preconditioned Conjugated
+//!
+//! This module implements the Locally Optimal Block Preconditioned Conjugated (LOBPCG) algorithm,
+//! which can be used as a solver for large symmetric eigenproblems.
 use ndarray::concatenate;
 use ndarray::prelude::*;
 use num_traits::NumCast;
-///! Locally Optimal Block Preconditioned Conjugated
-///!
-///! This module implements the Locally Optimal Block Preconditioned Conjugated (LOBPCG) algorithm,
-///which can be used as a solver for large symmetric eigenproblems.
 use std::iter::Sum;

 use crate::{cholesky::*, eigh::*, norm::*, triangular::*};
@@ -100,15 +100,15 @@ fn orthonormalize<T: NdFloat>(v: Array2<T>) -> Result<(Array2<T>, Array2<T>)> {
 ///
 /// # Arguments
 /// * `a` - An operator defining the problem, usually a sparse (sometimes also dense) matrix
-/// multiplication. Also called the "stiffness matrix".
+///   multiplication. Also called the "stiffness matrix".
 /// * `x` - Initial approximation of the k eigenvectors. If `a` has shape=(n,n), then `x` should
-/// have shape=(n,k).
+///   have shape=(n,k).
 /// * `m` - Preconditioner to `a`, by default the identity matrix. Should approximate the inverse
-/// of `a`.
+///   of `a`.
 /// * `y` - Constraints of (n,size_y), iterations are performed in the orthogonal complement of the
-/// column-space of `y`. It must be full rank.
+///   column-space of `y`. It must be full rank.
 /// * `tol` - The tolerance values defines at which point the solver stops the optimization. The approximation
-/// of a eigenvalue stops when then l2-norm of the residual is below this threshold.
+///   of a eigenvalue stops when then l2-norm of the residual is below this threshold.
 /// * `maxiter` - The maximal number of iterations
 /// * `order` - Whether to solve for the largest or lowest eigenvalues
 ///
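As an aside, the documented parameters imply the following shapes. A small sketch that only builds such inputs with ndarray (the values are illustrative and the call to `lobpcg` itself is omitted, since the full signature is not shown in this hunk):

```rust
use ndarray::prelude::*;

fn main() {
    let (n, k) = (100usize, 3usize);
    // `a`: the symmetric "stiffness" operator; a dense diagonal matrix for illustration.
    let a = Array2::from_diag(&Array1::linspace(1.0, n as f64, n));
    // `x`: initial approximation of the k eigenvectors, shape (n, k).
    let x = Array2::from_shape_fn((n, k), |(i, j)| if i == j { 1.0 } else { 0.0 });
    // `m`: preconditioner, by default the identity; `y` (constraints) is simply left out.
    let m = Array2::<f64>::eye(n);
    assert_eq!((a.nrows(), x.dim(), m.nrows()), (n, (n, k), n));
}
```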
src/lobpcg/eig.rs: 2 changes (1 addition, 1 deletion)

@@ -274,7 +274,7 @@ impl<A: NdFloat + Sum, R: Rng> Iterator for TruncatedEigIterator<A, R> {
             let eigvecs_arr: Vec<_> = constraints
                 .columns()
                 .into_iter()
-                .chain(eigvecs.columns().into_iter())
+                .chain(eigvecs.columns())
                 .collect();

             stack(Axis(1), &eigvecs_arr).unwrap()
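The change above works because Iterator::chain accepts any IntoIterator, and ndarray's columns() already returns one, so the explicit into_iter() on the argument was redundant. A standalone sketch of the same pattern (arbitrary small matrices, not the crate's data):

```rust
use ndarray::prelude::*;

fn main() {
    let a = array![[1.0, 2.0], [3.0, 4.0]];
    let b = array![[5.0, 6.0], [7.0, 8.0]];
    // `columns()` yields a lane producer that implements IntoIterator,
    // so it can be passed to `chain` directly.
    let cols: Vec<ArrayView1<f64>> = a.columns().into_iter().chain(b.columns()).collect();
    assert_eq!(cols.len(), 4);
}
```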
src/lobpcg/mod.rs: 2 changes (1 addition, 1 deletion)

@@ -7,7 +7,7 @@
 //! ```
 //! where A is symmetric and (x, lambda) the solution. It has the following advantages:
 //! * matrix free: does not require storing the coefficient matrix explicitely and only evaluates
-//! matrix-vector products.
+//!   matrix-vector products.
 //! * factorization-free: does not require any matrix decomposition
 //! * linear-convergence: theoretically guaranteed and practically observed
 //!
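To illustrate the "matrix free" point in the doc above: the operator A only needs to be available as its action x -> A*x, never as stored coefficients. A self-contained sketch with a hypothetical tridiagonal operator (not part of the crate):

```rust
use ndarray::prelude::*;

// Matrix-free operator: A is never stored, only its action on a vector.
// Here A is a hypothetical 1-D Laplacian (tridiagonal), used purely as an example.
fn apply_a(x: ArrayView1<f64>) -> Array1<f64> {
    let n = x.len();
    Array1::from_shape_fn(n, |i| {
        let left = if i > 0 { x[i - 1] } else { 0.0 };
        let right = if i + 1 < n { x[i + 1] } else { 0.0 };
        2.0 * x[i] - left - right
    })
}

fn main() {
    let x = Array1::ones(8);
    let y = apply_a(x.view());
    // Interior entries cancel to zero; only the boundary rows are nonzero.
    assert_eq!(y[0], 1.0);
    assert_eq!(y[3], 0.0);
}
```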
src/lobpcg/svd.rs: 6 changes (3 additions, 3 deletions)

@@ -1,6 +1,6 @@
-///! Truncated singular value decomposition
-///!
-///! This module computes the k largest/smallest singular values/vectors for a dense matrix.
+//! Truncated singular value decomposition
+//!
+//! This module computes the k largest/smallest singular values/vectors for a dense matrix.
 use crate::{
     lobpcg::{lobpcg, random, Lobpcg},
     Order, Result,
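Both this file and algorithm.rs above make the same fix: `///!` is not an inner-doc syntax of its own; it parses as an outer `///` comment whose text begins with '!', attached to the next item, so module-level documentation belongs in `//!`. A tiny sketch of the two forms (illustrative names only):

```rust
//! Inner doc comment: documents the enclosing module or crate,
//! which is what the diffs above switch to.

/// Outer doc comment: documents the item that follows it.
pub fn documented_item() {}

fn main() {
    documented_item();
}
```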
