Fix non-structural Clippy warns #441

Open · wants to merge 4 commits into main
@@ -30,7 +30,7 @@ impl CognitoClient {
.group_name(group.to_string())
.send()
.await
- .map_err(|e| anyhow::Error::new(e))
+ .map_err(anyhow::Error::new)
.map(|_x| ())
}
}
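This hunk is clippy's `redundant_closure` lint: a closure that only forwards its argument can be replaced by the function it calls. A minimal, std-only sketch of the same pattern (illustrative names, not from this codebase):

```rust
fn main() {
    let nums = ["1", "2", "3"];

    // Flagged by clippy::redundant_closure: the closure only forwards `s`.
    let before: Vec<Result<i32, _>> = nums.into_iter().map(|s| str::parse::<i32>(s)).collect();

    // Preferred: name the function directly, as the hunk does with anyhow::Error::new.
    let after: Vec<Result<i32, _>> = nums.into_iter().map(str::parse::<i32>).collect();

    assert_eq!(before, after);
}
```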
1 change: 0 additions & 1 deletion admin-event-handlers/src/auth/post-confirmation/main.rs
@@ -8,7 +8,6 @@ use aws_lambda_events::cognito::{
CognitoEventUserPoolsPostConfirmation,
CognitoEventUserPoolsPostConfirmationResponse as CognitoPostConfirmationResponse,
};
- use cognito_idp_operations::CognitoClient;
use google_sheets_operations::SheetInterpretation;
use itertools::Itertools;
use lambda_runtime::{service_fn, Error, LambdaEvent};
11 changes: 5 additions & 6 deletions graphql/src/query.rs
@@ -10,12 +10,11 @@ use dailp::{
use itertools::Itertools;

use {
- dailp::async_graphql::{self, dataloader::DataLoader, Context, FieldResult, Guard, Object},
+ dailp::async_graphql::{self, dataloader::DataLoader, Context, FieldResult},
dailp::{
AnnotatedDoc, AnnotatedFormUpdate, CherokeeOrthography, Database, EditedCollection,
MorphemeId, MorphemeReference, MorphemeTag, ParagraphUpdate, WordsInDocument,
},
- serde::{Deserialize, Serialize},
};

/// Home for all read-only queries
@@ -256,8 +255,8 @@ impl Query {
.into_group_map();

Ok(clusters
- .into_iter()
- .map(|(_, forms)| {
+ .into_values()
+ .map(|forms| {
let dates = forms.iter().filter_map(|f| f.date_recorded.as_ref());
let start = dates.clone().min();
let end = dates.max();
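Discarding keys while consuming a map is what clippy rewrites to `into_values()` (likely the `iter_kv_map` lint). A self-contained sketch with a plain `HashMap` standing in for the `into_group_map()` result:

```rust
use std::collections::HashMap;

fn main() {
    let mut clusters: HashMap<i64, Vec<&str>> = HashMap::new();
    clusters.entry(1).or_default().push("form-a");
    clusters.entry(2).or_default().push("form-b");

    // Before: iterate key-value pairs and throw the key away.
    let before: usize = clusters.clone().into_iter().map(|(_, forms)| forms.len()).sum();
    // After: ask for the values directly.
    let after: usize = clusters.into_values().map(|forms| forms.len()).sum();

    assert_eq!(before, after);
}
```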
@@ -439,8 +438,8 @@ impl Mutation {
if comment_object.posted_by.id.0 != user.id.to_string() {
return Err("User attempted to edit another user's comment".into());
}
-
- db.update_comment(comment).await;
+ // Note: We should probably handle an error here more gracefully.
+ let _ = db.update_comment(comment).await;

// We return the parent object, for GraphCache interop
return comment_object.parent(context).await;
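The `let _ =` added above presumably silences the `#[must_use]` warning on the `Result` returned by `update_comment`, making the decision to ignore it explicit; the new comment concedes this deserves real handling. A tiny sketch with a hypothetical `save` function:

```rust
// Hypothetical stand-in for a fallible database call.
fn save(value: i32) -> Result<(), String> {
    if value < 0 {
        return Err("negative values are not saved".to_string());
    }
    Ok(())
}

fn main() {
    // `Result` is #[must_use], so a bare `save(42);` statement would warn.
    // `let _ =` acknowledges and discards the outcome explicitly.
    let _ = save(42);
}
```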
2 changes: 1 addition & 1 deletion graphql/src/server.rs
@@ -110,7 +110,7 @@ impl Endpoint<()> for AuthedEndpoint {
.and_then(|values| values.iter().next())
.and_then(
|value| match cognito::user_info_from_authorization(value.as_str()) {
- Ok(value) => Some(value.into()),
+ Ok(value) => Some(value),
Err(err) => {
error!("{:?}", err);
None
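Dropping `.into()` matches clippy's `useless_conversion` lint, which fires when a value is converted into its own type. Sketch:

```rust
fn main() {
    let value = String::from("user-info");

    // Flagged by clippy::useless_conversion: String into String is a no-op.
    let converted: String = value.clone().into();
    let direct: String = value;

    assert_eq!(converted, direct);
}
```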
10 changes: 3 additions & 7 deletions migration/src/audio.rs
@@ -190,7 +190,7 @@ impl DrsRes {
if val.is_none() {
return String::from("");
}
- return val.unwrap().clone();
+ val.unwrap().clone()
}
}
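The change above is clippy's `needless_return`: the final expression of a block is already its value, so the trailing `return` is redundant (early returns stay). A compilable sketch of an accessor in this shape (signature assumed):

```rust
fn label(val: Option<&String>) -> String {
    if val.is_none() {
        return String::from(""); // early return: fine
    }
    val.unwrap().clone() // final expression: no `return` needed
}

fn main() {
    let s = String::from("drs");
    assert_eq!(label(Some(&s)), "drs");
    assert_eq!(label(None), "");
}
```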

@@ -238,9 +238,7 @@ impl AudioRes {

let cf_domain = std::env::var("CF_URL")?;
let s3_location = format!("https://{}", cf_domain);
- let is_drs_key = |test_value: &str| -> bool {
- return test_value.contains("neu");
- };
+ let is_drs_key = |test_value: &str| -> bool { test_value.contains("neu") };
// FIXME: this could be refactored, esp. for annotations
return Ok(Self {
audio_url: if is_drs_key(audio_ref_key) {
@@ -313,9 +311,7 @@ impl AudioRes {
/// # Errors
/// Fails if annotation field does not meet structural requirements or is empty.
pub fn into_audio_slices(self) -> Option<Vec<AudioSlice>> {
- if self.annotations.is_none() {
- return None;
- }
+ self.annotations.as_ref()?;
let mut result: Vec<AudioSlice> = vec![];
use csv::ReaderBuilder;
let parse = self.annotations.unwrap();
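Replacing the `is_none()` check with `self.annotations.as_ref()?;` is clippy's `question_mark` idiom: `?` on an `Option` early-returns `None`. A sketch with a stand-in struct:

```rust
struct AudioRow {
    annotations: Option<String>,
}

impl AudioRow {
    fn slice_count(&self) -> Option<usize> {
        // Equivalent to: if self.annotations.is_none() { return None; }
        let parse = self.annotations.as_ref()?;
        Some(parse.split(';').count())
    }
}

fn main() {
    let row = AudioRow { annotations: Some("0:01;0:02".to_string()) };
    assert_eq!(row.slice_count(), Some(2));
    assert_eq!(AudioRow { annotations: None }.slice_count(), None);
}
```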
8 changes: 2 additions & 6 deletions migration/src/early_vocab.rs
@@ -137,7 +137,7 @@ async fn parse_early_vocab(
// Convert the normalized source to simple phonetics.
normalized_source
.as_ref()
- .map(|s| dailp::PhonemicString::parse_crg(&s).into_learner())
+ .map(|s| dailp::PhonemicString::parse_crg(s).into_learner())
});

let commentary = if has_notes {
@@ -177,11 +177,7 @@ async fn parse_early_vocab(
english_gloss: vec![gloss],
line_break: None,
page_break: None,
- position: dailp::PositionInDocument::new(
- doc_id.clone(),
- page_number,
- index as i64 + 1,
- ),
+ position: dailp::PositionInDocument::new(doc_id, page_number, index as i64 + 1),
date_recorded: meta.date.clone(),
id: None,
ingested_audio_track: None,
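Passing `doc_id` by value without `.clone()` suggests the ID type is `Copy`, in which case clippy's `clone_on_copy` flags the call as noise. A sketch under that assumption (`DocId` and `position` are hypothetical):

```rust
// Hypothetical Copy ID type standing in for dailp's DocumentId.
#[derive(Clone, Copy, Debug, PartialEq)]
struct DocId(u32);

fn position(doc: DocId, page: u32, index: i64) -> (DocId, u32, i64) {
    (doc, page, index)
}

fn main() {
    let doc_id = DocId(7);
    let a = position(doc_id.clone(), 1, 1); // flagged by clippy::clone_on_copy
    let b = position(doc_id, 1, 1);         // implicit copy; doc_id remains usable
    assert_eq!(a, b);
}
```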
3 changes: 0 additions & 3 deletions migration/src/edited_collection.rs
@@ -1,7 +1,4 @@
use crate::spreadsheets::SheetInterpretation;
use anyhow::Result;
use dailp::raw::CollectionChapter;
use dailp::raw::EditedCollection;
use dailp::Database;
use dailp::SheetResult;

10 changes: 5 additions & 5 deletions migration/src/lexical.rs
@@ -181,7 +181,7 @@ async fn parse_numerals(
let _numeric = values.next()?;
let simple_phonetics = values.next()?;
let syllabary = values.next()?;
- let position = PositionInDocument::new(doc_id.clone(), page_num, key);
+ let position = PositionInDocument::new(doc_id, page_num, key);
let segments = vec![WordSegment::new(root_dailp, gloss.clone(), None)];
Some(AnnotatedForm {
id: None,
@@ -314,15 +314,15 @@ fn parse_new_df1975(
.filter(|cols| cols.len() > 4 && !cols[2].is_empty())
.group_by(|columns| {
columns
- .get(0)
+ .first()
.and_then(|s| s.split(",").next().unwrap().parse::<i64>().ok())
})
.into_iter()
.enumerate()
// The rest are relevant to the verb itself.
.filter_map(move |(index, (key, rows))| {
let rows: Vec<_> = rows.collect();
- let columns = rows.get(0)?.clone();
+ let columns = rows.first()?.clone();

// The columns are as follows: key, page number, root, root gloss,
// translations 1, 2, 3, transitivity, UDB class, blank, surface forms.
@@ -338,7 +338,7 @@
.filter(|s| !s.is_empty())
.collect();
let date = Date::from_ymd(year, 1, 1);
- let pos = PositionInDocument::new(doc_id.clone(), page_number, key);
+ let pos = PositionInDocument::new(doc_id, page_number, key);
let mut form_cells = rows
.into_iter()
.flat_map(|row| row.into_iter().skip(4 + translations + after_root));
@@ -429,7 +429,7 @@ async fn ingest_ac1995(db: &Database, sheet_id: &str) -> Result<()> {
let _romanized = row.next()?;
let normalized = row.next()?;
let translation = row.next()?;
- let pos = PositionInDocument::new(meta.id.clone(), "1".to_owned(), index);
+ let pos = PositionInDocument::new(meta.id, "1".to_owned(), index);
Some(AnnotatedForm {
id: None,
simple_phonetics: Some(normalized),
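The `.get(0)` → `.first()` swaps in this file follow clippy's `get_first` lint; both return `Option<&T>` and agree on empty slices:

```rust
fn main() {
    let cols = vec!["42", "page-3", "root"];

    let by_index = cols.get(0); // flagged by clippy::get_first
    let by_name = cols.first(); // same result, clearer intent

    assert_eq!(by_index, by_name);
    assert_eq!(by_name, Some(&"42"));
}
```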
62 changes: 26 additions & 36 deletions migration/src/spreadsheets.rs
@@ -9,18 +9,16 @@ use dailp::collection::CollectionSection::Body;
use dailp::collection::CollectionSection::Credit;
use dailp::collection::CollectionSection::Intro;

- use dailp::SheetResult;
use dailp::{
convert_udb, root_noun_surface_forms, root_verb_surface_forms, slugify_ltree, AnnotatedDoc,
- AnnotatedForm, AnnotatedSeg, AudioSlice, Contributor, Database, Date, DocumentId,
- DocumentMetadata, LexicalConnection, LineBreak, MorphemeId, PageBreak, Uuid, WordSegment,
+ AnnotatedForm, AnnotatedSeg, Contributor, Date, DocumentId, DocumentMetadata, LineBreak,
+ MorphemeId, WordSegment,
};
use dailp::{PositionInDocument, SourceAttribution};
use itertools::Itertools;
- use log::{error, info, warn};
+ use log::info;
use serde::{Deserialize, Serialize};
- use std::{collections::HashMap, fs::File, io::Write, time::Duration};
- use tokio::time::sleep;
+ use std::{collections::HashMap, fs::File, io::Write};

// Define the delimiters used in spreadsheets for marking phrases, blocks,
// lines, and pages.
@@ -71,7 +69,7 @@ impl SheetInterpretation {
// This is, in fact, a file path.
let start = start + 3;
let (_, rest) = input.split_at(start);
- let end = rest.find('/').unwrap_or_else(|| rest.len());
+ let end = rest.find('/').unwrap_or(rest.len());
rest.split_at(end).0
} else {
// This is probably already a bare ID. Anyway, we couldn't parse it.
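Swapping `unwrap_or_else(|| rest.len())` for `unwrap_or(rest.len())` matches clippy's `unnecessary_lazy_evaluations`: when the fallback is cheap and side-effect free, the closure buys nothing. Sketch:

```rust
fn main() {
    let rest = "abc123/track.mp3";

    // Flagged: the closure lazily computes a value that is cheap anyway.
    let lazy = rest.find('/').unwrap_or_else(|| rest.len());
    // Preferred:
    let eager = rest.find('/').unwrap_or(rest.len());

    assert_eq!(lazy, eager);
    assert_eq!(&rest[..eager], "abc123");
}
```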
@@ -118,7 +116,7 @@ impl SheetInterpretation {
let mut chapter_type = 0;
for cur_row in row {
if cur_row[0].is_empty() {
- chapter_type = chapter_type + 1;
+ chapter_type += 1;
} else {
let mut row_values = cur_row.into_iter().peekable();

@@ -200,7 +198,7 @@ impl SheetInterpretation {
let page_number = root_values.next()?;
let mut form_values = root_values;
let date = Date::from_ymd(year, 1, 1);
- let position = PositionInDocument::new(doc_id.clone(), page_number, key);
+ let position = PositionInDocument::new(doc_id, page_number, key);
Some(LexicalEntryWithForms {
forms: root_verb_surface_forms(
&position,
@@ -251,13 +249,13 @@ impl SheetInterpretation {
// First two rows are simply headers.
.skip(2)
.filter(|cols| cols.len() > 4 && !cols[2].is_empty())
- .group_by(|cols| cols.get(0).and_then(|s| s.parse::<i64>().ok()))
+ .group_by(|cols| cols.first().and_then(|s| s.parse::<i64>().ok()))
.into_iter()
.enumerate()
// The rest are relevant to the noun itself.
.filter_map(|(index, (key, rows))| {
let rows: Vec<_> = rows.collect();
- let columns = rows.get(0)?.clone();
+ let columns = rows.first()?.clone();
// The columns are as follows: key, root, root gloss, page ref,
// category, tags, surface forms.

@@ -277,7 +275,7 @@
.into_iter()
.flat_map(|row| row.into_iter().skip(4 + after_root));
let date = Date::from_ymd(year, 1, 1);
- let position = PositionInDocument::new(doc_id.clone(), page_number?, index);
+ let position = PositionInDocument::new(doc_id, page_number?, index);
Some(LexicalEntryWithForms {
forms: root_noun_surface_forms(&position, &date, &mut form_values, has_comment),
entry: AnnotatedForm {
@@ -361,8 +359,7 @@ impl SheetInterpretation {
.next()
.ok_or_else(|| anyhow::format_err!("Missing image source row"))?
.into_iter()
- .skip(1)
- .next()
+ .nth(1)
.map(|src| src.to_ascii_lowercase());
let image_ids = values
.next()
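`.skip(1).next()` is exactly `.nth(1)`; this is clippy's `iter_skip_next` lint:

```rust
fn main() {
    let row = vec!["header", "image-source", "ids"];

    let skipped = row.iter().skip(1).next(); // flagged by clippy::iter_skip_next
    let nth = row.iter().nth(1); // equivalent and clearer

    assert_eq!(skipped, nth);
    assert_eq!(nth, Some(&"image-source"));
}
```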
@@ -561,32 +558,32 @@ impl<'a> AnnotatedLine {
let line_num = line_idx + 1;
let source_row = line
.rows
- .get(0)
- .expect(&format!("No source row for line {}", line_num))
+ .first()
+ .unwrap_or_else(|| panic!("No source row for line {}", line_num))
let simple_phonetics_row = line
.rows
.get(2)
.expect(&format!("No simple phonetics for line {}", line_num));
.unwrap_or_else(|| panic!("No simple phonetics for line {}", line_num));
let phonemic_row = line
.rows
.get(3)
.expect(&format!("No phonemic representation for line {}", line_num));
.unwrap_or_else(|| panic!("No phonemic representation for line {}", line_num));
let morpheme_row = line
.rows
.get(4)
.expect(&format!("No morphemic segmentation for line {}", line_num));
.unwrap_or_else(|| panic!("No morphemic segmentation for line {}", line_num));
let gloss_row = line
.rows
.get(5)
.expect(&format!("No morphemic gloss for line {}", line_num));
.unwrap_or_else(|| panic!("No morphemic gloss for line {}", line_num));
let translation_row = line
.rows
.get(6)
.expect(&format!("No translation for line {}", line_num));
.unwrap_or_else(|| panic!("No translation for line {}", line_num));
let commentary_row = line
.rows
.get(7)
.expect(&format!("No commentary for line {}", line_num));
.unwrap_or_else(|| panic!("No commentary for line {}", line_num));
// For each word, extract the necessary data from every row.
let words: Result<Vec<_>> = (0..num_words)
// Only use words with a syllabary source entry.
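The repeated `expect(&format!(...))` → `unwrap_or_else(|| panic!(...))` rewrites above follow clippy's `expect_fun_call`: the `format!` string was being allocated even when the row was present, while `unwrap_or_else` defers building the message to the failure path. A simplified sketch of one accessor (names assumed):

```rust
fn source_row<'a>(rows: &[&'a str], line_num: usize) -> &'a str {
    // Before (flagged): rows.first().expect(&format!("No source row for line {}", line_num))
    rows.first()
        .copied()
        .unwrap_or_else(|| panic!("No source row for line {}", line_num))
}

fn main() {
    let rows = ["syllabary", "phonetics"];
    assert_eq!(source_row(&rows, 1), "syllabary");
}
```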
@@ -601,11 +598,7 @@
// TODO Extract into public function!
// id: format!("{}.{}", meta.id.0, word_index),
id: None,
- position: PositionInDocument::new(
- meta.id.clone(),
- "1".to_owned(),
- word_index,
- ),
+ position: PositionInDocument::new(meta.id, "1".to_owned(), word_index),
source: source_text.trim().replace(LINE_BREAK, ""),
normalized_source: None,
simple_phonetics: simple_phonetics_row
@@ -618,10 +611,7 @@
} else {
None
},
- english_gloss: vec![translation]
- .into_iter()
- .filter_map(|x| x)
- .collect(),
+ english_gloss: vec![translation].into_iter().flatten().collect(),
commentary: commentary_row.items.get(i).map(|x| x.to_owned()),
page_break: pb.map(|i| i as i32),
line_break: pb
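`.filter_map(|x| x)` over `Option`s is the identity filter, which clippy's `filter_map_identity` reduces to `.flatten()`:

```rust
fn main() {
    let glosses = vec![Some("water"), None, Some("fire")];

    let before: Vec<&str> = glosses.clone().into_iter().filter_map(|x| x).collect();
    let after: Vec<&str> = glosses.into_iter().flatten().collect();

    assert_eq!(before, after);
    assert_eq!(after, ["water", "fire"]);
}
```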
@@ -670,7 +660,7 @@ impl<'a> AnnotatedLine {
let mut line_num = 0;
let mut page_num = 0;
let mut word_idx = 1;
- let mut seg_idx = 1;
+ let seg_idx = 1;
let mut block_idx = 1;
let mut pages = vec![vec![vec![]]];

@@ -693,7 +683,7 @@
// Give the word an index within the whole document.
let word = AnnotatedForm {
position: PositionInDocument::new(
- document_id.clone(),
+ *document_id,
(page_num + 1).to_string(),
word_idx,
),
@@ -708,7 +698,7 @@
line_num += 1;
}

- let mut source = &word.source.trim()[..];
+ let mut source = word.source.trim();
// Check for the start of a block.
while source.starts_with(BLOCK_START) {
source = &source[1..];
@@ -731,7 +721,7 @@
source = &source[..source.len() - 1];
blocks_to_pop += 1;
}
- let mut count_to_pop = 0;
+ let count_to_pop = 0;
// while source.ends_with(PHRASE_END) {
// source = &source[..source.len() - 1];
// count_to_pop += 1;
@@ -740,7 +730,7 @@
let finished_word = AnnotatedSeg::Word(AnnotatedForm {
source: source.to_owned(),
line_break: word.line_break.map(|_| line_num as i32),
- page_break: word.page_break.map(|_| page_num as i32),
+ page_break: word.page_break.map(|_| page_num),
date_recorded: date.clone(),
..word
});
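One more pattern from this file: `&word.source.trim()[..]` re-slices a `&str` it already has, which clippy's `redundant_slicing` reduces to `word.source.trim()`:

```rust
fn main() {
    let source = String::from("  ᏣᎳᎩ  ");

    let sliced: &str = &source.trim()[..]; // flagged by clippy::redundant_slicing
    let plain: &str = source.trim();       // same slice, no ceremony

    assert_eq!(sliced, plain);
}
```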
1 change: 0 additions & 1 deletion migration/src/tags.rs
@@ -1,6 +1,5 @@
use anyhow::Result;
use dailp::{AbstractMorphemeTag, Database, MorphemeTag, SheetResult, Uuid, WordSegmentRole};
use log::info;

/// Cherokee has many functional morphemes that are documented.
/// Pulls all the details we have about each morpheme from our spreadsheets,
2 changes: 1 addition & 1 deletion types/src/audio.rs
@@ -7,7 +7,7 @@ use sqlx::types::Uuid;
#[derive(Clone, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, async_graphql::NewType)]
pub struct DocumentAudioId(pub String);

- // An ID for an audio slice
+ /// An ID for an audio slice
#[derive(Clone, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, async_graphql::NewType)]
pub struct AudioSliceId(pub String);

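The hunk above turns `//` into `///`: only the triple-slash form is a doc comment that rustdoc attaches to the following item. Minimal sketch:

```rust
/// An ID for an audio slice. Because this is a `///` doc comment, rustdoc
/// renders it on the item below; a plain `//` comment would be dropped.
pub struct AudioSliceId(pub String);

fn main() {
    let id = AudioSliceId(String::from("slice-1"));
    println!("{}", id.0);
}
```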