Skip to content

Commit a0f8797

Browse files
committed
fix clippy issues
1 parent cea1a31 commit a0f8797

File tree

3 files changed

+26
-32
lines changed

3 files changed

+26
-32
lines changed

src/dict_management.rs

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -8,14 +8,15 @@ use std::time::Duration;
88

99
use zstd::dict::{DecoderDictionary, EncoderDictionary};
1010

11+
type EncoderCache = LruCache<(usize, i32, i32), Arc<EncoderDictionary<'static>>>;
1112
// we cache the instantiated encoder dictionaries keyed by (DbConnection, dict_id, compression_level)
1213
// DbConnection would ideally be db.path() because it's the same for multiple connections to the same db, but that would be less robust (e.g. in-memory databases)
1314
// we use a Mutex and not a RwLock because even the .get() methods on LruCache need to write (to update expiry and least recently used time)
14-
static ENCODER_DICTS: LazyLock<
15-
Mutex<LruCache<(usize, i32, i32), Arc<EncoderDictionary<'static>>>>,
16-
> = LazyLock::new(|| Mutex::new(LruCache::with_expiry_duration(Duration::from_secs(10))));
15+
static ENCODER_DICTS: LazyLock<Mutex<EncoderCache>> =
16+
LazyLock::new(|| Mutex::new(LruCache::with_expiry_duration(Duration::from_secs(10))));
1717

18-
static DECODER_DICTS: LazyLock<Mutex<LruCache<(usize, i32), Arc<DecoderDictionary<'static>>>>> =
18+
type DecoderCache = LruCache<(usize, i32), Arc<DecoderDictionary<'static>>>;
19+
static DECODER_DICTS: LazyLock<Mutex<DecoderCache>> =
1920
LazyLock::new(|| Mutex::new(LruCache::with_expiry_duration(Duration::from_secs(10))));
2021

2122
/// when we open a new connection, it may reuse the same pointer location as an old connection, so we need to invalidate parts of the dict cache
@@ -60,7 +61,7 @@ pub fn encoder_dict_from_ctx(
6061
params![id],
6162
|r| r.get(0),
6263
)
63-
.with_context(|| format!("getting dict with id={} from _zstd_dicts", id))?;
64+
.with_context(|| format!("getting dict with id={id} from _zstd_dicts"))?;
6465
let dict = EncoderDictionary::copy(&dict_raw, level);
6566
Arc::new(dict)
6667
}),
@@ -98,7 +99,7 @@ pub fn decoder_dict_from_ctx(
9899
params![id],
99100
|r| r.get(0),
100101
)
101-
.with_context(|| format!("getting dict with id={} from _zstd_dicts", id))?;
102+
.with_context(|| format!("getting dict with id={id} from _zstd_dicts"))?;
102103
let dict = DecoderDictionary::copy(&dict_raw);
103104
Arc::new(dict)
104105
}),

src/transparent.rs

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -54,20 +54,20 @@ pub struct TransparentCompressConfig {
5454
/// Examples:
5555
///
5656
/// * `'a'`
57-
/// This will cause a single dictionary to be trained for everything.
57+
/// This will cause a single dictionary to be trained for everything.
5858
///
5959
/// * `strftime(created, '%Y-%m')`
60-
/// This will cause every month of data to be compressed with its own dictionary.
60+
/// This will cause every month of data to be compressed with its own dictionary.
6161
///
6262
/// * `nullif(strftime(created, '%Y-%m'), strftime('now', '%Y-%m'))`
6363
///
64-
/// The same as above, but if the entry is from the current month it will stay uncompressed.
65-
/// This is handy because it means that the dictionary for the month will only be created when the month is over
66-
/// and can thus be optimized the most for the given data
64+
/// The same as above, but if the entry is from the current month it will stay uncompressed.
65+
/// This is handy because it means that the dictionary for the month will only be created when the month is over
66+
/// and can thus be optimized the most for the given data.
6767
/// * `case when date(timestamp, ''weekday 0'') < date(''now'', ''weekday 0'') then data_type || ''.'' || date(timestamp, ''weekday 0'') else null end`
6868
///
69-
/// This one uses keys like data_type.2020-11-01` where the date is the first day of the week, except for the current week which stays uncompressed.
70-
/// This means that every different data_type will be compressed separately and separately for each week.
69+
/// This one uses keys like `data_type.2020-11-01` where the date is the first day of the week, except for the current week which stays uncompressed.
70+
/// This means that every different data_type will be compressed separately, and within each data_type every week will be compressed separately as well.
7171
///
7272
/// You can return the special string `[nodict]` to compress the given data without a dictionary.
7373
/// Note that the compression key is global for all tables. So if you want your dict to only apply to this table return
@@ -97,7 +97,7 @@ pub fn pretty_bytes(bytes: i64) -> String {
9797
} else if bytes >= 1_000 {
9898
format!("{:.2}kB", bytes as f64 / 1e3)
9999
} else {
100-
format!("{}B", bytes)
100+
format!("{bytes}B")
101101
}
102102
}
103103

@@ -161,13 +161,13 @@ pub fn zstd_enable_transparent<'a>(ctx: &Context) -> anyhow::Result<ToSqlOutput<
161161

162162
let config_str: String = ctx.get(arg_config)?;
163163
let config: TransparentCompressConfig = serde_json::from_str(&config_str)
164-
.with_context(|| format!("parsing json config '{}'", config_str))?;
164+
.with_context(|| format!("parsing json config '{config_str}'"))?;
165165
let db = &mut unsafe { ctx.get_connection()? };
166166
let db = db
167167
.unchecked_transaction()
168168
.context("Could not start transaction")?;
169169
let table_name = &config.table;
170-
let new_table_name = format!("_{}_zstd", table_name);
170+
let new_table_name = format!("_{table_name}_zstd");
171171

172172
let configs = get_configs(&db)?;
173173
let already_compressed_columns = configs
@@ -265,7 +265,7 @@ pub fn zstd_enable_transparent<'a>(ctx: &Context) -> anyhow::Result<ToSqlOutput<
265265
let to_compress_column = columns_info
266266
.iter()
267267
.find(|e| &e.name == column_name)
268-
.with_context(|| format!("Column {} does not exist in {}", column_name, table_name))?;
268+
.with_context(|| format!("Column {column_name} does not exist in {table_name}"))?;
269269
if to_compress_column.is_primary_key {
270270
anyhow::bail!(
271271
"Can't compress column {} since it is part of primary key (this could probably be supported, but currently isn't)",
@@ -287,7 +287,7 @@ pub fn zstd_enable_transparent<'a>(ctx: &Context) -> anyhow::Result<ToSqlOutput<
287287
// small sanity check of chooser statement
288288
db.query_row(&query, params![], |row| row.get::<_, String>(0))
289289
.optional()
290-
.with_context(|| format!("Tried to execute:\n{}", query))
290+
.with_context(|| format!("Tried to execute:\n{query}"))
291291
.context(r#"Dict chooser expression does not seem to be valid. Make sure you return a string and get your escaping right: If you want an sqlite string inside a json string inside a sqlite string you need to do '{"foo": "''bar''"}'"#)?;
292292
}
293293
{
@@ -376,7 +376,7 @@ pub fn zstd_enable_transparent<'a>(ctx: &Context) -> anyhow::Result<ToSqlOutput<
376376
}
377377

378378
fn get_dict_id(column_name: &str) -> String {
379-
format!("_{}_dict", column_name)
379+
format!("_{column_name}_dict")
380380
}
381381

382382
fn check_table_exists(db: &rusqlite::Connection, table_name: &str) -> bool {
@@ -496,7 +496,7 @@ fn create_insert_trigger(
496496
internal_table_name: &str,
497497
_config: &TransparentCompressConfig,
498498
) -> anyhow::Result<()> {
499-
let trigger_name = format!("{}_insert_trigger", table_name);
499+
let trigger_name = format!("{table_name}_insert_trigger");
500500

501501
// expressions that map backing table columns to view columns
502502
let mut insert_selection = vec![];
@@ -549,7 +549,7 @@ fn create_delete_trigger(
549549
internal_table_name: &str,
550550
primary_key_condition: &str,
551551
) -> anyhow::Result<()> {
552-
let trigger_name = format!("{}_delete_trigger", table_name);
552+
let trigger_name = format!("{table_name}_delete_trigger");
553553

554554
let deletetrigger_query = format!(
555555
"
@@ -839,7 +839,7 @@ fn maintenance_for_todo(
839839
":compact": COMPACT
840840
},
841841
)
842-
.with_context(|| format!("while compressing chunk for key {}", dict_choice))?;
842+
.with_context(|| format!("while compressing chunk for key {dict_choice}"))?;
843843

844844
total_updated += updated as i64;
845845
log::debug!("Compressed {} / {}", total_updated, todo.count);
@@ -985,7 +985,7 @@ mod tests {
985985
}
986986

987987
fn get_whole_table(db: &Connection, tbl_name: &str) -> anyhow::Result<Vec<Vec<Value>>> {
988-
let mut stmt = db.prepare(&format!("select * from {} ORDER BY id", tbl_name))?;
988+
let mut stmt = db.prepare(&format!("select * from {tbl_name} ORDER BY id"))?;
989989
let q1: Vec<Vec<Value>> = stmt
990990
.query_map(params![], |e| row_to_thong(e).map_err(ah))?
991991
.collect::<Result<_, rusqlite::Error>>()?;

src/util.rs

Lines changed: 2 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ macro_rules! format_sqlite {
2525
}
2626

2727
pub fn ah(e: anyhow::Error) -> rusqlite::Error {
28-
rusqlite::Error::UserFunctionError(format!("{:?}", e).into())
28+
rusqlite::Error::UserFunctionError(format!("{e:?}").into())
2929
}
3030

3131
/*pub fn debug_row(r: &rusqlite::Row) {
@@ -61,17 +61,10 @@ pub fn escape_sqlite_identifier(identifier: &str) -> String {
6161
format!("`{}`", identifier.replace('`', "``"))
6262
}
6363

64-
/**
65-
* this is needed sometimes because _parameters are not allowed in views_, so using prepared statements is not possible :/
66-
*/
67-
/*pub fn escape_sqlite_string(string: &str) -> String {
68-
format!("'{}'", string.replace("'", "''"))
69-
}*/
70-
7164
pub fn init_logging(default_level: LevelFilter) {
7265
if std::env::var("SQLITE_ZSTD_LOG").is_err() {
7366
// TODO: Audit that the environment access only happens in single-threaded code.
74-
unsafe { std::env::set_var("SQLITE_ZSTD_LOG", format!("{}", default_level)) };
67+
unsafe { std::env::set_var("SQLITE_ZSTD_LOG", format!("{default_level}")) };
7568
}
7669
env_logger::try_init_from_env(env_logger::Env::new().filter("SQLITE_ZSTD_LOG")).ok();
7770
}

0 commit comments

Comments
 (0)