diff --git a/CHANGELOG.md b/CHANGELOG.md
index f2ebd3e..f8ad8c8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,54 @@
All notable changes to this project are documented in this file.
+## [2.0.0] - 2026-01-24
+
+### Major Release — Unified API
+
+This release completes the migration to a **single-module architecture**, providing a simpler, more intuitive API while maintaining all existing functionality.
+
+### Breaking Changes
+
+- **Module consolidation**: `str/core`, `str/extra`, and `str/tokenize` are now internal. Import `import str` instead.
+- **Removed deprecated public APIs** from `str/core`, `str/extra`, and `str/tokenize`.
+- **Type re-exports**: `SearchStrategy` and `FillPosition` types are now exported directly from `str`.
+
+### Added
+
+- **Unified entry point**: All functions are now accessible via `import str`.
+- **New `str/advanced` module** for power users who need fine-grained control:
+ - `build_kmp_maps/1` — Build KMP prefix/lookup maps for a pattern.
+ - `kmp_search_all_with_maps/3` — KMP search with pre-built maps.
+ - `kmp_index_of_with_maps/4` — KMP index with pre-built maps.
+ - `kmp_index_of/2`, `kmp_search_all/2` — Direct KMP algorithm access.
+ - `sliding_index_of/2`, `sliding_search_all/2` — Direct sliding-window access.
+ - `choose_search_strategy/2` — Expose the library's heuristic chooser.
+- **Alias functions** for clarity:
+ - `index_of_simple/2` — Explicit simple/stable implementation.
+ - `count_simple/3` — Explicit simple/stable implementation.
+- **Re-exported types** from main module:
+ - `str.SearchStrategy` with constructors `str.Kmp`, `str.Sliding`.
+ - `str.FillPosition` with constructors `str.Left`, `str.Right`, `str.Both`.
+
+### Changed
+
+- **Default search behavior**: `index_of/2` and `count/3` remain simple/stable implementations. Use `index_of_auto/2` and `count_auto/3` for heuristic-based auto-optimization.
+- **Internal restructure**: Implementation details moved to `str/internal/*` modules.
+- **Improved documentation**: Module-level docs with comprehensive examples.
+
+### Deprecated
+
+- **`str/core`, `str/extra`, `str/tokenize`**: These modules are now internal. All public APIs have been moved to `str` or `str/advanced`.
+
+### Notes
+
+- Zero OTP dependencies maintained.
+- Grapheme-aware behavior unchanged.
+
+Contributed by: Daniele (`lupodevelop`)
+
+---
+
## [1.3.0] - 2026-01-09
### Deprecated
- Deprecated public APIs in internal modules (`str/core`, `str/extra`, and `str/tokenize`) in
diff --git a/README.md b/README.md
index 765b690..136dd27 100644
--- a/README.md
+++ b/README.md
@@ -11,6 +11,7 @@
"
/// unescape_html("Tom &amp; Jerry") -> "Tom & Jerry"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn unescape_html(text: String) -> String {
odysseus.unescape(text)
}
@@ -1860,7 +1788,6 @@ pub fn unescape_html(text: String) -> String {
/// escape_regex("[test]") -> "\\[test\\]"
/// escape_regex("a+b*c?") -> "a\\+b\\*c\\?"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn escape_regex(text: String) -> String {
text
|> string.replace("\\", "\\\\")
@@ -1891,7 +1818,6 @@ pub fn escape_regex(text: String) -> String {
/// similarity("abc", "xyz") -> 0.0
/// similarity("", "") -> 1.0
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn similarity(a: String, b: String) -> Float {
let a_len = grapheme_len(a)
let b_len = grapheme_len(b)
@@ -1918,7 +1844,6 @@ pub fn similarity(a: String, b: String) -> Float {
/// hamming_distance("hello", "hallo") -> Ok(1)
/// hamming_distance("abc", "ab") -> Error(Nil)
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn hamming_distance(a: String, b: String) -> Result(Int, Nil) {
let a_chars = string.to_graphemes(a)
let b_chars = string.to_graphemes(b)
@@ -1958,7 +1883,6 @@ pub type FillPosition {
/// fill("hi", 6, "*", Right) -> "hi****"
/// fill("x", 5, "-", Both) -> "--x--"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn fill(
text: String,
width: Int,
@@ -2206,12 +2130,10 @@ fn kmp_fallback_j(
}
/// Public wrappers accepting `String` inputs for easier testing.
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn build_prefix_table(pattern: String) -> List(Int) {
build_prefix_table_list(string.to_graphemes(pattern))
}
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn kmp_search_all(text: String, pattern: String) -> List(Int) {
kmp_search_all_list(string.to_graphemes(text), string.to_graphemes(pattern))
}
@@ -2220,7 +2142,6 @@ pub fn kmp_search_all(text: String, pattern: String) -> List(Int) {
/// searches. Returns a tuple `#(pmap, pimap)` where:
/// - `pmap` is a `Dict(Int, String)` mapping index -> pattern grapheme
/// - `pimap` is a `Dict(Int, Int)` mapping index -> prefix table value
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn build_kmp_maps(
pattern: String,
) -> #(dict.Dict(Int, String), dict.Dict(Int, Int)) {
@@ -2235,7 +2156,6 @@ pub fn build_kmp_maps(
/// KMP search using precomputed `pmap` and `pimap`. Useful when the same
/// pattern is searched against many texts to avoid rebuilding maps repeatedly.
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn kmp_search_all_with_maps(
text: String,
pmap: dict.Dict(Int, String),
@@ -2339,7 +2259,6 @@ fn sliding_search_all_list(
}
}
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn sliding_search_all(text: String, pattern: String) -> List(Int) {
sliding_search_all_list(
string.to_graphemes(text),
@@ -2388,7 +2307,6 @@ fn sliding_index_loop(
}
}
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn sliding_index_of(text: String, pattern: String) -> Result(Int, Nil) {
sliding_index_of_list(string.to_graphemes(text), string.to_graphemes(pattern))
}
@@ -2458,14 +2376,12 @@ fn kmp_index_loop(
}
}
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn kmp_index_of(text: String, pattern: String) -> Result(Int, Nil) {
kmp_index_of_list(string.to_graphemes(text), string.to_graphemes(pattern))
}
/// KMP index search using precomputed `pmap` and `pimap`. Useful for repeated
/// searches with the same pattern.
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn kmp_index_of_with_maps(
text: String,
pattern: String,
@@ -2554,7 +2470,6 @@ fn choose_search_strategy_list(
}
}
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn choose_search_strategy(text: String, pattern: String) -> SearchStrategy {
choose_search_strategy_list(
string.to_graphemes(text),
diff --git a/src/str/internal_decompose.gleam b/src/str/internal/decompose.gleam
similarity index 100%
rename from src/str/internal_decompose.gleam
rename to src/str/internal/decompose.gleam
diff --git a/src/str/extra.gleam b/src/str/internal/extra.gleam
similarity index 88%
rename from src/str/extra.gleam
rename to src/str/internal/extra.gleam
index cefb8f1..4a47ca9 100644
--- a/src/str/extra.gleam
+++ b/src/str/internal/extra.gleam
@@ -7,8 +7,8 @@
import gleam/list
import gleam/string
-import str/internal_decompose
-import str/internal_translit
+import str/internal/decompose
+import str/internal/translit
/// Core ASCII folding implementation with optional decomposition.
/// Applies replacement tables, optionally decomposes Latin chars and removes
@@ -24,7 +24,7 @@ import str/internal_translit
///
fn ascii_fold_full(s: String, decompose: Bool) -> String {
// Use the centralized replacement table from the internal module.
- let reps = internal_translit.replacements()
+ let reps = translit.replacements()
// Apply replacement table first (handles precomposed characters)
let replaced =
@@ -40,8 +40,8 @@ fn ascii_fold_full(s: String, decompose: Bool) -> String {
True -> {
let after_decompose =
replaced
- |> internal_decompose.decompose_latin
- |> internal_translit.remove_combining_marks
+ |> decompose.decompose_latin
+ |> translit.remove_combining_marks
// Second pass: catch precomposed chars that didn't match initially
// because they had combining marks attached (string.replace matches
@@ -62,7 +62,6 @@ fn ascii_fold_full(s: String, decompose: Bool) -> String {
/// ascii_fold("straße") -> "strasse"
/// ascii_fold("Crème Brûlée") -> "Creme Brulee"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn ascii_fold(s: String) -> String {
ascii_fold_full(s, True)
}
@@ -72,7 +71,6 @@ pub fn ascii_fold(s: String) -> String {
///
/// ascii_fold_no_decompose("café") -> "cafe"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn ascii_fold_no_decompose(s: String) -> String {
ascii_fold_full(s, False)
}
@@ -90,9 +88,8 @@ pub fn ascii_fold_no_decompose(s: String) -> String {
///
/// ascii_fold_with_normalizer("café", my_nfd) -> "cafe"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn ascii_fold_with_normalizer(s: String, normalizer) -> String {
- let reps = internal_translit.replacements()
+ let reps = translit.replacements()
let replaced =
list.fold(reps, s, fn(acc, pair) {
@@ -102,9 +99,9 @@ pub fn ascii_fold_with_normalizer(s: String, normalizer) -> String {
let after_normalize =
replaced
- |> internal_decompose.decompose_latin
+ |> decompose.decompose_latin
|> normalizer
- |> internal_translit.remove_combining_marks
+ |> translit.remove_combining_marks
// Second pass for chars that didn't match due to attached combining marks
list.fold(reps, after_normalize, fn(acc, pair) {
@@ -123,9 +120,8 @@ pub fn ascii_fold_with_normalizer(s: String, normalizer) -> String {
/// 2. Apply custom normalizer (may produce new precomposed chars)
/// 3. Apply replacement table again (catch newly composed chars)
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn ascii_fold_no_decompose_with_normalizer(s: String, normalizer) -> String {
- let reps = internal_translit.replacements()
+ let reps = translit.replacements()
// First pass: handle precomposed characters in the input
let replaced =
@@ -184,7 +180,6 @@ fn is_alnum_grapheme(g: String) -> Bool {
/// slugify("Hello, World!") -> "hello-world"
/// slugify("Café & Bar") -> "cafe-bar"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn slugify(s: String) -> String {
slugify_opts(s, -1, "-", False)
}
@@ -193,7 +188,6 @@ pub fn slugify(s: String) -> String {
///
/// slugify_with_normalizer("Café", my_nfd) -> "cafe"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn slugify_with_normalizer(s: String, normalizer) -> String {
slugify_opts_with_normalizer(s, -1, "-", False, normalizer)
}
@@ -202,7 +196,6 @@ pub fn slugify_with_normalizer(s: String, normalizer) -> String {
///
/// to_kebab_case("Hello World") -> "hello-world"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn to_kebab_case(s: String) -> String {
slugify(s)
}
@@ -214,7 +207,6 @@ pub fn to_kebab_case(s: String) -> String {
/// slugify_opts("one two three", 2, "-", False) -> "one-two"
/// slugify_opts("Hello World", -1, "_", False) -> "hello_world"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn slugify_opts(
s: String,
max_len: Int,
@@ -275,7 +267,6 @@ pub fn slugify_opts(
///
/// slugify_opts_with_normalizer("Crème Brûlée", 2, "-", False, my_nfd) -> "creme-brulee"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn slugify_opts_with_normalizer(
s: String,
max_len: Int,
@@ -337,7 +328,6 @@ pub fn slugify_opts_with_normalizer(
///
/// to_snake_case("Hello World") -> "hello_world"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn to_snake_case(s: String) -> String {
slugify(s) |> string.replace("-", "_")
}
@@ -348,7 +338,6 @@ pub fn to_snake_case(s: String) -> String {
/// to_camel_case("hello world") -> "helloWorld"
/// to_camel_case("get user by id") -> "getUserById"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn to_camel_case(s: String) -> String {
let parts = string.split(slugify(s), "-")
list.fold(parts, "", fn(acc, part) {
@@ -372,7 +361,6 @@ pub fn to_camel_case(s: String) -> String {
/// to_pascal_case("hello world") -> "HelloWorld"
/// to_pascal_case("get user by id") -> "GetUserById"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn to_pascal_case(s: String) -> String {
let parts = string.split(slugify(s), "-")
list.fold(parts, "", fn(acc, part) {
@@ -393,7 +381,6 @@ pub fn to_pascal_case(s: String) -> String {
/// to_title_case("get user by id") -> "Get User By Id"
/// to_title_case("café brûlée") -> "Cafe Brulee"
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn to_title_case(s: String) -> String {
let parts = string.split(slugify(s), "-")
let capitalized =
diff --git a/src/str/tokenize.gleam b/src/str/internal/tokenize.gleam
similarity index 94%
rename from src/str/tokenize.gleam
rename to src/str/internal/tokenize.gleam
index a354986..efc581a 100644
--- a/src/str/tokenize.gleam
+++ b/src/str/internal/tokenize.gleam
@@ -97,7 +97,6 @@ fn rec_build(cps, clusters, current_rev, pending) -> List(String) {
/// Example:
/// chars("café") -> ["c", "a", "f", "é"]
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn chars(text: String) -> List(String) {
let cps = string.to_utf_codepoints(text)
@@ -108,7 +107,6 @@ pub fn chars(text: String) -> List(String) {
///
/// chars_stdlib("café") -> ["c", "a", "f", "é"]
///
-@deprecated("Will be removed in str 2.0; prefer the unified `str` module when available")
pub fn chars_stdlib(text: String) -> List(String) {
string.to_graphemes(text)
}
diff --git a/src/str/internal_translit.gleam b/src/str/internal/translit.gleam
similarity index 100%
rename from src/str/internal_translit.gleam
rename to src/str/internal/translit.gleam
diff --git a/test/str_auto_test.gleam b/test/str_auto_test.gleam
index 6b7927d..e6e9643 100644
--- a/test/str_auto_test.gleam
+++ b/test/str_auto_test.gleam
@@ -1,5 +1,5 @@
import gleam/list
-import str/core
+import str
pub fn index_of_auto_matches_legacy_test() {
let cases = [
@@ -12,7 +12,7 @@ pub fn index_of_auto_matches_legacy_test() {
list.each(cases, fn(pair) {
let #(text, pat) = pair
- assert core.index_of_auto(text, pat) == core.index_of(text, pat)
+ assert str.index_of_auto(text, pat) == str.index_of(text, pat)
})
}
@@ -27,13 +27,13 @@ pub fn count_auto_matches_legacy_test() {
// overlapping True
list.each(cases, fn(pair) {
let #(text, pat) = pair
- assert core.count_auto(text, pat, True) == core.count(text, pat, True)
+ assert str.count_auto(text, pat, True) == str.count(text, pat, True)
})
// non-overlapping False
list.each(cases, fn(pair) {
let #(text, pat) = pair
- assert core.count_auto(text, pat, False) == core.count(text, pat, False)
+ assert str.count_auto(text, pat, False) == str.count(text, pat, False)
})
}
// helper removed: previously unused `repeat` function
diff --git a/test/str_combining_test.gleam b/test/str_combining_test.gleam
index ef6f510..aeffc2b 100644
--- a/test/str_combining_test.gleam
+++ b/test/str_combining_test.gleam
@@ -1,4 +1,4 @@
-import str/extra
+import str
// string module not required here
@@ -7,17 +7,17 @@ pub fn combining_ordering_test() {
let a = "a\u{0301}\u{0323}"
// a + acute + dot below
// Without decomposition, ascii_fold should remove combining marks
- assert extra.ascii_fold(a) == "a"
+ assert str.ascii_fold(a) == "a"
// Decomposed precombined letter with multiple marks (simulate)
let composed = "Å\u{0323}"
// Å plus dot below
- let dec = extra.ascii_fold(composed)
+ let dec = str.ascii_fold(composed)
assert dec == "A"
}
pub fn long_combining_sequence_test() {
// A base letter with several combining marks (tilde, acute, dot)
let s = "o\u{0303}\u{0301}\u{0323}"
- assert extra.ascii_fold(s) == "o"
+ assert str.ascii_fold(s) == "o"
}
diff --git a/test/str_config_test.gleam b/test/str_config_test.gleam
new file mode 100644
index 0000000..a7d6988
--- /dev/null
+++ b/test/str_config_test.gleam
@@ -0,0 +1,26 @@
+import gleam/list
+import str
+
+// removed unused import
+
+pub fn smart_search_default_test() {
+ assert str.smart_search_enabled() == False
+}
+
+fn make_repeat(s: String, n: Int) -> String {
+ list.fold(list.range(1, n), "", fn(acc, _) { acc <> s })
+}
+
+pub fn choose_strategy_min_pattern_test() {
+ let min = str.kmp_min_pattern_len()
+ // pattern of length `min` should prefer KMP
+ let pat = make_repeat("a", min)
+ let strategy = str.choose_search_strategy("some text", pat)
+ assert strategy == str.Kmp
+}
+
+pub fn choose_strategy_small_pattern_test() {
+ let pat = "a"
+ let strategy = str.choose_search_strategy("short", pat)
+ assert strategy == str.Sliding
+}
diff --git a/test/str_core_test.gleam b/test/str_core_test.gleam
index 7173dd3..ac9731b 100644
--- a/test/str_core_test.gleam
+++ b/test/str_core_test.gleam
@@ -1,288 +1,287 @@
import gleam/list
import gleam/string
-import str/core
-import str/tokenize
+import str
pub fn pad_and_center_tests() {
- assert core.pad_left("x", 3, "*") == "**x"
- assert core.pad_right("x", 3, "*") == "x**"
- assert core.center("ab", 5, "-") == "--ab-"
+ assert str.pad_left("x", 3, "*") == "**x"
+ assert str.pad_right("x", 3, "*") == "x**"
+ assert str.center("ab", 5, "-") == "--ab-"
// left bias
}
pub fn surround_and_unwrap_test() {
- let t = core.surround("hello", "
", "")
+ let t = str.surround("hello", "
", "")
assert t == "
hello"
- assert core.unwrap(t, "
", "") == "hello"
+ assert str.unwrap(t, "
", "") == "hello"
}
pub fn count_overlapping_tests() {
let s = "aaaa"
// overlapping: 'aa' occurs at positions 0,1,2 -> 3
- assert core.count(s, "aa", True) == 3
+ assert str.count(s, "aa", True) == 3
// non-overlapping: positions 0 and 2 -> 2
- assert core.count(s, "aa", False) == 2
+ assert str.count(s, "aa", False) == 2
}
pub fn reverse_and_tokenize_tests() {
let s = "a👩\u{200D}👩b"
- let r = core.reverse(s)
+ let r = str.reverse(s)
// reverse twice returns original
- assert core.reverse(r) == s
+ assert str.reverse(r) == s
// tokenizer returns grapheme clusters
- let t1 = tokenize.chars(s)
- let t2 = tokenize.chars_stdlib(s)
+ let t1 = str.chars(s)
+ let t2 = str.chars_stdlib(s)
assert list.length(t1) == list.length(t2)
}
pub fn truncate_preserve_emoji_test() {
let family = "👩\u{200D}👩\u{200D}👧 family"
// keep whole emoji cluster when truncating
- let out = core.truncate_preserve(family, 4, "…")
+ let out = str.truncate_preserve(family, 4, "…")
// ensure we didn't slice into an emoji cluster: output should contain the emoji or be shorter
assert string.contains(out, "👩") || string.length(out) <= 4
}
pub fn pad_noop_and_words_tests() {
// pad noop when width <= length
- assert core.pad_left("abcd", 2, "*") == "abcd"
+ assert str.pad_left("abcd", 2, "*") == "abcd"
// words splits various whitespace
- let w = core.words("a\n b\t c")
+ let w = str.words("a\n b\t c")
assert list.length(w) == 3
}
pub fn truncate_strict_suffix_only_test() {
// Not enough room for content: should return truncated suffix
- assert core.truncate_strict("hello", 1, "...") == "."
+ assert str.truncate_strict("hello", 1, "...") == "."
}
pub fn count_empty_needle_test() {
- assert core.count("abc", "", True) == 0
+ assert str.count("abc", "", True) == 0
}
pub fn surround_no_unwrap_test() {
// if prefix+suffix longer than text, unwrap should be no-op
let s = "x"
- let t = core.surround(s, "<<", ">>")
- assert core.unwrap(t, "<<<", ">>>") == t
+ let t = str.surround(s, "<<", ">>")
+ assert str.unwrap(t, "<<<", ">>>") == t
}
pub fn truncate_simple_test() {
let input = "Hello, world!"
- assert core.truncate_default(input, 5) == "He..."
+ assert str.truncate_default(input, 5) == "He..."
}
pub fn truncate_noop_test() {
let input = "Hi"
- assert core.truncate_default(input, 10) == "Hi"
+ assert str.truncate_default(input, 10) == "Hi"
}
pub fn truncate_unicode_grapheme_test() {
  let input = "Hello 👩🏽‍⚕️ World"
-  let out = core.truncate_default(input, 8)
+  let out = str.truncate_default(input, 8)
  assert string.contains(out, "👩🏽‍⚕️")
}
pub fn reverse_basic_test() {
- assert core.reverse("abcde") == "edcba"
+ assert str.reverse("abcde") == "edcba"
}
pub fn reverse_combining_test() {
let e_accent = "e\u{0301}"
let input = "a" <> e_accent <> "b"
- let rev = core.reverse(input)
+ let rev = str.reverse(input)
assert rev == "b" <> e_accent <> "a"
}
pub fn reverse_involutive_test() {
  let text = "👨‍👩‍👧‍👦abc"
- assert core.reverse(core.reverse(text)) == text
+ assert str.reverse(str.reverse(text)) == text
}
pub fn pad_left_ascii_test() {
- assert core.pad_left("a", 3, " ") == " a"
+ assert str.pad_left("a", 3, " ") == " a"
}
pub fn pad_right_ascii_test() {
- assert core.pad_right("a", 3, " ") == "a "
+ assert str.pad_right("a", 3, " ") == "a "
}
pub fn center_ascii_test() {
- assert core.center("a", 3, " ") == " a "
+ assert str.center("a", 3, " ") == " a "
}
pub fn center_left_bias_test() {
- assert core.center("hi", 5, " ") == " hi "
+ assert str.center("hi", 5, " ") == " hi "
}
pub fn pad_emoji_test() {
- let result = core.pad_left("x", 3, "😊")
+ let result = str.pad_left("x", 3, "😊")
assert string.contains(result, "😊")
}
pub fn count_non_overlapping_test() {
- assert core.count("aaaa", "aa", False) == 2
+ assert str.count("aaaa", "aa", False) == 2
}
pub fn count_emoji_test() {
let t = "👩👩👩"
- assert core.count(t, "👩", True) == 3
+ assert str.count(t, "👩", True) == 3
}
pub fn words_simple_test() {
let input = " Hello world \nThis\tis test "
let expect = ["Hello", "world", "This", "is", "test"]
- assert core.words(input) == expect
+ assert str.words(input) == expect
}
pub fn words_unicode_test() {
let input = "ciao mondo 😊"
let expect = ["ciao", "mondo", "😊"]
- assert core.words(input) == expect
+ assert str.words(input) == expect
}
pub fn words_empty_test() {
let input = " "
let expect: List(String) = []
- assert core.words(input) == expect
+ assert str.words(input) == expect
}
pub fn surround_basic_test() {
let s = "world"
- let w = core.surround(s, "Hello ", "!")
+ let w = str.surround(s, "Hello ", "!")
assert w == "Hello world!"
}
pub fn unwrap_basic_test() {
- assert core.unwrap("Hello world!", "Hello ", "!") == "world"
+ assert str.unwrap("Hello world!", "Hello ", "!") == "world"
}
pub fn unwrap_emoji_test() {
-  let wrapped = core.surround("mid", "👩🏽‍⚕️ ", " 😊")
-  assert core.unwrap(wrapped, "👩🏽‍⚕️ ", " 😊") == "mid"
+  let wrapped = str.surround("mid", "👩🏽‍⚕️ ", " 😊")
+  assert str.unwrap(wrapped, "👩🏽‍⚕️ ", " 😊") == "mid"
}
pub fn unwrap_missing_prefix_test() {
- assert core.unwrap("hello", "<", ">") == "hello"
+ assert str.unwrap("hello", "<", ">") == "hello"
}
pub fn truncate_preserve_vs_strict_emoji_test() {
  let input = "Hello 👩🏽‍⚕️ World"
-  let preserve = core.truncate_preserve(input, 8, "...")
+  let preserve = str.truncate_preserve(input, 8, "...")
  assert string.contains(preserve, "👩🏽‍⚕️")
-  let strict = core.truncate_strict(input, 8, "...")
+  let strict = str.truncate_strict(input, 8, "...")
  assert !string.contains(strict, "👩🏽‍⚕️")
}
pub fn truncate_preserve_flag_test() {
let input = "Hello 🇮🇹 World"
- let out = core.truncate_preserve(input, 8, "...")
+ let out = str.truncate_preserve(input, 8, "...")
assert string.contains(out, "🇮🇹")
}
pub fn truncate_strict_flag_test() {
let input = "Hello 🇮🇹 World"
- let out = core.truncate_strict(input, 8, "...")
+ let out = str.truncate_strict(input, 8, "...")
assert !string.contains(out, "🇮🇹")
}
pub fn truncate_keycap_test() {
let input = "Num 1️⃣ test"
- let out = core.truncate_preserve(input, 6, "...")
+ let out = str.truncate_preserve(input, 6, "...")
assert string.contains(out, "1️⃣")
}
pub fn count_keycap_test() {
let k = "1️⃣1️⃣1️⃣"
- assert core.count(k, "1️⃣", True) == 3
+ assert str.count(k, "1️⃣", True) == 3
}
pub fn pad_multigrapheme_test() {
let pad = "🙂👍"
- let out = core.pad_left("x", 3, pad)
+ let out = str.pad_left("x", 3, pad)
assert out == pad <> pad <> "x"
}
pub fn center_multigrapheme_test() {
- let centered = core.center("x", 5, "ab")
+ let centered = str.center("x", 5, "ab")
assert centered == "ababxabab"
}
pub fn tokenize_simple_emoji_test() {
let s1 = "😊"
- let c1 = tokenize.chars(s1)
+ let c1 = str.chars(s1)
assert list.length(c1) == 1
}
pub fn tokenize_family_emoji_test() {
  let family = "👨‍👩‍👧‍👦"
- let cf = tokenize.chars(family)
+ let cf = str.chars(family)
assert list.length(cf) == 1
}
pub fn tokenize_skin_tone_test() {
let thumbs = "👍🏿"
- let ct = tokenize.chars(thumbs)
+ let ct = str.chars(thumbs)
assert list.length(ct) == 1
}
pub fn truncate_suffix_longer_than_max_test() {
- let res = core.truncate("abcd", 2, "!!!")
+ let res = str.truncate("abcd", 2, "!!!")
assert res == "!!"
}
pub fn reverse_zwj_test() {
  let family = "👨‍👩‍👧‍👦"
- assert core.reverse(family) == family
+ assert str.reverse(family) == family
}
pub fn combined_pad_count_test() {
let base = "aba"
- let padded = core.pad_left(base, 5, "-")
- assert list.length(tokenize.chars(padded)) == 5
+ let padded = str.pad_left(base, 5, "-")
+ assert list.length(str.chars(padded)) == 5
let doubled = base <> base
- assert core.count(doubled, "a", True) == 4
+ assert str.count(doubled, "a", True) == 4
}
pub fn pad_right_noop_test() {
- assert core.pad_right("hello", 3, "*") == "hello"
+ assert str.pad_right("hello", 3, "*") == "hello"
}
pub fn center_even_width_test() {
- assert core.center("ab", 6, " ") == " ab "
+ assert str.center("ab", 6, " ") == " ab "
}
pub fn is_blank_empty_test() {
- assert core.is_blank("") == True
+ assert str.is_blank("") == True
}
pub fn is_blank_spaces_test() {
- assert core.is_blank(" ") == True
+ assert str.is_blank(" ") == True
}
pub fn is_blank_tabs_newlines_test() {
- assert core.is_blank("\t\n\r") == True
+ assert str.is_blank("\t\n\r") == True
}
pub fn is_blank_mixed_whitespace_test() {
- assert core.is_blank(" \t\n ") == True
+ assert str.is_blank(" \t\n ") == True
}
pub fn is_blank_with_content_test() {
- assert core.is_blank(" hello ") == False
+ assert str.is_blank(" hello ") == False
}
pub fn is_blank_unicode_spaces_test() {
// Non-breaking space is NOT treated as whitespace by Gleam's string.trim
// This is consistent with Erlang/BEAM behavior
- assert core.is_blank("\u{00A0}") == False
+ assert str.is_blank("\u{00A0}") == False
}
pub fn is_blank_single_char_test() {
- assert core.is_blank("x") == False
+ assert str.is_blank("x") == False
}
// ============================================================================
@@ -292,329 +291,329 @@ pub fn is_blank_single_char_test() {
// --- take/drop/at tests ---
pub fn take_basic_test() {
- assert core.take("hello", 3) == "hel"
+ assert str.take("hello", 3) == "hel"
}
pub fn take_emoji_test() {
-  assert core.take("👨‍👩‍👧‍👦abc", 2) == "👨‍👩‍👧‍👦a"
+  assert str.take("👨‍👩‍👧‍👦abc", 2) == "👨‍👩‍👧‍👦a"
}
pub fn take_exceeds_length_test() {
- assert core.take("hi", 10) == "hi"
+ assert str.take("hi", 10) == "hi"
}
pub fn take_zero_test() {
- assert core.take("hello", 0) == ""
+ assert str.take("hello", 0) == ""
}
pub fn drop_basic_test() {
- assert core.drop("hello", 2) == "llo"
+ assert str.drop("hello", 2) == "llo"
}
pub fn drop_emoji_test() {
-  assert core.drop("👨‍👩‍👧‍👦abc", 1) == "abc"
+  assert str.drop("👨‍👩‍👧‍👦abc", 1) == "abc"
}
pub fn drop_exceeds_length_test() {
- assert core.drop("hi", 10) == ""
+ assert str.drop("hi", 10) == ""
}
pub fn drop_zero_test() {
- assert core.drop("hello", 0) == "hello"
+ assert str.drop("hello", 0) == "hello"
}
pub fn at_basic_test() {
- assert core.at("hello", 1) == Ok("e")
+ assert str.at("hello", 1) == Ok("e")
}
pub fn at_emoji_test() {
-  assert core.at("👨‍👩‍👧‍👦abc", 0) == Ok("👨‍👩‍👧‍👦")
+  assert str.at("👨‍👩‍👧‍👦abc", 0) == Ok("👨‍👩‍👧‍👦")
}
pub fn at_out_of_bounds_test() {
- assert core.at("hi", 10) == Error(Nil)
+ assert str.at("hi", 10) == Error(Nil)
}
pub fn at_negative_test() {
- assert core.at("hello", -1) == Error(Nil)
+ assert str.at("hello", -1) == Error(Nil)
}
// --- lines/dedent/indent tests ---
pub fn lines_basic_test() {
- assert core.lines("a\nb\nc") == ["a", "b", "c"]
+ assert str.lines("a\nb\nc") == ["a", "b", "c"]
}
pub fn lines_crlf_test() {
- assert core.lines("a\r\nb\r\nc") == ["a", "b", "c"]
+ assert str.lines("a\r\nb\r\nc") == ["a", "b", "c"]
}
pub fn lines_single_test() {
- assert core.lines("hello") == ["hello"]
+ assert str.lines("hello") == ["hello"]
}
pub fn dedent_basic_test() {
- assert core.dedent(" a\n b\n c") == "a\nb\nc"
+ assert str.dedent(" a\n b\n c") == "a\nb\nc"
}
pub fn dedent_mixed_indent_test() {
- assert core.dedent(" hello\n world") == "hello\nworld"
+ assert str.dedent(" hello\n world") == "hello\nworld"
}
pub fn dedent_no_indent_test() {
- assert core.dedent("hello\nworld") == "hello\nworld"
+ assert str.dedent("hello\nworld") == "hello\nworld"
}
pub fn indent_basic_test() {
- assert core.indent("hello\nworld", 2) == " hello\n world"
+ assert str.indent("hello\nworld", 2) == " hello\n world"
}
pub fn indent_single_line_test() {
- assert core.indent("hi", 4) == " hi"
+ assert str.indent("hi", 4) == " hi"
}
// --- wrap_at/ellipsis tests ---
pub fn wrap_at_basic_test() {
- let result = core.wrap_at("hello world foo bar", 11)
+ let result = str.wrap_at("hello world foo bar", 11)
assert string.contains(result, "\n")
}
pub fn wrap_at_no_wrap_needed_test() {
- assert core.wrap_at("hello", 100) == "hello"
+ assert str.wrap_at("hello", 100) == "hello"
}
pub fn wrap_at_zero_width_test() {
- assert core.wrap_at("hello", 0) == "hello"
+ assert str.wrap_at("hello", 0) == "hello"
}
pub fn wrap_at_emoji_grapheme_test() {
// Ensure grapheme-aware wrapping treats emoji as single units
  let s = "a 👨‍👩‍👧‍👦 b"
// Width 2 should force a newline between "a" and the emoji
- assert string.contains(core.wrap_at(s, 2), "\n")
+ assert string.contains(str.wrap_at(s, 2), "\n")
}
pub fn grapple_len_behavior_test() {
// Verify grapheme-aware counting on representative cases
- assert core.length("") == 0
- assert core.length("abc") == 3
+ assert str.length("") == 0
+ assert str.length("abc") == 3
// a + combining acute accent should be one grapheme
- assert core.length("a\u{0301}") == 1
+ assert str.length("a\u{0301}") == 1
// Regional indicator flag (two codepoints) is a single grapheme
- assert core.length("🇮🇹") == 1
+ assert str.length("🇮🇹") == 1
// Family ZWJ sequence should be one grapheme cluster
-  assert core.length("👨‍👩‍👧‍👦") == 1
+  assert str.length("👨‍👩‍👧‍👦") == 1
// Stress: long ASCII string should return its length
let long = list.fold(list.range(1, 1000), "", fn(acc, _) { acc <> "x" })
- assert core.length(long) == 1000
+ assert str.length(long) == 1000
}
pub fn ellipsis_basic_test() {
- let result = core.ellipsis("Hello World", 8)
+ let result = str.ellipsis("Hello World", 8)
assert string.ends_with(result, "…")
}
// --- strip/squeeze/chomp tests ---
pub fn strip_basic_test() {
- assert core.strip("..hello..", ".") == "hello"
+ assert str.strip("..hello..", ".") == "hello"
}
pub fn strip_multiple_chars_test() {
- assert core.strip("xxhelloxx", "x") == "hello"
+ assert str.strip("xxhelloxx", "x") == "hello"
}
pub fn strip_no_match_test() {
- assert core.strip("hello", "x") == "hello"
+ assert str.strip("hello", "x") == "hello"
}
pub fn squeeze_basic_test() {
- assert core.squeeze("heeello", "e") == "hello"
+ assert str.squeeze("heeello", "e") == "hello"
}
pub fn squeeze_spaces_test() {
- assert core.squeeze("  hello  world  ", " ") == " hello world "
+ assert str.squeeze("  hello  world  ", " ") == " hello world "
}
pub fn squeeze_no_consecutive_test() {
- assert core.squeeze("hello", "l") == "helo"
+ assert str.squeeze("hello", "l") == "helo"
}
pub fn chomp_newline_test() {
- assert core.chomp("hello\n") == "hello"
+ assert str.chomp("hello\n") == "hello"
}
pub fn chomp_crlf_test() {
- assert core.chomp("hello\r\n") == "hello"
- assert core.chomp("hi\r\n") == "hi"
+ assert str.chomp("hello\r\n") == "hello"
+ assert str.chomp("hi\r\n") == "hi"
}
pub fn chomp_no_newline_test() {
- assert core.chomp("hello") == "hello"
+ assert str.chomp("hello") == "hello"
}
// --- partition tests ---
pub fn partition_basic_test() {
- assert core.partition("a-b-c", "-") == #("a", "-", "b-c")
+ assert str.partition("a-b-c", "-") == #("a", "-", "b-c")
}
pub fn partition_no_match_test() {
- assert core.partition("hello", "-") == #("hello", "", "")
+ assert str.partition("hello", "-") == #("hello", "", "")
}
// --- common_prefix/suffix tests ---
pub fn common_prefix_basic_test() {
- assert core.common_prefix(["abc", "abd", "abe"]) == "ab"
+ assert str.common_prefix(["abc", "abd", "abe"]) == "ab"
}
pub fn common_prefix_no_common_test() {
- assert core.common_prefix(["hello", "world"]) == ""
+ assert str.common_prefix(["hello", "world"]) == ""
}
pub fn common_prefix_empty_list_test() {
- assert core.common_prefix([]) == ""
+ assert str.common_prefix([]) == ""
}
pub fn common_prefix_single_test() {
- assert core.common_prefix(["hello"]) == "hello"
+ assert str.common_prefix(["hello"]) == "hello"
}
pub fn common_suffix_basic_test() {
- assert core.common_suffix(["abc", "xbc", "zbc"]) == "bc"
+ assert str.common_suffix(["abc", "xbc", "zbc"]) == "bc"
}
pub fn common_suffix_no_common_test() {
- assert core.common_suffix(["hello", "world"]) == ""
+ assert str.common_suffix(["hello", "world"]) == ""
}
// --- is_numeric/alpha/alphanumeric tests ---
pub fn is_numeric_true_test() {
- assert core.is_numeric("12345") == True
+ assert str.is_numeric("12345") == True
}
pub fn is_numeric_false_test() {
- assert core.is_numeric("123.45") == False
+ assert str.is_numeric("123.45") == False
}
pub fn is_numeric_empty_test() {
- assert core.is_numeric("") == False
+ assert str.is_numeric("") == False
}
pub fn is_alpha_true_test() {
- assert core.is_alpha("hello") == True
+ assert str.is_alpha("hello") == True
}
pub fn is_alpha_mixed_case_test() {
- assert core.is_alpha("HeLLo") == True
+ assert str.is_alpha("HeLLo") == True
}
pub fn is_alpha_with_numbers_test() {
- assert core.is_alpha("hello123") == False
+ assert str.is_alpha("hello123") == False
}
pub fn is_alpha_empty_test() {
- assert core.is_alpha("") == False
+ assert str.is_alpha("") == False
}
pub fn is_alphanumeric_true_test() {
- assert core.is_alphanumeric("hello123") == True
+ assert str.is_alphanumeric("hello123") == True
}
pub fn is_alphanumeric_with_special_test() {
- assert core.is_alphanumeric("hello-world") == False
+ assert str.is_alphanumeric("hello-world") == False
}
pub fn is_alphanumeric_empty_test() {
- assert core.is_alphanumeric("") == False
+ assert str.is_alphanumeric("") == False
}
// --- remove/ensure prefix/suffix tests ---
pub fn remove_prefix_present_test() {
- assert core.remove_prefix("hello world", "hello ") == "world"
+ assert str.remove_prefix("hello world", "hello ") == "world"
}
pub fn remove_prefix_absent_test() {
- assert core.remove_prefix("hello", "bye") == "hello"
+ assert str.remove_prefix("hello", "bye") == "hello"
}
pub fn remove_suffix_present_test() {
- assert core.remove_suffix("hello world", " world") == "hello"
+ assert str.remove_suffix("hello world", " world") == "hello"
}
pub fn remove_suffix_absent_test() {
- assert core.remove_suffix("hello", "bye") == "hello"
+ assert str.remove_suffix("hello", "bye") == "hello"
}
pub fn ensure_prefix_absent_test() {
- assert core.ensure_prefix("world", "hello ") == "hello world"
+ assert str.ensure_prefix("world", "hello ") == "hello world"
}
pub fn ensure_prefix_present_test() {
- assert core.ensure_prefix("hello world", "hello ") == "hello world"
+ assert str.ensure_prefix("hello world", "hello ") == "hello world"
}
pub fn remove_prefix_emoji_test() {
- assert core.remove_prefix("👨👩👧👦 family", "👨👩👧👦") == " family"
+ assert str.remove_prefix("👨👩👧👦 family", "👨👩👧👦") == " family"
}
pub fn remove_suffix_emoji_test() {
- assert core.remove_suffix("family 👨👩👧👦", "👨👩👧👦") == "family "
+ assert str.remove_suffix("family 👨👩👧👦", "👨👩👧👦") == "family "
}
pub fn ensure_prefix_emoji_test() {
- assert core.ensure_prefix("family", "👨👩👧👦 ") == "👨👩👧👦 family"
- assert core.ensure_prefix("👨👩👧👦 family", "👨👩👧👦 ") == "👨👩👧👦 family"
+ assert str.ensure_prefix("family", "👨👩👧👦 ") == "👨👩👧👦 family"
+ assert str.ensure_prefix("👨👩👧👦 family", "👨👩👧👦 ") == "👨👩👧👦 family"
}
pub fn ensure_suffix_emoji_test() {
- assert core.ensure_suffix("family", " 👨👩👧👦") == "family 👨👩👧👦"
- assert core.ensure_suffix("family 👨👩👧👦", " 👨👩👧👦") == "family 👨👩👧👦"
+ assert str.ensure_suffix("family", " 👨👩👧👦") == "family 👨👩👧👦"
+ assert str.ensure_suffix("family 👨👩👧👦", " 👨👩👧👦") == "family 👨👩👧👦"
}
pub fn ensure_suffix_absent_test() {
- assert core.ensure_suffix("hello", " world") == "hello world"
+ assert str.ensure_suffix("hello", " world") == "hello world"
}
pub fn ensure_suffix_present_test() {
- assert core.ensure_suffix("hello world", " world") == "hello world"
+ assert str.ensure_suffix("hello world", " world") == "hello world"
}
// --- swapcase tests ---
pub fn swapcase_basic_test() {
- assert core.swapcase("Hello World") == "hELLO wORLD"
+ assert str.swapcase("Hello World") == "hELLO wORLD"
}
pub fn swapcase_all_upper_test() {
- assert core.swapcase("ABC") == "abc"
+ assert str.swapcase("ABC") == "abc"
}
pub fn swapcase_all_lower_test() {
- assert core.swapcase("abc") == "ABC"
+ assert str.swapcase("abc") == "ABC"
}
// --- distance tests ---
pub fn distance_same_test() {
- assert core.distance("hello", "hello") == 0
+ assert str.distance("hello", "hello") == 0
}
pub fn distance_empty_test() {
- assert core.distance("", "abc") == 3
+ assert str.distance("", "abc") == 3
}
pub fn distance_kitten_sitting_test() {
- assert core.distance("kitten", "sitting") == 3
+ assert str.distance("kitten", "sitting") == 3
}
pub fn distance_single_char_test() {
- assert core.distance("a", "b") == 1
+ assert str.distance("a", "b") == 1
}
// ============================================================================
@@ -624,361 +623,361 @@ pub fn distance_single_char_test() {
// --- index_of tests ---
pub fn index_of_basic_test() {
- assert core.index_of("hello world", "world") == Ok(6)
+ assert str.index_of("hello world", "world") == Ok(6)
}
pub fn index_of_not_found_test() {
- assert core.index_of("hello", "x") == Error(Nil)
+ assert str.index_of("hello", "x") == Error(Nil)
}
pub fn index_of_emoji_test() {
- assert core.index_of("👨\u{200D}👩\u{200D}👧\u{200D}👦 family", "family") == Ok(2)
+ assert str.index_of("👨\u{200D}👩\u{200D}👧\u{200D}👦 family", "family") == Ok(2)
}
pub fn index_of_start_test() {
- assert core.index_of("hello", "hello") == Ok(0)
+ assert str.index_of("hello", "hello") == Ok(0)
}
pub fn index_of_empty_needle_test() {
- assert core.index_of("hello", "") == Error(Nil)
+ assert str.index_of("hello", "") == Error(Nil)
}
// --- last_index_of tests ---
pub fn last_index_of_basic_test() {
- assert core.last_index_of("hello hello", "hello") == Ok(6)
+ assert str.last_index_of("hello hello", "hello") == Ok(6)
}
pub fn last_index_of_not_found_test() {
- assert core.last_index_of("hello", "x") == Error(Nil)
+ assert str.last_index_of("hello", "x") == Error(Nil)
}
pub fn last_index_of_single_test() {
- assert core.last_index_of("hello", "hello") == Ok(0)
+ assert str.last_index_of("hello", "hello") == Ok(0)
}
pub fn last_index_of_separator_test() {
- assert core.last_index_of("a-b-c", "-") == Ok(3)
+ assert str.last_index_of("a-b-c", "-") == Ok(3)
}
// --- contains_any tests ---
pub fn contains_any_found_test() {
- assert core.contains_any("hello world", ["foo", "world"]) == True
+ assert str.contains_any("hello world", ["foo", "world"]) == True
}
pub fn contains_any_not_found_test() {
- assert core.contains_any("hello", ["x", "y", "z"]) == False
+ assert str.contains_any("hello", ["x", "y", "z"]) == False
}
pub fn contains_any_empty_list_test() {
- assert core.contains_any("test", []) == False
+ assert str.contains_any("test", []) == False
}
// --- contains_all tests ---
pub fn contains_all_true_test() {
- assert core.contains_all("hello world", ["hello", "world"]) == True
+ assert str.contains_all("hello world", ["hello", "world"]) == True
}
pub fn contains_all_false_test() {
- assert core.contains_all("hello", ["hello", "x"]) == False
+ assert str.contains_all("hello", ["hello", "x"]) == False
}
pub fn contains_all_empty_list_test() {
- assert core.contains_all("test", []) == True
+ assert str.contains_all("test", []) == True
}
// --- replace_first tests ---
pub fn replace_first_basic_test() {
- assert core.replace_first("hello hello", "hello", "hi") == "hi hello"
+ assert str.replace_first("hello hello", "hello", "hi") == "hi hello"
}
pub fn replace_first_single_test() {
- assert core.replace_first("aaa", "a", "b") == "baa"
+ assert str.replace_first("aaa", "a", "b") == "baa"
}
pub fn replace_first_not_found_test() {
- assert core.replace_first("test", "x", "y") == "test"
+ assert str.replace_first("test", "x", "y") == "test"
}
// --- replace_last tests ---
pub fn replace_last_basic_test() {
- assert core.replace_last("hello hello", "hello", "hi") == "hello hi"
+ assert str.replace_last("hello hello", "hello", "hi") == "hello hi"
}
pub fn replace_last_single_test() {
- assert core.replace_last("aaa", "a", "b") == "aab"
+ assert str.replace_last("aaa", "a", "b") == "aab"
}
pub fn replace_last_not_found_test() {
- assert core.replace_last("test", "x", "y") == "test"
+ assert str.replace_last("test", "x", "y") == "test"
}
// --- is_uppercase tests ---
pub fn is_uppercase_true_test() {
- assert core.is_uppercase("HELLO") == True
+ assert str.is_uppercase("HELLO") == True
}
pub fn is_uppercase_false_test() {
- assert core.is_uppercase("Hello") == False
+ assert str.is_uppercase("Hello") == False
}
pub fn is_uppercase_with_numbers_test() {
- assert core.is_uppercase("HELLO123") == True
+ assert str.is_uppercase("HELLO123") == True
}
pub fn is_uppercase_only_numbers_test() {
- assert core.is_uppercase("123") == False
+ assert str.is_uppercase("123") == False
}
pub fn is_uppercase_empty_test() {
- assert core.is_uppercase("") == False
+ assert str.is_uppercase("") == False
}
// --- is_lowercase tests ---
pub fn is_lowercase_true_test() {
- assert core.is_lowercase("hello") == True
+ assert str.is_lowercase("hello") == True
}
pub fn is_lowercase_false_test() {
- assert core.is_lowercase("Hello") == False
+ assert str.is_lowercase("Hello") == False
}
pub fn is_lowercase_with_numbers_test() {
- assert core.is_lowercase("hello123") == True
+ assert str.is_lowercase("hello123") == True
}
pub fn is_lowercase_only_numbers_test() {
- assert core.is_lowercase("123") == False
+ assert str.is_lowercase("123") == False
}
pub fn is_lowercase_empty_test() {
- assert core.is_lowercase("") == False
+ assert str.is_lowercase("") == False
}
// --- is_ascii tests ---
pub fn is_ascii_true_test() {
- assert core.is_ascii("hello") == True
+ assert str.is_ascii("hello") == True
}
pub fn is_ascii_with_symbols_test() {
- assert core.is_ascii("hello!@#") == True
+ assert str.is_ascii("hello!@#") == True
}
pub fn is_ascii_false_test() {
- assert core.is_ascii("café") == False
+ assert str.is_ascii("café") == False
}
pub fn is_ascii_emoji_test() {
- assert core.is_ascii("👋") == False
+ assert str.is_ascii("👋") == False
}
pub fn is_ascii_empty_test() {
- assert core.is_ascii("") == True
+ assert str.is_ascii("") == True
}
// --- is_printable tests ---
pub fn is_printable_true_test() {
- assert core.is_printable("hello") == True
+ assert str.is_printable("hello") == True
}
pub fn is_printable_newline_test() {
- assert core.is_printable("hello\n") == False
+ assert str.is_printable("hello\n") == False
}
pub fn is_printable_tab_test() {
- assert core.is_printable("hello\t") == False
+ assert str.is_printable("hello\t") == False
}
pub fn is_printable_empty_test() {
- assert core.is_printable("") == True
+ assert str.is_printable("") == True
}
// --- is_hex tests ---
pub fn is_hex_lowercase_test() {
- assert core.is_hex("abc123") == True
+ assert str.is_hex("abc123") == True
}
pub fn is_hex_uppercase_test() {
- assert core.is_hex("DEADBEEF") == True
+ assert str.is_hex("DEADBEEF") == True
}
pub fn is_hex_invalid_test() {
- assert core.is_hex("xyz") == False
+ assert str.is_hex("xyz") == False
}
pub fn is_hex_empty_test() {
- assert core.is_hex("") == False
+ assert str.is_hex("") == False
}
// --- escape_html tests ---
pub fn escape_html_tags_test() {
- assert core.escape_html("<div>Hello</div>") == "&lt;div&gt;Hello&lt;/div&gt;"
+ assert str.escape_html("<div>Hello</div>") == "&lt;div&gt;Hello&lt;/div&gt;"
}
pub fn escape_html_ampersand_test() {
- assert core.escape_html("Tom & Jerry") == "Tom &amp; Jerry"
+ assert str.escape_html("Tom & Jerry") == "Tom &amp; Jerry"
}
pub fn escape_html_quotes_test() {
- assert core.escape_html("Say \"hello\"") == "Say &quot;hello&quot;"
+ assert str.escape_html("Say \"hello\"") == "Say &quot;hello&quot;"
}
pub fn escape_html_single_quote_test() {
- assert core.escape_html("It's") == "It&#39;s"
+ assert str.escape_html("It's") == "It&#39;s"
}
// --- unescape_html tests ---
pub fn unescape_html_tags_test() {
- assert core.unescape_html("&lt;div&gt;") == "<div>"
+ assert str.unescape_html("&lt;div&gt;") == "<div>"
}
pub fn unescape_html_ampersand_test() {
- assert core.unescape_html("Tom &amp; Jerry") == "Tom & Jerry"
+ assert str.unescape_html("Tom &amp; Jerry") == "Tom & Jerry"
}
pub fn unescape_html_quotes_test() {
- assert core.unescape_html("Say &quot;hello&quot;") == "Say \"hello\""
+ assert str.unescape_html("Say &quot;hello&quot;") == "Say \"hello\""
}
// --- escape_regex tests ---
pub fn escape_regex_dot_test() {
- assert core.escape_regex("hello.world") == "hello\\.world"
+ assert str.escape_regex("hello.world") == "hello\\.world"
}
pub fn escape_regex_brackets_test() {
- assert core.escape_regex("[test]") == "\\[test\\]"
+ assert str.escape_regex("[test]") == "\\[test\\]"
}
pub fn escape_regex_quantifiers_test() {
- assert core.escape_regex("a+b*c?") == "a\\+b\\*c\\?"
+ assert str.escape_regex("a+b*c?") == "a\\+b\\*c\\?"
}
pub fn escape_regex_anchors_test() {
- assert core.escape_regex("^start$end") == "\\^start\\$end"
+ assert str.escape_regex("^start$end") == "\\^start\\$end"
}
// --- similarity tests ---
pub fn similarity_identical_test() {
- let result = core.similarity("hello", "hello")
+ let result = str.similarity("hello", "hello")
assert result == 1.0
}
pub fn similarity_one_diff_test() {
- let result = core.similarity("hello", "hallo")
+ let result = str.similarity("hello", "hallo")
assert result == 0.8
}
pub fn similarity_totally_diff_test() {
- let result = core.similarity("abc", "xyz")
+ let result = str.similarity("abc", "xyz")
assert result == 0.0
}
pub fn similarity_empty_test() {
- let result = core.similarity("", "")
+ let result = str.similarity("", "")
assert result == 1.0
}
// --- hamming_distance tests ---
pub fn hamming_distance_basic_test() {
- assert core.hamming_distance("karolin", "kathrin") == Ok(3)
+ assert str.hamming_distance("karolin", "kathrin") == Ok(3)
}
pub fn hamming_distance_one_diff_test() {
- assert core.hamming_distance("hello", "hallo") == Ok(1)
+ assert str.hamming_distance("hello", "hallo") == Ok(1)
}
pub fn hamming_distance_diff_length_test() {
- assert core.hamming_distance("abc", "ab") == Error(Nil)
+ assert str.hamming_distance("abc", "ab") == Error(Nil)
}
pub fn hamming_distance_same_test() {
- assert core.hamming_distance("abc", "abc") == Ok(0)
+ assert str.hamming_distance("abc", "abc") == Ok(0)
}
// --- take_right tests ---
pub fn take_right_basic_test() {
- assert core.take_right("hello", 3) == "llo"
+ assert str.take_right("hello", 3) == "llo"
}
pub fn take_right_emoji_test() {
- assert core.take_right("👨👩👧👦abc", 2) == "bc"
+ assert str.take_right("👨👩👧👦abc", 2) == "bc"
}
pub fn take_right_exceeds_test() {
- assert core.take_right("hi", 10) == "hi"
+ assert str.take_right("hi", 10) == "hi"
}
pub fn take_right_zero_test() {
- assert core.take_right("hello", 0) == ""
+ assert str.take_right("hello", 0) == ""
}
// --- drop_right tests ---
pub fn drop_right_basic_test() {
- assert core.drop_right("hello", 2) == "hel"
+ assert str.drop_right("hello", 2) == "hel"
}
pub fn drop_right_emoji_test() {
- assert core.drop_right("👨👩👧👦abc", 2) == "👨👩👧👦a"
+ assert str.drop_right("👨👩👧👦abc", 2) == "👨👩👧👦a"
}
pub fn drop_right_exceeds_test() {
- assert core.drop_right("hi", 10) == ""
+ assert str.drop_right("hi", 10) == ""
}
pub fn drop_right_zero_test() {
- assert core.drop_right("hello", 0) == "hello"
+ assert str.drop_right("hello", 0) == "hello"
}
// --- reverse_words tests ---
pub fn reverse_words_basic_test() {
- assert core.reverse_words("hello world") == "world hello"
+ assert str.reverse_words("hello world") == "world hello"
}
pub fn reverse_words_three_test() {
- assert core.reverse_words("one two three") == "three two one"
+ assert str.reverse_words("one two three") == "three two one"
}
pub fn reverse_words_single_test() {
- assert core.reverse_words("single") == "single"
+ assert str.reverse_words("single") == "single"
}
pub fn reverse_words_empty_test() {
- assert core.reverse_words("") == ""
+ assert str.reverse_words("") == ""
}
// --- initials tests ---
pub fn initials_basic_test() {
- assert core.initials("John Doe") == "JD"
+ assert str.initials("John Doe") == "JD"
}
pub fn initials_lowercase_test() {
- assert core.initials("visual studio code") == "VSC"
+ assert str.initials("visual studio code") == "VSC"
}
pub fn initials_single_word_test() {
- assert core.initials("hello") == "H"
+ assert str.initials("hello") == "H"
}
pub fn initials_empty_test() {
- assert core.initials("") == ""
+ assert str.initials("") == ""
}
// ============================================================================
@@ -986,28 +985,28 @@ pub fn initials_empty_test() {
// ============================================================================
pub fn capitalize_basic_test() {
- assert core.capitalize("hello") == "Hello"
+ assert str.capitalize("hello") == "Hello"
}
pub fn capitalize_mixed_case_test() {
- assert core.capitalize("hELLO wORLD") == "Hello world"
+ assert str.capitalize("hELLO wORLD") == "Hello world"
}
pub fn capitalize_empty_test() {
- assert core.capitalize("") == ""
+ assert str.capitalize("") == ""
}
pub fn capitalize_single_char_test() {
- assert core.capitalize("a") == "A"
+ assert str.capitalize("a") == "A"
}
pub fn capitalize_already_capitalized_test() {
- assert core.capitalize("Hello") == "Hello"
+ assert str.capitalize("Hello") == "Hello"
}
pub fn capitalize_emoji_prefix_test() {
// Emoji at start: should uppercase emoji (no-op) and lowercase rest
- assert core.capitalize("👋 HELLO") == "👋 hello"
+ assert str.capitalize("👋 HELLO") == "👋 hello"
}
// ============================================================================
@@ -1015,28 +1014,27 @@ pub fn capitalize_emoji_prefix_test() {
// ============================================================================
pub fn rpartition_basic_test() {
- assert core.rpartition("a-b-c", "-") == #("a-b", "-", "c")
+ assert str.rpartition("a-b-c", "-") == #("a-b", "-", "c")
}
pub fn rpartition_not_found_test() {
- assert core.rpartition("hello", "-") == #("", "", "hello")
+ assert str.rpartition("hello", "-") == #("", "", "hello")
}
pub fn rpartition_multi_char_sep_test() {
- assert core.rpartition("one::two::three", "::")
- == #("one::two", "::", "three")
+ assert str.rpartition("one::two::three", "::") == #("one::two", "::", "three")
}
pub fn rpartition_single_occurrence_test() {
- assert core.rpartition("hello-world", "-") == #("hello", "-", "world")
+ assert str.rpartition("hello-world", "-") == #("hello", "-", "world")
}
pub fn rpartition_at_start_test() {
- assert core.rpartition("-hello", "-") == #("", "-", "hello")
+ assert str.rpartition("-hello", "-") == #("", "-", "hello")
}
pub fn rpartition_at_end_test() {
- assert core.rpartition("hello-", "-") == #("hello", "-", "")
+ assert str.rpartition("hello-", "-") == #("hello", "-", "")
}
// ============================================================================
@@ -1044,27 +1042,27 @@ pub fn rpartition_at_end_test() {
// ============================================================================
pub fn splitn_basic_test() {
- assert core.splitn("a-b-c-d", "-", 2) == ["a", "b-c-d"]
+ assert str.splitn("a-b-c-d", "-", 2) == ["a", "b-c-d"]
}
pub fn splitn_three_test() {
- assert core.splitn("a-b-c-d", "-", 3) == ["a", "b", "c-d"]
+ assert str.splitn("a-b-c-d", "-", 3) == ["a", "b", "c-d"]
}
pub fn splitn_no_sep_test() {
- assert core.splitn("hello", "-", 5) == ["hello"]
+ assert str.splitn("hello", "-", 5) == ["hello"]
}
pub fn splitn_zero_test() {
- assert core.splitn("a-b-c", "-", 0) == []
+ assert str.splitn("a-b-c", "-", 0) == []
}
pub fn splitn_one_test() {
- assert core.splitn("a-b-c", "-", 1) == ["a-b-c"]
+ assert str.splitn("a-b-c", "-", 1) == ["a-b-c"]
}
pub fn splitn_exceeds_parts_test() {
- assert core.splitn("a-b", "-", 10) == ["a", "b"]
+ assert str.splitn("a-b", "-", 10) == ["a", "b"]
}
// ============================================================================
@@ -1072,35 +1070,35 @@ pub fn splitn_exceeds_parts_test() {
// ============================================================================
pub fn is_title_case_true_test() {
- assert core.is_title_case("Hello World") == True
+ assert str.is_title_case("Hello World") == True
}
pub fn is_title_case_false_lowercase_second_test() {
- assert core.is_title_case("Hello world") == False
+ assert str.is_title_case("Hello world") == False
}
pub fn is_title_case_all_caps_test() {
- assert core.is_title_case("HELLO WORLD") == False
+ assert str.is_title_case("HELLO WORLD") == False
}
pub fn is_title_case_single_word_test() {
- assert core.is_title_case("Hello") == True
+ assert str.is_title_case("Hello") == True
}
pub fn is_title_case_empty_test() {
- assert core.is_title_case("") == False
+ assert str.is_title_case("") == False
}
pub fn is_title_case_with_numbers_test() {
- assert core.is_title_case("Hello 123 World") == True
+ assert str.is_title_case("Hello 123 World") == True
}
pub fn is_title_case_with_emoji_test() {
- assert core.is_title_case("Hello 👋 World") == True
- assert core.is_title_case("👋 Hello World") == True
- assert core.is_title_case("Hello World 🎉") == True
+ assert str.is_title_case("Hello 👋 World") == True
+ assert str.is_title_case("👋 Hello World") == True
+ assert str.is_title_case("Hello World 🎉") == True
// Only emoji - no cased words, should be False
- assert core.is_title_case("🎉 👋 🌍") == False
+ assert str.is_title_case("🎉 👋 🌍") == False
}
// ============================================================================
@@ -1108,15 +1106,15 @@ pub fn is_title_case_with_emoji_test() {
// ============================================================================
pub fn fill_left_test() {
- assert core.fill("42", 5, "0", core.Left) == "00042"
+ assert str.fill("42", 5, "0", str.Left) == "00042"
}
pub fn fill_right_test() {
- assert core.fill("hi", 6, "*", core.Right) == "hi****"
+ assert str.fill("hi", 6, "*", str.Right) == "hi****"
}
pub fn fill_both_test() {
- assert core.fill("x", 5, "-", core.Both) == "--x--"
+ assert str.fill("x", 5, "-", str.Both) == "--x--"
}
// ============================================================================
@@ -1124,27 +1122,27 @@ pub fn fill_both_test() {
// ============================================================================
pub fn chunk_basic_test() {
- assert core.chunk("abcdefg", 2) == ["ab", "cd", "ef", "g"]
+ assert str.chunk("abcdefg", 2) == ["ab", "cd", "ef", "g"]
}
pub fn chunk_three_test() {
- assert core.chunk("hello", 3) == ["hel", "lo"]
+ assert str.chunk("hello", 3) == ["hel", "lo"]
}
pub fn chunk_emoji_test() {
- assert core.chunk("👨\u{200D}👩\u{200D}👧\u{200D}👦abc", 2) == ["👨\u{200D}👩\u{200D}👧\u{200D}👦a", "bc"]
+ assert str.chunk("👨\u{200D}👩\u{200D}👧\u{200D}👦abc", 2) == ["👨\u{200D}👩\u{200D}👧\u{200D}👦a", "bc"]
}
pub fn chunk_larger_than_text_test() {
- assert core.chunk("hi", 10) == ["hi"]
+ assert str.chunk("hi", 10) == ["hi"]
}
pub fn chunk_zero_size_test() {
- assert core.chunk("hello", 0) == []
+ assert str.chunk("hello", 0) == []
}
pub fn chunk_single_test() {
- assert core.chunk("abc", 1) == ["a", "b", "c"]
+ assert str.chunk("abc", 1) == ["a", "b", "c"]
}
// ============================================================================
@@ -1152,19 +1150,19 @@ pub fn chunk_single_test() {
// ============================================================================
pub fn starts_with_any_true_test() {
- assert core.starts_with_any("hello", ["hi", "he", "ho"]) == True
+ assert str.starts_with_any("hello", ["hi", "he", "ho"]) == True
}
pub fn starts_with_any_false_test() {
- assert core.starts_with_any("hello", ["bye", "world"]) == False
+ assert str.starts_with_any("hello", ["bye", "world"]) == False
}
pub fn starts_with_any_empty_list_test() {
- assert core.starts_with_any("test", []) == False
+ assert str.starts_with_any("test", []) == False
}
pub fn starts_with_any_exact_match_test() {
- assert core.starts_with_any("hello", ["hello"]) == True
+ assert str.starts_with_any("hello", ["hello"]) == True
}
// ============================================================================
@@ -1172,19 +1170,19 @@ pub fn starts_with_any_exact_match_test() {
// ============================================================================
pub fn ends_with_any_true_test() {
- assert core.ends_with_any("hello.txt", [".txt", ".md", ".gleam"]) == True
+ assert str.ends_with_any("hello.txt", [".txt", ".md", ".gleam"]) == True
}
pub fn ends_with_any_false_test() {
- assert core.ends_with_any("hello", ["bye", "world"]) == False
+ assert str.ends_with_any("hello", ["bye", "world"]) == False
}
pub fn ends_with_any_empty_list_test() {
- assert core.ends_with_any("test", []) == False
+ assert str.ends_with_any("test", []) == False
}
pub fn ends_with_any_exact_match_test() {
- assert core.ends_with_any("hello", ["hello"]) == True
+ assert str.ends_with_any("hello", ["hello"]) == True
}
// ============================================================================
@@ -1192,25 +1190,25 @@ pub fn ends_with_any_exact_match_test() {
// ============================================================================
pub fn normalize_whitespace_basic_test() {
- assert core.normalize_whitespace("hello   world") == "hello world"
+ assert str.normalize_whitespace("hello   world") == "hello world"
}
pub fn normalize_whitespace_tabs_newlines_test() {
- assert core.normalize_whitespace("a\t\nb") == "a b"
+ assert str.normalize_whitespace("a\t\nb") == "a b"
}
pub fn normalize_whitespace_leading_trailing_test() {
- assert core.normalize_whitespace(" foo bar baz ") == "foo bar baz"
+ assert str.normalize_whitespace(" foo bar baz ") == "foo bar baz"
}
pub fn normalize_whitespace_single_word_test() {
- assert core.normalize_whitespace("hello") == "hello"
+ assert str.normalize_whitespace("hello") == "hello"
}
pub fn normalize_whitespace_empty_test() {
- assert core.normalize_whitespace("") == ""
+ assert str.normalize_whitespace("") == ""
}
pub fn normalize_whitespace_only_spaces_test() {
- assert core.normalize_whitespace(" ") == ""
+ assert str.normalize_whitespace(" ") == ""
}
diff --git a/test/str_corpus_test.gleam b/test/str_corpus_test.gleam
index a030084..351c573 100644
--- a/test/str_corpus_test.gleam
+++ b/test/str_corpus_test.gleam
@@ -1,28 +1,28 @@
-import str/extra
+import str
// internal_decompose not needed directly here; folding covers decomposed inputs
pub fn corpus_french_test() {
- assert extra.ascii_fold("Élévation déjà") == "Elevation deja"
+ assert str.ascii_fold("Élévation déjà") == "Elevation deja"
}
pub fn corpus_polish_test() {
- assert extra.ascii_fold("Żywiec Łódź Gdańsk") == "Zywiec Lodz Gdansk"
+ assert str.ascii_fold("Żywiec Łódź Gdańsk") == "Zywiec Lodz Gdansk"
}
pub fn corpus_czech_test() {
- assert extra.ascii_fold("Příliš žluťoučký kůň") == "Prilis zlutoucky kun"
+ assert str.ascii_fold("Příliš žluťoučký kůň") == "Prilis zlutoucky kun"
}
pub fn corpus_slovak_test() {
// note: our folding is pragmatic; test expected approximations
- assert extra.ascii_fold("Žltý kôň Ťažký") == "Zlty kon Tazky"
+ assert str.ascii_fold("Žltý kôň Ťažký") == "Zlty kon Tazky"
}
pub fn corpus_lithuanian_test() {
- assert extra.ascii_fold("Žemėlapis ėė ąč") == "Zemelapis ee ac"
+ assert str.ascii_fold("Žemėlapis ėė ąč") == "Zemelapis ee ac"
}
pub fn corpus_latvian_test() {
- assert extra.ascii_fold("Ķekava Ēriks Ūdens") == "Kekava Eriks Udens"
+ assert str.ascii_fold("Ķekava Ēriks Ūdens") == "Kekava Eriks Udens"
}
diff --git a/test/str_extra_full_test.gleam b/test/str_extra_full_test.gleam
index 2ed0fd2..591aa38 100644
--- a/test/str_extra_full_test.gleam
+++ b/test/str_extra_full_test.gleam
@@ -1,57 +1,56 @@
-import str/core
-import str/extra
-import str/internal_decompose
+import str
+import str/internal/decompose
// ASCII fold basic mappings (precomposed)
pub fn ascii_fold_basic_test() {
- assert extra.ascii_fold("Å") == "A"
- assert extra.ascii_fold("æ") == "ae"
- assert extra.ascii_fold("œ") == "oe"
- assert extra.ascii_fold("ß") == "ss"
- assert extra.ascii_fold("Ł") == "L"
+ assert str.ascii_fold("Å") == "A"
+ assert str.ascii_fold("æ") == "ae"
+ assert str.ascii_fold("œ") == "oe"
+ assert str.ascii_fold("ß") == "ss"
+ assert str.ascii_fold("Ł") == "L"
}
// Decomposed input handling: decomposer expands and ascii_fold removes marks
pub fn ascii_fold_decomposed_test() {
- let dec = internal_decompose.decompose_latin("Å")
+ let dec = decompose.decompose_latin("Å")
// decomposed should contain combining ring
assert dec != "Å"
// ascii_fold should normalize decomposed to base
- assert extra.ascii_fold(dec) == "A"
+ assert str.ascii_fold(dec) == "A"
// ascii_fold_no_decompose should leave decomposed sequence intact
- assert extra.ascii_fold_no_decompose(dec) == dec
+ assert str.ascii_fold_no_decompose(dec) == dec
}
// slugify behavior with preserve_unicode flag
pub fn slugify_preserve_flag_test() {
- let s1 = extra.slugify_opts("Crème Brûlée", 0, "-", False)
+ let s1 = str.slugify_opts("Crème Brûlée", 0, "-", False)
assert s1 == "creme-brulee"
- let s2 = extra.slugify_opts("mañana niño", 0, "-", True)
+ let s2 = str.slugify_opts("mañana niño", 0, "-", True)
assert s2 == "mañana-niño"
- let s3 = extra.slugify_opts("hello world!!", 0, "-", False)
+ let s3 = str.slugify_opts("hello world!!", 0, "-", False)
assert s3 == "hello-world"
- let s4 = extra.slugify_opts("one two three four", 2, "-", False)
+ let s4 = str.slugify_opts("one two three four", 2, "-", False)
assert s4 == "one-two"
}
// Naming helpers
pub fn naming_helpers_test() {
- assert extra.to_snake_case("Hello World") == "hello_world"
- assert extra.to_kebab_case("Hello World") == "hello-world"
- assert extra.to_camel_case("hello world") == "helloWorld"
+ assert str.to_snake_case("Hello World") == "hello_world"
+ assert str.to_kebab_case("Hello World") == "hello-world"
+ assert str.to_camel_case("hello world") == "helloWorld"
}
// Truncation edge cases and suffix logic
pub fn truncation_suffix_test() {
- let t = core.truncate("hello", 3, "..")
+ let t = str.truncate("hello", 3, "..")
// max_len 3, suffix length 2 -> take 1 char then suffix
assert t == "h.."
// ensure preserve doesn't split grapheme sequences (ZWJ family)
let family = "👩\u{200D}👩\u{200D}👧\u{200D}👦 family"
- let p = core.truncate_preserve(family, 1, "")
+ let p = str.truncate_preserve(family, 1, "")
assert p == "👩\u{200D}👩\u{200D}👧\u{200D}👦"
}
diff --git a/test/str_extra_test.gleam b/test/str_extra_test.gleam
index f7c7ee8..1b05fbc 100644
--- a/test/str_extra_test.gleam
+++ b/test/str_extra_test.gleam
@@ -1,31 +1,31 @@
import gleam/list
import gleam/string
-import str/extra
-import str/internal_decompose
-import str/internal_translit
+import str
+import str/internal/decompose
+import str/internal/translit
pub fn ascii_fold_internal_helpers_test() {
// remove combining marks should strip acute accent
let with_comb = "e\u{0301}"
- let removed = internal_translit.remove_combining_marks(with_comb)
+ let removed = translit.remove_combining_marks(with_comb)
assert removed == "e"
// decompose Latin should expand common precomposed characters
- let dec = internal_decompose.decompose_latin("é")
+ let dec = decompose.decompose_latin("é")
// decomposer may produce e + combining acute; ensure combining exists
assert string.contains(dec, "\u{0301}")
}
pub fn ascii_fold_ligature_test() {
// ligature æ should transliterate to ae
- assert extra.ascii_fold("æ") == "ae" || extra.ascii_fold("Æ") == "AE"
+ assert str.ascii_fold("æ") == "ae" || str.ascii_fold("Æ") == "AE"
}
pub fn slugify_normalizer_behavior_test() {
// If preserve_unicode is True the normalizer must NOT be applied.
let s = "Café"
let noisy = fn(_) { "X" }
- let res = extra.slugify_opts_with_normalizer(s, -1, "-", True, noisy)
+ let res = str.slugify_opts_with_normalizer(s, -1, "-", True, noisy)
assert res == "café"
}
@@ -33,201 +33,201 @@ pub fn slugify_with_normalizer_token_limit_test() {
let s = "uno due tre quattro"
let fake = fn(x) { x }
// identity normalizer
- let slug = extra.slugify_opts_with_normalizer(s, 2, "-", False, fake)
+ let slug = str.slugify_opts_with_normalizer(s, 2, "-", False, fake)
assert slug == "uno-due"
}
pub fn camel_and_snake_tests() {
let s = " multiple separators__and--caps "
- let camel = extra.to_camel_case(s)
+ let camel = str.to_camel_case(s)
assert string.length(camel) > 0
assert string.slice(camel, 0, 1) != "-"
- let snake = extra.to_snake_case("Hello World")
+ let snake = str.to_snake_case("Hello World")
assert snake == "hello_world"
}
pub fn slugify_emoji_and_numbers_test() {
let s = "I ❤️ Gleam 2025"
- assert extra.slugify(s) == "i-gleam-2025"
+ assert str.slugify(s) == "i-gleam-2025"
}
pub fn ascii_fold_no_decompose_identity_test() {
// ascii_fold_no_decompose with identity normalizer should preserve precomposed handling
let s = "Ångström"
- let res = extra.ascii_fold_no_decompose_with_normalizer(s, fn(x) { x })
+ let res = str.ascii_fold_no_decompose_with_normalizer(s, fn(x) { x })
assert string.contains(res, "A") || string.contains(res, "Ang")
}
pub fn ascii_fold_with_normalizer_effect_test() {
// fake normalizer that turns ö into o + diaeresis combining
let fake = fn(x) { string.replace(x, "ö", "o\u{0308}") }
- let res = extra.ascii_fold_with_normalizer("schröder", fake)
+ let res = str.ascii_fold_with_normalizer("schröder", fake)
assert string.contains(res, "o")
}
pub fn to_kebab_equals_slugify_test() {
- assert extra.to_kebab_case("Hello World") == extra.slugify("Hello World")
+ assert str.to_kebab_case("Hello World") == str.slugify("Hello World")
}
pub fn ascii_fold_basic_test() {
- assert extra.ascii_fold("ÀÁÂÃÄÅ") == "AAAAAA"
- assert extra.ascii_fold("àáâãäå") == "aaaaaa"
+ assert str.ascii_fold("ÀÁÂÃÄÅ") == "AAAAAA"
+ assert str.ascii_fold("àáâãäå") == "aaaaaa"
}
pub fn ascii_fold_specials_test() {
- assert extra.ascii_fold("Çç") == "Cc"
- assert extra.ascii_fold("Ææß") == "AEaess"
+ assert str.ascii_fold("Çç") == "Cc"
+ assert str.ascii_fold("Ææß") == "AEaess"
}
pub fn slugify_basic_test() {
let s = "Hello, World!"
- assert extra.slugify(s) == "hello-world"
+ assert str.slugify(s) == "hello-world"
}
pub fn slugify_accent_test() {
let s = "Café déjà vu"
- assert extra.slugify(s) == "cafe-deja-vu"
+ assert str.slugify(s) == "cafe-deja-vu"
}
pub fn slugify_emoji_removed_test() {
let s = "I ❤️ Gleam"
- assert extra.slugify(s) == "i-gleam"
+ assert str.slugify(s) == "i-gleam"
}
pub fn slugify_multiple_separators_test() {
let s = "a--b__c"
- assert extra.slugify(s) == "a-b-c"
+ assert str.slugify(s) == "a-b-c"
}
pub fn slugify_numbers_test() {
let s = "2025 Year!"
- assert extra.slugify(s) == "2025-year"
+ assert str.slugify(s) == "2025-year"
}
pub fn to_snake_case_test() {
- assert extra.to_snake_case("Hello World") == "hello_world"
+ assert str.to_snake_case("Hello World") == "hello_world"
}
pub fn to_camel_case_test() {
let s = "Hello Fancy World"
- assert extra.to_camel_case(s) == "helloFancyWorld"
+ assert str.to_camel_case(s) == "helloFancyWorld"
}
pub fn camel_case_edge_test() {
let s = " multiple separators__and--caps "
- let c = extra.to_camel_case(s)
+ let c = str.to_camel_case(s)
assert string.length(c) > 0
assert string.slice(c, 0, 1) != "-"
}
pub fn slugify_trim_test() {
let s = "---Hello---"
- assert extra.slugify(s) == "hello"
+ assert str.slugify(s) == "hello"
}
pub fn slugify_preserves_digits_test() {
let s = "X1 Y2 Z3"
- assert extra.slugify(s) == "x1-y2-z3"
+ assert str.slugify(s) == "x1-y2-z3"
}
pub fn slugify_opts_max_len_test() {
let s = "a b c d e"
- assert extra.slugify_opts(s, 3, "-", False) == "a-b-c"
+ assert str.slugify_opts(s, 3, "-", False) == "a-b-c"
}
pub fn slugify_opts_sep_test() {
let s = "Hello World"
- assert extra.slugify_opts(s, -1, "_", False) == "hello_world"
+ assert str.slugify_opts(s, -1, "_", False) == "hello_world"
}
pub fn slugify_opts_preserve_unicode_true_test() {
let s = "Café ❤️ Gleam"
- assert extra.slugify_opts(s, -1, "-", True) == "café-❤️-gleam"
+ assert str.slugify_opts(s, -1, "-", True) == "café-❤️-gleam"
}
pub fn slugify_opts_preserve_unicode_false_test() {
let s = "Café ❤️ Gleam"
- assert extra.slugify_opts(s, -1, "-", False) == "cafe-gleam"
+ assert str.slugify_opts(s, -1, "-", False) == "cafe-gleam"
}
pub fn ascii_fold_german_test() {
- assert extra.ascii_fold("Müller") == "Muller"
- assert extra.ascii_fold("Größe") == "Grosse"
+ assert str.ascii_fold("Müller") == "Muller"
+ assert str.ascii_fold("Größe") == "Grosse"
}
pub fn ascii_fold_french_test() {
- assert extra.ascii_fold("français") == "francais"
- assert extra.ascii_fold("œuvre") == "oeuvre"
+ assert str.ascii_fold("français") == "francais"
+ assert str.ascii_fold("œuvre") == "oeuvre"
}
pub fn ascii_fold_spanish_test() {
- assert extra.ascii_fold("niño") == "nino"
- assert extra.ascii_fold("Aragón") == "Aragon"
+ assert str.ascii_fold("niño") == "nino"
+ assert str.ascii_fold("Aragón") == "Aragon"
}
pub fn ascii_fold_scandinavian_test() {
- assert extra.ascii_fold("Åse") == "Ase"
- assert extra.ascii_fold("Øystein") == "Oystein"
+ assert str.ascii_fold("Åse") == "Ase"
+ assert str.ascii_fold("Øystein") == "Oystein"
}
pub fn slugify_long_text_test() {
let s = "This is a very long title that should be truncated"
- let slug = extra.slugify_opts(s, 5, "-", False)
+ let slug = str.slugify_opts(s, 5, "-", False)
let parts = string.split(slug, "-")
assert list.length(parts) == 5
}
pub fn to_kebab_case_multiword_test() {
- assert extra.to_kebab_case("get User By Id") == "get-user-by-id"
+ assert str.to_kebab_case("get User By Id") == "get-user-by-id"
}
pub fn to_snake_case_caps_test() {
- assert extra.to_snake_case("getUserById") == "getuserbyid"
+ assert str.to_snake_case("getUserById") == "getuserbyid"
}
pub fn ascii_fold_no_decompose_precomposed_test() {
- let result = extra.ascii_fold_no_decompose("café")
+ let result = str.ascii_fold_no_decompose("café")
assert result == "cafe"
}
pub fn slugify_whitespace_normalization_test() {
let s = "Hello\t\n\rWorld"
- assert extra.slugify(s) == "hello-world"
+ assert str.slugify(s) == "hello-world"
}
pub fn slugify_punctuation_removal_test() {
let s = "Hello!@#$%World"
- assert extra.slugify(s) == "hello-world"
+ assert str.slugify(s) == "hello-world"
}
pub fn to_pascal_case_basic_test() {
- assert extra.to_pascal_case("hello world") == "HelloWorld"
+ assert str.to_pascal_case("hello world") == "HelloWorld"
}
pub fn to_pascal_case_multi_word_test() {
- assert extra.to_pascal_case("get user by id") == "GetUserById"
+ assert str.to_pascal_case("get user by id") == "GetUserById"
}
pub fn to_pascal_case_with_accents_test() {
- assert extra.to_pascal_case("café brûlée") == "CafeBrulee"
+ assert str.to_pascal_case("café brûlée") == "CafeBrulee"
}
pub fn to_pascal_case_with_separators_test() {
- assert extra.to_pascal_case("hello-world_test") == "HelloWorldTest"
+ assert str.to_pascal_case("hello-world_test") == "HelloWorldTest"
}
pub fn to_title_case_basic_test() {
- assert extra.to_title_case("hello world") == "Hello World"
+ assert str.to_title_case("hello world") == "Hello World"
}
pub fn to_title_case_multi_word_test() {
- assert extra.to_title_case("get user by id") == "Get User By Id"
+ assert str.to_title_case("get user by id") == "Get User By Id"
}
pub fn to_title_case_with_accents_test() {
- assert extra.to_title_case("café brûlée") == "Cafe Brulee"
+ assert str.to_title_case("café brûlée") == "Cafe Brulee"
}
pub fn to_title_case_with_separators_test() {
- assert extra.to_title_case("hello-world_test") == "Hello World Test"
+ assert str.to_title_case("hello-world_test") == "Hello World Test"
}
diff --git a/test/str_integration_test.gleam b/test/str_integration_test.gleam
index 6e84162..25987a0 100644
--- a/test/str_integration_test.gleam
+++ b/test/str_integration_test.gleam
@@ -1,14 +1,13 @@
import gleam/list
import gleam/string
-import str/core
-import str/extra
+import str
pub fn words_to_slug_chain_test() {
let s = " Café — 2025 "
- let folded = extra.ascii_fold(s)
- let words = core.words(folded)
+ let folded = str.ascii_fold(s)
+ let words = str.words(folded)
let slug =
- extra.slugify_opts_with_normalizer(folded, -1, "-", False, fn(x) { x })
+ str.slugify_opts_with_normalizer(folded, -1, "-", False, fn(x) { x })
assert list.length(words) >= 2
&& string.contains(slug, "cafe")
&& string.contains(slug, "2025")
@@ -16,78 +15,78 @@ pub fn words_to_slug_chain_test() {
pub fn truncate_then_slugify_chain_test() {
let s = "Hello, World! This is a longer sentence."
- let t = core.truncate_preserve(s, 12, "…")
- let slug = extra.slugify(t)
+ let t = str.truncate_preserve(s, 12, "…")
+ let slug = str.slugify(t)
assert string.length(slug) > 0
}
pub fn ascii_fold_then_camel_then_slug_chain_test() {
let s = " schröder & co "
- let folded = extra.ascii_fold(s)
- let camel = extra.to_camel_case(folded)
- let slug = extra.slugify(camel)
+ let folded = str.ascii_fold(s)
+ let camel = str.to_camel_case(folded)
+ let slug = str.slugify(camel)
assert string.contains(slug, "schroder")
|| string.contains(slug, "schroder-co")
}
pub fn pad_truncate_chain_test() {
let s = "hi"
- let padded = core.pad_left(s, 10, "*")
- let truncated = core.truncate_default(padded, 5)
+ let padded = str.pad_left(s, 10, "*")
+ let truncated = str.truncate_default(padded, 5)
assert string.length(truncated) <= 8
}
pub fn reverse_pad_reverse_chain_test() {
let s = "test"
- let reversed = core.reverse(s)
- let padded = core.pad_right(reversed, 8, " ")
- let final = core.reverse(padded)
+ let reversed = str.reverse(s)
+ let padded = str.pad_right(reversed, 8, " ")
+ let final = str.reverse(padded)
assert string.contains(final, "test")
}
pub fn words_count_chain_test() {
let s = "hello world hello"
- let words = core.words(s)
+ let words = str.words(s)
let joined = list.fold(words, "", fn(acc, w) { acc <> w })
- let count = core.count(joined, "hello", False)
+ let count = str.count(joined, "hello", False)
assert count == 2
}
pub fn slugify_truncate_chain_test() {
let s = "Very Long Title With Many Words Here"
- let slug = extra.slugify_opts(s, 4, "-", False)
- let truncated = core.truncate_default(slug, 15)
+ let slug = str.slugify_opts(s, 4, "-", False)
+ let truncated = str.truncate_default(slug, 15)
assert string.length(truncated) > 0
}
pub fn ascii_fold_surround_unwrap_chain_test() {
let s = "Café"
- let folded = extra.ascii_fold(s)
- let surrounded = core.surround(folded, "[", "]")
- let unwrapped = core.unwrap(surrounded, "[", "]")
+ let folded = str.ascii_fold(s)
+ let surrounded = str.surround(folded, "[", "]")
+ let unwrapped = str.unwrap(surrounded, "[", "]")
assert unwrapped == "Cafe"
}
pub fn naming_conventions_chain_test() {
let s = "get user by id"
- let camel = extra.to_camel_case(s)
- let snake = extra.to_snake_case(camel)
- let kebab = extra.to_kebab_case(snake)
+ let camel = str.to_camel_case(s)
+ let snake = str.to_snake_case(camel)
+ let kebab = str.to_kebab_case(snake)
assert string.length(kebab) > 0
}
pub fn center_reverse_center_chain_test() {
let s = "test"
- let centered = core.center(s, 10, " ")
- let reversed = core.reverse(centered)
- let recentered = core.center(reversed, 12, "-")
+ let centered = str.center(s, 10, " ")
+ let reversed = str.reverse(centered)
+ let recentered = str.center(reversed, 12, "-")
assert string.length(recentered) >= 12
}
pub fn words_slugify_compare_chain_test() {
let s = "Hello Beautiful World"
- let words = core.words(s)
- let slug = extra.slugify(s)
+ let words = str.words(s)
+ let slug = str.slugify(s)
assert list.length(words) == 3
assert string.contains(slug, "beautiful")
}
@@ -95,9 +94,9 @@ pub fn words_slugify_compare_chain_test() {
pub fn multiple_ascii_fold_chain_test() {
let s1 = "Crème"
let s2 = "Brûlée"
- let f1 = extra.ascii_fold(s1)
- let f2 = extra.ascii_fold(s2)
+ let f1 = str.ascii_fold(s1)
+ let f2 = str.ascii_fold(s2)
let combined = f1 <> " " <> f2
- let slug = extra.slugify(combined)
+ let slug = str.slugify(combined)
assert slug == "creme-brulee"
}
diff --git a/test/str_kmp_cache_test.gleam b/test/str_kmp_cache_test.gleam
index ac9375a..e69e441 100644
--- a/test/str_kmp_cache_test.gleam
+++ b/test/str_kmp_cache_test.gleam
@@ -1,30 +1,30 @@
import gleam/list
-import str/core
+import str
pub fn kmp_maps_reuse_index_test() {
let pat = repeat("ab", 50)
- let maps = core.build_kmp_maps(pat)
+ let maps = str.build_kmp_maps(pat)
let pmap = maps.0
let pimap = maps.1
let text1 = repeat("ab", 200)
let text2 = "xxxx" <> repeat("ab", 100) <> "yyyy"
- assert core.kmp_index_of_with_maps(text1, pat, pmap, pimap)
- == core.kmp_index_of(text1, pat)
- assert core.kmp_index_of_with_maps(text2, pat, pmap, pimap)
- == core.kmp_index_of(text2, pat)
+ assert str.kmp_index_of_with_maps(text1, pat, pmap, pimap)
+ == str.kmp_index_of(text1, pat)
+ assert str.kmp_index_of_with_maps(text2, pat, pmap, pimap)
+ == str.kmp_index_of(text2, pat)
}
pub fn kmp_maps_reuse_search_all_test() {
let pat = repeat("aba", 30)
- let maps = core.build_kmp_maps(pat)
+ let maps = str.build_kmp_maps(pat)
let pmap = maps.0
let pimap = maps.1
let text = repeat("aba", 200)
- assert core.kmp_search_all_with_maps(text, pmap, pimap)
- == core.kmp_search_all(text, pat)
+ assert str.kmp_search_all_with_maps(text, pmap, pimap)
+ == str.kmp_search_all(text, pat)
}
fn repeat(s: String, n: Int) -> String {
diff --git a/test/str_kmp_test.gleam b/test/str_kmp_test.gleam
index 0ea79a4..b9d0434 100644
--- a/test/str_kmp_test.gleam
+++ b/test/str_kmp_test.gleam
@@ -1,31 +1,31 @@
-import str/core
+import str
pub fn build_prefix_table_basic_test() {
let p = "ababaca"
- assert core.build_prefix_table(p) == [0, 0, 1, 2, 3, 0, 1]
+ assert str.build_prefix_table(p) == [0, 0, 1, 2, 3, 0, 1]
}
pub fn kmp_simple_test() {
let text = "ababa"
let pat = "aba"
- assert core.kmp_search_all(text, pat) == [0, 2]
+ assert str.kmp_search_all(text, pat) == [0, 2]
}
pub fn kmp_overlapping_test() {
let text = "aaaa"
let pat = "aa"
- assert core.kmp_search_all(text, pat) == [0, 1, 2]
+ assert str.kmp_search_all(text, pat) == [0, 1, 2]
}
pub fn kmp_emoji_test() {
// emoji sequence as single grapheme cluster
let e = "👨👩👧👦"
let text = e <> "x" <> e
- assert core.kmp_search_all(text, e) == [0, 2]
+ assert str.kmp_search_all(text, e) == [0, 2]
}
pub fn kmp_empty_pattern_test() {
let text = "hello"
let pat = ""
- assert core.kmp_search_all(text, pat) == []
+ assert str.kmp_search_all(text, pat) == []
}
diff --git a/test/str_multilingual_test.gleam b/test/str_multilingual_test.gleam
index 53cd3c4..75f86b5 100644
--- a/test/str_multilingual_test.gleam
+++ b/test/str_multilingual_test.gleam
@@ -1,54 +1,54 @@
import gleam/list
import gleam/string
-import str/extra
-import str/internal_decompose
+import str
+import str/internal/decompose
// French examples
pub fn french_fold_and_slug_test() {
let s = "Crème Brûlée — déjà vu"
- assert extra.ascii_fold(s) == "Creme Brulee — deja vu"
- let slug = extra.slugify_opts(s, 0, "-", False)
+ assert str.ascii_fold(s) == "Creme Brulee — deja vu"
+ let slug = str.slugify_opts(s, 0, "-", False)
assert slug == "creme-brulee-deja-vu"
}
// Polish examples
pub fn polish_fold_and_decomposed_test() {
let s = "Gdańsk Łódź Żółć"
- assert extra.ascii_fold(s) == "Gdansk Lodz Zolc"
+ assert str.ascii_fold(s) == "Gdansk Lodz Zolc"
- let dec = internal_decompose.decompose_latin("Łódź")
- assert extra.ascii_fold(dec) == "Lodz"
+ let dec = decompose.decompose_latin("Łódź")
+ assert str.ascii_fold(dec) == "Lodz"
}
// Scandinavian (Norwegian / Swedish / Danish)
pub fn scandi_test() {
let s = "Smörgåsbord Ærø Ångström Øresund"
- assert extra.ascii_fold(s) == "Smorgasbord AEro Angstrom Oresund"
- let slug = extra.slugify_opts(s, 0, "-", False)
+ assert str.ascii_fold(s) == "Smorgasbord AEro Angstrom Oresund"
+ let slug = str.slugify_opts(s, 0, "-", False)
assert slug == "smorgasbord-aero-angstrom-oresund"
}
// Romanian and Turkish
pub fn rom_turk_test() {
let r = "Țări Șosea"
- assert extra.ascii_fold(r) == "Tari Sosea"
+ assert str.ascii_fold(r) == "Tari Sosea"
let t = "Şişli İzmir"
- assert extra.ascii_fold(t) == "Sisli Izmir"
+ assert str.ascii_fold(t) == "Sisli Izmir"
}
// Icelandic
pub fn icelandic_test() {
let s = "Þingvellirmaður ð"
// Current transliteration maps "Þ" -> "TH"
- assert extra.ascii_fold(s) == "THingvellirmadur d"
+ assert str.ascii_fold(s) == "THingvellirmadur d"
}
// Complex combined case: mixing emoji and diacritics and decomposed input
pub fn complex_mixed_test() {
let mixed = "👩\u{200D}👩\u{200D}👧 café — Ångström"
// preserve unicode for emoji, fold accents
- let folded = extra.ascii_fold(mixed)
+ let folded = str.ascii_fold(mixed)
let gs = string.to_graphemes(folded)
let firsts = list.take(gs, 1)
let ok = case firsts {
@@ -59,6 +59,6 @@ pub fn complex_mixed_test() {
assert string.contains(folded, "Angstrom")
// decomposed sequence for Å
- let dec = internal_decompose.decompose_latin("Ångström")
- assert extra.ascii_fold(dec) == "Angstrom"
+ let dec = decompose.decompose_latin("Ångström")
+ assert str.ascii_fold(dec) == "Angstrom"
}
diff --git a/test/str_normalizer_integration_test.gleam b/test/str_normalizer_integration_test.gleam
index 5225ce6..42d83a2 100644
--- a/test/str_normalizer_integration_test.gleam
+++ b/test/str_normalizer_integration_test.gleam
@@ -1,12 +1,12 @@
import gleam/string
-import str/extra
+import str
pub fn ascii_fold_with_normalizer_identity_test() {
// Passing the identity function should produce the same result as the
// existing `ascii_fold` path.
let s = "Crème"
- let res1 = extra.ascii_fold_with_normalizer(s, fn(x) { x })
- let res2 = extra.ascii_fold(s)
+ let res1 = str.ascii_fold_with_normalizer(s, fn(x) { x })
+ let res2 = str.ascii_fold(s)
assert res1 == res2
}
@@ -17,6 +17,6 @@ pub fn slugify_opts_with_normalizer_fake_nfd_test() {
let fake_nfd = fn(x) { string.replace(x, "é", "e\u{0301}") }
let s = "Café ❤️"
- let slug = extra.slugify_opts_with_normalizer(s, 0, "-", False, fake_nfd)
+ let slug = str.slugify_opts_with_normalizer(s, 0, "-", False, fake_nfd)
assert slug == "cafe"
}
diff --git a/test/str_sliding_test.gleam b/test/str_sliding_test.gleam
index 29d64d1..86da545 100644
--- a/test/str_sliding_test.gleam
+++ b/test/str_sliding_test.gleam
@@ -1,25 +1,25 @@
-import str/core
+import str
pub fn sliding_simple_test() {
let text = "ababa"
let pat = "aba"
- assert core.sliding_search_all(text, pat) == [0, 2]
+ assert str.sliding_search_all(text, pat) == [0, 2]
}
pub fn sliding_overlapping_test() {
let text = "aaaa"
let pat = "aa"
- assert core.sliding_search_all(text, pat) == [0, 1, 2]
+ assert str.sliding_search_all(text, pat) == [0, 1, 2]
}
pub fn sliding_emoji_test() {
let e = "👨👩👧👦"
let text = e <> "x" <> e
- assert core.sliding_search_all(text, e) == [0, 2]
+ assert str.sliding_search_all(text, e) == [0, 2]
}
pub fn sliding_empty_pattern_test() {
let text = "hello"
let pat = ""
- assert core.sliding_search_all(text, pat) == []
+ assert str.sliding_search_all(text, pat) == []
}
diff --git a/test/str_strategy_explicit_test.gleam b/test/str_strategy_explicit_test.gleam
index 045a5f4..f9cc499 100644
--- a/test/str_strategy_explicit_test.gleam
+++ b/test/str_strategy_explicit_test.gleam
@@ -1,31 +1,32 @@
import gleam/list
-import str/core
+import str
+import str/internal/core
pub fn index_of_strategy_sliding_test() {
let text = "hello world"
let pat = "world"
- assert core.index_of_strategy(text, pat, core.Sliding)
- == core.sliding_index_of(text, pat)
+ assert str.index_of_strategy(text, pat, str.Sliding)
+ == str.sliding_index_of(text, pat)
}
pub fn index_of_strategy_kmp_test() {
let text = repeat("ab", 100)
let pat = repeat("ab", 50)
- assert core.index_of_strategy(text, pat, core.Kmp)
- == core.kmp_index_of(text, pat)
+ assert str.index_of_strategy(text, pat, str.Kmp)
+ == str.kmp_index_of(text, pat)
}
pub fn count_strategy_sliding_test() {
let text = "aaaa"
let pat = "aa"
- assert core.count_strategy(text, pat, True, core.Sliding)
+ assert str.count_strategy(text, pat, True, str.Sliding)
== list.length(core.sliding_search_all(text, pat))
}
pub fn count_strategy_kmp_test() {
let text = repeat("ab", 100)
let pat = repeat("ab", 5)
- assert core.count_strategy(text, pat, True, core.Kmp)
+ assert str.count_strategy(text, pat, True, str.Kmp)
== list.length(core.kmp_search_all(text, pat))
}
diff --git a/test/str_strategy_test.gleam b/test/str_strategy_test.gleam
index daad075..6c01b16 100644
--- a/test/str_strategy_test.gleam
+++ b/test/str_strategy_test.gleam
@@ -1,33 +1,33 @@
import gleam/list
-import str/core
+import str
pub fn choose_strategy_small_random_test() {
let text = "abcdefghij"
let pat = "cd"
- assert core.choose_search_strategy(text, pat) == core.Sliding
+ assert str.choose_search_strategy(text, pat) == str.Sliding
}
pub fn choose_strategy_large_pat_test() {
let text = repeat("a", 100)
let pat = repeat("a", 100)
- assert core.choose_search_strategy(text, pat) == core.Kmp
+ assert str.choose_search_strategy(text, pat) == str.Kmp
}
pub fn choose_strategy_long_text_small_pat_test() {
let text = repeat("a", 200_000)
let pat = "abcdabcd"
- assert core.choose_search_strategy(text, pat) == core.Kmp
+ assert str.choose_search_strategy(text, pat) == str.Kmp
}
pub fn choose_strategy_repetitive_border_test() {
// pattern with large border: 'abababab...'
let pat = repeat("ab", 50)
let text = repeat("ab", 1000)
- assert core.choose_search_strategy(text, pat) == core.Kmp
+ assert str.choose_search_strategy(text, pat) == str.Kmp
}
pub fn choose_strategy_empty_pattern_test() {
- assert core.choose_search_strategy("hello", "") == core.Sliding
+ assert str.choose_search_strategy("hello", "") == str.Sliding
}
fn repeat(s: String, n: Int) -> String {
diff --git a/test/str_tokenize_compare_test.gleam b/test/str_tokenize_compare_test.gleam
index 396dff5..2204560 100644
--- a/test/str_tokenize_compare_test.gleam
+++ b/test/str_tokenize_compare_test.gleam
@@ -1,5 +1,5 @@
import gleam/list
-import str/tokenize
+import str
pub fn chars_vs_stdlib_length_test() {
// Examples where we expect close agreement
@@ -14,14 +14,14 @@ pub fn chars_vs_stdlib_length_test() {
]
let check_eq = fn(s) {
- let a = tokenize.chars(s)
- let b = tokenize.chars_stdlib(s)
+ let a = str.chars(s)
+ let b = str.chars_stdlib(s)
assert list.length(a) == list.length(b)
}
let check_diff = fn(s) {
- let a = tokenize.chars(s)
- let b = tokenize.chars_stdlib(s)
+ let a = str.chars(s)
+ let b = str.chars_stdlib(s)
// For complex ZWJ/emoji sequences we allow differences, but both must
// produce at least one grapheme cluster.
assert list.length(a) >= 1
diff --git a/test/str_unicode_test.gleam b/test/str_unicode_test.gleam
index 914d0de..28dc771 100644
--- a/test/str_unicode_test.gleam
+++ b/test/str_unicode_test.gleam
@@ -1,9 +1,8 @@
-import str/core
-import str/extra
+import str
// NFC input should fold to ASCII base
pub fn ascii_fold_nfc_test() {
- let r = extra.ascii_fold("á")
+ let r = str.ascii_fold("á")
assert r == "a"
}
@@ -11,20 +10,20 @@ pub fn ascii_fold_nfc_test() {
pub fn ascii_fold_nfd_test() {
// 'a' + combining acute accent
let decomposed = "a\u{0301}"
- let r = extra.ascii_fold(decomposed)
+ let r = str.ascii_fold(decomposed)
assert r == "a"
}
// slugify should handle accented input the same way
pub fn slugify_accents_test() {
- let s = extra.slugify_opts("Crème Brûlée", 0, "-", False)
+ let s = str.slugify_opts("Crème Brûlée", 0, "-", False)
assert s == "creme-brulee"
}
// Truncate preserve should not split a ZWJ family emoji when asking for 1 cluster
pub fn zwj_preserve_test() {
let s = "👩\u{200D}👩\u{200D}👧\u{200D}👦 family"
- let t = core.truncate_preserve(s, 1, "")
+ let t = str.truncate_preserve(s, 1, "")
// The preserved cluster should be the family emoji
assert t == "👩\u{200D}👩\u{200D}👧\u{200D}👦"
}