diff --git a/src/datasets/dataset15.jl b/src/datasets/dataset15.jl index e4499e4..94c69c5 100644 --- a/src/datasets/dataset15.jl +++ b/src/datasets/dataset15.jl @@ -54,7 +54,7 @@ function parse_dataset15(io) color = Int[] node_coords = Vector{Float64}[] - while (r1 = readline(io)) != " -1" + while (r1 = readline(io))[1:6] != " -1" nid, dcs, disp_cs, col, x, y, z = @scanf(r1, "%10d%10d%10d%10d%13e%13e%13e", Int, Int, Int, Int, Float64, Float64, Float64)[2:end] push!(node_ID, nid) diff --git a/src/datasets/dataset18.jl b/src/datasets/dataset18.jl index 4c4b318..1999671 100644 --- a/src/datasets/dataset18.jl +++ b/src/datasets/dataset18.jl @@ -81,7 +81,7 @@ function parse_dataset18(io) cs_x = Vector{Float64}[] cs_xz = Vector{Float64}[] - while (line = readline(io)) != " -1" + while (line = readline(io))[1:6] != " -1" # Record 1 - FORMAT(5I10) csn, cst, ref_csn, col, md = @scanf(line, "%10d%10d%10d%10d%10d", Int, Int, Int, Int, Int)[2:end] # csn, cst, ref_csn, col, md = parse.(Int, split(line)) diff --git a/src/datasets/dataset2411.jl b/src/datasets/dataset2411.jl index ad06e7b..ab87b63 100644 --- a/src/datasets/dataset2411.jl +++ b/src/datasets/dataset2411.jl @@ -57,7 +57,7 @@ function parse_dataset2411(io) color = similar(nodes_ID) node_coords = Vector{Float64}[] - while (r1 = readline(io)) != " -1" + while (r1 = readline(io))[1:6] != " -1" # Record 1: 4I10 nid, cs, dcs, col = @scanf(r1, "%10d%10d%10d%10d", Int, Int, Int, Int)[2:end] push!(nodes_ID, nid) diff --git a/src/datasets/dataset2412.jl b/src/datasets/dataset2412.jl index 74d7946..13f6dfe 100644 --- a/src/datasets/dataset2412.jl +++ b/src/datasets/dataset2412.jl @@ -249,7 +249,7 @@ function parse_dataset2412(io) # reset(io) # block = String[] - # while (line = readline(io)) != " -1" + # while (line = readline(io))[1:6] != " -1" # push!(block, line) # end @@ -266,7 +266,7 @@ function parse_dataset2412(io) # nlines = length(block) nodes_elt = Int[] # while i ≤ nlines - while (r1 = readline(io)) != " -1" + while 
(r1 = readline(io))[1:6] != " -1" # Record 1 - (6I10) elt, fed, pprop, mprop, col, nnode = @scanf(r1, "%10d%10d%10d%10d%10d%10d", Int, Int, Int, Int, Int, Int)[2:end] # elt, fed, pprop, mprop, col, nnode = parse.(Int, split(block[i])) diff --git a/src/datasets/dataset2414.jl b/src/datasets/dataset2414.jl index 1167e8a..6276033 100644 --- a/src/datasets/dataset2414.jl +++ b/src/datasets/dataset2414.jl @@ -854,7 +854,7 @@ function parse_dataset2414(io) data_exp = 1 total_vals = ndv _values = similar(eltype(data_value_raw), 0) - while (r14 = readline(io)) != " -1" + while (r14 = readline(io))[1:6] != " -1" # Record 14 push!(data_info_raw, parse.(Int, split(strip(r14)))) diff --git a/src/datasets/dataset55.jl b/src/datasets/dataset55.jl index 0b2ef70..0a43e3d 100644 --- a/src/datasets/dataset55.jl +++ b/src/datasets/dataset55.jl @@ -462,7 +462,7 @@ function parse_dataset55(io) # Start parsing from Record 9 and 10 _data = similar(eltype(data), 0) - while (r9 = readline(io)) != " -1" + while (r9 = readline(io))[1:6] != " -1" # Record 9 - Format (I10) push!(node_number, parse(Int, strip(r9))) diff --git a/src/datasets/dataset58b.jl b/src/datasets/dataset58b.jl index 7a4e5c3..655222d 100644 --- a/src/datasets/dataset58b.jl +++ b/src/datasets/dataset58b.jl @@ -102,10 +102,10 @@ function parse_dataset58b(io) Int, Char, Int, Int, Int, Int, Int, Int, Int, Int)[3:end] # Need to implement proper error handling - type == 'b' || error("Expected UFF58 binary file but type is $type") - endian == 1 || println("Only implemented for Little Endian") - floating_point_format == 2 || println("Only implemented for IEEE 754") - num_ascii_lines == 11 || println("Header not correct") + type == 'b' || throw(ArgumentError("Expected UFF58 binary file but type is $type")) + endian == 1 || throw(ArgumentError("Only implemented for Little Endian")) + floating_point_format == 2 || throw(ArgumentError("Only implemented for IEEE 754")) + num_ascii_lines == 11 || throw(ArgumentError("Header not
correct number of lines")) id1 = strip(readline(io)) id2 = strip(readline(io)) diff --git a/src/datasets/dataset82.jl b/src/datasets/dataset82.jl index 8eb2b73..a607c5a 100644 --- a/src/datasets/dataset82.jl +++ b/src/datasets/dataset82.jl @@ -70,7 +70,7 @@ function parse_dataset82(io) # Record 3 - FORMAT(8I10) line_nodes = Int[] - while (line = readline(io)) != " -1" + while (line = readline(io))[1:6] != " -1" append!(line_nodes, parse.(Int, split(line))) end diff --git a/src/read_write_uff.jl b/src/read_write_uff.jl index 5890ee5..8b38dbb 100644 --- a/src/read_write_uff.jl +++ b/src/read_write_uff.jl @@ -11,7 +11,7 @@ Reads a UFF (Universal File Format) file and parses its contents into a vector o """ function readuff(filename::String) - file_extension = split(filename, ".")[end] + file_extension = lstrip(splitext(filename)[end], '.') if !(file_extension in supported_file_extensions()) throw(ArgumentError("File extension .$file_extension may not be supported for UFF files.")) end @@ -64,7 +64,7 @@ Writes a vector of UFFDataset objects to a UFF file. - `w58b::Bool`: Optional flag to indicate if Dataset58 format must be written in binary format (default: false). """ function writeuff(filename::String, datasets; w58b::Bool = false) - file_extension = split(filename, ".")[end] + file_extension = lstrip(splitext(filename)[end], '.') if !(file_extension in supported_file_extensions()) throw(ArgumentError("File extension .$file_extension may not be supported for UFF files.")) end