diff --git a/src/analyses/creationLockset.ml b/src/analyses/creationLockset.ml index 118dc2d590..8998b5bc39 100644 --- a/src/analyses/creationLockset.ml +++ b/src/analyses/creationLockset.ml @@ -20,7 +20,7 @@ module Spec = struct include StdV end - module G = MapDomain.MapBot (TID) (Lockset) + module G = Queries.CL let name () = "creationLockset" let startstate _ = D.bot () @@ -33,46 +33,19 @@ module Spec = struct *) let contribute_locks man to_contribute child_tid = man.sideg child_tid to_contribute - (** reflexive-transitive closure of child relation applied to [tid] - and filtered to only include threads, where [tid] is a must-ancestor - @param man any man - @param tid - *) - let must_ancestor_descendants_closure man tid = - let descendants = man.ask @@ Queries.DescendantThreads tid in - let must_ancestors_descendants = TIDs.filter (TID.must_be_ancestor tid) descendants in - TIDs.add tid must_ancestors_descendants - let threadspawn man ~multiple lval f args fman = let tid_lifted = man.ask Queries.CurrentThreadId in let child_tid_lifted = fman.ask Queries.CurrentThreadId in match tid_lifted, child_tid_lifted with | `Lifted tid, `Lifted child_tid when TID.must_be_ancestor tid child_tid -> - let must_ancestor_descendants = must_ancestor_descendants_closure fman child_tid in + let must_ancestor_descendants = + ThreadDescendants.must_ancestor_descendants_closure (ask_of_man fman) child_tid + in let lockset = man.ask Queries.MustLockset in let to_contribute = G.singleton tid lockset in TIDs.iter (contribute_locks man to_contribute) must_ancestor_descendants | _ -> () - (** compute all descendant threads that may run along with the ego thread at a program point. - for all of them, tid must be an ancestor - @param man man of ego thread at the program point - @param tid ego thread id - *) - let get_must_ancestor_running_descendants man tid = - let may_created_tids = man.ask Queries.CreatedThreads in - let may_must_ancestor_created_tids = - TIDs.filter (TID.must_be_ancestor tid) may_created_tids - in - let may_transitively_created_tids = - TIDs.fold - (fun child_tid acc -> TIDs.union acc (must_ancestor_descendants_closure man child_tid)) - may_must_ancestor_created_tids - (TIDs.empty ()) - in - let must_joined_tids = man.ask Queries.MustJoinedThreads in - TIDs.diff may_transitively_created_tids must_joined_tids - (** handle unlock of mutex [lock] *) let unlock man tid possibly_running_tids lock = let shrink_locksets des_tid = @@ -98,7 +71,9 @@ module Spec = struct let tid_lifted = man.ask Queries.CurrentThreadId in (match tid_lifted with | `Lifted tid -> - let possibly_running_tids = get_must_ancestor_running_descendants man tid in + let possibly_running_tids = + ThreadDescendants.must_ancestor_running_descendants (ask_of_man man) tid + in let lock_opt = LockDomain.MustLock.of_addr addr in (match lock_opt with | Some lock -> unlock man tid possibly_running_tids lock @@ -148,6 +123,11 @@ module Spec = struct let creation_lockset = man.global tid in tid, lockset, creation_lockset | _ -> ThreadIdDomain.UnknownThread, Lockset.empty (), G.empty () + + let query man (type a) (x : a Queries.t) : a Queries.result = + match x with + | Queries.CreationLockset t -> (man.global t : G.t) + | _ -> Queries.Result.top x end let _ = diff --git a/src/analyses/descendantLockset.ml b/src/analyses/descendantLockset.ml new file mode 100644 index 0000000000..632be40165 --- /dev/null +++ b/src/analyses/descendantLockset.ml @@ -0,0 +1,180 @@ +(** descendant lockset analysis [descendantLockset] + analyzes a 
happened-before relationship related to thread creations with mutexes held. + + Enabling [creationLockset] may improve the precision of this analysis. + + @see https://github.com/goblint/analyzer/pull/1923 +*) + +open Analyses +module TID = ThreadIdDomain.Thread +module TIDs = ConcDomain.ThreadSet +module Lockset = LockDomain.MustLockset +module TidToLocksetMapTop = MapDomain.MapTop (TID) (Lockset) + +module Spec = struct + include IdentityUnitContextsSpec + + (** [{ t_d |-> L }] + + [t_d] was transitively created with all members of [L] held. + Additionally, no member of [L] could have been unlocked after the creation of [t_d] + *) + module D = MapDomain.MapBot (TID) (Lockset) + + (** [{ t_0 |-> { t_d |-> L } }] + + [{ t_d |-> L }] is the descendant lockset valid for the [V] value, + because [t_d] was created in [t_0] with the lockset being a superset of L. + + We suspect [MapBot] to suffice for the inner map. To ensure soundness, we use [MapTop] instead + *) + module G = MapDomain.MapBot (TID) (TidToLocksetMapTop) + + module V = struct + include TID + include StdV + end + + let name () = "descendantLockset" + let startstate _ = D.empty () + let exitstate _ = D.empty () + + let threadspawn_contribute_globals man tid must_ancestor_descendants = + let descendant_lockset = man.local in + let contribute_for_descendant t_d = + let creation_lockset = man.ask @@ Queries.CreationLockset t_d in + let to_contribute = + D.fold + (fun t_l l_dl acc -> + let l_cl = Queries.CL.find tid creation_lockset in + let l_inter = Lockset.inter l_cl l_dl in + TidToLocksetMapTop.add t_l l_inter acc) + descendant_lockset + (TidToLocksetMapTop.empty ()) + in + man.sideg t_d (G.singleton tid to_contribute) + in + TIDs.iter contribute_for_descendant must_ancestor_descendants + + let threadspawn_compute_local_contribution man tid must_ancestor_descendants = + let lockset = man.ask Queries.MustLockset in + TIDs.fold + (fun t_d -> D.join (D.singleton t_d lockset)) + must_ancestor_descendants + man.local + + let threadspawn man ~multiple lval f args fman = + let tid_lifted = man.ask Queries.CurrentThreadId in + let child_tid_lifted = fman.ask Queries.CurrentThreadId in + match tid_lifted, child_tid_lifted with + | `Lifted tid, `Lifted child_tid when TID.must_be_ancestor tid child_tid -> + let must_ancestor_descendants = + ThreadDescendants.must_ancestor_descendants_closure (ask_of_man fman) child_tid + in + threadspawn_contribute_globals man tid must_ancestor_descendants; + threadspawn_compute_local_contribution man tid must_ancestor_descendants + | _ -> man.local + + let unlock man possibly_running_tids lock = + TIDs.fold + (fun des_tid -> + let old_value = D.find des_tid man.local in + let new_value = Lockset.remove lock old_value in + D.add des_tid new_value) + possibly_running_tids + (D.empty ()) + + let unknown_unlock man possibly_running_tids = + TIDs.fold + (fun des_tid -> D.add des_tid (Lockset.empty ())) + possibly_running_tids + (D.empty ()) + + let event man e _ = + match e with + | Events.Unlock addr -> + let tid_lifted = man.ask Queries.CurrentThreadId in + (match tid_lifted with + | `Lifted tid -> + let possibly_running_tids = + ThreadDescendants.must_ancestor_running_descendants (ask_of_man man) tid + in + let lock_opt = LockDomain.MustLock.of_addr addr in + (match lock_opt with + | Some lock -> unlock man possibly_running_tids lock + | None -> unknown_unlock man possibly_running_tids) + | _ -> man.local) + | _ -> man.local + + module A = struct + module DlLhProd = Printable.Prod3 (D) (G) (Queries.LH) + + 
(** ego tid * (local descendant lockset * global descendant lockset * lock history) *)
+    include Printable.Prod (TID) (DlLhProd)
+
+    module type TidToLs = MapDomain.S with type key = TID.t and type value = Lockset.t
+
+    (** checks if program point 1 must happen before program point 2
+        @param (t1,dl1) thread id and descendant lockset of program point 1
+        @param (t2,lh2) thread id and mustlock history of program point 2
+        @param M module of [dl1]
+    *)
+    let happens_before (type t) (t1, dl1) (t2, lh2) (module M : TidToLs with type t = t) =
+      let locks_held_creating_t2 = M.find t2 dl1 in
+      if Lockset.is_bot locks_held_creating_t2
+      then false
+      else (
+        let relevant_lh2_threads =
+          Lockset.fold
+            (fun lock -> TIDs.union (Queries.LH.find lock lh2))
+            locks_held_creating_t2
+            (TIDs.empty ())
+        in
+        TIDs.exists
+          (fun t_lh ->
+            TID.must_be_ancestor t1 t_lh
+            && (TID.equal t_lh t2 || TID.must_be_ancestor t_lh t2))
+          relevant_lh2_threads)
+
+    (** checks if the entire execution of a thread must happen before a program point
+        @param dlg1 global descendant lockset of the thread
+        @param (t2,lh2) thread id and mustlock history of the program point
+    *)
+    let happens_before_global dlg1 (t2, lh2) =
+      G.exists
+        (fun t dl_map -> happens_before (t, dl_map) (t2, lh2) (module TidToLocksetMapTop))
+        dlg1
+
+    let may_race (t1, (dl1, dlg1, lh1)) (t2, (dl2, dlg2, lh2)) =
+      not
+        (happens_before (t1, dl1) (t2, lh2) (module D)
+         || happens_before (t2, dl2) (t1, lh1) (module D)
+         || happens_before_global dlg1 (t2, lh2)
+         || happens_before_global dlg2 (t1, lh1))
+
+    (* ego tid is already printed elsewhere *)
+    let pretty () (_, dl_dlg_lh) = DlLhProd.pretty () dl_dlg_lh
+    let show (_, dl_dlg_lh) = DlLhProd.show dl_dlg_lh
+    let to_yojson (_, dl_dlg_lh) = DlLhProd.to_yojson dl_dlg_lh
+    let printXml f (_, dl_dlg_lh) = DlLhProd.printXml f dl_dlg_lh
+
+    let should_print (_, (dl, dlg, lh)) =
+      let ls_not_empty _ ls = not @@ Lockset.is_empty ls in
+      D.exists ls_not_empty dl
+      || G.exists (fun _ -> TidToLocksetMapTop.exists ls_not_empty) dlg
+      || Queries.LH.exists (fun _ tids -> not @@ TIDs.is_empty tids) lh
+  end
+
+  let access man _ =
+    let lh = man.ask Queries.MustlockHistory in
+    let tid_lifted = man.ask Queries.CurrentThreadId in
+    match tid_lifted with
+    | `Lifted tid -> tid, (man.local, man.global tid, lh)
+    | _ -> ThreadIdDomain.UnknownThread, (D.empty (), G.empty (), Queries.LH.empty ())
+end
+
+let _ =
+  MCP.register_analysis
+    ~dep:[ "threadid"; "mutex"; "threadJoins"; "threadDescendants"; "mustlockHistory" ]
+    (module Spec : MCPSpec)
diff --git a/src/analyses/mustlockHistory.ml b/src/analyses/mustlockHistory.ml
new file mode 100644
index 0000000000..ea862927e2
--- /dev/null
+++ b/src/analyses/mustlockHistory.ml
@@ -0,0 +1,47 @@
+(** must-lock history analysis [mustlockHistory]
+    collects, for each lock, the threads in which a lock operation on it
+    must have happened before reaching the current program point.
+
+    @see https://github.com/goblint/analyzer/pull/1923
+*)
+
+open Analyses
+module TIDs = SetDomain.Reverse (ConcDomain.ThreadSet)
+module Lock = LockDomain.MustLock
+
+module Spec = struct
+  include IdentityUnitContextsSpec
+
+  (** [{ l |-> T }]
+
+      [l] must have been locked in all members of [T].
+  *)
+  module D = Queries.LH
+
+  let name () = "mustlockHistory"
+  let startstate _ = D.empty ()
+  let exitstate _ = D.empty ()
+
+  let lock man tid lock =
+    let old_threadset = D.find lock man.local in
+    let new_threadset = TIDs.add tid old_threadset in
+    D.add lock new_threadset man.local
+
+  let event man e _ =
+    match e with
+    (* we only handle exclusive locks here *)
+    | Events.Lock (addr, true) ->
+      let tid_lifted = man.ask Queries.CurrentThreadId in
+      let lock_opt = Lock.of_addr addr in
+      (match tid_lifted, lock_opt with
+       | `Lifted tid, Some l -> lock man tid l
+       | _ -> man.local)
+    | _ -> man.local
+
+  let query man (type a) (x : a Queries.t) : a Queries.result =
+    match x with
+    | Queries.MustlockHistory -> (man.local : D.t)
+    | _ -> Queries.Result.top x
+end
+
+let _ = MCP.register_analysis ~dep:[ "threadid" ] (module Spec : MCPSpec)
diff --git a/src/analyses/threadDescendants.ml b/src/analyses/threadDescendants.ml
index 22d7911fa4..8f524d8fc7 100644
--- a/src/analyses/threadDescendants.ml
+++ b/src/analyses/threadDescendants.ml
@@ -4,6 +4,37 @@ open Analyses
 
 module TID = ThreadIdDomain.Thread
+module TIDs = ConcDomain.ThreadSet
+
+(** Reflexive-transitive closure of the child relation applied to [tid],
+    filtered to only include threads of which [tid] is a must-ancestor.
+    @param ask any ask
+    @param tid the thread whose descendants are computed
+*)
+let must_ancestor_descendants_closure (ask: Queries.ask) tid =
+  let descendants = ask.f @@ Queries.DescendantThreads tid in
+  let must_ancestors_descendants = TIDs.filter (TID.must_be_ancestor tid) descendants in
+  TIDs.add tid must_ancestors_descendants
+
+(** Compute all descendant threads that may run concurrently with the ego thread at a program point.
+    For all of them, [tid] is a must-ancestor.
+    @param ask ask of the ego thread at the program point
+    @param tid ego thread id
+*)
+let must_ancestor_running_descendants (ask: Queries.ask) tid =
+  let may_created_tids = ask.f Queries.CreatedThreads in
+  let may_must_ancestor_created_tids =
+    TIDs.filter (TID.must_be_ancestor tid) may_created_tids
+  in
+  let may_transitively_created_tids =
+    TIDs.fold
+      (fun child_tid acc ->
+        TIDs.union acc (must_ancestor_descendants_closure ask child_tid))
+      may_must_ancestor_created_tids
+      (TIDs.empty ())
+  in
+  let must_joined_tids = ask.f Queries.MustJoinedThreads in
+  TIDs.diff may_transitively_created_tids must_joined_tids
 
 module Spec = struct
   include IdentityUnitContextsSpec
 
diff --git a/src/domains/queries.ml b/src/domains/queries.ml
index 6b85a2729c..b068e8a09b 100644
--- a/src/domains/queries.ml
+++ b/src/domains/queries.ml
@@ -77,6 +77,10 @@ type invariant_context = Invariant.context = {
 
 
 module YS = SetDomain.ToppedSet (YamlWitnessType.Entry) (struct let topname = "Top" end)
 
+module CL = MapDomain.MapBot_LiftTop (ThreadIdDomain.Thread) (LockDomain.MustLockset)
+
+module LH = MapDomain.MapTop (LockDomain.MustLock) (SetDomain.Reverse (ConcDomain.ThreadSet))
+
 (** GADT for queries with specific result type. *)
 type _ t =
@@ -147,6 +151,8 @@ type _ t =
   | GhostVarAvailable: WitnessGhostVar.t -> MayBool.t t
   | InvariantGlobalNodes: NS.t t (** Nodes where YAML witness flow-insensitive invariants should be emitted as location invariants (if [witness.invariant.flow_insensitive-as] is configured to do so). *) (* [Spec.V.t] argument (as [Obj.t]) could be added, if this should be different for different flow-insensitive invariants.
*) | DescendantThreads: ThreadIdDomain.Thread.t -> ConcDomain.ThreadSet.t t + | CreationLockset: ThreadIdDomain.Thread.t -> CL.t t + | MustlockHistory: LH.t t type 'a result = 'a @@ -223,6 +229,8 @@ struct | GhostVarAvailable _ -> (module MayBool) | InvariantGlobalNodes -> (module NS) | DescendantThreads _ -> (module ConcDomain.ThreadSet) + | CreationLockset _ -> (module CL) + | MustlockHistory -> (module LH) (** Get bottom result for query. *) let bot (type a) (q: a t): a result = @@ -298,6 +306,8 @@ struct | GhostVarAvailable _ -> MayBool.top () | InvariantGlobalNodes -> NS.top () | DescendantThreads _ -> ConcDomain.ThreadSet.top () + | CreationLockset _ -> CL.top () + | MustlockHistory -> LH.top () end (* The type any_query can't be directly defined in Any as t, @@ -370,6 +380,8 @@ struct | Any (GhostVarAvailable _) -> 62 | Any InvariantGlobalNodes -> 63 | Any (DescendantThreads _) -> 64 + | Any (CreationLockset _) -> 65 + | Any (MustlockHistory) -> 66 let rec compare a b = let r = Stdlib.compare (order a) (order b) in @@ -430,6 +442,7 @@ struct | Any (GasExhausted f1), Any (GasExhausted f2) -> CilType.Fundec.compare f1 f2 | Any (GhostVarAvailable v1), Any (GhostVarAvailable v2) -> WitnessGhostVar.compare v1 v2 | Any (DescendantThreads t1), Any (DescendantThreads t2) -> ThreadIdDomain.Thread.compare t1 t2 + | Any (CreationLockset t1), Any (CreationLockset t2) -> ThreadIdDomain.Thread.compare t1 t2 (* only argumentless queries should remain *) | _, _ -> Stdlib.compare (order a) (order b) @@ -478,6 +491,7 @@ struct | Any (GasExhausted f) -> CilType.Fundec.hash f | Any (GhostVarAvailable v) -> WitnessGhostVar.hash v | Any (DescendantThreads t) -> ThreadIdDomain.Thread.hash t + | Any (CreationLockset t) -> ThreadIdDomain.Thread.hash t (* IterSysVars: *) (* - argument is a function and functions cannot be compared in any meaningful way. *) (* - doesn't matter because IterSysVars is always queried from outside of the analysis, so MCP's query caching is not done for it. 
*)
@@ -547,6 +561,8 @@ struct
     | Any (GhostVarAvailable v) -> Pretty.dprintf "GhostVarAvailable %a" WitnessGhostVar.pretty v
     | Any InvariantGlobalNodes -> Pretty.dprintf "InvariantGlobalNodes"
     | Any (DescendantThreads t) -> Pretty.dprintf "DescendantThreads %a" ThreadIdDomain.Thread.pretty t
+    | Any (CreationLockset t) -> Pretty.dprintf "CreationLockset %a" ThreadIdDomain.Thread.pretty t
+    | Any (MustlockHistory) -> Pretty.dprintf "MustlockHistory"
 end
 
 let to_value_domain_ask (ask: ask) =
diff --git a/src/goblint_lib.ml b/src/goblint_lib.ml
index 92fa69c3b4..ead3965c1e 100644
--- a/src/goblint_lib.ml
+++ b/src/goblint_lib.ml
@@ -109,6 +109,7 @@ module MayLocks = MayLocks
 module SymbLocks = SymbLocks
 module Deadlock = Deadlock
 module MutexGhosts = MutexGhosts
+module MustlockHistory = MustlockHistory
 
 (** {3 Threads}
 
@@ -133,6 +134,7 @@ module PthreadBarriers = PthreadBarriers
 module ExtractPthread = ExtractPthread
 module PthreadOnce = PthreadOnce
 module CreationLockset = CreationLockset
+module DescendantLockset = DescendantLockset
 
 (** {2 Longjmp}
 
diff --git a/tests/regression/53-races-mhp/40-dl_simple_racefree.c b/tests/regression/53-races-mhp/40-dl_simple_racefree.c
new file mode 100644
index 0000000000..73416bf5cb
--- /dev/null
+++ b/tests/regression/53-races-mhp/40-dl_simple_racefree.c
@@ -0,0 +1,22 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1;
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex);
+  // everything from here must happen after unlock in main
+  pthread_mutex_unlock(&mutex);
+  global++; // NORACE
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex);
+  pthread_create(&id1, NULL, t1, NULL);
+  global++; // NORACE
+  pthread_mutex_unlock(&mutex);
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/41-dl_lock_in_intermediate_thread_racefree.c b/tests/regression/53-races-mhp/41-dl_lock_in_intermediate_thread_racefree.c
new file mode 100644
index 0000000000..129fc9249c
--- /dev/null
+++ b/tests/regression/53-races-mhp/41-dl_lock_in_intermediate_thread_racefree.c
@@ -0,0 +1,27 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1, id2;
+
+void *t2(void *arg) {
+  global++; // NORACE
+  return NULL;
+}
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex);
+  // everything from here must happen after unlock in main
+  pthread_create(&id2, NULL, t2, NULL);
+  pthread_mutex_unlock(&mutex);
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex);
+  pthread_create(&id1, NULL, t1, NULL);
+  global++; // NORACE
+  pthread_mutex_unlock(&mutex);
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/42-dl_cl_simple_racefree.c b/tests/regression/53-races-mhp/42-dl_cl_simple_racefree.c
new file mode 100644
index 0000000000..491a414c27
--- /dev/null
+++ b/tests/regression/53-races-mhp/42-dl_cl_simple_racefree.c
@@ -0,0 +1,31 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset --set "ana.activated[+]" creationLockset
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1, id2;
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex);
+  // everything from here must happen after unlock in main
+  pthread_mutex_unlock(&mutex);
+  global++; // NORACE
+  return NULL;
+}
+
+void *t2(void *arg) { // t2 is joined into main before unlock happens
+  global++; // NORACE
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex);
+
+  pthread_create(&id1, NULL, t1, NULL);
+
+  pthread_create(&id2, NULL, t2, NULL);
+  pthread_join(id2, NULL);
+
+  pthread_mutex_unlock(&mutex);
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/43-dl_maybe_create_racefree.c b/tests/regression/53-races-mhp/43-dl_maybe_create_racefree.c
new file mode 100644
index 0000000000..cb020006a4
--- /dev/null
+++ b/tests/regression/53-races-mhp/43-dl_maybe_create_racefree.c
@@ -0,0 +1,25 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1;
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex);
+  // everything from here must happen after unlock in main
+  pthread_mutex_unlock(&mutex);
+  global++; // NORACE
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex);
+  int maybe;
+  if (maybe) {
+    pthread_create(&id1, NULL, t1, NULL);
+  }
+  global++; // NORACE
+  pthread_mutex_unlock(&mutex);
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/44-dl_cl_transitive_create_racefree.c b/tests/regression/53-races-mhp/44-dl_cl_transitive_create_racefree.c
new file mode 100644
index 0000000000..30c7ac75f4
--- /dev/null
+++ b/tests/regression/53-races-mhp/44-dl_cl_transitive_create_racefree.c
@@ -0,0 +1,36 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset --set "ana.activated[+]" creationLockset
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1, id2, id3;
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex);
+  // everything from here must happen after unlock in main
+  pthread_mutex_unlock(&mutex);
+  global++; // NORACE
+  return NULL;
+}
+
+void *t3(void *arg) { // t3 is joined into main before unlock happens
+  global++; // NORACE
+  return NULL;
+}
+
+void *t2(void *arg) {
+  pthread_create(&id3, NULL, t3, NULL);
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex);
+
+  pthread_create(&id1, NULL, t1, NULL);
+
+  pthread_create(&id2, NULL, t2, NULL);
+  pthread_join(id3, NULL);
+
+  pthread_mutex_unlock(&mutex);
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/45-dl_multiple_mutexes_racefree.c b/tests/regression/53-races-mhp/45-dl_multiple_mutexes_racefree.c
new file mode 100644
index 0000000000..53c1340cb3
--- /dev/null
+++ b/tests/regression/53-races-mhp/45-dl_multiple_mutexes_racefree.c
@@ -0,0 +1,22 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER, mutex2 = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1;
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex1);
+  pthread_mutex_unlock(&mutex1);
+  global++; // NORACE
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex1);
+  pthread_mutex_lock(&mutex2);
+  pthread_create(&id1, NULL, t1, NULL);
+  pthread_mutex_unlock(&mutex2);
+  global++; // NORACE
+  return 0;
+}
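The two recursive-mutex tests that follow (46 and 47) rely on standard POSIX semantics: a recursive mutex keeps a per-owner lock count, so after two lock calls a single pthread_mutex_unlock leaves the mutex held, which is what the RACE comments in those tests refer to. A minimal standalone illustration of that behaviour (not part of the test suite; it only assumes the same glibc/macOS recursive-mutex initializers the tests use):

#define _GNU_SOURCE
#include <pthread.h>

int main(void) {
#ifdef __APPLE__
  pthread_mutex_t m = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
#else
  pthread_mutex_t m = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
#endif
  pthread_mutex_lock(&m);   // lock count 1
  pthread_mutex_lock(&m);   // same owner: lock count 2, no deadlock
  pthread_mutex_unlock(&m); // lock count 1: the mutex is still held
  pthread_mutex_unlock(&m); // lock count 0: only now is it released
  return 0;
}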
diff --git a/tests/regression/53-races-mhp/46-dl_recursive_mutex.c b/tests/regression/53-races-mhp/46-dl_recursive_mutex.c
new file mode 100644
index 0000000000..b5abb8db47
--- /dev/null
+++ b/tests/regression/53-races-mhp/46-dl_recursive_mutex.c
@@ -0,0 +1,30 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset
+#define _GNU_SOURCE
+#include <pthread.h>
+
+int global = 0;
+#ifdef __APPLE__
+pthread_mutex_t mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+#else
+pthread_mutex_t mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+#endif
+pthread_t id1;
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex);
+  pthread_mutex_unlock(&mutex);
+  global++;
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex);
+  pthread_mutex_lock(&mutex);
+  pthread_create(&id1, NULL, t1, NULL);
+  global++; // NORACE
+  pthread_mutex_unlock(&mutex);
+  global++; // RACE (the unlock statement above doesn't fully unlock the recursive mutex)
+  pthread_mutex_unlock(&mutex);
+  global++; // RACE!
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/47-dl_create_before_second_lock_of_recursive_mutex.c b/tests/regression/53-races-mhp/47-dl_create_before_second_lock_of_recursive_mutex.c
new file mode 100644
index 0000000000..e02a01ac60
--- /dev/null
+++ b/tests/regression/53-races-mhp/47-dl_create_before_second_lock_of_recursive_mutex.c
@@ -0,0 +1,31 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset
+#define _GNU_SOURCE
+#include <pthread.h>
+
+int global = 0;
+#ifdef __APPLE__
+pthread_mutex_t mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+#else
+pthread_mutex_t mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+#endif
+pthread_t id1;
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex);
+  pthread_mutex_unlock(&mutex);
+  global++;
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex);
+  pthread_create(&id1, NULL, t1, NULL);
+  global++; // NORACE
+  pthread_mutex_lock(&mutex);
+  global++; // NORACE
+  pthread_mutex_unlock(&mutex);
+  global++; // RACE (the unlock statement above doesn't fully unlock the recursive mutex)
+  pthread_mutex_unlock(&mutex);
+  global++; // RACE!
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/50-dl_no_lh_racing.c b/tests/regression/53-races-mhp/50-dl_no_lh_racing.c
new file mode 100644
index 0000000000..3d34d41c3e
--- /dev/null
+++ b/tests/regression/53-races-mhp/50-dl_no_lh_racing.c
@@ -0,0 +1,19 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1;
+
+void *t1(void *arg) {
+  global++; // RACE!
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex);
+  pthread_create(&id1, NULL, t1, NULL);
+  global++; // RACE!
+  pthread_mutex_unlock(&mutex);
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/51-dl_unlock_parent_racing.c b/tests/regression/53-races-mhp/51-dl_unlock_parent_racing.c
new file mode 100644
index 0000000000..8615b5f2e6
--- /dev/null
+++ b/tests/regression/53-races-mhp/51-dl_unlock_parent_racing.c
@@ -0,0 +1,20 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1;
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex);
+  global++; // RACE!
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex);
+  pthread_create(&id1, NULL, t1, NULL);
+  pthread_mutex_unlock(&mutex);
+  global++; // RACE!
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/52-dl_maybe_lh_racing.c b/tests/regression/53-races-mhp/52-dl_maybe_lh_racing.c
new file mode 100644
index 0000000000..c4e9218dc2
--- /dev/null
+++ b/tests/regression/53-races-mhp/52-dl_maybe_lh_racing.c
@@ -0,0 +1,24 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1;
+
+void *t1(void *arg) {
+  int maybe;
+  if (maybe) {
+    pthread_mutex_lock(&mutex);
+    pthread_mutex_unlock(&mutex);
+  }
+  global++; // RACE!
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex);
+  pthread_create(&id1, NULL, t1, NULL);
+  global++; // RACE!
+  pthread_mutex_unlock(&mutex);
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/53-dl_maybe_unlock_parent_racing.c b/tests/regression/53-races-mhp/53-dl_maybe_unlock_parent_racing.c
new file mode 100644
index 0000000000..1f62562e1f
--- /dev/null
+++ b/tests/regression/53-races-mhp/53-dl_maybe_unlock_parent_racing.c
@@ -0,0 +1,23 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1;
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex);
+  global++; // RACE!
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex);
+  pthread_create(&id1, NULL, t1, NULL);
+  int maybe;
+  if (maybe) {
+    pthread_mutex_unlock(&mutex);
+  }
+  global++; // RACE!
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/54-dl_unknown_lh_racing.c b/tests/regression/53-races-mhp/54-dl_unknown_lh_racing.c
new file mode 100644
index 0000000000..dee7f56fec
--- /dev/null
+++ b/tests/regression/53-races-mhp/54-dl_unknown_lh_racing.c
@@ -0,0 +1,22 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1;
+
+void *t1(void *arg) {
+  pthread_mutex_t *mutex_ref;
+  pthread_mutex_lock(mutex_ref); // lock of unknown mutex
+  pthread_mutex_unlock(mutex_ref);
+  global++; // RACE!
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex);
+  pthread_create(&id1, NULL, t1, NULL);
+  global++; // RACE!
+  pthread_mutex_unlock(&mutex);
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/55-dl_unknown_unlock_parent_racing.c b/tests/regression/53-races-mhp/55-dl_unknown_unlock_parent_racing.c
new file mode 100644
index 0000000000..22e0d84a17
--- /dev/null
+++ b/tests/regression/53-races-mhp/55-dl_unknown_unlock_parent_racing.c
@@ -0,0 +1,21 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1;
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex);
+  global++; // RACE!
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex);
+  pthread_create(&id1, NULL, t1, NULL);
+  pthread_mutex_t *mutex_ref;
+  pthread_mutex_unlock(mutex_ref); // unlock of unknown mutex
+  global++; // RACE!
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/56-dl_multiple_creates_sequential_racing.c b/tests/regression/53-races-mhp/56-dl_multiple_creates_sequential_racing.c
new file mode 100644
index 0000000000..0840d5e4ac
--- /dev/null
+++ b/tests/regression/53-races-mhp/56-dl_multiple_creates_sequential_racing.c
@@ -0,0 +1,23 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset --disable ana.thread.include-node
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1, id2, id3;
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex);
+  pthread_mutex_unlock(&mutex);
+  global++; // RACE!
+  return NULL;
+}
+
+int main(void) {
+  pthread_create(&id1, NULL, t1, NULL);
+  pthread_create(&id2, NULL, t1, NULL);
+  pthread_mutex_lock(&mutex);
+  pthread_create(&id3, NULL, t1, NULL); // t1 can already be created before locking
+  global++; // RACE!
+  pthread_mutex_unlock(&mutex);
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/57-dl_multiple_creates_conditional_racing.c b/tests/regression/53-races-mhp/57-dl_multiple_creates_conditional_racing.c
new file mode 100644
index 0000000000..2791f8b610
--- /dev/null
+++ b/tests/regression/53-races-mhp/57-dl_multiple_creates_conditional_racing.c
@@ -0,0 +1,27 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset --disable ana.thread.include-node
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1, id2, id3;
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex);
+  pthread_mutex_unlock(&mutex);
+  global++; // RACE!
+  return NULL;
+}
+
+int main(void) {
+  int maybe;
+  if (maybe) {
+    pthread_create(&id1, NULL, t1, NULL); // locking happens after thread creation!
+    pthread_mutex_lock(&mutex);
+  } else {
+    pthread_mutex_lock(&mutex);
+    pthread_create(&id2, NULL, t1, NULL);
+  }
+  global++; // RACE!
+  pthread_mutex_unlock(&mutex);
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/58-dl_cl_unlock_before_join_racing.c b/tests/regression/53-races-mhp/58-dl_cl_unlock_before_join_racing.c
new file mode 100644
index 0000000000..cb6a52c43d
--- /dev/null
+++ b/tests/regression/53-races-mhp/58-dl_cl_unlock_before_join_racing.c
@@ -0,0 +1,30 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset --set "ana.activated[+]" creationLockset
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1, id2;
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex);
+  // everything from here must happen after unlock in main
+  pthread_mutex_unlock(&mutex);
+  global++; // RACE!
+  return NULL;
+}
+
+void *t2(void *arg) { // t2 is joined into main before unlock happens
+  global++; // RACE!
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex);
+
+  pthread_create(&id1, NULL, t1, NULL);
+
+  pthread_create(&id2, NULL, t2, NULL);
+  pthread_mutex_unlock(&mutex);
+  pthread_join(id2, NULL);
+  return 0;
+}
diff --git a/tests/regression/53-races-mhp/59-dl_multiple_mutexes_racing.c b/tests/regression/53-races-mhp/59-dl_multiple_mutexes_racing.c
new file mode 100644
index 0000000000..4a90cf244f
--- /dev/null
+++ b/tests/regression/53-races-mhp/59-dl_multiple_mutexes_racing.c
@@ -0,0 +1,22 @@
+// PARAM: --set ana.activated[+] threadJoins --set ana.activated[+] threadDescendants --set ana.activated[+] mustlockHistory --set ana.activated[+] descendantLockset
+#include <pthread.h>
+
+int global = 0;
+pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER, mutex2 = PTHREAD_MUTEX_INITIALIZER;
+pthread_t id1;
+
+void *t1(void *arg) {
+  pthread_mutex_lock(&mutex1);
+  pthread_mutex_unlock(&mutex1);
+  global++; // RACE!
+  return NULL;
+}
+
+int main(void) {
+  pthread_mutex_lock(&mutex1);
+  pthread_mutex_lock(&mutex2);
+  pthread_create(&id1, NULL, t1, NULL);
+  pthread_mutex_unlock(&mutex1);
+  global++; // RACE!
+  return 0;
+}
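To make the happens-before reasoning in descendantLockset easier to follow, here is a small standalone OCaml sketch of the check behind A.may_race, on toy data that mirrors test 40. Everything in it (string thread ids, association lists, the hard-coded must_be_ancestor relation) is made up for illustration; the actual analysis works on Goblint's ThreadIdDomain, LockDomain and MapDomain types as defined in the diff above.

(* Standalone sketch of the happens-before check behind descendantLockset.A.may_race.
   Toy model only: thread ids and locks are strings, maps are association lists. *)

type tid = string

(* must-ancestor relation, hard-coded for the example (mirrors test 40: main creates t1) *)
let must_be_ancestor a b =
  match a, b with
  | "main", "t1" -> true
  | _ -> false

(* descendant lockset at program point p1 (main's access): t1 was created while "m" was
   held and "m" has not been unlocked since *)
let dl1 : (tid * string list) list = [ ("t1", [ "m" ]) ]

(* mustlock history at program point p2 (t1's access): "m" must have been locked in t1 *)
let lh2 : (string * tid list) list = [ ("m", [ "t1" ]) ]

(* p1 (in thread t1_id) must happen before p2 (in thread t2_id) if t2_id was created while
   some lock l was held at p1, and l was afterwards locked by a thread on the creation
   chain between t1_id and t2_id, i.e. only after p1's thread released it. *)
let happens_before t1_id t2_id =
  match List.assoc_opt t2_id dl1 with
  | None | Some [] -> false
  | Some locks ->
    List.exists
      (fun l ->
        match List.assoc_opt l lh2 with
        | None -> false
        | Some lockers ->
          List.exists
            (fun t_lh ->
              must_be_ancestor t1_id t_lh
              && (t_lh = t2_id || must_be_ancestor t_lh t2_id))
            lockers)
      locks

let () =
  (* corresponds to test 40: main's access must happen before t1's access, so no race *)
  assert (happens_before "main" "t1");
  print_endline "no race between main's and t1's increment"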