diff --git a/defaults/dbussy/.gitignore b/defaults/dbussy/.gitignore new file mode 100644 index 0000000..6d65064 --- /dev/null +++ b/defaults/dbussy/.gitignore @@ -0,0 +1,2 @@ +/__pycache__/ +/build/ diff --git a/defaults/dbussy/COPYING b/defaults/dbussy/COPYING new file mode 100644 index 0000000..ae23fcf --- /dev/null +++ b/defaults/dbussy/COPYING @@ -0,0 +1,504 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. 
+ + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. 
We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. 
+ + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. 
+ + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. 
+ + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. 
+ + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. 
If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. 
+ + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. 
However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. 
+ +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. 
If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + <one line to give the library's name and a brief idea of what it does.> + Copyright (C) <year>  <name of author> + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + <signature of Ty Coon>, 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + + diff --git a/defaults/dbussy/README b/defaults/dbussy/README new file mode 100644 index 0000000..acae1f2 --- /dev/null +++ b/defaults/dbussy/README @@ -0,0 +1,714 @@ +DBussy is yet another Python binding for accessing D-Bus +<https://www.freedesktop.org/wiki/Software/dbus/>. I know there is +already dbus-python <https://dbus.freedesktop.org/doc/dbus-python/>, +among others +<https://www.freedesktop.org/wiki/Software/DBusBindings/>. So why do +we need another one? + +The main issue is one of event loops. Most of the existing bindings +seem to be based around GLib. However, Python now has its own +“asyncio” event-loop architecture +<https://docs.python.org/3/library/asyncio.html>. This goes back to +Python 3.4, but as of 3.5, you now have full-fledged coroutines +(“async def” and “await”) as a language feature. + +Every GUI toolkit already provides its own event loop; so why did +the Python developers decide to add yet another one? The answer +seems clear: to provide a language-standard API for event loops, +and a reference implementation for this API. It should be possible +to adapt other event loops to this same API, and then Python +code written to work with asyncio becomes event-loop agnostic. + + +What Is D-Bus? +============== + +D-Bus is a high-level interprocess communication protocol. 
It also +provides a standard daemon, that is included with the main Linux +desktop environments, that implements a set of standard “buses”: a +“system” bus that is created at system boot time, and a “session” bus +that belongs to each user who logs into one of these desktop +environments. + +Processes can register their services on one of these buses--the +system bus for systemwide access, or the session bus for per-user +access--where other processes can find them by name and connect to +them. Or they can accept connections on entirely separate networking +sockets, without any dependency on the D-Bus daemon. libdbus, the +reference implementation for the low-level D-Bus protocol, supports +both modes of operation. + +D-Bus is based around the concept of passing messages conforming to +a standard, extensible format. Messages are of four types: + * a “method call” + * a “method return” (normal response to a method call) + * an “error” (abnormal response to a method call) + * a “signal” notification + +A method-call message is how one process requests a service of another +process via D-Bus; the usual response would be a method-return message +in the other direction indicating the completion status of the service +being performed; it is also possible to send method-call messages +without expecting a reply. If there was something wrong with the +method-call message (e.g. inappropriate parameters, lack of +permissions), then the response would be an error message. One could +also send a method-return with information indicating a failure to +perform the requested service; presumably the choice between the types +of response is that an error return indicates a condition that is not +supposed to happen--a bug in the requesting program. + +Signal messages are sent as notifications of interesting events +pertaining to the current session (for the session bus) or the entire +system (for the system bus). 
They are usually not sent to a specific +destination, but can be picked up by all interested processes on the +bus. There are no replies to signals; if the receiving process cannot +or will not process a particular message, it simply ignores it. + +Messages optionally include the following information: + * a destination “bus name” indicating the process that is to + receive the message (this is not the name of the bus, but the + name of a connection to the bus) + * an “object path” which looks like a POSIX absolute file name + (always beginning with a slash and never ending with a slash, + except for the root object “/”); the meaning of this is up to + the receiving process, but it is intended to indicate some + object within the hierarchy exposed by the process + * an “interface name” which identifies the particular message + protocol + * a “method name” which identifies the particular function to be + performed within that interface. + +Bus names and interface names look like domain names with the components +reversed, so the top level is at the beginning. If you are familiar with +package names in Java, they take the same form, and with the same +intent: to reduce the chance of name conflicts. + +D-Bus also includes an extensive, but not extensible, type system for +encoding data in a message. This data represents arguments to the +method call or signal, return results for a method return or the error +name and message for an error. A message contains a sequence of 0, 1 +or more items of such data, each of which can be of various types: +“basic” types (e.g. integer, float, string) or “container” types +(structs, arrays, dictionaries) which in turn contain more values, +each of which in turn can be of a basic or (recursively) another +container type. 
A “signature” is a string encoding the type of a +value, or sequence of values; there is also a “variant” type, which +means the type of the value is encoded dynamically with the value +itself, separate from the signature. + +The importance of type signatures is really up to the particular +programs that are trying to communicate: some might insist on values +exactly matching the expected type signature, whereas others might be +more lenient. For example, while the D-Bus type system specifies +different basic types for different sizes of integers of signed or +unsigned varieties, most Python code will probably not care about the +specific distinctions, and treat all these values as of type “int”. + + +Standard Interfaces +------------------- + +D-Bus defines some standard interfaces which are meant to be +understood by most if not all services. + +One fundamental one is the “org.freedesktop.DBus.Introspectable” +interface; this defines an “Introspect” method, that is expected to +return an XML string that describes all the interfaces understood by +the object identified by the object path, as well as listing all the +available child objects that can be accessed by appending a slash and +the child name to the parent object path, if any. Introspection is a +very important part of D-Bus: it is what allows users to discover what +services are available on their installations, and throw together +ad-hoc scripts in Python or other high-level languages to make +convenient use of such services, without having to write a lot of +interfacing code. + +Another commonly-supported interface is called +“org.freedesktop.DBus.Properties”. This one defines the concept of +*properties*, which are pieces of data notionally attached to object +paths, and which might be readable, writable or both. This interface +defines standard methods to get a property value for an object, set a +new property value, or get all properties defined on an object. 
It +also specifies a signal that can be sent by a server process as a +general notification to all peers on the bus about changes to its +property values. + + +Enter DBussy +============ + +DBussy allows you to take advantage of asyncio, but it doesn’t force +you to use it. DBussy is meant to give you access to (nearly) all the +functionality of the underlying libdbus library +. libdbus is a +very low-level, very general library, designed to be called from C +code, that makes no assumptions about event loops at all. Consider the +basic task in a client program of sending a D-Bus request message and +waiting for a reply; or consider a server waiting for a message to +come in. libdbus offers 3 different ways to handle this: + + * poll repeatedly until the message appears + * block the thread until the message comes in + * specify a callback to be notified when the message is available + +DBussy offers access to all these ways. But it also gives you the +option of engaging the asyncio event loop. This means you can be doing +other things in the loop, and when a message comes in, it can be +passed automatically to a callback that you previously specified. + +It also gives clients another way of sending a message and waiting for +the reply: using a coroutine. For example + + async def request_reply(connection) : + message = dbussy.Message.new_method_call(...) + ... other setup of message args etc ... + reply = await connection.send_await_reply(message, timeout) + ... process reply ... + #end request_reply + + loop = asyncio.get_event_loop() + dbus_task = loop.create_task(request_reply(connection)) + ... maybe create other tasks to run while awaiting reply ... + loop.run_until_complete(dbus_task) + +On the server side, you can correspondingly use coroutines to handle +time-consuming requests without blocking the main loop. A message +filter or object-path message callback can return, instead of +DBUS.HANDLER_RESULT_HANDLED, a coroutine. 
The wrapper code will give +libdbus a result of DBUS.HANDLER_RESULT_HANDLED on your behalf, after +creating a task to execute the coroutine on the event loop. The +coroutine can then go on to handle the actual processing of the +request, and return the reply at some later stage. + +The dbussy module also offers several more Pythonic facilities beyond +those of the underlying libdbus, including a higher-level +representation of type signatures as Type objects (and subclasses +thereof), and an Introspection object hierarchy that can be easily +converted to and from the standard D-Bus introspection XML +representation. + + +No Type-Guessing! +================= + +Unlike some other Python bindings for D-Bus libraries, DBussy never +tries to guess how to convert Python types to D-Bus types. For +example, D-Bus provides many different sizes of integer, in both +signed and unsigned variants; when retrieving an integer parameter +from a message, DBussy will convert all of these to Python integers, +but when putting a Python integer into a message parameter, there must +always be a signature somewhere specifying which specific D-Bus +integer type to convert it to. + +Also, D-Bus variants are always represented in Python as a 2-tuple: +the first element is the signature of the actual type being passed, +and the second is the value of that type. + + +Multithreading Caveats +====================== + +While libdbus was supposed to be usable with multithreaded code, in +practice this has turned out to be problematic +. +The whole point of Python’s asyncio framework is to avoid +multithreading anyway. + +The dbussy module does use multithreading in a very limited way, +in just one place: the Connection.open_async() call. It does this +to give the effect of a nonblocking equivalent to a call for which +libdbus only offers a blocking version. 
+ + +Ravel: The Higher-Level Interface +================================= + +Rather than directly manipulating D-Bus message objects, it is usually +more convenient to have a representation where D-Bus object paths are +directly mapped to Python objects, and D-Bus method and signal calls +are similarly mapped to calls on methods of those Python objects. So +on the client side, the local Python objects become “proxies” for the +actual objects implemented by the remote server. And on the server +side, the implementation of an interface can be wrapped up in an +“interface class” with methods that are automatically invoked in +response to incoming D-Bus requests. + +Interface classes can also be used on the client side: in this +situation, the method calls are just stubs used for type-checking +outgoing requests, while the signal definitions can be real functions +which are invoked in response to incoming signal messages. Conversely, +on the server side, the signal definitions are stubs used for +type-checking, while the method definitions (in the D-Bus sense) are +real functions implementing those calls. + +An interface class can also be both client-side and server-side in +one, which means all the method definitions are real, none are stubs. +So it can be used both for type-checking outgoing messages and +handling incoming ones. For example, this is true of both the common +standard interfaces (Introspectable and Properties), since most if not +all peers are expected to support them. + +(Signal definitions are a special case: even in a +client-and-server-side interface, they can be marked as stubs--as in +the standard PropertyHandler interface. This allows you to register +such an interface for introspection purposes, without having to accept +its handling of any signals.) + +Both kinds of interface representations are provided by the “ravel” +module--interface classes on the client or server side, and proxy +interfaces on the client side. 
Ravel also offers different ways to +construct a proxy interface: you can define it yourself, or you can +have Ravel construct it automatically for you by introspecting the +server-side object. + +Either way, you start by creating a ravel.Connection object, which is a +wrapper around a lower-level dbussy.Connection object. You can get one +for the session or system bus by calling ravel.session_bus() and +ravel.system_bus() respectively, or you can use a ravel.Server object +(wrapping around the corresponding dbussy.Server) to accept +connections on your own network address, separate from the D-Bus +daemon. + + +The Client Side: Proxy Interfaces +--------------------------------- + +Proxy interfaces can be easily constructed in different ways. One way +is to start with a proxy for a bus peer with a particular name. You +get one of these with an expression that treats the connection as +though it were a mapping: + + peer = conn[«bus_name»] + +Then you do another lookup on this mapping to get a reference to a +particular object path at that peer: + + obj = peer[«object_path»] + +Now, you can get a proxy for a desired interface thus: + + iface = obj.get_interface(«interface_name») + +which causes automatic introspection of that object path on the peer +to obtain all the necessary type information for that interface (if it +is not one of the standard interfaces). So calling a Python method on +this object + + results = iface.«method»(«args») + +translates automatically to the corresponding D-Bus method call to +that object and interface on the remote server, with full type +checking done on both arguments and results. + +Note that the method result is always a list. + +D-Bus properties are automatically mapped to Python properties, +so you can access their values and assign new ones in the usual +Python way. 
For example, adding 1 to a numeric property (written +out the long way to demonstrate property access on both the LHS +and RHS of the assignment): + + iface = conn[«bus_name»][«path»] \ + .get_interface(«interface_name») + iface.«prop» = iface.«prop» + 1 + +The above are *blocking* calls, which means the current thread is +blocked while waiting for the reply to the method call. If you want to +do things in a more event-loop-friendly fashion, then use +get_async_interface instead of get_interface, which returns a +coroutine object that evaluates to an asynchronous version of the +proxy object when it finally completes. Method calls and property +accesses on this are automatically also coroutine calls, so you can +use them in await-constructs in your coroutines, or create asyncio +tasks to run them etc. + +Here is an example use of the above calls, which pops up a GUI +notification displaying a short message for 5 seconds. Because the +introspection of this interface supplies names for the arguments, it +is possible to pass them to the method call by keyword: + + ravel.session_bus()["org.freedesktop.Notifications"]["/org/freedesktop/Notifications"] \ + .get_interface("org.freedesktop.Notifications") \ + .Notify \ + ( + app_name = "test", + replaces_id = 0, + app_icon = "dialog-information", + summary = "Hello World!", + body = "DBussy works!", + actions = [], + hints = {}, + timeout = 5000, + ) + +(But note that the argument names might differ, depending on your +Linux distro version. If you get errors saying certain argument names +are not understood, try it without the argument names. Or do your own +introspection of the interface, to decide what argument names should +be used.) + + +Proxy Interfaces: Alternative Order +----------------------------------- + +The above access to proxy interfaces could be described as +“bus name-path-interface”, after the order in which the components +are specified. Proxies can also be obtained in “bus name-interface-path” +order. 
This can be convenient for obtaining a proxy interface object +that can then be used to make calls on multiple objects. + +In this method, an initial *root* proxy is obtained thus: + + iface_root = conn[«bus_name»].get_interface \ + ( + path = «initial_path», + interface = «interface_name», + ) + +Note you need to specify an object path for this initial introspection; +it is probably best to use the shortest (highest-level) path that +supports that interface. Depending on the peer, the root path “/” might work. + +From this, you get the proxy for the interface on an actual object by +using the object path as a lookup key, e.g. + + iface = iface_root[«path»] + +From here, you can invoke the method calls and access properties +in the same way as before, e.g. + + iface.«method»(«args») + ... iface.«prop» ... + + +Asynchronous Properties +----------------------- + +As mentioned, both property and method access can be done asynchronously +on an event loop. Asynchronous *reading* of a property is easy +enough to express: + + val = await obj.«prop» + +But how do you do asynchronous *writing* of the property? The obvious +construct + + await obj.«prop» = newval + +produces a syntax error: Python doesn’t (at least as of 3.6!) allow +“await” on the left-hand side of an assignment. Instead, you +write it as though it were a blocking call: + + obj.«prop» = newval + +but because the interface is defined as asynchronous, this causes +a task to be queued on the event loop to oversee the completion of +the set-property call, and your code can continue execution before +this task completes. + +The main consequence of this is that any error exception will be +raised asynchronously. 
But if you don’t like the idea that execution +will be deferred, you can await the completion of all such pending +property-setting calls with the following call on the root proxy: + + await iface_root.set_prop_flush() + +or on the actual proxy interface: + + await ravel.set_prop_flush(iface) + +whichever is more convenient. This means you can batch up a whole +series of property-setting calls on any number of objects on the same +interface and bus name, then wait for them all to complete with a +single flush call. + + +Interface Classes +----------------- + +The more structured high-level interface offered by Ravel is built +around the concept of an *interface class*, which is a Python class +that represents a D-Bus interface, either as an actual implementation +or as a “proxy” for making calls to another bus peer. You can then +register instances of this class on a bus connection at selected +points in your object path hierarchy, to handle either only specific +objects at those paths or as a fallback to also deal with objects +at points below those, that do not have their own instance of this +class registered. + +An interface class is identified by applying the @ravel.interface() +decorator to the class definition, specifying the kind of interface +(for use client-side, server-side or both), and the interface name, e.g. + + @ravel.interface(ravel.INTERFACE.SERVER, name = "com.example.my_interface") + class MyClass : + ... + #end MyClass + +The meanings of the first, “kind”, argument to @ravel.interface are +as follows: + * INTERFACE.SERVER -- you are the server implementing the method + calls defined by this interface. However, the signal definitions + are just “stubs” used for type-checking when you send those signals + over the bus. This interface definition can also be introspected + to inform users about the facilities provided by the interface. + * INTERFACE.CLIENT -- you are a client wanting to communicate with + a server that implements this interface. 
The method calls are + just stubs used for type-checking when you send those calls over + the bus. The signal definitions can be your actual functions + that you want to be invoked when those signals are received, or + they can also be stubs. + * INTERFACE.CLIENT_AND_SERVER -- both of the above; you implement + the methods, and maybe the signals as well, and you can also use + their definitions to send corresponding method and signal calls to + peers that implement the same interface. The standard interfaces + (Peer, Introspectable, Properties) are defined in this way. + +Within such a class, Python methods that are to handle D-Bus method +calls are identified with the @ravel.method() decorator, e.g.: + + @ravel.method \ + ( + name = ..., + in_signature = ..., + out_signature = ..., + args_keyword = ..., + arg_keys = ..., + arg_attrs = ..., + result_keyword = ..., + result_keys = ..., + result_attrs = ..., + connection_keyword = ..., + message_keyword = ..., + path_keyword = ..., + bus_keyword = ..., + set_result_keyword = ..., + ... + ) + def my_method(...) : + ... + #end my_method + +As you can see, there are a large number of options for implementing +such a method. It can also be defined as a coroutine with async def if +you have an event loop attached, and Ravel will automatically queue +the task for execution and await any returned result. Partial summary +of arguments: + * name -- the D-Bus method name. If omitted, defaults to the Python + function name. + * in_signature -- the D-Bus signature specifying the arguments (zero or more) + to the method. + * out_signature -- the D-Bus signature specifying the results (zero + or more) the method will return. + * args_keyword -- the name of an argument to the Python function that will + be set to the arguments from the message method call. The arguments + will be passed as a list, or a dict, or an attributed class, depending on the + specification of arg_keys and arg_attrs (see below). 
+ + * path_keyword -- if specified, then the object path field from the + incoming method call will be passed to the Python function as the + value of the argument with this name. + * message_keyword -- if specified, then the dbussy.Message object + for the incoming method call will be passed to the Python function + as the value of the argument with this name. + * connection_keyword -- if specified, then the dbussy.Connection object + will be passed to the Python function as the value of the argument + with this name. + * bus_keyword -- if specified, then the ravel.Connection object + will be passed to the Python function as the value of the argument + with this name. + * set_result_keyword -- if specified, then a function of a single + argument will be passed to the Python function as the value of the + argument with this name; the argument passed by calling this + function becomes the method result. + +Passing arguments: the argument with the name given by args_keyword +will hold the extracted arguments from the method call message. If +neither of arg_keys or arg_attrs is specified, then the arguments are +passed as a list. If arg_keys is specified, then it must be a sequence +of names that must match the number of types specified by the +in_signature; in this case, the args will be passed as a dict with the +keys given in arg_keys associated in order with the argument values. + +If arg_attrs is specified instead of arg_keys, then it must be a +sequence of names that must match the number of types specified by the +in_signature; a mutable attributed class object is created by calling +ravel.def_attr_class, with the attribute names taken from arg_attrs +assigned in order to the argument values. 
+ +Returning results: the function can return the result values to be +inserted into the method-return message as the function result, by +assigning to elements of a mutable result argument (passed as the +argument named by result_keyword, or by calling the set_result +function that was passed via the set_result_keyword (above). + +If neither result_keys nor result_attrs is specified, then the result +is expected to be a sequence of values matching the out_signature. If +it is returned as the function result, then it can be a tuple or list; +but if result_keyword is specified, then the value of this is a list, +and the values in the sequence must be assigned to the elements of +this list in-place. + +If result_keys is specified, then the result is a dict mapping the +names from result_keys to the values of the result sequence in order. +If result_attrs is specified, then the result is a mutable attributed +class object created by calling ravel.def_attr_class, mapping the +names from result_attrs to the values of the result sequence in order. +If result_keyword is not specified, then the result object is expected +to be returned as the function result; otherwise, it is passed as the +value of the argument named by result_keyword, and the handler is +supposed to update its elements in-place. + +Signal definitions look similar, except they return no results: + + @ravel.signal \ + ( + name = ..., + in_signature = ..., + args_keyword = ..., + arg_keys = ..., + arg_attrs = ..., + connection_keyword = ..., + message_keyword = ..., + path_keyword = ..., + bus_keyword = ..., + stub = ..., + ... + ) + def my_signal(...) : + ... + #end my_signal + +Also note the “stub” argument--this has meaning on a client-side +interface to indicate that the interface class does not implement +the listener for the signal, but that it is registered separately +with a listen_signal call. 
This is used for the PropertiesChanged +signal in ravel.PropertyHandler (the standard handler for the +DBUS.INTERFACE_PROPERTIES interface), so that you do not have +to replace the class just to install your own listeners for this +signal. + +Properties are defined by implementing getter and/or setter +methods, identified by @propgetter() and @propsetter() decorators +respectively: + + @ravel.propgetter \ + ( + name = ..., + type = ..., + name_keyword = ..., + connection_keyword = ..., + message_keyword = ..., + path_keyword = ..., + bus_keyword = ..., + change_notification = ... + ) + def my_propgetter(...) : + ... + return \ + «value» + #end my_propgetter + + @ravel.propsetter \ + ( + name = ..., + type = ..., + name_keyword = ..., + type_keyword = ..., + value_keyword = ..., + connection_keyword = ..., + message_keyword = ..., + path_keyword = ..., + bus_keyword = ... + ) + def my_propsetter(...) : + ... + #end my_propsetter + +Note the following arguments: + * type -- the type signature for permitted property values. + * change_notification -- one of the dbussy.PROP_CHANGE_NOTIFICATION values + indicating whether (and what kind of) signals should be generated for + changes to this property value. This is specified on the @propgetter(), + because there is no point notifying about write-only properties. + * type_keyword -- for passing the actual type of the new property value + to the setter. + * value_keyword -- for passing the new property value to the setter. + +Getters and setters can be coroutines. + + +Custom User Data +---------------- + +With Ravel’s interface classes, it is possible to attach your own +user data items to arbitrary points in the object path tree. 
To +obtain the user data dictionary for a given object path, do either + + user_data = bus.user_data["/com/example/myapp"] + +or + + user_data = bus.user_data["com", "example", "myapp"] + +The result is a dictionary into which you can insert whatever +key-value pairs you like, e.g.: + + user_data["com.example.myapp.attribs"] = MyObj(...) + + +Predefined Interface Classes +---------------------------- + +Ravel provides predefined interface classes for the +org.freedesktop.DBus.Peer, org.freedesktop.DBus.Introspectable and +org.freedesktop.DBus.Properties interfaces, and these are +automatically registered on Connection instances. The Peer interface +is just a stub, since the actual implementation is hard-coded into +libdbus itself; it is there to provide automatic introspection of this +interface. + +The ravel.IntrospectionHandler class defines the standard +Introspectable interface, and provides automatic introspection of all +interfaces registered with a ravel.Connection (including itself and +the other standard interfaces). It extracts the information specified +to the class, method, signal and property-handler decorators, and +generates the appropriate XML form for returning to D-Bus queries. + +The ravel.PropertyHandler class defines the standard Properties +interface, and automatically dispatches to @propgetter() and +@propsetter() methods as defined in your registered interface classes. + +The ravel.ManagedObjectsHandler class defines the standard +ObjectManager interface. It handles sending out of notifications as +you call the object_added() and object_removed() methods on a +Connection. It is not automatically registered on a Connection object; +you have to add it manually. 
If you already have a ravel.Connection +object «conn», then you can do so with + + «conn».register_additional_standard(managed_objects = True) + +Alternatively, you can request this registration at the time of +creating the Connection object in one step, for example with + + bus = ravel.session_bus(managed_objects = True) + + +DBussy Examples +=============== + +Sample code illustrating how to use DBussy/Ravel is available in my +dbussy_examples repo on GitLab +and GitHub . + + +How Do You Pronounce “DBussy”? +============================== + +The name is a pun on “dbus” and the name of French Impressionist +composer Claude Debussy. The most natural way to pronounce it would be +the same as his name. At least, that’s my story, and I’m sticking to +it. + + +Lawrence D'Oliveiro +2020 February 2 diff --git a/defaults/dbussy/dbussy.py b/defaults/dbussy/dbussy.py new file mode 100644 index 0000000..918c612 --- /dev/null +++ b/defaults/dbussy/dbussy.py @@ -0,0 +1,7021 @@ +""" +Pure-Python binding for D-Bus , +built around libdbus . + +This Python binding supports hooking into event loops via Python’s standard +asyncio module. +""" +#+ +# Copyright 2017-2020 Lawrence D'Oliveiro . +# Licensed under the GNU Lesser General Public License v2.1 or later. +#- + +import os +import builtins +import operator +import array +import enum +import ctypes as ct +from weakref import ref as weak_ref, WeakValueDictionary +import threading +import io +import atexit +import asyncio +import functools +try: + from x.sax.saxutils import quoteattr as quote_xml_attr + import x.etree.ElementTree as XMLElementTree +except ImportError: + from xml.sax.saxutils import quoteattr as quote_xml_attr + import xml.etree.ElementTree as XMLElementTree + +dbus = ct.cdll.LoadLibrary("libdbus-1.so.3") + +class DBUS : + "useful definitions adapted from the D-Bus includes. 
You will need to use the" \ + " constants, but apart from that, see the more Pythonic wrappers defined outside" \ + " this class in preference to accessing low-level structures directly." + + # General ctypes gotcha: when passing addresses of ctypes-constructed objects + # to routine calls, do not construct the objects directly in the call. Otherwise + # the refcount goes to 0 before the routine is actually entered, and the object + # can get prematurely disposed. Always store the object reference into a local + # variable, and pass the value of the variable instead. + + # from dbus-protocol.h: + + # Message byte order + LITTLE_ENDIAN = 'l' + BIG_ENDIAN = 'B' + + # Protocol version. + MAJOR_PROTOCOL_VERSION = 1 + + # Type code that is never equal to a legitimate type code + TYPE_INVALID = 0 + + # Primitive types + TYPE_BYTE = ord('y') # 8-bit unsigned integer + TYPE_BOOLEAN = ord('b') # boolean + TYPE_INT16 = ord('n') # 16-bit signed integer + TYPE_UINT16 = ord('q') # 16-bit unsigned integer + TYPE_INT32 = ord('i') # 32-bit signed integer + TYPE_UINT32 = ord('u') # 32-bit unsigned integer + TYPE_INT64 = ord('x') # 64-bit signed integer + TYPE_UINT64 = ord('t') # 64-bit unsigned integer + TYPE_DOUBLE = ord('d') # 8-byte double in IEEE 754 format + TYPE_STRING = ord('s') # UTF-8 encoded, nul-terminated Unicode string + TYPE_OBJECT_PATH = ord('o') # D-Bus object path + TYPE_SIGNATURE = ord('g') # D-Bus type signature + TYPE_UNIX_FD = ord('h') # unix file descriptor + + basic_to_ctypes = \ + { # ctypes objects suitable for holding values of D-Bus types + TYPE_BYTE : ct.c_ubyte, + TYPE_BOOLEAN : ct.c_ubyte, + TYPE_INT16 : ct.c_short, + TYPE_UINT16 : ct.c_ushort, + TYPE_INT32 : ct.c_int, + TYPE_UINT32 : ct.c_uint, + TYPE_INT64 : ct.c_longlong, + TYPE_UINT64 : ct.c_ulonglong, + TYPE_DOUBLE : ct.c_double, + TYPE_STRING : ct.c_char_p, + TYPE_OBJECT_PATH : ct.c_char_p, + TYPE_SIGNATURE : ct.c_char_p, + TYPE_UNIX_FD : ct.c_int, + } + + def int_subtype(i, bits, signed) : + 
"returns integer i after checking that it fits in the given number of bits." + if not isinstance(i, int) : + raise TypeError("value is not int: %s" % repr(i)) + #end if + if signed : + lo = - 1 << bits - 1 + hi = (1 << bits - 1) - 1 + else : + lo = 0 + hi = (1 << bits) - 1 + #end if + if i < lo or i > hi : + raise ValueError \ + ( + "%d not in range of %s %d-bit value" % (i, ("unsigned", "signed")[signed], bits) + ) + #end if + return \ + i + #end int_subtype + + subtype_boolean = lambda i : DBUS.int_subtype(i, 1, False) + subtype_byte = lambda i : DBUS.int_subtype(i, 8, False) + subtype_int16 = lambda i : DBUS.int_subtype(i, 16, True) + subtype_uint16 = lambda i : DBUS.int_subtype(i, 16, False) + subtype_int32 = lambda i : DBUS.int_subtype(i, 32, True) + subtype_uint32 = lambda i : DBUS.int_subtype(i, 32, False) + subtype_int64 = lambda i : DBUS.int_subtype(i, 64, True) + subtype_uint64 = lambda i : DBUS.int_subtype(i, 64, False) + + int_convert = \ + { # range checks for the various D-Bus integer types + TYPE_BOOLEAN : subtype_boolean, + TYPE_BYTE : subtype_byte, + TYPE_INT16 : subtype_int16, + TYPE_UINT16 : subtype_uint16, + TYPE_INT32 : subtype_int32, + TYPE_UINT32 : subtype_uint32, + TYPE_INT64 : subtype_int64, + TYPE_UINT64 : subtype_uint64, + } + + # subclasses for distinguishing various special kinds of D-Bus values: + + class ObjectPath(str) : + "an object path string." + + def __repr__(self) : + return \ + "%s(%s)" % (self.__class__.__name__, super().__repr__()) + #end __repr__ + + #end ObjectPath + + class Signature(str) : + "a type-signature string." + + def __repr__(self) : + return \ + "%s(%s)" % (self.__class__.__name__, super().__repr__()) + #end __repr__ + + #end Signature + + class UnixFD(int) : + "a file-descriptor integer." 
+ + def __repr__(self) : + return \ + "%s(%s)" % (self.__class__.__name__, super().__repr__()) + #end __repr__ + + #end UnixFD + + basic_subclasses = \ + { + TYPE_BOOLEAN : bool, + TYPE_OBJECT_PATH : ObjectPath, + TYPE_SIGNATURE : Signature, + TYPE_UNIX_FD : UnixFD, + } + + # Compound types + TYPE_ARRAY = ord('a') # D-Bus array type + TYPE_VARIANT = ord('v') # D-Bus variant type + + TYPE_STRUCT = ord('r') # a struct; however, type signatures use STRUCT_BEGIN/END_CHAR + TYPE_DICT_ENTRY = ord('e') # a dict entry; however, type signatures use DICT_ENTRY_BEGIN/END_CHAR + NUMBER_OF_TYPES = 16 # does not include TYPE_INVALID or STRUCT/DICT_ENTRY_BEGIN/END_CHAR + + # characters other than typecodes that appear in type signatures + STRUCT_BEGIN_CHAR = ord('(') # start of a struct type in a type signature + STRUCT_END_CHAR = ord(')') # end of a struct type in a type signature + DICT_ENTRY_BEGIN_CHAR = ord('{') # start of a dict entry type in a type signature + DICT_ENTRY_END_CHAR = ord('}') # end of a dict entry type in a type signature + + MAXIMUM_NAME_LENGTH = 255 # max length in bytes of a bus name, interface or member (object paths are unlimited) + + MAXIMUM_SIGNATURE_LENGTH = 255 # fits in a byte + + MAXIMUM_MATCH_RULE_LENGTH = 1024 + + MAXIMUM_MATCH_RULE_ARG_NUMBER = 63 + + MAXIMUM_ARRAY_LENGTH = 67108864 # 2 ** 26 + MAXIMUM_ARRAY_LENGTH_BITS = 26 # to store the max array size + + MAXIMUM_MESSAGE_LENGTH = MAXIMUM_ARRAY_LENGTH * 2 + MAXIMUM_MESSAGE_LENGTH_BITS = 27 + + MAXIMUM_MESSAGE_UNIX_FDS = MAXIMUM_MESSAGE_LENGTH // 4 # FDs are at least 32 bits + MAXIMUM_MESSAGE_UNIX_FDS_BITS = MAXIMUM_MESSAGE_LENGTH_BITS - 2 + + MAXIMUM_TYPE_RECURSION_DEPTH = 32 + + # Types of message + + MESSAGE_TYPE_INVALID = 0 # never a valid message type + MESSAGE_TYPE_METHOD_CALL = 1 + MESSAGE_TYPE_METHOD_RETURN = 2 + MESSAGE_TYPE_ERROR = 3 + MESSAGE_TYPE_SIGNAL = 4 + + NUM_MESSAGE_TYPES = 5 + + # Header flags + + HEADER_FLAG_NO_REPLY_EXPECTED = 0x1 + HEADER_FLAG_NO_AUTO_START = 0x2 + 
HEADER_FLAG_ALLOW_INTERACTIVE_AUTHORIZATION = 0x4 + + # Header fields + + HEADER_FIELD_INVALID = 0 + HEADER_FIELD_PATH = 1 + HEADER_FIELD_INTERFACE = 2 + HEADER_FIELD_MEMBER = 3 + HEADER_FIELD_ERROR_NAME = 4 + HEADER_FIELD_REPLY_SERIAL = 5 + HEADER_FIELD_DESTINATION = 6 + HEADER_FIELD_SENDER = 7 + HEADER_FIELD_SIGNATURE = 8 + HEADER_FIELD_UNIX_FDS = 9 + + HEADER_FIELD_LAST = HEADER_FIELD_UNIX_FDS + + HEADER_SIGNATURE = bytes \ + (( + TYPE_BYTE, + TYPE_BYTE, + TYPE_BYTE, + TYPE_BYTE, + TYPE_UINT32, + TYPE_UINT32, + TYPE_ARRAY, + STRUCT_BEGIN_CHAR, + TYPE_BYTE, + TYPE_VARIANT, + STRUCT_END_CHAR, + )) + MINIMUM_HEADER_SIZE = 16 # smallest header size that can occur (missing required fields, though) + + # Errors + ERROR_FAILED = "org.freedesktop.DBus.Error.Failed" # generic error + ERROR_NO_MEMORY = "org.freedesktop.DBus.Error.NoMemory" + ERROR_SERVICE_UNKNOWN = "org.freedesktop.DBus.Error.ServiceUnknown" + ERROR_NAME_HAS_NO_OWNER = "org.freedesktop.DBus.Error.NameHasNoOwner" + ERROR_NO_REPLY = "org.freedesktop.DBus.Error.NoReply" + ERROR_IO_ERROR = "org.freedesktop.DBus.Error.IOError" + ERROR_BAD_ADDRESS = "org.freedesktop.DBus.Error.BadAddress" + ERROR_NOT_SUPPORTED = "org.freedesktop.DBus.Error.NotSupported" + ERROR_LIMITS_EXCEEDED = "org.freedesktop.DBus.Error.LimitsExceeded" + ERROR_ACCESS_DENIED = "org.freedesktop.DBus.Error.AccessDenied" + ERROR_AUTH_FAILED = "org.freedesktop.DBus.Error.AuthFailed" + ERROR_NO_SERVER = "org.freedesktop.DBus.Error.NoServer" + ERROR_TIMEOUT = "org.freedesktop.DBus.Error.Timeout" + ERROR_NO_NETWORK = "org.freedesktop.DBus.Error.NoNetwork" + ERROR_ADDRESS_IN_USE = "org.freedesktop.DBus.Error.AddressInUse" + ERROR_DISCONNECTED = "org.freedesktop.DBus.Error.Disconnected" + ERROR_INVALID_ARGS = "org.freedesktop.DBus.Error.InvalidArgs" + ERROR_FILE_NOT_FOUND = "org.freedesktop.DBus.Error.FileNotFound" + ERROR_FILE_EXISTS = "org.freedesktop.DBus.Error.FileExists" + ERROR_UNKNOWN_METHOD = "org.freedesktop.DBus.Error.UnknownMethod" + 
ERROR_UNKNOWN_OBJECT = "org.freedesktop.DBus.Error.UnknownObject" + ERROR_UNKNOWN_INTERFACE = "org.freedesktop.DBus.Error.UnknownInterface" + ERROR_UNKNOWN_PROPERTY = "org.freedesktop.DBus.Error.UnknownProperty" + ERROR_PROPERTY_READ_ONLY = "org.freedesktop.DBus.Error.PropertyReadOnly" + ERROR_TIMED_OUT = "org.freedesktop.DBus.Error.TimedOut" + ERROR_MATCH_RULE_NOT_FOUND = "org.freedesktop.DBus.Error.MatchRuleNotFound" + ERROR_MATCH_RULE_INVALID = "org.freedesktop.DBus.Error.MatchRuleInvalid" + ERROR_SPAWN_EXEC_FAILED = "org.freedesktop.DBus.Error.Spawn.ExecFailed" + ERROR_SPAWN_FORK_FAILED = "org.freedesktop.DBus.Error.Spawn.ForkFailed" + ERROR_SPAWN_CHILD_EXITED = "org.freedesktop.DBus.Error.Spawn.ChildExited" + ERROR_SPAWN_CHILD_SIGNALED = "org.freedesktop.DBus.Error.Spawn.ChildSignaled" + ERROR_SPAWN_FAILED = "org.freedesktop.DBus.Error.Spawn.Failed" + ERROR_SPAWN_SETUP_FAILED = "org.freedesktop.DBus.Error.Spawn.FailedToSetup" + ERROR_SPAWN_CONFIG_INVALID = "org.freedesktop.DBus.Error.Spawn.ConfigInvalid" + ERROR_SPAWN_SERVICE_INVALID = "org.freedesktop.DBus.Error.Spawn.ServiceNotValid" + ERROR_SPAWN_SERVICE_NOT_FOUND = "org.freedesktop.DBus.Error.Spawn.ServiceNotFound" + ERROR_SPAWN_PERMISSIONS_INVALID = "org.freedesktop.DBus.Error.Spawn.PermissionsInvalid" + ERROR_SPAWN_FILE_INVALID = "org.freedesktop.DBus.Error.Spawn.FileInvalid" + ERROR_SPAWN_NO_MEMORY = "org.freedesktop.DBus.Error.Spawn.NoMemory" + ERROR_UNIX_PROCESS_ID_UNKNOWN = "org.freedesktop.DBus.Error.UnixProcessIdUnknown" + ERROR_INVALID_SIGNATURE = "org.freedesktop.DBus.Error.InvalidSignature" + ERROR_INVALID_FILE_CONTENT = "org.freedesktop.DBus.Error.InvalidFileContent" + ERROR_SELINUX_SECURITY_CONTEXT_UNKNOWN = "org.freedesktop.DBus.Error.SELinuxSecurityContextUnknown" + ERROR_ADT_AUDIT_DATA_UNKNOWN = "org.freedesktop.DBus.Error.AdtAuditDataUnknown" + ERROR_OBJECT_PATH_IN_USE = "org.freedesktop.DBus.Error.ObjectPathInUse" + ERROR_INCONSISTENT_MESSAGE = 
"org.freedesktop.DBus.Error.InconsistentMessage" + ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED = "org.freedesktop.DBus.Error.InteractiveAuthorizationRequired" + + # XML introspection format + INTROSPECT_1_0_XML_NAMESPACE = "http://www.freedesktop.org/standards/dbus" + INTROSPECT_1_0_XML_PUBLIC_IDENTIFIER = "-//freedesktop//DTD D-BUS Object Introspection 1.0//EN" + INTROSPECT_1_0_XML_SYSTEM_IDENTIFIER = "http://www.freedesktop.org/standards/dbus/1.0/introspect.dtd" + INTROSPECT_1_0_XML_DOCTYPE_DECL_NODE = \ + ( + "\n" + ) + + # from dbus-shared.h: + + # well-known bus types + BusType = ct.c_uint + BUS_SESSION = 0 + BUS_SYSTEM = 1 + BUS_STARTER = 2 + + # results that a message handler can return + BusHandlerResult = ct.c_uint + HANDLER_RESULT_HANDLED = 0 # no need to try more handlers + HANDLER_RESULT_NOT_YET_HANDLED = 1 # see if other handlers want it + HANDLER_RESULT_NEED_MEMORY = 2 # try again later with more memory + + # Bus names + SERVICE_DBUS = "org.freedesktop.DBus" # used to talk to the bus itself + + # Paths + PATH_DBUS = "/org/freedesktop/DBus" # object path used to talk to the bus itself + PATH_LOCAL = "/org/freedesktop/DBus/Local" # path used in local/in-process-generated messages + + # Interfaces + INTERFACE_DBUS = "org.freedesktop.DBus" # interface exported by the object with SERVICE_DBUS and PATH_DBUS + INTERFACE_MONITORING = "org.freedesktop.DBus.Monitoring" # monitoring interface exported by the dbus-daemon + INTERFACE_VERBOSE = "org.freedesktop.DBus.Verbose" # verbose interface exported by the dbus-daemon + INTERFACE_INTROSPECTABLE = "org.freedesktop.DBus.Introspectable" # interface supported by introspectable objects + INTERFACE_PROPERTIES = "org.freedesktop.DBus.Properties" # interface supported by objects with properties + INTERFACE_PEER = "org.freedesktop.DBus.Peer" # interface supported by most dbus peers + INTERFACE_LOCAL = "org.freedesktop.DBus.Local" # methods can only be invoked locally + + # Owner flags for request_name + 
NAME_FLAG_ALLOW_REPLACEMENT = 0x1 + NAME_FLAG_REPLACE_EXISTING = 0x2 + NAME_FLAG_DO_NOT_QUEUE = 0x4 + + # Replies to request for a name + REQUEST_NAME_REPLY_PRIMARY_OWNER = 1 + REQUEST_NAME_REPLY_IN_QUEUE = 2 + REQUEST_NAME_REPLY_EXISTS = 3 + REQUEST_NAME_REPLY_ALREADY_OWNER = 4 + + # Replies to releasing a name + RELEASE_NAME_REPLY_RELEASED = 1 + RELEASE_NAME_REPLY_NON_EXISTENT = 2 + RELEASE_NAME_REPLY_NOT_OWNER = 3 + + # Replies to service starts + START_REPLY_SUCCESS = 1 + START_REPLY_ALREADY_RUNNING = 2 + + # from dbus-types.h: + + bool_t = ct.c_uint + + # from dbus-memory.h: + + FreeFunction = ct.CFUNCTYPE(None, ct.c_void_p) + + # from dbus-connection.h: + + HandlerResult = ct.c_uint + + class Error(ct.Structure) : + _fields_ = \ + [ + ("name", ct.c_char_p), + ("message", ct.c_char_p), + ("padding", 2 * ct.c_void_p), + ] + #end Error + ErrorPtr = ct.POINTER(Error) + + WatchFlags = ct.c_uint + WATCH_READABLE = 1 << 0 + WATCH_WRITABLE = 1 << 1 + WATCH_ERROR = 1 << 2 + WATCH_HANGUP = 1 << 3 + + DispatchStatus = ct.c_uint + DISPATCH_DATA_REMAINS = 0 # more data available + DISPATCH_COMPLETE = 1 # all available data has been processed + DISPATCH_NEED_MEMORY = 2 # not enough memory to continue + + AddWatchFunction = ct.CFUNCTYPE(bool_t, ct.c_void_p, ct.c_void_p) + # add_watch(DBusWatch, user_data) returns success/failure + WatchToggledFunction = ct.CFUNCTYPE(None, ct.c_void_p, ct.c_void_p) + # watch_toggled(DBusWatch, user_data) + RemoveWatchFunction = ct.CFUNCTYPE(None, ct.c_void_p, ct.c_void_p) + # remove_watch(DBusWatch, user_data) + + AddTimeoutFunction = ct.CFUNCTYPE(bool_t, ct.c_void_p, ct.c_void_p) + # add_timeout(DBusTimeout, user_data) returns success/failure + TimeoutToggledFunction = ct.CFUNCTYPE(None, ct.c_void_p, ct.c_void_p) + # timeout_toggled(DBusTimeout, user_data) + RemoveTimeoutFunction = ct.CFUNCTYPE(None, ct.c_void_p, ct.c_void_p) + # remove_timeout(DBusTimeout, user_data) + + DispatchStatusFunction = ct.CFUNCTYPE(None, ct.c_void_p, 
ct.POINTER(DispatchStatus), ct.c_void_p) + # dispatch_status(DBusConnection, DBusDispatchStatus, user_data) + WakeupMainFunction = ct.CFUNCTYPE(None, ct.c_void_p) + # wakeup_main(user_data) + + AllowUnixUserFunction = ct.CFUNCTYPE(bool_t, ct.c_void_p, ct.c_ulong, ct.c_void_p) + # allow_unix_user(DBusConnection, uid, user_data) returns success/failure + AllowWindowsUserFunction = ct.CFUNCTYPE(bool_t, ct.c_void_p, ct.c_void_p, ct.c_void_p) + # allow_windows_user(DBusConnection, user_sid, user_data)returns success/failure + + PendingCallNotifyFunction = ct.CFUNCTYPE(None, ct.c_void_p, ct.c_void_p) + # notify(DBusPendingCall, user_data) + + HandleMessageFunction = ct.CFUNCTYPE(HandlerResult, ct.c_void_p, ct.c_void_p, ct.c_void_p) + # handle_message(DBusConnection, DBusMessage, user_data) + + ObjectPathUnregisterFunction = ct.CFUNCTYPE(None, ct.c_void_p, ct.c_void_p) + # unregister(DBusConnection, user_data) + ObjectPathMessageFunction = ct.CFUNCTYPE(HandlerResult, ct.c_void_p, ct.c_void_p, ct.c_void_p) + # handle_message(DBusConnection, DBusMessage, user_data) + + class ObjectPathVTable(ct.Structure) : + pass + #end ObjectPathVTable + ObjectPathVTable._fields_ = \ + [ + ("unregister_function", ObjectPathUnregisterFunction), + ("message_function", ObjectPathMessageFunction), + ("internal_pad1", ct.CFUNCTYPE(None, ct.c_void_p)), + ("internal_pad2", ct.CFUNCTYPE(None, ct.c_void_p)), + ("internal_pad3", ct.CFUNCTYPE(None, ct.c_void_p)), + ("internal_pad4", ct.CFUNCTYPE(None, ct.c_void_p)), + ] + ObjectPathVTablePtr = ct.POINTER(ObjectPathVTable) + + # from dbus-pending-call.h: + TIMEOUT_INFINITE = 0x7fffffff + TIMEOUT_USE_DEFAULT = -1 + + # from dbus-message.h: + class MessageIter(ct.Structure) : + "contains no public fields." 
+ _fields_ = \ + [ + ("dummy1", ct.c_void_p), + ("dummy2", ct.c_void_p), + ("dummy3", ct.c_uint), + ("dummy4", ct.c_int), + ("dummy5", ct.c_int), + ("dummy6", ct.c_int), + ("dummy7", ct.c_int), + ("dummy8", ct.c_int), + ("dummy9", ct.c_int), + ("dummy10", ct.c_int), + ("dummy11", ct.c_int), + ("pad1", ct.c_int), + ("pad2", ct.c_void_p), + ("pad3", ct.c_void_p), + ] + #end MessageIter + MessageIterPtr = ct.POINTER(MessageIter) + + # from dbus-server.h: + NewConnectionFunction = ct.CFUNCTYPE(None, ct.c_void_p, ct.c_void_p, ct.c_void_p) + # new_connection(DBusServer, DBusConnection, user_data) + + # from dbus-signature.h: + class SignatureIter(ct.Structure) : + "contains no public fields." + _fields_ = \ + [ + ("dummy1", ct.c_void_p), + ("dummy2", ct.c_void_p), + ("dummy8", ct.c_uint), + ("dummy12", ct.c_int), + ("dummy17", ct.c_int), + ] + #end SignatureIter + SignatureIterPtr = ct.POINTER(SignatureIter) + +#end DBUS + +class DBUSX: + "additional definitions not part of the official interfaces" + + DEFAULT_TIMEOUT = 25 # seconds, from dbus-connection-internal.h in libdbus source + + # For reference implementation for how to connect to daemon, + # see libdbus sources, dbus/dbus-bus.c (internal_bus_get routine + # and stuff that it calls) + + # environment variables used to find addresses of bus daemons + SESSION_BUS_ADDRESS_VAR = "DBUS_SESSION_BUS_ADDRESS" + SYSTEM_BUS_ADDRESS_VAR = "DBUS_SYSTEM_BUS_ADDRESS" + STARTER_BUS_ADDRESS_VAR = "DBUS_STARTER_ADDRESS" + STARTER_BUS_ADDRESS_TYPE = "DBUS_STARTER_BUS_TYPE" + + # values for value of STARTER_BUS_ADDRESS_TYPE + # If cannot determine type, then default to session bus + BUS_TYPE_SESSION = "session" + BUS_TYPE_SYSTEM = "system" + + SYSTEM_BUS_ADDRESS = "unix:path=/var/run/dbus/system_bus_socket" + # default system bus daemon address if value of SYSTEM_BUS_ADDRESS_VAR is not defined + SESSION_BUS_ADDRESS = "autolaunch:" + # default session bus daemon address if value of SESSION_BUS_ADDRESS_VAR is not defined + + 
INTERFACE_OBJECT_MANAGER = "org.freedesktop.DBus.ObjectManager" + # no symbolic name for this in standard headers as yet + +#end DBUSX + +#+ +# Useful stuff +#- + +if hasattr(asyncio, "get_running_loop") : + # new in Python 3.7 + get_running_loop = asyncio.get_running_loop +else : + # as long as I want to support pre-3.7... + get_running_loop = asyncio.get_event_loop +#end if + +def get_event_loop() : + "Python docs indicate that asyncio.get_event_loop() is going away" \ + " in its current form. But I still need to be able to attach objects" \ + " to the default event loop from a non-coroutine context. So I" \ + " reimplement its original semantics here." + return \ + asyncio.get_event_loop_policy().get_event_loop() +#end get_event_loop + +def _wderef(w_self, parent) : + self = w_self() + assert self != None, "%s has gone away" % parent + return \ + self +#end _wderef + +def call_async(func, funcargs = (), timeout = None, abort = None, loop = None) : + "invokes func on a separate temporary thread and returns a Future that" \ + " can be used to wait for its completion and obtain its result. If timeout" \ + " is not None, then waiters on the Future will get a TimeoutError exception" \ + " if the function has not completed execution after that number of seconds." \ + " This allows easy invocation of blocking I/O functions in an asyncio-" \ + "compatible fashion. But note that the operation cannot be cancelled" \ + " if the timeout elapses; instead, you can specify an abort callback" \ + " which will be invoked with whatever result is eventually returned from" \ + " func." 
+ + if loop == None : + loop = get_running_loop() + #end if + + timeout_task = None + + def func_done(ref_awaiting, result) : + awaiting = ref_awaiting() + if awaiting != None : + if not awaiting.done() : + awaiting.set_result(result) + if timeout_task != None : + timeout_task.cancel() + #end if + else : + if abort != None : + abort(result) + #end if + #end if + #end if + #end func_done + + def do_func_timedout(ref_awaiting) : + awaiting = ref_awaiting() + if awaiting != None : + if not awaiting.done() : + awaiting.set_exception(TimeoutError()) + # Python doesn’t give me any (easy) way to cancel the thread running the + # do_func() call, so just let it run to completion, whereupon func_done() + # will get rid of the result. Even if I could delete the thread, can I be sure + # that would clean up memory and OS/library resources properly? + #end if + #end if + #end do_func_timedout + + def do_func(ref_awaiting) : + # makes the blocking call on a separate thread. + result = func(*funcargs) + # A Future is not itself threadsafe, but I can thread-safely + # run a callback on the main thread to set it. + loop.call_soon_threadsafe(func_done, ref_awaiting, result) + #end do_func + +#begin call_async + awaiting = loop.create_future() + ref_awaiting = weak_ref(awaiting) + # weak ref to avoid circular refs with loop + subthread = threading.Thread(target = do_func, args = (ref_awaiting,), daemon = True) + subthread.start() + if timeout != None : + timeout_task = loop.call_later(timeout, do_func_timedout, ref_awaiting) + #end if + return \ + awaiting +#end call_async + +#+ +# Higher-level interface to type system +#- + +class TYPE(enum.Enum) : + "D-Bus type codes wrapped up in an enumeration." 
+ + BYTE = ord('y') # 8-bit unsigned integer + BOOLEAN = ord('b') # boolean + INT16 = ord('n') # 16-bit signed integer + UINT16 = ord('q') # 16-bit unsigned integer + INT32 = ord('i') # 32-bit signed integer + UINT32 = ord('u') # 32-bit unsigned integer + INT64 = ord('x') # 64-bit signed integer + UINT64 = ord('t') # 64-bit unsigned integer + DOUBLE = ord('d') # 8-byte double in IEEE 754 format + STRING = ord('s') # UTF-8 encoded, nul-terminated Unicode string + OBJECT_PATH = ord('o') # D-Bus object path + SIGNATURE = ord('g') # D-Bus type signature + UNIX_FD = ord('h') # unix file descriptor + + ARRAY = ord('a') # array of elements all of same type, or possibly dict + STRUCT = ord('r') # sequence of elements of arbitrary types + VARIANT = ord('v') # a single element of dynamic type + + @property + def is_basic(self) : + "does this code represent a basic (non-container) type." + return \ + self.value in DBUS.basic_to_ctypes + #end is_basic + +#end TYPE + +class Type : + "base class for all Types. The “signature” property returns the fully-encoded" \ + " signature string for the entire Type." + + __slots__ = ("code",) + + def __init__(self, code) : + if not isinstance(code, TYPE) : + raise TypeError("only TYPE.xxx values allowed") + #end if + self.code = code + #end __init__ + + @property + def signature(self) : + raise NotImplementedError("subclass forgot to override signature property") + #end signature + + def __eq__(t1, t2) : + raise NotImplementedError("subclass forgot to override __eq__ method") + #end __eq__ + + def validate(self, val) : + "returns val if it is an acceptable value of this Type, else raises" \ + " TypeError or ValueError." + raise NotImplementedError("subclass forgot to override validate method") + #end validate + + def __repr__(self) : + return \ + "%s(sig = %s)" % (type(self).__name__, repr(self.signature)) + #end __repr__ + +#end Type + +class BasicType(Type) : + "a basic (non-container) type." 
+ + __slots__ = () + + def __init__(self, code) : + if not isinstance(code, TYPE) or not code.is_basic : + raise TypeError("only basic TYPE.xxx values allowed") + #end if + super().__init__(code) + #end __init__ + + def __repr__(self) : + return \ + "%s(%s)" % (type(self).__name__, repr(self.code)) + #end __repr__ + + @property + def signature(self) : + return \ + chr(self.code.value) + #end signature + + def __eq__(t1, t2) : + return \ + isinstance(t2, BasicType) and t1.code == t2.code + #end __eq__ + + def validate(self, val) : + if self.code.value in DBUS.int_convert : + val = DBUS.int_convert[self.code.value](val) + elif self.code == TYPE.DOUBLE : + if not isinstance(val, float) : + raise TypeError("expecting a float, not %s: %s" % (type(val).__name__, repr(val))) + #end if + elif self.code == TYPE.UNIX_FD : + val = DBUS.subtype_uint32(val) + elif DBUS.basic_to_ctypes[self.code.value] == ct.c_char_p : + if not isinstance(val, str) : + raise TypeError("expecting a string, not %s: %s" % (type(val).__name__, repr(val))) + #end if + else : + raise RuntimeError("unknown basic type %s" % repr(self.code)) + #end if + return \ + val + #end validate + +#end BasicType + +class VariantType(Type) : + "the variant type--a single element of a type determined at run-time." 
+ + def __init__(self) : + super().__init__(TYPE.VARIANT) + #end __init__ + + @property + def signature(self) : + return \ + chr(TYPE.VARIANT.value) + #end signature + + def __repr__(self) : + return \ + "%s()" % type(self).__name__ + #end __repr__ + + def __eq__(t1, t2) : + return \ + isinstance(t2, VariantType) + #end __eq__ + + def validate(self, val) : + if not isinstance(val, (tuple, list)) or len(val) != 2 : + raise ValueError("expecting a (type, value) pair") + #end if + valtype, val = val + valtype = parse_single_signature(valtype) + return \ + (valtype, valtype.validate(val)) + #end validate + +#end VariantType + +class StructType(Type) : + "a sequence of one or more arbitrary types (empty structs are not allowed)." + + __slots__ = ("elttypes",) + + def __init__(self, *types) : + if len(types) == 0 : + raise TypeError("must have at least one element type") + #end if + if not all(isinstance(t, Type) for t in types) : + raise TypeError("struct elements must be Types") + #end if + super().__init__(TYPE.STRUCT) + self.elttypes = tuple(types) + #end __init__ + + def __repr__(self) : + return \ + "%s(%s)" % (type(self).__name__, repr(self.elttypes)) + #end __repr__ + + @property + def signature(self) : + return \ + "(%s)" % "".join(t.signature for t in self.elttypes) + #end signature + + def __eq__(t1, t2) : + return \ + ( + isinstance(t2, StructType) + and + len(t1.elttypes) == len(t2.elttypes) + and + all(e1 == e2 for e1, e2 in zip(t1.elttypes, t2.elttypes)) + ) + #end __eq__ + + def validate(self, val) : + if not isinstance(val, (tuple, list)) or len(val) != len(self.elttypes) : + raise TypeError \ + ( + "need a list or tuple of %d elements, not %s" % (len(self.elttypes), repr(val)) + ) + #end if + return \ + type(val)(elttype.validate(elt) for elttype, elt in zip(self.elttypes, val)) + #end validate + +#end StructType + +class ArrayType(Type) : + "an array of zero or more elements all of the same type." 
+ + __slots__ = ("elttype",) + + def __init__(self, elttype) : + if not isinstance(elttype, Type) : + raise TypeError("invalid array element type") + #end if + super().__init__(TYPE.ARRAY) + self.elttype = elttype + #end __init__ + + def __repr__(self) : + return \ + "%s[%s]" % (type(self).__name__, repr(self.elttype)) + #end __repr__ + + @property + def signature(self) : + return \ + chr(TYPE.ARRAY.value) + self.elttype.signature + #end signature + + def __eq__(t1, t2) : + return \ + isinstance(t2, ArrayType) and t1.elttype == t2.elttype + #end __eq__ + + def validate(self, val) : + if not isinstance(val, (tuple, list)) : + raise TypeError("need a tuple or list, not %s: %s" % (type(val).__name__, repr(val))) + #end if + return \ + type(val)(self.elttype.validate(elt) for elt in val) + #end validate + +#end ArrayType + +class DictType(Type) : + "a dictionary mapping zero or more keys to values." + + __slots__ = ("keytype", "valuetype") + + def __init__(self, keytype, valuetype) : + if not isinstance(keytype, BasicType) or not isinstance(valuetype, Type) : + raise TypeError("invalid dict key/value type") + #end if + super().__init__(TYPE.ARRAY) + self.keytype = keytype + self.valuetype = valuetype + #end keytype + + def __repr__(self) : + return \ + "%s[%s : %s]" % (type(self).__name__, repr(self.keytype), repr(self.valuetype)) + #end __repr__ + + @property + def signature(self) : + return \ + "%s{%s%s}" % (chr(TYPE.ARRAY.value), self.keytype.signature, self.valuetype.signature) + #end signature + + @property + def entry_signature(self) : + "signature for a dict entry." 
+ return \ + "{%s%s}" % (self.keytype.signature, self.valuetype.signature) + #end entry_signature + + def __eq__(t1, t2) : + return \ + isinstance(t2, DictType) and t1.keytype == t2.keytype and t1.valuetype == t2.valuetype + #end __eq__ + + def validate(self, val) : + if not isinstance(val, dict) : + raise TypeError("need a dict, not %s: %s" % (type(val).__name__, repr(val))) + #end if + return \ + type(val) \ + ( + (self.keytype.validate(key), self.valuetype.validate(val[key])) + for key in val + ) + #end validate + +#end DictType + +def data_key(data) : + "returns a unique value that allows data to be used as a dict/set key." + if isinstance(data, (bytes, float, frozenset, int, str, tuple)) : + result = data + else : + # data itself is non-hashable + result = id(data) + #end if + return \ + result +#end data_key + +#+ +# Library prototypes +#- + +# from dbus-connection.h: +dbus.dbus_connection_open.restype = ct.c_void_p +dbus.dbus_connection_open.argtypes = (ct.c_char_p, DBUS.ErrorPtr) +dbus.dbus_connection_open_private.restype = ct.c_void_p +dbus.dbus_connection_open_private.argtypes = (ct.c_char_p, DBUS.ErrorPtr) +dbus.dbus_connection_ref.restype = ct.c_void_p +dbus.dbus_connection_ref.argtypes = (ct.c_void_p,) +dbus.dbus_connection_unref.restype = None +dbus.dbus_connection_unref.argtypes = (ct.c_void_p,) +dbus.dbus_connection_close.restype = None +dbus.dbus_connection_close.argtypes = (ct.c_void_p,) +dbus.dbus_connection_get_is_connected.restype = DBUS.bool_t +dbus.dbus_connection_get_is_connected.argtypes = (ct.c_void_p,) +dbus.dbus_connection_get_is_authenticated.restype = DBUS.bool_t +dbus.dbus_connection_get_is_authenticated.argtypes = (ct.c_void_p,) +dbus.dbus_connection_get_is_anonymous.restype = DBUS.bool_t +dbus.dbus_connection_get_is_anonymous.argtypes = (ct.c_void_p,) +dbus.dbus_connection_get_server_id.restype = ct.c_void_p +dbus.dbus_connection_get_server_id.argtypes = (ct.c_void_p,) +dbus.dbus_connection_can_send_type.restype = DBUS.bool_t 
+dbus.dbus_connection_can_send_type.argtypes = (ct.c_void_p, ct.c_int) +dbus.dbus_connection_set_exit_on_disconnect.restype = None +dbus.dbus_connection_set_exit_on_disconnect.argtypes = (ct.c_void_p, DBUS.bool_t) +dbus.dbus_connection_preallocate_send.restype = ct.c_void_p +dbus.dbus_connection_preallocate_send.argtypes = (ct.c_void_p,) +dbus.dbus_connection_free_preallocated_send.restype = None +dbus.dbus_connection_free_preallocated_send.argtypes = (ct.c_void_p, ct.c_void_p) +dbus.dbus_connection_send_preallocated.restype = None +dbus.dbus_connection_send_preallocated.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.POINTER(ct.c_uint)) +dbus.dbus_connection_has_messages_to_send.restype = DBUS.bool_t +dbus.dbus_connection_has_messages_to_send.argtypes = (ct.c_void_p,) +dbus.dbus_connection_send.restype = DBUS.bool_t +dbus.dbus_connection_send.argtypes = (ct.c_void_p, ct.c_void_p, ct.POINTER(ct.c_uint)) +dbus.dbus_connection_send_with_reply.restype = DBUS.bool_t +dbus.dbus_connection_send_with_reply.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_int) +dbus.dbus_connection_send_with_reply_and_block.restype = ct.c_void_p +dbus.dbus_connection_send_with_reply_and_block.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_int, DBUS.ErrorPtr) +dbus.dbus_connection_flush.restype = None +dbus.dbus_connection_flush.argtypes = (ct.c_void_p,) +dbus.dbus_connection_read_write_dispatch.restype = DBUS.bool_t +dbus.dbus_connection_read_write_dispatch.argtypes = (ct.c_void_p, ct.c_int) +dbus.dbus_connection_read_write.restype = DBUS.bool_t +dbus.dbus_connection_read_write.argtypes = (ct.c_void_p, ct.c_int) +dbus.dbus_connection_borrow_message.restype = ct.c_void_p +dbus.dbus_connection_borrow_message.argtypes = (ct.c_void_p,) +dbus.dbus_connection_return_message.restype = None +dbus.dbus_connection_return_message.argtypes = (ct.c_void_p, ct.c_void_p) +dbus.dbus_connection_steal_borrowed_message.restype = None +dbus.dbus_connection_steal_borrowed_message.argtypes = 
(ct.c_void_p, ct.c_void_p) +dbus.dbus_connection_pop_message.restype = ct.c_void_p +dbus.dbus_connection_pop_message.argtypes = (ct.c_void_p,) +dbus.dbus_connection_get_dispatch_status.restype = ct.c_uint +dbus.dbus_connection_get_dispatch_status.argtypes = (ct.c_void_p,) +dbus.dbus_connection_dispatch.restype = ct.c_uint +dbus.dbus_connection_dispatch.argtypes = (ct.c_void_p,) +dbus.dbus_connection_set_watch_functions.restype = DBUS.bool_t +dbus.dbus_connection_set_watch_functions.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p) +dbus.dbus_connection_set_timeout_functions.restype = DBUS.bool_t +dbus.dbus_connection_set_timeout_functions.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p) +dbus.dbus_connection_set_wakeup_main_function.restype = None +dbus.dbus_connection_set_wakeup_main_function.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p) +dbus.dbus_connection_set_dispatch_status_function.restype = None +dbus.dbus_connection_set_dispatch_status_function.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p) +dbus.dbus_connection_get_unix_user.restype = DBUS.bool_t +dbus.dbus_connection_get_unix_user.argtypes = (ct.c_void_p, ct.POINTER(ct.c_ulong)) +dbus.dbus_connection_get_unix_process_id.restype = DBUS.bool_t +dbus.dbus_connection_get_unix_process_id.argtypes = (ct.c_void_p, ct.POINTER(ct.c_ulong)) +dbus.dbus_connection_get_adt_audit_session_data.restype = DBUS.bool_t +dbus.dbus_connection_get_adt_audit_session_data.argtypes = (ct.c_void_p, ct.c_void_p, ct.POINTER(ct.c_uint)) +dbus.dbus_connection_set_unix_user_function.restype = None +dbus.dbus_connection_set_unix_user_function.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p) +dbus.dbus_connection_get_windows_user.restype = DBUS.bool_t +dbus.dbus_connection_get_windows_user.argtypes = (ct.c_void_p, ct.c_void_p) +dbus.dbus_connection_set_windows_user_function.restype = None 
+dbus.dbus_connection_set_windows_user_function.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p) +dbus.dbus_connection_set_allow_anonymous.restype = None +dbus.dbus_connection_set_allow_anonymous.argtypes = (ct.c_void_p, DBUS.bool_t) +dbus.dbus_connection_set_route_peer_messages.restype = None +dbus.dbus_connection_set_route_peer_messages.argtypes = (ct.c_void_p, DBUS.bool_t) + +dbus.dbus_connection_add_filter.restype = DBUS.bool_t +dbus.dbus_connection_add_filter.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p) +dbus.dbus_connection_remove_filter.restype = None +dbus.dbus_connection_remove_filter.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p) + +dbus.dbus_connection_allocate_data_slot.restype = DBUS.bool_t +dbus.dbus_connection_allocate_data_slot.argtypes = (ct.POINTER(ct.c_uint),) +dbus.dbus_connection_free_data_slot.restype = None +dbus.dbus_connection_free_data_slot.argtypes = (ct.c_uint,) +dbus.dbus_connection_set_data.restype = DBUS.bool_t +dbus.dbus_connection_set_data.argtypes = (ct.c_void_p, ct.c_uint, ct.c_void_p, ct.c_void_p) +dbus.dbus_connection_get_data.restype = ct.c_void_p +dbus.dbus_connection_get_data.argtypes = (ct.c_void_p, ct.c_uint) +dbus.dbus_connection_set_change_sigpipe.restype = None +dbus.dbus_connection_set_change_sigpipe.argtypes = (DBUS.bool_t,) +dbus.dbus_connection_set_max_message_size.restype = None +dbus.dbus_connection_set_max_message_size.argtypes = (ct.c_void_p, ct.c_long) +dbus.dbus_connection_get_max_message_size.restype = ct.c_long +dbus.dbus_connection_get_max_message_size.argtypes = (ct.c_void_p,) +dbus.dbus_connection_set_max_received_size.restype = None +dbus.dbus_connection_set_max_received_size.argtypes = (ct.c_void_p, ct.c_long) +dbus.dbus_connection_get_max_received_size.restype = ct.c_long +dbus.dbus_connection_get_max_received_size.argtypes = (ct.c_void_p,) +dbus.dbus_connection_set_max_message_unix_fds.restype = None +dbus.dbus_connection_set_max_message_unix_fds.argtypes = 
(ct.c_void_p, ct.c_long) +dbus.dbus_connection_get_max_message_unix_fds.restype = ct.c_long +dbus.dbus_connection_get_max_message_unix_fds.argtypes = (ct.c_void_p,) +dbus.dbus_connection_set_max_received_unix_fds.restype = None +dbus.dbus_connection_set_max_received_unix_fds.argtypes = (ct.c_void_p, ct.c_long) +dbus.dbus_connection_get_max_received_unix_fds.restype = ct.c_long +dbus.dbus_connection_get_max_received_unix_fds.argtypes = (ct.c_void_p,) + +dbus.dbus_connection_get_outgoing_size.restype = ct.c_long +dbus.dbus_connection_get_outgoing_size.argtypes = (ct.c_void_p,) +dbus.dbus_connection_get_outgoing_unix_fds.restype = ct.c_long +dbus.dbus_connection_get_outgoing_unix_fds.argtypes = (ct.c_void_p,) + +dbus.dbus_connection_register_object_path.restype = DBUS.bool_t +dbus.dbus_connection_register_object_path.argtypes = (ct.c_void_p, ct.c_char_p, DBUS.ObjectPathVTablePtr, ct.c_void_p) +dbus.dbus_connection_try_register_object_path.restype = DBUS.bool_t +dbus.dbus_connection_try_register_object_path.argtypes = (ct.c_void_p, ct.c_char_p, DBUS.ObjectPathVTablePtr, ct.c_void_p, DBUS.ErrorPtr) +dbus.dbus_connection_register_fallback.restype = DBUS.bool_t +dbus.dbus_connection_register_fallback.argtypes = (ct.c_void_p, ct.c_char_p, DBUS.ObjectPathVTablePtr, ct.c_void_p) +dbus.dbus_connection_try_register_fallback.restype = DBUS.bool_t +dbus.dbus_connection_try_register_fallback.argtypes = (ct.c_void_p, ct.c_char_p, DBUS.ObjectPathVTablePtr, ct.c_void_p, DBUS.ErrorPtr) +dbus.dbus_connection_get_object_path_data.restype = DBUS.bool_t +dbus.dbus_connection_get_object_path_data.argtypes = (ct.c_void_p, ct.c_char_p, ct.c_void_p) +dbus.dbus_connection_list_registered.restype = DBUS.bool_t +dbus.dbus_connection_list_registered.argtypes = (ct.c_void_p, ct.c_char_p, ct.c_void_p) +dbus.dbus_connection_get_unix_fd.restype = DBUS.bool_t +dbus.dbus_connection_get_unix_fd.argtypes = (ct.c_void_p, ct.POINTER(ct.c_int)) +dbus.dbus_connection_get_socket.restype = DBUS.bool_t 
+dbus.dbus_connection_get_socket.argtypes = (ct.c_void_p, ct.POINTER(ct.c_int)) +dbus.dbus_connection_unregister_object_path.restype = DBUS.bool_t +dbus.dbus_connection_unregister_object_path.argtypes = (ct.c_void_p, ct.c_char_p) + +dbus.dbus_watch_get_unix_fd.restype = ct.c_int +dbus.dbus_watch_get_unix_fd.argtypes = (ct.c_void_p,) +dbus.dbus_watch_get_socket.restype = ct.c_int +dbus.dbus_watch_get_socket.argtypes = (ct.c_void_p,) +dbus.dbus_watch_get_flags.restype = ct.c_uint +dbus.dbus_watch_get_flags.argtypes = (ct.c_void_p,) +dbus.dbus_watch_get_data.restype = ct.c_void_p +dbus.dbus_watch_get_data.argtypes = (ct.c_void_p,) +dbus.dbus_watch_set_data.restype = None +dbus.dbus_watch_set_data.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p) +dbus.dbus_watch_handle.restype = DBUS.bool_t +dbus.dbus_watch_handle.argtypes = (ct.c_void_p, ct.c_uint) +dbus.dbus_watch_get_enabled.restype = DBUS.bool_t +dbus.dbus_watch_get_enabled.argtypes = (ct.c_void_p,) + +dbus.dbus_timeout_get_interval.restype = ct.c_int +dbus.dbus_timeout_get_interval.argtypes = (ct.c_void_p,) +dbus.dbus_timeout_get_data.restype = ct.c_void_p +dbus.dbus_timeout_get_data.argtypes = (ct.c_void_p,) +dbus.dbus_timeout_set_data.restype = None +dbus.dbus_timeout_set_data.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p) +dbus.dbus_timeout_handle.restype = DBUS.bool_t +dbus.dbus_timeout_handle.argtypes = (ct.c_void_p,) +dbus.dbus_timeout_get_enabled.restype = DBUS.bool_t +dbus.dbus_timeout_get_enabled.argtypes = (ct.c_void_p,) + +# from dbus-bus.h: +dbus.dbus_bus_get.restype = ct.c_void_p +dbus.dbus_bus_get.argtypes = (ct.c_uint, DBUS.ErrorPtr) +dbus.dbus_bus_get_private.restype = ct.c_void_p +dbus.dbus_bus_get_private.argtypes = (ct.c_uint, DBUS.ErrorPtr) +dbus.dbus_bus_register.restype = DBUS.bool_t +dbus.dbus_bus_register.argtypes = (ct.c_void_p, DBUS.ErrorPtr) +dbus.dbus_bus_set_unique_name.restype = DBUS.bool_t +dbus.dbus_bus_set_unique_name.argtypes = (ct.c_void_p, ct.c_char_p) 
+dbus.dbus_bus_get_unique_name.restype = ct.c_char_p +dbus.dbus_bus_get_unique_name.argtypes = (ct.c_void_p,) +dbus.dbus_bus_get_unix_user.restype = ct.c_ulong +dbus.dbus_bus_get_unix_user.argtypes = (ct.c_void_p, ct.c_char_p, DBUS.ErrorPtr) +dbus.dbus_bus_get_id.restype = ct.c_void_p +dbus.dbus_bus_get_id.argtypes = (ct.c_void_p, DBUS.ErrorPtr) +dbus.dbus_bus_request_name.restype = ct.c_int +dbus.dbus_bus_request_name.argtypes = (ct.c_void_p, ct.c_char_p, ct.c_uint, DBUS.ErrorPtr) +dbus.dbus_bus_release_name.restype = ct.c_int +dbus.dbus_bus_release_name.argtypes = (ct.c_void_p, ct.c_char_p, DBUS.ErrorPtr) +dbus.dbus_bus_name_has_owner.restype = DBUS.bool_t +dbus.dbus_bus_name_has_owner.argtypes = (ct.c_void_p, ct.c_char_p, DBUS.ErrorPtr) +dbus.dbus_bus_start_service_by_name.restype = DBUS.bool_t +dbus.dbus_bus_start_service_by_name.argtypes = (ct.c_void_p, ct.c_char_p, ct.c_uint, ct.POINTER(ct.c_uint), DBUS.ErrorPtr) +dbus.dbus_bus_add_match.restype = None +dbus.dbus_bus_add_match.argtypes = (ct.c_void_p, ct.c_char_p, DBUS.ErrorPtr) +dbus.dbus_bus_remove_match.restype = None +dbus.dbus_bus_remove_match.argtypes = (ct.c_void_p, ct.c_char_p, DBUS.ErrorPtr) + +dbus.dbus_error_init.restype = None +dbus.dbus_error_init.argtypes = (DBUS.ErrorPtr,) +dbus.dbus_error_free.restype = None +dbus.dbus_error_free.argtypes = (DBUS.ErrorPtr,) +dbus.dbus_move_error.restype = None +dbus.dbus_move_error.argtypes = (DBUS.ErrorPtr, DBUS.ErrorPtr) +dbus.dbus_error_has_name.restype = DBUS.bool_t +dbus.dbus_error_has_name.argtypes = (DBUS.ErrorPtr, ct.c_char_p) +dbus.dbus_error_is_set.restype = DBUS.bool_t +dbus.dbus_error_is_set.argtypes = (DBUS.ErrorPtr,) +dbus.dbus_set_error.restype = None +dbus.dbus_set_error.argtypes = (DBUS.ErrorPtr, ct.c_char_p, ct.c_char_p, ct.c_char_p) + # note I can’t handle varargs + +# from dbus-pending-call.h: +dbus.dbus_pending_call_ref.restype = ct.c_void_p +dbus.dbus_pending_call_ref.argtypes = (ct.c_void_p,) +dbus.dbus_pending_call_unref.restype = None 
+dbus.dbus_pending_call_unref.argtypes = (ct.c_void_p,) +dbus.dbus_pending_call_set_notify.restype = DBUS.bool_t +dbus.dbus_pending_call_set_notify.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p) +dbus.dbus_pending_call_cancel.restype = None +dbus.dbus_pending_call_cancel.argtypes = (ct.c_void_p,) +dbus.dbus_pending_call_get_completed.restype = DBUS.bool_t +dbus.dbus_pending_call_get_completed.argtypes = (ct.c_void_p,) +dbus.dbus_pending_call_steal_reply.restype = ct.c_void_p +dbus.dbus_pending_call_steal_reply.argtypes = (ct.c_void_p,) +dbus.dbus_pending_call_block.restype = None +dbus.dbus_pending_call_block.argtypes = (ct.c_void_p,) +dbus.dbus_pending_call_allocate_data_slot.restype = DBUS.bool_t +dbus.dbus_pending_call_allocate_data_slot.argtypes = (ct.POINTER(ct.c_int),) +dbus.dbus_pending_call_free_data_slot.restype = None +dbus.dbus_pending_call_free_data_slot.argtypes = (ct.c_int,) +dbus.dbus_pending_call_set_data.restype = DBUS.bool_t +dbus.dbus_pending_call_set_data.argtypes = (ct.c_void_p, ct.c_int, ct.c_void_p, ct.c_void_p) +dbus.dbus_pending_call_get_data.restype = ct.c_void_p +dbus.dbus_pending_call_get_data.argtypes = (ct.c_void_p, ct.c_int) + +# from dbus-message.h: +dbus.dbus_message_new.restype = ct.c_void_p +dbus.dbus_message_new.argtypes = (ct.c_int,) +dbus.dbus_message_new_method_call.restype = ct.c_void_p +dbus.dbus_message_new_method_call.argtypes = (ct.c_char_p, ct.c_char_p, ct.c_char_p, ct.c_char_p) +dbus.dbus_message_new_method_return.restype = ct.c_void_p +dbus.dbus_message_new_method_return.argtypes = (ct.c_void_p,) +dbus.dbus_message_new_signal.restype = ct.c_void_p +dbus.dbus_message_new_signal.argtypes = (ct.c_char_p, ct.c_char_p, ct.c_char_p) +dbus.dbus_message_new_error.restype = ct.c_void_p +dbus.dbus_message_new_error.argtypes = (ct.c_void_p, ct.c_char_p, ct.c_char_p) +dbus.dbus_message_new_error_printf.restype = ct.c_void_p +dbus.dbus_message_new_error_printf.argtypes = (ct.c_void_p, ct.c_char_p, ct.c_char_p, 
    ct.c_char_p)
  # note I can’t handle varargs
# lifecycle and header-field accessors for DBusMessage (all take the
# message handle as an opaque ct.c_void_p first argument):
dbus.dbus_message_copy.restype = ct.c_void_p
dbus.dbus_message_copy.argtypes = (ct.c_void_p,)
dbus.dbus_message_ref.restype = ct.c_void_p
dbus.dbus_message_ref.argtypes = (ct.c_void_p,)
dbus.dbus_message_unref.restype = None
dbus.dbus_message_unref.argtypes = (ct.c_void_p,)
dbus.dbus_message_get_type.restype = ct.c_int
dbus.dbus_message_get_type.argtypes = (ct.c_void_p,)
dbus.dbus_message_set_path.restype = DBUS.bool_t
dbus.dbus_message_set_path.argtypes = (ct.c_void_p, ct.c_char_p)
dbus.dbus_message_get_path.restype = ct.c_char_p
dbus.dbus_message_get_path.argtypes = (ct.c_void_p,)
dbus.dbus_message_has_path.restype = DBUS.bool_t
dbus.dbus_message_has_path.argtypes = (ct.c_void_p, ct.c_char_p)
dbus.dbus_message_set_interface.restype = DBUS.bool_t
dbus.dbus_message_set_interface.argtypes = (ct.c_void_p, ct.c_char_p)
dbus.dbus_message_get_interface.restype = ct.c_char_p
dbus.dbus_message_get_interface.argtypes = (ct.c_void_p,)
dbus.dbus_message_has_interface.restype = DBUS.bool_t
dbus.dbus_message_has_interface.argtypes = (ct.c_void_p, ct.c_char_p)
dbus.dbus_message_set_member.restype = DBUS.bool_t
dbus.dbus_message_set_member.argtypes = (ct.c_void_p, ct.c_char_p)
dbus.dbus_message_get_member.restype = ct.c_char_p
dbus.dbus_message_get_member.argtypes = (ct.c_void_p,)
dbus.dbus_message_has_member.restype = DBUS.bool_t
dbus.dbus_message_has_member.argtypes = (ct.c_void_p, ct.c_char_p)
dbus.dbus_message_set_error_name.restype = DBUS.bool_t
dbus.dbus_message_set_error_name.argtypes = (ct.c_void_p, ct.c_char_p)
dbus.dbus_message_get_error_name.restype = ct.c_char_p
dbus.dbus_message_get_error_name.argtypes = (ct.c_void_p,)
dbus.dbus_message_set_destination.restype = DBUS.bool_t
dbus.dbus_message_set_destination.argtypes = (ct.c_void_p, ct.c_char_p)
dbus.dbus_message_get_destination.restype = ct.c_char_p
dbus.dbus_message_get_destination.argtypes = (ct.c_void_p,)
dbus.dbus_message_set_sender.restype = DBUS.bool_t
dbus.dbus_message_set_sender.argtypes = (ct.c_void_p, ct.c_char_p)
dbus.dbus_message_get_sender.restype = ct.c_char_p
dbus.dbus_message_get_sender.argtypes = (ct.c_void_p,)
dbus.dbus_message_get_signature.restype = ct.c_char_p
dbus.dbus_message_get_signature.argtypes = (ct.c_void_p,)
dbus.dbus_message_set_no_reply.restype = None
dbus.dbus_message_set_no_reply.argtypes = (ct.c_void_p, DBUS.bool_t)
dbus.dbus_message_get_no_reply.restype = DBUS.bool_t
dbus.dbus_message_get_no_reply.argtypes = (ct.c_void_p,)
dbus.dbus_message_is_method_call.restype = DBUS.bool_t
dbus.dbus_message_is_method_call.argtypes = (ct.c_void_p, ct.c_char_p, ct.c_char_p)
dbus.dbus_message_is_signal.restype = DBUS.bool_t
dbus.dbus_message_is_signal.argtypes = (ct.c_void_p, ct.c_char_p, ct.c_char_p)
dbus.dbus_message_is_error.restype = DBUS.bool_t
dbus.dbus_message_is_error.argtypes = (ct.c_void_p, ct.c_char_p)
dbus.dbus_message_has_destination.restype = DBUS.bool_t
dbus.dbus_message_has_destination.argtypes = (ct.c_void_p, ct.c_char_p)
dbus.dbus_message_has_sender.restype = DBUS.bool_t
dbus.dbus_message_has_sender.argtypes = (ct.c_void_p, ct.c_char_p)
dbus.dbus_message_has_signature.restype = DBUS.bool_t
dbus.dbus_message_has_signature.argtypes = (ct.c_void_p, ct.c_char_p)
dbus.dbus_message_get_serial.restype = ct.c_uint
dbus.dbus_message_get_serial.argtypes = (ct.c_void_p,)
dbus.dbus_message_set_serial.restype = None
dbus.dbus_message_set_serial.argtypes = (ct.c_void_p, ct.c_uint)
dbus.dbus_message_set_reply_serial.restype = DBUS.bool_t
dbus.dbus_message_set_reply_serial.argtypes = (ct.c_void_p, ct.c_uint)
dbus.dbus_message_get_reply_serial.restype = ct.c_uint
dbus.dbus_message_get_reply_serial.argtypes = (ct.c_void_p,)
dbus.dbus_message_set_auto_start.restype = None
dbus.dbus_message_set_auto_start.argtypes = (ct.c_void_p, DBUS.bool_t)
dbus.dbus_message_get_auto_start.restype = DBUS.bool_t
+dbus.dbus_message_get_auto_start.argtypes = (ct.c_void_p,) +dbus.dbus_message_get_path_decomposed.restype = DBUS.bool_t +dbus.dbus_message_get_path_decomposed.argtypes = (ct.c_void_p, ct.c_void_p) +dbus.dbus_message_append_args.restype = DBUS.bool_t +dbus.dbus_message_append_args.argtypes = (ct.c_void_p, ct.c_int, ct.c_void_p, ct.c_int) + # note I can’t handle varargs +# probably cannot make use of dbus.dbus_message_append_args_valist +dbus.dbus_message_get_args.restype = DBUS.bool_t +dbus.dbus_message_get_args.argtypes = (ct.c_void_p, DBUS.ErrorPtr, ct.c_int, ct.c_void_p, ct.c_int) + # note I can’t handle varargs +# probably cannot make use of dbus.dbus_message_get_args_valist +dbus.dbus_message_contains_unix_fds.restype = DBUS.bool_t +dbus.dbus_message_contains_unix_fds.argtypes = (ct.c_void_p,) +dbus.dbus_message_iter_init.restype = DBUS.bool_t +dbus.dbus_message_iter_init.argtypes = (ct.c_void_p, DBUS.MessageIterPtr) +dbus.dbus_message_iter_has_next.restype = DBUS.bool_t +dbus.dbus_message_iter_has_next.argtypes = (DBUS.MessageIterPtr,) +dbus.dbus_message_iter_next.restype = DBUS.bool_t +dbus.dbus_message_iter_next.argtypes = (DBUS.MessageIterPtr,) +dbus.dbus_message_iter_get_signature.restype = ct.c_void_p +dbus.dbus_message_iter_next.argtypes = (DBUS.MessageIterPtr,) +dbus.dbus_message_iter_get_signature.restype = ct.c_void_p +dbus.dbus_message_iter_get_signature.argtypes = (DBUS.MessageIterPtr,) +dbus.dbus_message_iter_get_arg_type.restype = ct.c_int +dbus.dbus_message_iter_get_arg_type.argtypes = (DBUS.MessageIterPtr,) +dbus.dbus_message_iter_get_element_type.restype = ct.c_int +dbus.dbus_message_iter_get_element_type.argtypes = (DBUS.MessageIterPtr,) +dbus.dbus_message_iter_recurse.restype = None +dbus.dbus_message_iter_recurse.argtypes = (DBUS.MessageIterPtr, DBUS.MessageIterPtr) +dbus.dbus_message_iter_get_basic.restype = None +dbus.dbus_message_iter_get_basic.argtypes = (DBUS.MessageIterPtr, ct.c_void_p) +if hasattr(dbus, 
"dbus_message_iter_get_element_count") : + dbus.dbus_message_iter_get_element_count.restype = ct.c_int + dbus.dbus_message_iter_get_element_count.argtypes = (DBUS.MessageIterPtr,) +#end if +# dbus_message_iter_get_array_len deprecated +dbus.dbus_message_iter_get_fixed_array.restype = None +dbus.dbus_message_iter_get_fixed_array.argtypes = (DBUS.MessageIterPtr, ct.c_void_p, ct.POINTER(ct.c_int)) +dbus.dbus_message_iter_init_append.restype = None +dbus.dbus_message_iter_init_append.argtypes = (ct.c_void_p, DBUS.MessageIterPtr) +dbus.dbus_message_iter_append_basic.restype = DBUS.bool_t +dbus.dbus_message_iter_append_basic.argtypes = (DBUS.MessageIterPtr, ct.c_int, ct.c_void_p) +dbus.dbus_message_iter_append_fixed_array.restype = DBUS.bool_t +dbus.dbus_message_iter_append_fixed_array.argtypes = (DBUS.MessageIterPtr, ct.c_int, ct.c_void_p, ct.c_int) +dbus.dbus_message_iter_open_container.restype = DBUS.bool_t +dbus.dbus_message_iter_open_container.argtypes = (DBUS.MessageIterPtr, ct.c_int, ct.c_char_p, DBUS.MessageIterPtr) +dbus.dbus_message_iter_close_container.restype = DBUS.bool_t +dbus.dbus_message_iter_close_container.argtypes = (DBUS.MessageIterPtr, DBUS.MessageIterPtr) +dbus.dbus_message_iter_abandon_container.restype = None +dbus.dbus_message_iter_abandon_container.argtypes = (DBUS.MessageIterPtr, DBUS.MessageIterPtr) +dbus.dbus_message_lock.restype = None +dbus.dbus_message_lock.argtypes = (DBUS.MessageIterPtr,) +dbus.dbus_set_error_from_message.restype = DBUS.bool_t +dbus.dbus_set_error_from_message.argtypes = (DBUS.ErrorPtr, ct.c_void_p) +dbus.dbus_message_allocate_data_slot.restype = DBUS.bool_t +dbus.dbus_message_allocate_data_slot.argtypes = (ct.POINTER(ct.c_int),) +dbus.dbus_message_free_data_slot.restype = None +dbus.dbus_message_free_data_slot.argtypes = (ct.POINTER(ct.c_int),) +dbus.dbus_message_set_data.restype = DBUS.bool_t +dbus.dbus_message_set_data.argtypes = (ct.c_void_p, ct.c_int, ct.c_void_p, ct.c_void_p) +dbus.dbus_message_get_data.restype = 
ct.c_void_p
dbus.dbus_message_get_data.argtypes = (ct.c_void_p, ct.c_int)
dbus.dbus_message_type_from_string.restype = ct.c_int
dbus.dbus_message_type_from_string.argtypes = (ct.c_char_p,)
dbus.dbus_message_type_to_string.restype = ct.c_char_p
dbus.dbus_message_type_to_string.argtypes = (ct.c_int,)
dbus.dbus_message_marshal.restype = DBUS.bool_t
dbus.dbus_message_marshal.argtypes = (ct.c_void_p, ct.c_void_p, ct.POINTER(ct.c_int))
dbus.dbus_message_demarshal.restype = ct.c_void_p
dbus.dbus_message_demarshal.argtypes = (ct.c_void_p, ct.c_int, DBUS.ErrorPtr)
dbus.dbus_message_demarshal_bytes_needed.restype = ct.c_int
dbus.dbus_message_demarshal_bytes_needed.argtypes = (ct.c_void_p, ct.c_int)
# the interactive-authorization calls only exist in newer libdbus versions:
if hasattr(dbus, "dbus_message_set_allow_interactive_authorization") :
    dbus.dbus_message_set_allow_interactive_authorization.restype = None
    dbus.dbus_message_set_allow_interactive_authorization.argtypes = (ct.c_void_p, DBUS.bool_t)
#end if
if hasattr(dbus, "dbus_message_get_allow_interactive_authorization") :
    dbus.dbus_message_get_allow_interactive_authorization.restype = DBUS.bool_t
    dbus.dbus_message_get_allow_interactive_authorization.argtypes = (ct.c_void_p,)
#end if

# from dbus-memory.h:
dbus.dbus_malloc.restype = ct.c_void_p
dbus.dbus_malloc.argtypes = (ct.c_size_t,)
dbus.dbus_malloc0.restype = ct.c_void_p
dbus.dbus_malloc0.argtypes = (ct.c_size_t,)
dbus.dbus_realloc.restype = ct.c_void_p
dbus.dbus_realloc.argtypes = (ct.c_void_p, ct.c_size_t)
dbus.dbus_free.restype = None
dbus.dbus_free.argtypes = (ct.c_void_p,)
dbus.dbus_free_string_array.restype = None
dbus.dbus_free_string_array.argtypes = (ct.c_void_p,)

# from dbus-misc.h:
dbus.dbus_get_local_machine_id.restype = ct.c_void_p
dbus.dbus_get_local_machine_id.argtypes = ()
dbus.dbus_get_version.restype = None
dbus.dbus_get_version.argtypes = (ct.POINTER(ct.c_int), ct.POINTER(ct.c_int), ct.POINTER(ct.c_int))
dbus.dbus_setenv.restype = DBUS.bool_t
dbus.dbus_setenv.argtypes = (ct.c_char_p, ct.c_char_p)

# from dbus-address.h:
dbus.dbus_parse_address.restype = DBUS.bool_t
dbus.dbus_parse_address.argtypes = (ct.c_char_p, ct.c_void_p, ct.POINTER(ct.c_int), DBUS.ErrorPtr)
dbus.dbus_address_entry_get_value.restype = ct.c_char_p
dbus.dbus_address_entry_get_value.argtypes = (ct.c_void_p, ct.c_char_p)
dbus.dbus_address_entry_get_method.restype = ct.c_char_p
dbus.dbus_address_entry_get_method.argtypes = (ct.c_void_p,)
dbus.dbus_address_entries_free.restype = None
dbus.dbus_address_entries_free.argtypes = (ct.c_void_p,)
dbus.dbus_address_escape_value.restype = ct.c_void_p
dbus.dbus_address_escape_value.argtypes = (ct.c_char_p,)
dbus.dbus_address_unescape_value.restype = ct.c_void_p
dbus.dbus_address_unescape_value.argtypes = (ct.c_char_p, DBUS.ErrorPtr)

# from dbus-signature.h:
dbus.dbus_signature_iter_init.restype = None
dbus.dbus_signature_iter_init.argtypes = (DBUS.SignatureIterPtr, ct.c_char_p)
dbus.dbus_signature_iter_get_current_type.restype = ct.c_int
dbus.dbus_signature_iter_get_current_type.argtypes = (DBUS.SignatureIterPtr,)
dbus.dbus_signature_iter_get_signature.restype = ct.c_void_p
dbus.dbus_signature_iter_get_signature.argtypes = (DBUS.SignatureIterPtr,)
dbus.dbus_signature_iter_get_element_type.restype = ct.c_int
dbus.dbus_signature_iter_get_element_type.argtypes = (DBUS.SignatureIterPtr,)
dbus.dbus_signature_iter_next.restype = DBUS.bool_t
dbus.dbus_signature_iter_next.argtypes = (DBUS.SignatureIterPtr,)
dbus.dbus_signature_iter_recurse.restype = None
dbus.dbus_signature_iter_recurse.argtypes = (DBUS.SignatureIterPtr, DBUS.SignatureIterPtr)
dbus.dbus_signature_validate.restype = DBUS.bool_t
dbus.dbus_signature_validate.argtypes = (ct.c_char_p, DBUS.ErrorPtr)
dbus.dbus_signature_validate_single.restype = DBUS.bool_t
dbus.dbus_signature_validate_single.argtypes = (ct.c_char_p, DBUS.ErrorPtr)
dbus.dbus_type_is_valid.restype = DBUS.bool_t
dbus.dbus_type_is_valid.argtypes = (ct.c_int,)
dbus.dbus_type_is_basic.restype = DBUS.bool_t
dbus.dbus_type_is_basic.argtypes = (ct.c_int,)
dbus.dbus_type_is_container.restype = DBUS.bool_t
dbus.dbus_type_is_container.argtypes = (ct.c_int,)
dbus.dbus_type_is_fixed.restype = DBUS.bool_t
dbus.dbus_type_is_fixed.argtypes = (ct.c_int,)

# from dbus-syntax.h:
dbus.dbus_validate_path.restype = DBUS.bool_t
dbus.dbus_validate_path.argtypes = (ct.c_char_p, DBUS.ErrorPtr)
dbus.dbus_validate_interface.restype = DBUS.bool_t
dbus.dbus_validate_interface.argtypes = (ct.c_char_p, DBUS.ErrorPtr)
dbus.dbus_validate_member.restype = DBUS.bool_t
dbus.dbus_validate_member.argtypes = (ct.c_char_p, DBUS.ErrorPtr)
dbus.dbus_validate_error_name.restype = DBUS.bool_t
dbus.dbus_validate_error_name.argtypes = (ct.c_char_p, DBUS.ErrorPtr)
dbus.dbus_validate_bus_name.restype = DBUS.bool_t
dbus.dbus_validate_bus_name.argtypes = (ct.c_char_p, DBUS.ErrorPtr)
dbus.dbus_validate_utf8.restype = DBUS.bool_t
dbus.dbus_validate_utf8.argtypes = (ct.c_char_p, DBUS.ErrorPtr)

# from dbus-server.h:
dbus.dbus_server_listen.restype = ct.c_void_p
dbus.dbus_server_listen.argtypes = (ct.c_char_p, DBUS.ErrorPtr)
dbus.dbus_server_ref.restype = ct.c_void_p
dbus.dbus_server_ref.argtypes = (ct.c_void_p,)
dbus.dbus_server_unref.restype = ct.c_void_p
dbus.dbus_server_unref.argtypes = (ct.c_void_p,)
dbus.dbus_server_disconnect.restype = None
dbus.dbus_server_disconnect.argtypes = (ct.c_void_p,)
dbus.dbus_server_get_is_connected.restype = DBUS.bool_t
dbus.dbus_server_get_is_connected.argtypes = (ct.c_void_p,)
dbus.dbus_server_get_address.restype = ct.c_void_p
dbus.dbus_server_get_address.argtypes = (ct.c_void_p,)
dbus.dbus_server_get_id.restype = ct.c_void_p
dbus.dbus_server_get_id.argtypes = (ct.c_void_p,)
dbus.dbus_server_set_new_connection_function.restype = None
dbus.dbus_server_set_new_connection_function.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p)
dbus.dbus_server_set_watch_functions.restype =
DBUS.bool_t +dbus.dbus_server_set_watch_functions.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p) +dbus.dbus_server_set_timeout_functions.restype = DBUS.bool_t +dbus.dbus_server_set_timeout_functions.argtypes = (ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p) +dbus.dbus_server_set_auth_mechanisms.restype = DBUS.bool_t +dbus.dbus_server_set_auth_mechanisms.argtypes = (ct.c_void_p, ct.c_void_p) +dbus.dbus_server_allocate_data_slot.restype = DBUS.bool_t +dbus.dbus_server_allocate_data_slot.argtypes = (ct.POINTER(ct.c_int),) +dbus.dbus_server_free_data_slot.restype = DBUS.bool_t +dbus.dbus_server_free_data_slot.argtypes = (ct.POINTER(ct.c_int),) +dbus.dbus_server_set_data.restype = DBUS.bool_t +dbus.dbus_server_set_data.argtypes = (ct.c_void_p, ct.c_int, ct.c_void_p, ct.c_void_p) +dbus.dbus_server_set_data.restype = ct.c_void_p +dbus.dbus_server_set_data.argtypes = (ct.c_void_p, ct.c_int) + +# TODO dbus-threads.h +# Seems like the only call worth making is dbus_threads_init_default. + +#+ +# High-level stuff follows +#- + +class DBusError(Exception) : + "for raising an exception that reports a D-Bus error name and accompanying message." + + __slots__ = ("name", "message") + + def __init__(self, name, message) : + self.args = ("%s -- %s" % (name, message),) + self.name = name + self.message = message + #end __init__ + +#end DBusError + +class CallFailed(Exception) : + "used internally for reporting general failure from calling a libdbus routine." + + __slots__ = ("funcname",) + + def __init__(self, funcname) : + self.args = ("%s failed" % funcname,) + self.funcname = funcname + #end __init__ + +#end CallFailed + +class _Abort(Exception) : + pass +#end _Abort + +class TaskKeeper : + "Base class for classes that need to call EventLoop.create_task() to" \ + " schedule caller-created coroutines for execution. 
asyncio only keeps" \ + " weak references to Task objects when they are not being scheduled," \ + " so to keep them from disappearing unexpectedly, I maintain a list of" \ + " strong references here, and clean them out as they end execution." + + __slots__ = ("__weakref__", "loop", "_cur_tasks") + + def _init(self) : + # avoid __init__ so I don't get passed spurious args + self.loop = None + self._cur_tasks = set() + #end _init + + def create_task(self, coro) : + assert self.loop != None, "no event loop to attach coroutine to" + task = self.loop.create_task(coro) + task.add_done_callback(functools.partial(self._reaper, weak_ref(self))) + self._cur_tasks.add(task) + #end create_task + + @staticmethod + def _reaper(self, task) : + self = self() # avoid reference circularity + self._cur_tasks.remove(task) + #end _reaper + + if "loop" in asyncio.wait.__kwdefaults__ : + + def wait(self, futures, *, timeout = None, return_when = asyncio.ALL_COMPLETED) : + "wrapper around asyncio.wait for compatibility with pre-Python-3.7." + return \ + asyncio.wait(futures, loop = self.loop, timeout = timeout, return_when = return_when) + # No default loop in pre-3.7. + #end wait + + else : + + wait = staticmethod(asyncio.wait) + # no need to pass loop arg in ≥ 3.7, removed in ≥ 3.10. + + #end if + +#end TaskKeeper + +# Misc: + +def get_local_machine_id() : + "returns a systemwide unique ID that is supposed to remain constant at least" \ + " until the next reboot. Two processes seeing the same value for this can assume" \ + " they are on the same machine." + c_result = dbus.dbus_get_local_machine_id() + if c_result == None : + raise CallFailed("dbus_get_local_machine_id") + #end if + result = ct.cast(c_result, ct.c_char_p).value.decode() + dbus.dbus_free(c_result) + return \ + result +#end get_local_machine_id + +def get_version() : + "returns the libdbus library version as a tuple of integers (major, minor, micro)." 
+ major = ct.c_int() + minor = ct.c_int() + micro = ct.c_int() + dbus.dbus_get_version(ct.byref(major), ct.byref(minor), ct.byref(micro)) + return \ + (major.value, minor.value, micro.value) +#end get_version + +def setenv(key, value) : + key = key.encode() + if value != None : + value = value.encode() + #end if + if not dbus.dbus_setenv(key, value) : + raise CallFailed("dbus_setenv") + #end if +#end setenv + +def unsetenv(key) : + setenv(key, None) +#end unsetenv + +class Watch : + "wrapper around a DBusWatch object. Do not instantiate directly; they" \ + " are created and destroyed by libdbus.\n" \ + "\n" \ + "A Watch is the basic mechanism for plugging libdbus-created file descriptors" \ + " into your event loop. When created, they are passed to your add-watch callback" \ + " to manage; and conversely, when deleted, your remove-watch callback is notified." \ + " (These callbacks are ones you attach to Server and Connection objects.)\n" \ + "\n" \ + "Check the enabled property to decide if you need to pay attention to this Watch, and" \ + " look at the flags to see if you need to check for pending reads, or writes, or both." \ + " Call the handle() method with the appropriate flags when you see that reads or writes" \ + " are pending." + # + + __slots__ = ("__weakref__", "_dbobj",) # to forestall typos + + _instances = WeakValueDictionary() + + def __new__(celf, _dbobj) : + self = celf._instances.get(_dbobj) + if self == None : + self = super().__new__(celf) + self._dbobj = _dbobj + celf._instances[_dbobj] = self + #end if + return \ + self + #end __new__ + + # no __del__ method -- no underlying dispose API call + + @property + def unix_fd(self) : + "the underlying file descriptor for this Watch." + return \ + dbus.dbus_watch_get_unix_fd(self._dbobj) + #end unix_fd + + def fileno(self) : + "for use with Python’s “select” functions." 
+ return \ + self.unix_fd + #end fileno + + @property + def socket(self) : + return \ + dbus.dbus_watch_get_socket(self._dbobj) + #end socket + + @property + def flags(self) : + "returns WATCH_READABLE and/or WATCH_WRITABLE, indicating what to watch for." + return \ + dbus.dbus_watch_get_flags(self._dbobj) + #end flags + + # TODO: get/set data + + def handle(self, flags) : + "tells libdbus that there is something to be read or written." \ + " flags are a combination of WATCH_xxx values." + return \ + dbus.dbus_watch_handle(self._dbobj, flags) != 0 + #end handle + + @property + def enabled(self) : + "does libdbus want you to actually watch this Watch." + return \ + dbus.dbus_watch_get_enabled(self._dbobj) != 0 + #end enabled + +#end Watch + +class Timeout : + "wrapper around a DBusTimeout object. Do not instantiate directly; they" \ + " are created and destroyed by libdbus.\n" \ + "\n" \ + " A Timeout is the basic mechanism for plugging libdbus-created timeouts" \ + " into your event loop. When created, they are passed to your add-timeout" \ + " callback to manage; and conversely, when deleted, your remove-timeout" \ + " callback is notified. (These callbacks are ones you attach to Server and" \ + " Connection objects.)\n" \ + "\n" \ + "Check the enabled property to decide if you need to pay attention to this" \ + " Timeout. Call the handle() method when the timeout becomes due, as measured" \ + " from when it was initially created or most recently enabled, whichever" \ + " happened last." + # + + __slots__ = ("__weakref__", "_dbobj",) # to forestall typos + + _instances = WeakValueDictionary() + + def __new__(celf, _dbobj) : + self = celf._instances.get(_dbobj) + if self == None : + self = super().__new__(celf) + self._dbobj = _dbobj + celf._instances[_dbobj] = self + #end if + return \ + self + #end __new__ + + # no __del__ method -- no underlying dispose API call + + @property + def interval(self) : + "how long in float seconds until the timeout should fire." 
+ return \ + dbus.dbus_timeout_get_interval(self._dbobj) / 1000 + #end interval + + # TODO: get/set data + + def handle(self) : + "tells libdbus the timeout has fired." + return \ + dbus.dbus_timeout_handle(self._dbobj) + #end handle + + @property + def enabled(self) : + "does libdbus want you to actually schedule this Timeout." + return \ + dbus.dbus_timeout_get_enabled(self._dbobj) != 0 + #end enabled + +#end Timeout + +class ObjectPathVTable(TaskKeeper) : + "wrapper around an ObjectPathVTable struct. You can instantiate directly, or call" \ + " the init method. An additional feature beyond the underlying libdbus capabilities" \ + " is the option to specify an asyncio event loop. If the message handler returns" \ + " a coroutine, then an asyncio task is created to run it, and a result of" \ + " DBUS.HANDLER_RESULT_HANDLED is returned on behalf of the message handler;" \ + " that way, the message function can do the minimum beyond some initial filtering of" \ + " the message, leaving the time-consuming part of the work to the coroutine." + + __slots__ = \ + ( + "_dbobj", + # need to keep references to ctypes-wrapped functions + # so they don't disappear prematurely: + "_wrap_unregister_func", + "_wrap_message_func", + ) # to forestall typos + + def __init__(self, *, loop = None, unregister = None, message = None) : + super().__init__() + super()._init() + self._dbobj = DBUS.ObjectPathVTable() + self.loop = loop + self._wrap_unregister_func = None + self._wrap_message_func = None + if unregister != None : + self.set_unregister(unregister) + #end if + if message != None : + self.set_message(message) + #end if + #end __init__ + + @classmethod + def init(celf, *, loop = None, unregister = None, message = None) : + "for consistency with other classes that don’t want caller to instantiate directly." 
+ return \ + celf \ + ( + loop = loop, + unregister = unregister, + message = message, + ) + #end init + + def set_unregister(self, unregister) : + + def wrap_unregister(c_conn, c_user_data) : + conn = Connection(dbus.dbus_connection_ref(c_conn)) + unregister(conn, conn._user_data.get(c_user_data)) + #end wrap_unregister + + #begin set_unregister + if unregister != None : + self._wrap_unregister_func = DBUS.ObjectPathUnregisterFunction(wrap_unregister) + else : + self._wrap_unregister_func = None + #end if + self._dbobj.unregister_function = self._wrap_unregister_func + return \ + self + #end set_unregister + + def set_message(self, message) : + + w_self = weak_ref(self) + + def wrap_message(c_conn, c_message, c_user_data) : + self = _wderef(w_self, "vtable") + conn = Connection(dbus.dbus_connection_ref(c_conn)) + msg = Message(dbus.dbus_message_ref(c_message)) + user_data = conn._user_data.get(c_user_data) + result = message(conn, msg, user_data) + if asyncio.iscoroutine(result) : + self.create_task(result) + result = DBUS.HANDLER_RESULT_HANDLED + #end if + return \ + result + #end wrap_message + + #begin set_message + if message != None : + self._wrap_message_func = DBUS.ObjectPathMessageFunction(wrap_message) + else : + self._wrap_message_func = None + #end if + self._dbobj.message_function = self._wrap_message_func + return \ + self + #end set_message + +#end ObjectPathVTable + +class _DummyError : + # like an Error, but is never set and so will never raise. + + @property + def is_set(self) : + return \ + False + #end is_set + + def raise_if_set(self) : + pass + #end raise_if_set + +#end _DummyError + +def _get_error(error) : + # Common routine which processes an optional user-supplied Error + # argument, and returns 2 Error-like objects: the first a real + # Error object to be passed to the libdbus call, the second is + # either the same Error object or a separate _DummyError object + # on which to call raise_if_set() afterwards. 
The procedure for + # using this is + # + # error, my_error = _get_error(error) + # ... call libdbus routine, passing error._dbobj ... + # my_error.raise_if_set() + # + # If the user passes None for error, then an internal Error object + # is created, and returned as both results. That way, if it is + # filled in by the libdbus call, calling raise_if_set() will + # automatically raise the exception. + # But if the user passed their own Error object, then it is + # returned as the first result, and a _DummyError as the second + # result. This means the raise_if_set() call becomes a noop, and + # it is up to the caller to check if their Error object was filled + # in or not. + if error != None and not isinstance(error, Error) : + raise TypeError("error must be an Error") + #end if + if error != None : + my_error = _DummyError() + else : + my_error = Error() + error = my_error + #end if + return \ + error, my_error +#end _get_error + +def _get_timeout(timeout) : + # accepts a timeout in float seconds and converts it to integer milliseconds + # as expected by libdbus. Special-cases DBUS.TIMEOUT_INFINITE and DBUS.TIMEOUT_USE_DEFAULT, + # allowing these to be passed through unchanged. + if not isinstance(timeout, int) or timeout not in (DBUS.TIMEOUT_INFINITE, DBUS.TIMEOUT_USE_DEFAULT) : + timeout = round(timeout * 1000) + #end if + return \ + timeout +#end _get_timeout + +def _loop_attach(self, loop, dispatch) : + # attaches a Server or Connection object to a given asyncio event loop. + # If loop is None, then the default asyncio loop is used. The actual loop + # value is also stored as the loop attribute of the object. + + if loop == None : + try : + # if running within a task, current loop takes priority + loop = get_running_loop() + except RuntimeError : + # not running within a task, use default loop + loop = get_event_loop() + #end try + #end if + + watches = [] # do I need to keep track of Watch objects? 
+ timeouts = [] + + def call_dispatch() : + status = dispatch() + if status == DBUS.DISPATCH_NEED_MEMORY : + raise DBusError(DBUS.ERROR_NO_MEMORY, "not enough memory for connection dispatch") + #end if + if status == DBUS.DISPATCH_DATA_REMAINS : + loop.call_soon(call_dispatch) + #end if + #end call_dispatch + + def add_remove_watch(watch, add) : + + def handle_watch_event(flags) : + # seems I need to remove the watch and add it again to + # avoid an endless stream of notifications that cause + # excessive CPU usage -- asyncio bug? + add_remove_watch(watch, False) + watch.handle(flags) + if watch.enabled : + add_remove_watch(watch, True) + #end if + if dispatch != None : + call_dispatch() + #end if + #end handle_watch_event + + #end add_remove_watch + if DBUS.WATCH_READABLE & watch.flags != 0 : + if add : + loop.add_reader(watch, handle_watch_event, DBUS.WATCH_READABLE) + else : + loop.remove_reader(watch) + #end if + #end if + if DBUS.WATCH_WRITABLE & watch.flags != 0 : + if add : + loop.add_writer(watch, handle_watch_event, DBUS.WATCH_WRITABLE) + else : + loop.remove_writer(watch) + #end if + #end if + #end add_remove_watch + + def handle_add_watch(watch, data) : + if watch not in watches : + watches.append(watch) + add_remove_watch(watch, True) + #end if + return \ + True + #end handle_add_watch + + def handle_watch_toggled(watch, data) : + add_remove_watch(watch, watch.enabled) + #end handle_watch_toggled + + def handle_remove_watch(watch, data) : + try : + pos = watches.index(watch) + except ValueError : + pos = None + #end try + if pos != None : + watches[pos : pos + 1] = [] + add_remove_watch(watch, False) + #end if + #end handle_remove_watch + + def handle_timeout(timeout) : + if timeout["due"] != None and timeout["due"] <= loop.time() and timeout["timeout"].enabled : + timeout["timeout"].handle() + #end if + #end handle_timeout + + def handle_add_timeout(timeout, data) : + if not any(timeout == t["timeout"] for t in timeouts) : + entry = \ + { + "timeout" : 
timeout, + "due" : (lambda : None, lambda : loop.time() + timeout.interval)[timeout.enabled](), + } + timeouts.append(entry) + if timeout.enabled : + loop.call_later(timeout.interval, handle_timeout, entry) + #end if + #end if + return \ + True + #end handle_add_timeout + + def handle_timeout_toggled(timeout, data) : + # not sure what to do if a Timeout gets toggled from enabled to disabled + # and then to enabled again; effectively I update the due time from + # the time of re-enabling. + search = iter(timeouts) + while True : + entry = next(search, None) + if entry == None : + break + #end if + if entry["timeout"] == timeout : + if timeout.enabled : + entry["due"] = loop.time() + timeout.enterval + loop.call_later(timeout.interval, handle_timeout, entry) + else : + entry["due"] = None + #end if + break + #end if + #end while + #end handle_timeout_toggled + + def handle_remove_timeout(timeout, data) : + new_timeouts = [] + for entry in timeouts : + if entry["timeout"] == timeout : + entry["due"] = None # in case already queued, avoid segfault in handle_timeout + else : + new_timeouts.append(entry) + #end if + #end for + timeouts[:] = new_timeouts + #end handle_remove_timeout + +#begin _loop_attach + self.set_watch_functions \ + ( + add_function = handle_add_watch, + remove_function = handle_remove_watch, + toggled_function = handle_watch_toggled, + data = None + ) + self.set_timeout_functions \ + ( + add_function = handle_add_timeout, + remove_function = handle_remove_timeout, + toggled_function = handle_timeout_toggled, + data = None + ) + self.loop = loop + self = None # avoid circularity +#end _loop_attach + +class _MatchActionEntry : + __slots__ = ("rule", "actions") + + class _Action : + __slots__ = ("func", "user_data") + + def __init__(self, func, user_data) : + self.func = func + self.user_data = user_data + #end __init__ + + def __eq__(a, b) : + # needed to allow equality comparison of set entries + return \ + ( + a.func == b.func + and + 
data_key(a.user_data) == data_key(b.user_data) + ) + #end __eq__ + + def __hash__(self) : + return \ + hash((self.func, data_key(self.user_data))) + #end __hash__ + + #end _Action + + def __init__(self, rule) : + self.rule = rule + self.actions = set() + #end __init__ + +#end _MatchActionEntry + +@enum.unique +class STOP_ON(enum.Enum) : + "set of conditions on which to raise StopAsyncIteration:\n" \ + "\n" \ + " TIMEOUT - timeout has elapsed\n" \ + " CLOSED - server/connection has closed.\n" \ + "\n" \ + "Otherwise None will be returned on timeout, and the usual BrokenPipeError" \ + " exception will be raised when the connection is closed." + TIMEOUT = 1 + CLOSED = 2 +#end STOP_ON + +class Connection(TaskKeeper) : + "wrapper around a DBusConnection object. Do not instantiate directly; use the open" \ + " or bus_get methods." + # + + __slots__ = \ + ( + "_dbobj", + "_filters", + "_match_actions", + "_receive_queue", + "_receive_queue_enabled", + "_awaiting_receive", + "_user_data", + # need to keep references to ctypes-wrapped functions + # so they don't disappear prematurely: + "_object_paths", + "_add_watch_function", + "_remove_watch_function", + "_toggled_watch_function", + "_free_watch_data", + "_add_timeout_function", + "_remove_timeout_function", + "_toggled_timeout_function", + "_free_timeout_data", + "_wakeup_main", + "_free_wakeup_main_data", + "_dispatch_status", + "_free_dispatch_status_data", + "_allow_unix_user", + "_free_unix_user_data", + ) # to forestall typos + + _instances = WeakValueDictionary() + _shared_connections = [None, None] + + def __new__(celf, _dbobj) : + self = celf._instances.get(_dbobj) + if self == None : + self = super().__new__(celf) + super()._init(self) + self._dbobj = _dbobj + self._user_data = {} + self._filters = {} + self._match_actions = {} + self._receive_queue = None + self._receive_queue_enabled = set() + self._awaiting_receive = [] + self._object_paths = {} + celf._instances[_dbobj] = self + else : + 
dbus.dbus_connection_unref(self._dbobj) + # lose extra reference created by caller + #end if + return \ + self + #end __new__ + + def __del__(self) : + if self._dbobj != None : + if self.loop != None : + # remove via direct low-level libdbus calls + dbus.dbus_connection_set_watch_functions(self._dbobj, None, None, None, None, None) + dbus.dbus_connection_set_timeout_functions(self._dbobj, None, None, None, None, None) + self.loop = None + #end if + # Any entries still in super(TaskKeeper, self)._cur_tasks will be lost + # at this point. I leave it to asyncio to report them as destroyed + # while still pending, and the caller to notice this as a program bug. + dbus.dbus_connection_unref(self._dbobj) + self._dbobj = None + #end if + #end __del__ + + @classmethod + def open(celf, address, private, error = None) : + "opens a Connection to a specified address, separate from the" \ + " system or session buses." + error, my_error = _get_error(error) + result = (dbus.dbus_connection_open, dbus.dbus_connection_open_private)[private](address.encode(), error._dbobj) + my_error.raise_if_set() + if result != None : + result = celf(result) + #end if + return \ + result + #end open + + @classmethod + async def open_async(celf, address, private, error = None, loop = None, timeout = DBUS.TIMEOUT_INFINITE) : + "opens a Connection to a specified address, separate from the" \ + " system or session buses." + # There is no nonblocking version of dbus_connection_open/dbus_connection_open_private, + # so I invoke it in a separate thread. 
+ + if loop == None : + loop = get_running_loop() + #end if + error, my_error = _get_error(error) + if timeout == DBUS.TIMEOUT_USE_DEFAULT : + timeout = DBUSX.DEFAULT_TIMEOUT + elif timeout == DBUS.TIMEOUT_INFINITE : + timeout = None + #end if + try : + result = await call_async \ + ( + func = (dbus.dbus_connection_open, dbus.dbus_connection_open_private)[private], + funcargs = (address.encode(), error._dbobj), + timeout = timeout, + abort = dbus.dbus_connection_unref, + loop = loop + ) + except TimeoutError : + result = None + error.set(DBUS.ERROR_TIMEOUT, "connection did not open in time") + #end try + my_error.raise_if_set() + if result != None : + result = celf(result) + result.attach_asyncio(loop) + #end if + return \ + result + #end open_async + + def _flush_awaiting_receive(self) : + if self._receive_queue != None : + while len(self._awaiting_receive) != 0 : + waiting = self._awaiting_receive.pop(0) + waiting.set_exception(BrokenPipeError("async receives have been disabled")) + #end while + #end if + #end _flush_awaiting_receive + + def close(self) : + self._flush_awaiting_receive() + dbus.dbus_connection_close(self._dbobj) + #end close + + @property + def is_connected(self) : + return \ + dbus.dbus_connection_get_is_connected(self._dbobj) != 0 + #end is_connected + + @property + def is_authenticated(self) : + return \ + dbus.dbus_connection_get_is_authenticated(self._dbobj) != 0 + #end is_authenticated + + @property + def is_anonymous(self) : + return \ + dbus.dbus_connection_get_is_anonymous(self._dbobj) != 0 + #end is_anonymous + + @property + def server_id(self) : + "asks the server at the other end for its unique id." + c_result = dbus.dbus_connection_get_server_id(self._dbobj) + result = ct.cast(c_result, ct.c_char_p).value.decode() + dbus.dbus_free(c_result) + return \ + result + #end server_id + + def can_send_type(self, type_code) : + "can this Connection send values of the specified TYPE_XXX code." 
\ + " Mainly useful for checking if we can send TYPE_UNIX_FD values." + return \ + dbus.dbus_connection_can_send_type(self._dbobj, type_code) != 0 + #end can_send_type + + def set_exit_on_disconnect(self, exit_on_disconnect) : + dbus.dbus_connection_set_exit_on_disconnect(self._dbobj, exit_on_disconnect) + #end set_exit_on_disconnect + + def preallocate_send(self) : + result = dbus.dbus_connection_preallocate_send(self._dbobj) + if result == None : + raise CallFailed("dbus_connection_preallocate_send") + #end if + return \ + PreallocatedSend(result, self) + #end preallocate_send + + def send_preallocated(self, preallocated, message) : + if not isinstance(preallocated, PreallocatedSend) or not isinstance(message, Message) : + raise TypeError("preallocated must be a PreallocatedSend and message must be a Message") + #end if + assert not preallocated._sent, "preallocated has already been sent" + serial = ct.c_uint() + dbus.dbus_connection_send_preallocated(self._dbobj, preallocated._dbobj, message._dbobj, ct.byref(serial)) + preallocated._sent = True + return \ + serial.value + #end send_preallocated + + def send(self, message) : + "puts a message in the outgoing queue." + if not isinstance(message, Message) : + raise TypeError("message must be a Message") + #end if + serial = ct.c_uint() + if not dbus.dbus_connection_send(self._dbobj, message._dbobj, ct.byref(serial)) : + raise CallFailed("dbus_connection_send") + #end if + return \ + serial.value + #end send + + def send_with_reply(self, message, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "puts a message in the outgoing queue and returns a PendingCall" \ + " that you can use to obtain the reply." 
+ if not isinstance(message, Message) : + raise TypeError("message must be a Message") + #end if + pending_call = ct.c_void_p() + if not dbus.dbus_connection_send_with_reply(self._dbobj, message._dbobj, ct.byref(pending_call), _get_timeout(timeout)) : + raise CallFailed("dbus_connection_send_with_reply") + #end if + if pending_call.value != None : + result = PendingCall(pending_call.value, self) + else : + result = None + #end if + return \ + result + #end send_with_reply + + def send_with_reply_and_block(self, message, timeout = DBUS.TIMEOUT_USE_DEFAULT, error = None) : + "sends a message, blocks the thread until the reply is available, and returns it." + if not isinstance(message, Message) : + raise TypeError("message must be a Message") + #end if + error, my_error = _get_error(error) + reply = dbus.dbus_connection_send_with_reply_and_block(self._dbobj, message._dbobj, _get_timeout(timeout), error._dbobj) + my_error.raise_if_set() + if reply != None : + result = Message(reply) + else : + result = None + #end if + return \ + result + #end send_with_reply_and_block + + async def send_await_reply(self, message, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "queues a message, suspends the coroutine (letting the event loop do" \ + " other things) until the reply is available, and returns it." 
+
+ if not isinstance(message, Message) :
+ raise TypeError("message must be a Message")
+ #end if
+ assert self.loop != None, "no event loop to attach coroutine to"
+ pending_call = ct.c_void_p()
+ if not dbus.dbus_connection_send_with_reply(self._dbobj, message._dbobj, ct.byref(pending_call), _get_timeout(timeout)) :
+ raise CallFailed("dbus_connection_send_with_reply")
+ #end if
+ if pending_call.value != None :
+ pending = PendingCall(pending_call.value, self)
+ else :
+ pending = None
+ #end if
+ reply = None # to begin with
+ if pending != None :
+ reply = await pending.await_reply()
+ #end if
+ return \
+ reply
+ #end send_await_reply
+
+ def flush(self) :
+ "makes sure all queued messages have been sent, blocking" \
+ " the thread until this is done."
+ dbus.dbus_connection_flush(self._dbobj)
+ #end flush
+
+ def read_write_dispatch(self, timeout = DBUS.TIMEOUT_USE_DEFAULT) :
+ "dispatches the first available message, if any. Otherwise blocks the" \
+ " thread until it can read or write, and does so before returning. Returns" \
+ " True as long as the Connection remains connected."
+ return \
+ dbus.dbus_connection_read_write_dispatch(self._dbobj, _get_timeout(timeout)) != 0
+ #end read_write_dispatch
+
+ def read_write(self, timeout = DBUS.TIMEOUT_USE_DEFAULT) :
+ "blocks the thread until something can be read or written on the Connection," \
+ " and does so, returning True. If the Connection has been disconnected," \
+ " immediately returns False."
+ return \
+ dbus.dbus_connection_read_write(self._dbobj, _get_timeout(timeout)) != 0
+ #end read_write
+
+ def borrow_message(self) :
+ "tries to peek at the next available message waiting to be read, returning" \
+ " None if there isn’t one. Call the Message’s return_borrowed() method" \
+ " to return it to the queue, or steal_borrowed() to confirm that you have" \
+ " read the message." 
+ msg = dbus.dbus_connection_borrow_message(self._dbobj) + if msg != None : + msg = Message(msg) + msg._conn = self + msg._borrowed = True + #end if + return \ + msg + #end borrow_message + + # returning/stealing borrowed messages done with + # Message.return_borrowed and Message.steal_borrowed + + def pop_message(self) : + "returns the next available incoming Message, if any, otherwise returns None." \ + " Note this bypasses all message filtering/dispatching on this Connection." + message = dbus.dbus_connection_pop_message(self._dbobj) + if message != None : + message = Message(message) + #end if + return \ + message + #end pop_message + + @property + def dispatch_status(self) : + "checks the state of the incoming message queue; returns a DISPATCH_XXX code." + return \ + dbus.dbus_connection_get_dispatch_status(self._dbobj) + #end dispatch_status + + def dispatch(self) : + "processes any available data, adding messages into the incoming" \ + " queue as appropriate. returns a DISPATCH_XXX code." + return \ + dbus.dbus_connection_dispatch(self._dbobj) + #end dispatch + + def set_watch_functions(self, add_function, remove_function, toggled_function, data, free_data = None) : + "sets the callbacks for libdbus to use to notify you of Watch objects it wants" \ + " you to manage." 
+ + def wrap_add_function(c_watch, _data) : + return \ + add_function(Watch(c_watch), data) + #end wrap_add_function + + def wrap_remove_function(c_watch, _data) : + return \ + remove_function(Watch(c_watch), data) + #end wrap_remove_function + + def wrap_toggled_function(c_watch, _data) : + return \ + toggled_function(Watch(c_watch), data) + #end wrap_toggled_function + + def wrap_free_data(_data) : + free_data(data) + #end wrap_free_data + + #begin set_watch_functions + self._add_watch_function = DBUS.AddWatchFunction(wrap_add_function) + self._remove_watch_function = DBUS.RemoveWatchFunction(wrap_remove_function) + if toggled_function != None : + self._toggled_watch_function = DBUS.WatchToggledFunction(wrap_toggled_function) + else : + self._toggled_watch_function = None + #end if + if free_data != None : + self._free_watch_data = DBUS.FreeFunction(wrap_free_data) + else : + self._free_watch_data = None + #end if + if not dbus.dbus_connection_set_watch_functions(self._dbobj, self._add_watch_function, self._remove_watch_function, self._toggled_watch_function, None, self._free_watch_data) : + raise CallFailed("dbus_connection_set_watch_functions") + #end if + #end set_watch_functions + + def set_timeout_functions(self, add_function, remove_function, toggled_function, data, free_data = None) : + "sets the callbacks for libdbus to use to notify you of Timeout objects it wants" \ + " you to manage." 
+ + def wrap_add_function(c_timeout, _data) : + return \ + add_function(Timeout(c_timeout), data) + #end wrap_add_function + + def wrap_remove_function(c_timeout, _data) : + return \ + remove_function(Timeout(c_timeout), data) + #end wrap_remove_function + + def wrap_toggled_function(c_timeout, _data) : + return \ + toggled_function(Timeout(c_timeout), data) + #end wrap_toggled_function + + def wrap_free_data(_data) : + free_data(data) + #end wrap_free_data + + #begin set_timeout_functions + self._add_timeout_function = DBUS.AddTimeoutFunction(wrap_add_function) + self._remove_timeout_function = DBUS.RemoveTimeoutFunction(wrap_remove_function) + if toggled_function != None : + self._toggled_timeout_function = DBUS.TimeoutToggledFunction(wrap_toggled_function) + else : + self._toggled_timeout_function = None + #end if + if free_data != None : + self._free_timeout_data = DBUS.FreeFunction(wrap_free_data) + else : + self._free_timeout_data = None + #end if + if not dbus.dbus_connection_set_timeout_functions(self._dbobj, self._add_timeout_function, self._remove_timeout_function, self._toggled_timeout_function, None, self._free_timeout_data) : + raise CallFailed("dbus_connection_set_timeout_functions") + #end if + #end set_timeout_functions + + def set_wakeup_main_function(self, wakeup_main, data, free_data = None) : + "sets the callback to use for libdbus to notify you that something has" \ + " happened requiring processing on the Connection." 
+
+
+ def wrap_wakeup_main(_data) :
+ wakeup_main(data)
+ #end wrap_wakeup_main
+
+ def wrap_free_data(_data) :
+ free_data(data)
+ #end wrap_free_data
+
+ #begin set_wakeup_main_function
+ if wakeup_main != None :
+ self._wakeup_main = DBUS.WakeupMainFunction(wrap_wakeup_main)
+ else :
+ self._wakeup_main = None
+ #end if
+ if free_data != None :
+ self._free_wakeup_main_data = DBUS.FreeFunction(wrap_free_data)
+ else :
+ self._free_wakeup_main_data = None
+ #end if
+ dbus.dbus_connection_set_wakeup_main_function(self._dbobj, self._wakeup_main, None, self._free_wakeup_main_data)
+ #end set_wakeup_main_function
+
+ def set_dispatch_status_function(self, function, data, free_data = None) :
+ "sets the callback to use for libdbus to notify you of a change in the" \
+ " dispatch status of the Connection."
+
+ w_self = weak_ref(self)
+
+ def wrap_dispatch_status(_conn, status, _data) :
+ function(_wderef(w_self, "connection"), status, data)
+ #end wrap_dispatch_status
+
+ def wrap_free_data(_data) :
+ free_data(data)
+ #end wrap_free_data
+
+ #begin set_dispatch_status_function
+ self._dispatch_status = DBUS.DispatchStatusFunction(wrap_dispatch_status)
+ if free_data != None :
+ self._free_dispatch_status_data = DBUS.FreeFunction(wrap_free_data)
+ else :
+ self._free_dispatch_status_data = None
+ #end if
+ dbus.dbus_connection_set_dispatch_status_function(self._dbobj, self._dispatch_status, None, self._free_dispatch_status_data)
+ #end set_dispatch_status_function
+
+ @property
+ def unix_fd(self) :
+ c_fd = ct.c_int()
+ if dbus.dbus_connection_get_unix_fd(self._dbobj, ct.byref(c_fd)) :
+ result = c_fd.value
+ else :
+ result = None
+ #end if
+ return \
+ result
+ #end unix_fd
+
+ def fileno(self) :
+ "for use with Python’s “select” functions." 
+ return \ + self.unix_fd + #end fileno + + @property + def socket(self) : + c_fd = ct.c_int() + if dbus.dbus_connection_get_socket(self._dbobj, ct.byref(c_fd)) : + result = c_fd.value + else : + result = None + #end if + return \ + result + #end socket + + @property + def unix_process_id(self) : + c_pid = ct.c_ulong() + if dbus.dbus_connection_get_unix_process_id(self._dbobj, ct.byref(c_pid)) : + result = c_pid.value + else : + result = None + #end if + return \ + result + #end unix_process_id + + @property + def unix_user(self) : + c_uid = ct.c_ulong() + if dbus.dbus_connection_get_unix_user(self._dbobj, ct.byref(c_uid)) : + result = c_uid.value + else : + result = None + #end if + return \ + result + #end unix_user + + # TODO: get_adt + + def set_unix_user_function(self, allow_unix_user, data, free_data = None) : + + w_self = weak_ref(self) + + def wrap_allow_unix_user(c_conn, uid, c_data) : + return \ + allow_unix_user(_wderef(w_self, "connection"), uid, data) + #end wrap_allow_unix_user + + def wrap_free_data(_data) : + free_data(data) + #end wrap_free_data + + #begin set_unix_user_function + if allow_unix_user != None : + self._allow_unix_user = DBUS.AllowUnixUserFunction(wrap_allow_unix_user) + else : + self._allow_unix_user = None + #end if + if free_data != None : + self._free_unix_user_data = DBUS.FreeFunction(wrap_free_data) + else : + self._free_unix_user_data = None + #end if + dbus.dbus_connection_set_unix_user_function(self._dbobj, self._allow_unix_user, None, self._free_unix_user_data) + #end set_unix_user_function + + def set_allow_anonymous(self, allow) : + dbus.dbus_connection_set_allow_anonymous(self._dbobj, allow) + #end set_allow_anonymous + + def set_route_peer_messages(self, enable) : + dbus.dbus_connection_set_route_peer_messages(self._dbobj, enable) + #end set_route_peer_messages + + def add_filter(self, function, user_data, free_data = None) : + "adds a filter callback that gets to look at all incoming messages" \ + " before they get to 
the dispatch system. The same function can be added" \ + " multiple times as long as the user_data is different." + + w_self = weak_ref(self) + + def wrap_function(c_conn, c_message, _data) : + self = _wderef(w_self, "connection") + message = Message(dbus.dbus_message_ref(c_message)) + result = function(self, message, user_data) + if asyncio.iscoroutine(result) : + self.create_task(result) + result = DBUS.HANDLER_RESULT_HANDLED + #end if + return \ + result + #end wrap_function + + def wrap_free_data(_data) : + free_data(user_data) + #end wrap_free_data + + #begin add_filter + filter_key = (function, data_key(user_data)) + filter_value = \ + { + "function" : DBUS.HandleMessageFunction(wrap_function), + "free_data" : (lambda : None, lambda : DBUS.FreeFunction(wrap_free_data))[free_data != None](), + } + # pass user_data id because libdbus identifies filter entry by both function address and user data address + if not dbus.dbus_connection_add_filter(self._dbobj, filter_value["function"], filter_key[1], filter_value["free_data"]) : + raise CallFailed("dbus_connection_add_filter") + #end if + self._filters[filter_key] = filter_value + # need to ensure wrapped functions don’t disappear prematurely + #end add_filter + + def remove_filter(self, function, user_data) : + "removes a message filter added by add_filter. The filter is identified" \ + " by both the function object and the user_data that was passed." 
+ filter_key = (function, data_key(user_data)) + if filter_key not in self._filters : + raise KeyError("removing nonexistent Connection filter") + #end if + filter_value = self._filters[filter_key] + # pass user_data id because libdbus identifies filter entry by both function address and user data address + dbus.dbus_connection_remove_filter(self._dbobj, filter_value["function"], filter_key[1]) + del self._filters[filter_key] + #end remove_filter + + def register_object_path(self, path, vtable, user_data, error = None) : + "registers an ObjectPathVTable as a dispatch handler for a specified" \ + " path within your object hierarchy." + if not isinstance(vtable, ObjectPathVTable) : + raise TypeError("vtable must be an ObjectPathVTable") + #end if + self._object_paths[path] = {"vtable" : vtable, "user_data" : user_data} # ensure it doesn’t disappear prematurely + error, my_error = _get_error(error) + if user_data != None : + c_user_data = id(user_data) + self._user_data[c_user_data] = user_data + else : + c_user_data = None + #end if + dbus.dbus_connection_try_register_object_path(self._dbobj, path.encode(), vtable._dbobj, c_user_data, error._dbobj) + my_error.raise_if_set() + #end register_object_path + + def register_fallback(self, path, vtable, user_data, error = None) : + "registers an ObjectPathVTable as a dispatch handler for an entire specified" \ + " subtree within your object hierarchy." 
+ if not isinstance(vtable, ObjectPathVTable) : + raise TypeError("vtable must be an ObjectPathVTable") + #end if + self._object_paths[path] = {"vtable" : vtable, "user_data" : user_data} # ensure it doesn’t disappear prematurely + error, my_error = _get_error(error) + if user_data != None : + c_user_data = id(user_data) + self._user_data[c_user_data] = user_data + else : + c_user_data = None + #end if + dbus.dbus_connection_try_register_fallback(self._dbobj, path.encode(), vtable._dbobj, c_user_data, error._dbobj) + my_error.raise_if_set() + #end register_fallback + + def unregister_object_path(self, path) : + "removes a previously-registered ObjectPathVTable handler at a specified" \ + " point (single object or entire subtree) within your object hierarchy." + if path not in self._object_paths : + raise KeyError("unregistering unregistered path") + #end if + if not dbus.dbus_connection_unregister_object_path(self._dbobj, path.encode()) : + raise CallFailed("dbus_connection_unregister_object_path") + #end if + user_data = self._object_paths[path]["user_data"] + c_user_data = id(user_data) + nr_remaining_refs = sum(int(self._object_paths[p]["user_data"] == user_data) for p in self._object_paths if p != path) + if nr_remaining_refs == 0 : + try : + del self._user_data[c_user_data] + except KeyError : + pass + #end try + #end if + del self._object_paths[path] + #end unregister_object_path + + def get_object_path_data(self, path) : + "returns the user_data you passed when previously registering an ObjectPathVTable" \ + " that covers this path in your object hierarchy, or None if no suitable match" \ + " could be found." 
+ c_data_p = ct.c_void_p() + if not dbus.dbus_connection_get_object_path_data(self._dbobj, path.encode(), ct.byref(c_data_p)) : + raise CallFailed("dbus_connection_get_object_path_data") + #end if + return \ + self._user_data.get(c_data_p.value) + #end get_object_path_data + + def list_registered(self, parent_path) : + "lists all the object paths for which you have ObjectPathVTable handlers registered." + child_entries = ct.POINTER(ct.c_char_p)() + if not dbus.dbus_connection_list_registered(self._dbobj, parent_path.encode(), ct.byref(child_entries)) : + raise CallFailed("dbus_connection_list_registered") + #end if + result = [] + i = 0 + while True : + entry = child_entries[i] + if entry == None : + break + result.append(entry.decode()) + i += 1 + #end while + dbus.dbus_free_string_array(child_entries) + return \ + result + #end list_registered + + @staticmethod + def _queue_received_message(self, message, _) : + # message filter which queues messages as appropriate for receive_message_async. + # Must be static so same function object can be passed to all add_filter/remove_filter + # calls. + queueit = message.type in self._receive_queue_enabled + if queueit : + self._receive_queue.append(message) + while len(self._awaiting_receive) != 0 : + # wake them all up, because I don’t know what message types + # each might be waiting for + waiting = self._awaiting_receive.pop(0) + waiting.set_result(True) # result actually ignored + #end while + #end if + return \ + (DBUS.HANDLER_RESULT_NOT_YET_HANDLED, DBUS.HANDLER_RESULT_HANDLED)[queueit] + #end _queue_received_message + + def enable_receive_message(self, queue_types) : + "enables/disables message types for reception via receive_message_async." 
\ + " queue_types is a set or sequence of DBUS.MESSAGE_TYPE_XXX values for" \ + " the types of messages to be put into the receive queue, or None to" \ + " disable all message types; this replaces queue_types passed to" \ + " any prior enable_receive_message_async call on this Connection." + assert self.loop != None, "no event loop to attach coroutines to" + enable = queue_types != None and len(queue_types) != 0 + if ( + enable + and + not all + ( + m + in + ( + DBUS.MESSAGE_TYPE_METHOD_CALL, + DBUS.MESSAGE_TYPE_METHOD_RETURN, + DBUS.MESSAGE_TYPE_ERROR, + DBUS.MESSAGE_TYPE_SIGNAL, + ) + for m in queue_types + ) + ) : + raise TypeError("invalid message type in queue_types: %s" % repr(queue_types)) + #end if + if enable : + if self._receive_queue == None : + self.add_filter(self._queue_received_message, None) + self._receive_queue = [] + #end if + self._receive_queue_enabled.clear() + self._receive_queue_enabled.update(queue_types) + else : + if self._receive_queue != None : + self._flush_awaiting_receive() + self.remove_filter(self._queue_received_message, None) + self._receive_queue = None + #end if + #end if + #end enable_receive_message + + async def receive_message_async(self, want_types = None, timeout = DBUS.TIMEOUT_INFINITE) : + "receives the first available queued message of an appropriate type, blocking" \ + " if none is available and timeout is nonzero. Returns None if the timeout" \ + " elapses without a suitable message becoming available. want_types can be" \ + " None to receive any of the previously-enabled message types, or a set or" \ + " sequence of DBUS.MESSAGE_TYPE_XXX values to look only for messages of those" \ + " types.\n" \ + "\n" \ + "You must have previously made a call to enable_receive_message to enable" \ + " queueing of one or more message types on this Connection." + assert self._receive_queue != None, "receive_message_async not enabled" + # should I check if want_types contains anything not in self._receive_queue_enabled? 
+ if timeout == DBUS.TIMEOUT_USE_DEFAULT : + timeout = DBUSX.DEFAULT_TIMEOUT + #end if + if timeout != DBUS.TIMEOUT_INFINITE : + finish_time = self.loop.time() + timeout + else : + finish_time = None + #end if + result = ... # indicates “watch this space” + while True : + # keep rescanning queue until got something or timeout + index = 0 # start next queue scan + while True : + if index == len(self._receive_queue) : + # nothing currently suitable on queue + if ( + timeout == 0 + or + finish_time != None + and + self.loop.time() > finish_time + ) : + # waited too long, give up + result = None + break + #end if + if not self.is_connected : + raise BrokenPipeError("Connection has been disconnected") + #end if + # wait and see if something turns up + awaiting = self.loop.create_future() + self._awaiting_receive.append(awaiting) + if finish_time != None : + wait_timeout = finish_time - self.loop.time() + else : + wait_timeout = None + #end if + await self.wait \ + ( + (awaiting,), + timeout = wait_timeout + ) + # ignore done & pending results because they + # don’t match up with future I’m waiting for + try : + self._awaiting_receive.remove(awaiting) + except ValueError : + pass + #end try + awaiting.cancel() + # just to avoid “Future exception was never retrieved” message + break # start new queue scan + #end if + # check next queue item + msg = self._receive_queue[index] + if want_types == None or msg.type in want_types : + # caller wants this one + result = msg + self._receive_queue.pop(index) # remove msg from queue + break + #end if + index += 1 + #end while + if result != ... : + # either got something or given up + break + #end while + return \ + result + #end receive_message_async + + def iter_messages_async(self, want_types = None, stop_on = None, timeout = DBUS.TIMEOUT_INFINITE) : + "wrapper around receive_message_async() to allow use with an async-for statement." 
\ + " Lets you write\n" \ + "\n" \ + " async for message in «conn».iter_messages_async(«want_types», «stop_on», «timeout») :" \ + " «process message»\n" \ + " #end for\n" \ + "\n" \ + "to receive and process messages in a loop. stop_on is an optional set of" \ + " STOP_ON.xxx values indicating the conditions under which the iterator will" \ + " raise StopAsyncIteration to terminate the loop." + if stop_on == None : + stop_on = frozenset() + elif ( + not isinstance(stop_on, (set, frozenset)) + or + not all(isinstance(elt, STOP_ON) for elt in stop_on) + ) : + raise TypeError("stop_on must be None or set of STOP_ON") + #end if + assert self._receive_queue != None, "receive_message_async not enabled" + return \ + _MsgAiter(self, want_types, stop_on, timeout) + #end iter_messages_async + + # TODO: allocate/free data slot -- staticmethods + # TODO: get/set data + + def set_change_sigpipe(self, will_modify_sigpipe) : + dbus.dbus_connection_set_change_sigpipe(self._dbobj, will_modify_sigpipe) + #end set_change_sigpipe + + @property + def max_message_size(self) : + return \ + dbus.dbus_connection_get_max_message_size(self._dbobj) + #end max_message_size + + @max_message_size.setter + def max_message_size(self, size) : + dbus.dbus_connection_set_max_message_size(self._dbobj, size) + #end max_message_size + + @property + def max_received_size(self) : + return \ + dbus.dbus_connection_get_max_received_size(self._dbobj) + #end max_received_size + + @max_received_size.setter + def max_received_size(self, size) : + dbus.dbus_connection_set_max_received_size(self._dbobj, size) + #end max_received_size + + @property + def max_message_unix_fds(self) : + return \ + dbus.dbus_connection_get_max_message_unix_fds(self._dbobj) + #end max_message_unix_fds + + @max_message_unix_fds.setter + def max_message_unix_fds(self, size) : + dbus.dbus_connection_set_max_message_unix_fds(self._dbobj, size) + #end max_message_unix_fds + + @property + def max_received_unix_fds(self) : + return \ + 
dbus.dbus_connection_get_max_received_unix_fds(self._dbobj) + #end max_received_unix_fds + + @max_received_unix_fds.setter + def max_received_unix_fds(self, size) : + dbus.dbus_connection_set_max_received_unix_fds(self._dbobj, size) + #end max_received_unix_fds + + @property + def outgoing_size(self) : + return \ + dbus.dbus_connection_get_outgoing_size(self._dbobj) + #end outgoing_size + + @property + def outgoing_unix_fds(self) : + return \ + dbus.dbus_connection_get_outgoing_unix_fds(self._dbobj) + #end outgoing_unix_fds + + @property + def has_messages_to_send(self) : + return \ + dbus.dbus_connection_has_messages_to_send(self._dbobj) != 0 + #end has_messages_to_send + + # message bus APIs + # + + @classmethod + def bus_get(celf, type, private, error = None) : + "returns a Connection to one of the predefined D-Bus buses; type is a BUS_xxx value." + error, my_error = _get_error(error) + result = (dbus.dbus_bus_get, dbus.dbus_bus_get_private)[private](type, error._dbobj) + my_error.raise_if_set() + if result != None : + result = celf(result) + #end if + return \ + result + #end bus_get + + @classmethod + async def bus_get_async(celf, type, private, error = None, loop = None, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + if loop == None : + loop = get_running_loop() + #end if + assert type in (DBUS.BUS_SESSION, DBUS.BUS_SYSTEM, DBUS.BUS_STARTER), \ + "bus type must be BUS_SESSION, BUS_SYSTEM or BUS_STARTER" + if type == DBUS.BUS_STARTER : + starter_type = os.environ.get(DBUSX.STARTER_BUS_ADDRESS_TYPE) + is_system_bus = starter_type != None and starter_type == DBUSX.BUS_TYPE_SYSTEM + addr = os.environ.get(DBUSX.STARTER_BUS_ADDRESS_VAR) + else : + is_system_bus = type == DBUS.BUS_SYSTEM + addr = os.environ.get \ + ( + (DBUSX.SESSION_BUS_ADDRESS_VAR, DBUSX.SYSTEM_BUS_ADDRESS_VAR)[is_system_bus] + ) + #end if + if not private and celf._shared_connections[is_system_bus] != None : + result = celf._shared_connections[is_system_bus] + else : + if addr == None : + addr = 
(DBUSX.SESSION_BUS_ADDRESS, DBUSX.SYSTEM_BUS_ADDRESS)[is_system_bus] + #end if + try : + result = await celf.open_async(addr, private, error, loop, timeout) + if error != None and error.is_set : + raise _Abort + #end if + await result.bus_register_async(error = error, timeout = timeout) + if error != None and error.is_set : + raise _Abort + #end if + if not private : + celf._shared_connections[is_system_bus] = result + #end if + except _Abort : + result = None + #end try + #end if + return \ + result + #end bus_get_async + + def bus_register(self, error = None) : + "Only to be used if you created the Connection with open() instead of bus_get();" \ + " sends a “Hello” message to the D-Bus daemon to get a unique name assigned." \ + " Can only be called once." + error, my_error = _get_error(error) + dbus.dbus_bus_register(self._dbobj, error._dbobj) + my_error.raise_if_set() + #end bus_register + + async def bus_register_async(self, error = None, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "Only to be used if you created the Connection with open() instead of bus_get();" \ + " sends a “Hello” message to the D-Bus daemon to get a unique name assigned." \ + " Can only be called once." + assert self.loop != None, "no event loop to attach coroutine to" + assert self.bus_unique_name == None, "bus already registered" + message = Message.new_method_call \ + ( + destination = DBUS.SERVICE_DBUS, + path = DBUS.PATH_DBUS, + iface = DBUS.INTERFACE_DBUS, + method = "Hello" + ) + reply = await self.send_await_reply(message, timeout = timeout) + if error != None and reply.type == DBUS.MESSAGE_TYPE_ERROR : + reply.set_error(error) + else : + self.bus_unique_name = reply.expect_return_objects("s")[0] + #end if + #end bus_register_async + + @property + def bus_unique_name(self) : + "returns None if the bus connection has not been registered. Note that the" \ + " unique_name can only be set once." 
+ result = dbus.dbus_bus_get_unique_name(self._dbobj) + if result != None : + result = result.decode() + #end if + return \ + result + #end bus_unique_name + + @bus_unique_name.setter + def bus_unique_name(self, unique_name) : + if not dbus.dbus_bus_set_unique_name(self._dbobj, unique_name.encode()) : + raise CallFailed("dbus_bus_set_unique_name") + #end if + #end bus_unique_name + + #+ + # Calls to D-Bus Daemon + #- + + @property + def bus_id(self) : + my_error = Error() + c_result = dbus.dbus_bus_get_id(self._dbobj, my_error._dbobj) + my_error.raise_if_set() + result = ct.cast(c_result, ct.c_char_p).value.decode() + dbus.dbus_free(c_result) + return \ + result + #end bus_id + + @property + async def bus_id_async(self) : + message = Message.new_method_call \ + ( + destination = DBUS.SERVICE_DBUS, + path = DBUS.PATH_DBUS, + iface = DBUS.INTERFACE_DBUS, + method = "GetId" + ) + reply = await self.send_await_reply(message) + return \ + reply.expect_return_objects("s")[0] + #end bus_id_async + + def bus_get_unix_user(self, name, error = None) : + error, my_error = _get_error(error) + result = dbus.dbus_bus_get_unix_user(self._dbobj, name.encode(), error._dbobj) + my_error.raise_if_set() + return \ + result + #end bus_get_unix_user + + async def bus_get_unix_user_async(self, name, error = None, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + message = Message.new_method_call \ + ( + destination = DBUS.SERVICE_DBUS, + path = DBUS.PATH_DBUS, + iface = DBUS.INTERFACE_DBUS, + method = "GetConnectionUnixUser" + ) + message.append_objects("s", name) + reply = await self.send_await_reply(message, timeout = timeout) + if error != None and reply.type == DBUS.MESSAGE_TYPE_ERROR : + reply.set_error(error) + result = None + else : + result = reply.expect_return_objects("u")[0] + #end if + return \ + result + #end bus_get_unix_user_async + + def bus_request_name(self, name, flags, error = None) : + "asks the D-Bus daemon to register the specified bus name on your behalf," \ + " blocking 
the thread until the reply is received. flags is a combination of" \ + " NAME_FLAG_xxx bits. Result will be a REQUEST_NAME_REPLY_xxx value or -1 on error." + error, my_error = _get_error(error) + result = dbus.dbus_bus_request_name(self._dbobj, name.encode(), flags, error._dbobj) + my_error.raise_if_set() + return \ + result + #end bus_request_name + + async def bus_request_name_async(self, name, flags, error = None, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "asks the D-Bus daemon to register the specified bus name on your behalf. flags is" \ + " a combination of NAME_FLAG_xxx bits. Result will be a REQUEST_NAME_REPLY_xxx value" \ + " or None on error." + message = Message.new_method_call \ + ( + destination = DBUS.SERVICE_DBUS, + path = DBUS.PATH_DBUS, + iface = DBUS.INTERFACE_DBUS, + method = "RequestName" + ) + message.append_objects("su", name, flags) + reply = await self.send_await_reply(message, timeout = timeout) + if error != None and reply.type == DBUS.MESSAGE_TYPE_ERROR : + reply.set_error(error) + result = None + else : + result = reply.expect_return_objects("u")[0] + #end if + return \ + result + #end bus_request_name_async + + def bus_release_name(self, name, error = None) : + "asks the D-Bus daemon to release your registration of the specified bus name," \ + " blocking the thread until the reply is received." + error, my_error = _get_error(error) + result = dbus.dbus_bus_release_name(self._dbobj, name.encode(), error._dbobj) + my_error.raise_if_set() + return \ + result + #end bus_release_name + + async def bus_release_name_async(self, name, error = None, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "asks the D-Bus daemon to release your registration of the specified bus name." 
+ message = Message.new_method_call \ + ( + destination = DBUS.SERVICE_DBUS, + path = DBUS.PATH_DBUS, + iface = DBUS.INTERFACE_DBUS, + method = "ReleaseName" + ) + message.append_objects("s", name) + reply = await self.send_await_reply(message, timeout = timeout) + if error != None and reply.type == DBUS.MESSAGE_TYPE_ERROR : + reply.set_error(error) + result = None + else : + result = reply.expect_return_objects("u")[0] + #end if + return \ + result + #end bus_release_name_async + + def bus_name_has_owner(self, name, error = None) : + "asks the D-Bus daemon if anybody has claimed the specified bus name, blocking" \ + " the thread until the reply is received." + error, my_error = _get_error(error) + result = dbus.dbus_bus_name_has_owner(self._dbobj, name.encode(), error._dbobj) + my_error.raise_if_set() + return \ + result + #end bus_name_has_owner + + async def bus_name_has_owner_async(self, name, error = None, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "asks the D-Bus daemon if anybody has claimed the specified bus name." 
+ message = Message.new_method_call \
+ (
+ destination = DBUS.SERVICE_DBUS,
+ path = DBUS.PATH_DBUS,
+ iface = DBUS.INTERFACE_DBUS,
+ method = "NameHasOwner"
+ )
+ message.append_objects("s", name)
+ reply = await self.send_await_reply(message, timeout = timeout)
+ if error != None and reply.type == DBUS.MESSAGE_TYPE_ERROR :
+ reply.set_error(error)
+ result = None
+ else :
+ result = reply.expect_return_objects("b")[0]
+ #end if
+ return \
+ result
+ #end bus_name_has_owner_async
+
+ def bus_start_service_by_name(self, name, flags = 0, error = None) :
+ "asks the D-Bus daemon to start the service that provides the specified bus" \
+ " name, blocking the thread until the reply is received. Returns the reply" \
+ " flags word from libdbus (presumably a DBUS.START_REPLY_xxx value -- TODO" \
+ " confirm against the dbus_bus_start_service_by_name documentation)."
+ error, my_error = _get_error(error)
+ outflags = ct.c_uint()
+ success = dbus.dbus_bus_start_service_by_name(self._dbobj, name.encode(), flags, ct.byref(outflags), error._dbobj)
+ # NOTE(review): the boolean success result is ignored here; failure is
+ # assumed to be reported via the error object checked below -- confirm
+ # libdbus always sets the error when it returns FALSE.
+ my_error.raise_if_set()
+ return \
+ outflags.value
+ #end bus_start_service_by_name
+
+ async def bus_start_service_by_name_async(self, name, flags = 0, error = None, timeout = DBUS.TIMEOUT_USE_DEFAULT) :
+ "asks the D-Bus daemon to start the service that provides the specified bus" \
+ " name. Returns the reply code from the StartServiceByName daemon call, or" \
+ " None on error."
+ message = Message.new_method_call \
+ (
+ destination = DBUS.SERVICE_DBUS,
+ path = DBUS.PATH_DBUS,
+ iface = DBUS.INTERFACE_DBUS,
+ method = "StartServiceByName"
+ )
+ message.append_objects("su", name, flags)
+ reply = await self.send_await_reply(message, timeout = timeout)
+ if error != None and reply.type == DBUS.MESSAGE_TYPE_ERROR :
+ reply.set_error(error)
+ result = None
+ else :
+ result = reply.expect_return_objects("u")[0]
+ #end if
+ return \
+ result
+ #end bus_start_service_by_name_async
+
+ def bus_add_match(self, rule, error = None) :
+ "adds a match rule for messages you want to receive. By default you get all" \
+ " messages addressed to your bus name(s); but you can use this, for example," \
+ " to request notification of signals indicating useful events on the system."
+ error, my_error = _get_error(error) + dbus.dbus_bus_add_match(self._dbobj, format_rule(rule).encode(), error._dbobj) + my_error.raise_if_set() + #end bus_add_match + + async def bus_add_match_async(self, rule, error = None, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "adds a match rule for messages you want to receive. By default you get all" \ + " messages addressed to your bus name(s); but you can use this, for example," \ + " to request notification of signals indicating useful events on the system." + message = Message.new_method_call \ + ( + destination = DBUS.SERVICE_DBUS, + path = DBUS.PATH_DBUS, + iface = DBUS.INTERFACE_DBUS, + method = "AddMatch" + ) + message.append_objects("s", format_rule(rule)) + reply = await self.send_await_reply(message, timeout = timeout) + if error != None and reply.type == DBUS.MESSAGE_TYPE_ERROR : + reply.set_error(error) + else : + reply.expect_return_objects("") + #end if + #end bus_add_match_async + + def bus_remove_match(self, rule, error = None) : + "removes a previously-added match rule for messages you previously wanted" \ + " to receive." + error, my_error = _get_error(error) + dbus.dbus_bus_remove_match(self._dbobj, format_rule(rule).encode(), error._dbobj) + my_error.raise_if_set() + #end bus_remove_match + + async def bus_remove_match_async(self, rule, error = None, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "removes a previously-added match rule for messages you previously wanted" \ + " to receive." 
+ message = Message.new_method_call \ + ( + destination = DBUS.SERVICE_DBUS, + path = DBUS.PATH_DBUS, + iface = DBUS.INTERFACE_DBUS, + method = "RemoveMatch" + ) + message.append_objects("s", format_rule(rule)) + reply = await self.send_await_reply(message, timeout = timeout) + if error != None and reply.type == DBUS.MESSAGE_TYPE_ERROR : + reply.set_error(error) + else : + reply.expect_return_objects("") + #end if + #end bus_remove_match_async + + @staticmethod + def _rule_action_match(self, message, _) : + # installed as a message filter to invoke actions corresponding to rules + # that the message matches. To avoid spurious method-not-handled errors + # from eavesdropping on method calls not addressed to me, this routine + # always returns a “handled” status. That means this same Connection + # object should not be used for both eavesdropping and for normal + # method calls. + handled = False + for entry in self._match_actions.values() : + if matches_rule(message, entry.rule) : + for action in entry.actions : + result = action.func(self, message, action.user_data) + if asyncio.iscoroutine(result) : + self.create_task(result) + #end if + #end for + handled = True # passed to at least one handler + #end if + #end for + return \ + (DBUS.HANDLER_RESULT_NOT_YET_HANDLED, DBUS.HANDLER_RESULT_HANDLED)[handled] + #end _rule_action_match + + def bus_add_match_action(self, rule, func, user_data, error = None) : + "adds a message filter that invokes func(conn, message, user_data)" \ + " for each incoming message that matches the specified rule. Unlike" \ + " the underlying add_filter and bus_add_match calls, this allows you" \ + " to associate the action with the particular matching rule.\n" \ + "\n" \ + "Note that the message filter installed to process these rules always" \ + " returns a DBUS.HANDLER_RESULT_HANDLED status; so either only use this" \ + " to listen for signals, or do not use the same Connection object to" \ + " handle normal method calls." 
+ rulekey = format_rule(rule) + rule = unformat_rule(rule) + if rulekey not in self._match_actions : + self.bus_add_match(rulekey, error) # could fail here with bad rule + if error == None or not error.is_set : + if len(self._match_actions) == 0 : + self.add_filter(self._rule_action_match, None) + #end if + self._match_actions[rulekey] = _MatchActionEntry(rule) + #end if + #end if + if error == None or not error.is_set : + self._match_actions[rulekey].actions.add(_MatchActionEntry._Action(func, user_data)) + #end if + #end bus_add_match_action + + def bus_remove_match_action(self, rule, func, user_data, error = None) : + "removes a message filter previously installed with bus_add_match_action." + rulekey = format_rule(rule) + rule = unformat_rule(rule) + self._match_actions[rulekey].actions.remove(_MatchActionEntry._Action(func, user_data)) + if len(self._match_actions[rulekey].actions) == 0 : + self.bus_remove_match(rulekey, error) # shouldn’t fail! + del self._match_actions[rulekey] + if len(self._match_actions) == 0 : + self.remove_filter(self._rule_action_match, None) + #end if + #end if + #end bus_remove_match_action + + async def bus_add_match_action_async(self, rule, func, user_data, error = None, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "adds a message filter that invokes func(conn, message, user_data)" \ + " for each incoming message that matches the specified rule. Unlike" \ + " the underlying add_filter and bus_add_match calls, this allows you" \ + " to associate the action with the particular matching rule.\n" \ + "\n" \ + "Note that the message filter installed to process these rules always" \ + " returns a DBUS.HANDLER_RESULT_HANDLED status; so either only use this" \ + " to listen for signals, or do not use the same Connection object to" \ + " handle normal method calls." 
+ rulekey = format_rule(rule)
+ rule = unformat_rule(rule)
+ if rulekey not in self._match_actions :
+ await self.bus_add_match_async(rulekey, error, timeout) # could fail here with bad rule
+ if error == None or not error.is_set :
+ if len(self._match_actions) == 0 :
+ # first rule being added: install the shared dispatching filter
+ self.add_filter(self._rule_action_match, None)
+ #end if
+ self._match_actions[rulekey] = _MatchActionEntry(rule)
+ #end if
+ #end if
+ if error == None or not error.is_set :
+ self._match_actions[rulekey].actions.add(_MatchActionEntry._Action(func, user_data))
+ #end if
+ #end bus_add_match_action_async
+
+ async def bus_remove_match_action_async(self, rule, func, user_data, error = None, timeout = DBUS.TIMEOUT_USE_DEFAULT) :
+ "removes a message filter previously installed with bus_add_match_action."
+ rulekey = format_rule(rule)
+ rule = unformat_rule(rule)
+ self._match_actions[rulekey].actions.remove(_MatchActionEntry._Action(func, user_data))
+ if len(self._match_actions[rulekey].actions) == 0 :
+ await self.bus_remove_match_async(rulekey, error, timeout) # shouldn’t fail!
+ del self._match_actions[rulekey]
+ if len(self._match_actions) == 0 :
+ # last rule gone: uninstall the dispatching filter again
+ self.remove_filter(self._rule_action_match, None)
+ #end if
+ #end if
+ #end bus_remove_match_action_async
+
+ def become_monitor(self, rules) :
+ "turns the connection into one that can only receive monitoring messages."
+ message = Message.new_method_call \
+ (
+ destination = DBUS.SERVICE_DBUS,
+ path = DBUS.PATH_DBUS,
+ iface = DBUS.INTERFACE_MONITORING,
+ method = "BecomeMonitor"
+ )
+ # signature "asu": array of match-rule strings plus a flags word, always
+ # passed as 0 here -- NOTE(review): presumably no flags are defined yet;
+ # confirm against the D-Bus monitoring specification.
+ message.append_objects("asu", (list(format_rule(rule) for rule in rules)), 0)
+ self.send(message)
+ #end become_monitor
+
+ #+
+ # End calls to D-Bus Daemon
+ #-
+
+ def attach_asyncio(self, loop = None) :
+ "attaches this Connection object to an asyncio event loop. If none is" \
+ " specified, the default event loop (as returned from asyncio.get_event_loop())" \
+ " is used."
+ + w_self = weak_ref(self) + # to avoid a reference cycle + + def dispatch() : + return \ + _wderef(w_self, "connection").dispatch() + #end dispatch + + #begin attach_asyncio + assert self.loop == None, "already attached to an event loop" + _loop_attach(self, loop, dispatch) + #end attach_asyncio + +#end Connection + +class _MsgAiter : + # internal class for use by Connection.iter_messages_async (above). + + def __init__(self, conn, want_types, stop_on, timeout) : + self.conn = conn + self.want_types = want_types + self.stop_on = stop_on + self.timeout = timeout + #end __init__ + + def __aiter__(self) : + # I’m my own iterator. + return \ + self + #end __aiter__ + + async def __anext__(self) : + stop_iter = False + try : + result = await self.conn.receive_message_async(self.want_types, self.timeout) + if result == None and STOP_ON.TIMEOUT in self.stop_on : + stop_iter = True + #end if + except BrokenPipeError : + if STOP_ON.CLOSED not in self.stop_on : + raise + #end if + stop_iter = True + #end try + if stop_iter : + raise StopAsyncIteration("Connection.receive_message_async terminating") + #end if + return \ + result + #end __anext__ + +#end _MsgAiter + +class Server(TaskKeeper) : + "wrapper around a DBusServer object. Do not instantiate directly; use" \ + " the listen method.\n" \ + "\n" \ + "You only need this if you want to use D-Bus as a communication mechanism" \ + " separate from the system/session buses provided by the D-Bus daemon: you" \ + " create a Server object listening on a specified address, and clients can" \ + " use Connection.open() to connect to you on that address." 
+ # + + # Doesn’t really need services of TaskKeeper for now, but might be + # useful in future + + __slots__ = \ + ( + "_dbobj", + "_new_connections", + "_await_new_connections", + "max_new_connections", + "autoattach_new_connections", + # need to keep references to ctypes-wrapped functions + # so they don't disappear prematurely: + "_new_connection_function", + "_free_new_connection_data", + "_add_watch_function", + "_remove_watch_function", + "_toggled_watch_function", + "_free_watch_data", + "_add_timeout_function", + "_remove_timeout_function", + "_toggled_timeout_function", + "_free_timeout_data", + ) # to forestall typos + + _instances = WeakValueDictionary() + + def __new__(celf, _dbobj) : + self = celf._instances.get(_dbobj) + if self == None : + self = super().__new__(celf) + super()._init(self) + self._dbobj = _dbobj + self._new_connections = None + self._await_new_connections = None + self.max_new_connections = None + self.autoattach_new_connections = True + self._new_connection_function = None + self._free_new_connection_data = None + self._add_watch_function = None + self._remove_watch_function = None + self._toggled_watch_function = None + self._free_watch_data = None + self._add_timeout_function = None + self._remove_timeout_function = None + self._toggled_timeout_function = None + self._free_timeout_data = None + celf._instances[_dbobj] = self + else : + dbus.dbus_server_unref(self._dbobj) + # lose extra reference created by caller + #end if + return \ + self + #end __new__ + + def __del__(self) : + if self._dbobj != None : + if self.loop != None : + # remove via direct low-level libdbus calls + dbus.dbus_server_set_watch_functions(self._dbobj, None, None, None, None, None) + dbus.dbus_server_set_timeout_functions(self._dbobj, None, None, None, None, None) + self.loop = None + #end if + dbus.dbus_server_unref(self._dbobj) + self._dbobj = None + #end if + #end __del__ + + @classmethod + def listen(celf, address, error = None) : + error, my_error = 
_get_error(error) + result = dbus.dbus_server_listen(address.encode(), error._dbobj) + my_error.raise_if_set() + if result != None : + result = celf(result) + #end if + return \ + result + #end listen + + def _flush_awaiting_connect(self) : + if self._await_new_connections != None : + while len(self._await_new_connections) != 0 : + waiting = self._await_new_connections.pop(0) + waiting.set_exception(BrokenPipeError("async listens have been disabled")) + #end while + #end if + #end _flush_awaiting_connect + + def disconnect(self) : + self._flush_awaiting_connect() + dbus.dbus_server_disconnect(self._dbobj) + #end disconnect + + @property + def is_connected(self) : + return \ + dbus.dbus_server_get_is_connected(self._dbobj) != 0 + #end is_connected + + @property + def address(self) : + c_result = dbus.dbus_server_get_address(self._dbobj) + if c_result == None : + raise CallFailed("dbus_server_get_address") + #end if + result = ct.cast(c_result, ct.c_char_p).value.decode() + dbus.dbus_free(c_result) + return \ + result + #end address + + @property + def id(self) : + c_result = dbus.dbus_server_get_id(self._dbobj) + if c_result == None : + raise CallFailed("dbus_server_get_id") + #end if + result = ct.cast(c_result, ct.c_char_p).value.decode() + dbus.dbus_free(c_result) + return \ + result + #end id + + def set_new_connection_function(self, function, data, free_data = None) : + "sets the callback for libdbus to notify you of a new incoming connection." \ + " It is up to you to save the Connection object for later processing of" \ + " messages, or close it to reject the connection attempt." 
+ + w_self = weak_ref(self) + + def wrap_function(c_self, c_conn, _data) : + function(_wderef(w_self, "server"), Connection(dbus.dbus_connection_ref(c_conn)), data) + # even though this is a new connection, I still have to reference it + #end wrap_function + + def wrap_free_data(_data) : + free_data(data) + #end wrap_free_data + + #begin set_new_connection_function + assert self.loop == None, "new connections are being managed by an event loop" + self._new_connection_function = DBUS.NewConnectionFunction(wrap_function) + if free_data != None : + self._free_new_connection_data = DBUS.FreeFunction(wrap_free_data) + else : + self._free_new_connection_data = None + #end if + dbus.dbus_server_set_new_connection_function(self._dbobj, self._new_connection_function, None, self._free_new_connection_data) + #end set_new_connection_function + + def set_watch_functions(self, add_function, remove_function, toggled_function, data, free_data = None) : + "sets the callbacks for libdbus to use to notify you of Watch objects it wants" \ + " you to manage." 
+ + def wrap_add_function(c_watch, _data) : + return \ + add_function(Watch(c_watch), data) + #end wrap_add_function + + def wrap_remove_function(c_watch, _data) : + return \ + remove_function(Watch(c_watch), data) + #end wrap_remove_function + + def wrap_toggled_function(c_watch, _data) : + return \ + toggled_function(Watch(c_watch), data) + #end wrap_toggled_function + + def wrap_free_data(_data) : + free_data(data) + #end wrap_free_data + + #begin set_watch_functions + self._add_watch_function = DBUS.AddWatchFunction(wrap_add_function) + self._remove_watch_function = DBUS.RemoveWatchFunction(wrap_remove_function) + if toggled_function != None : + self._toggled_watch_function = DBUS.WatchToggledFunction(wrap_toggled_function) + else : + self._toggled_watch_function = None + #end if + if free_data != None : + self._free_watch_data = DBUS.FreeFunction(wrap_free_data) + else : + self._free_watch_data = None + #end if + if not dbus.dbus_server_set_watch_functions(self._dbobj, self._add_watch_function, self._remove_watch_function, self._toggled_watch_function, None, self._free_watch_data) : + raise CallFailed("dbus_server_set_watch_functions") + #end if + #end set_watch_functions + + def set_timeout_functions(self, add_function, remove_function, toggled_function, data, free_data = None) : + "sets the callbacks for libdbus to use to notify you of Timeout objects it wants" \ + " you to manage." 
+
+ def wrap_add_function(c_timeout, _data) :
+ return \
+ add_function(Timeout(c_timeout), data)
+ #end wrap_add_function
+
+ def wrap_remove_function(c_timeout, _data) :
+ return \
+ remove_function(Timeout(c_timeout), data)
+ #end wrap_remove_function
+
+ def wrap_toggled_function(c_timeout, _data) :
+ return \
+ toggled_function(Timeout(c_timeout), data)
+ #end wrap_toggled_function
+
+ def wrap_free_data(_data) :
+ free_data(data)
+ #end wrap_free_data
+
+ #begin set_timeout_functions
+ self._add_timeout_function = DBUS.AddTimeoutFunction(wrap_add_function)
+ self._remove_timeout_function = DBUS.RemoveTimeoutFunction(wrap_remove_function)
+ if toggled_function != None :
+ self._toggled_timeout_function = DBUS.TimeoutToggledFunction(wrap_toggled_function)
+ else :
+ self._toggled_timeout_function = None
+ #end if
+ if free_data != None :
+ self._free_timeout_data = DBUS.FreeFunction(wrap_free_data)
+ else :
+ self._free_timeout_data = None
+ #end if
+ if not dbus.dbus_server_set_timeout_functions(self._dbobj, self._add_timeout_function, self._remove_timeout_function, self._toggled_timeout_function, None, self._free_timeout_data) :
+ raise CallFailed("dbus_server_set_timeout_functions")
+ #end if
+ #end set_timeout_functions
+
+ def set_auth_mechanisms(self, mechanisms) :
+ "specifies the sequence of authentication mechanism names that this server" \
+ " will accept, passed straight through to dbus_server_set_auth_mechanisms" \
+ " as a NULL-terminated array of C strings."
+ nr_mechanisms = len(mechanisms)
+ c_mechanisms = (ct.c_char_p * (nr_mechanisms + 1))()
+ for i in range(nr_mechanisms) :
+ c_mechanisms[i] = mechanisms[i].encode()
+ #end for
+ c_mechanisms[nr_mechanisms] = None # marks end of array
+ if not dbus.dbus_server_set_auth_mechanisms(self._dbobj, c_mechanisms) :
+ raise CallFailed("dbus_server_set_auth_mechanisms")
+ #end if
+ #end set_auth_mechanisms
+
+ # TODO: allocate/free slot (static methods)
+ # TODO: get/set/data
+
+ def attach_asyncio(self, loop = None) :
+ "attaches this Server object to an asyncio event loop. If none is" \
+ " specified, the default event loop (as returned from asyncio.get_event_loop())" \
+ " is used.\n" \
+ "\n" \
+ "This call will also automatically attach a new_connection callback. You then use" \
+ " the await_new_connection coroutine to obtain new connections. If" \
+ " self.autoattach_new_connections, then Connection.attach_asyncio() will" \
+ " automatically be called to handle events for the new connection."
+
+ def new_connection(self, conn, user_data) :
+ # note: a plain nested function, not a method -- "self" here is the
+ # Server instance, which set_new_connection_function passes as the
+ # first argument to the callback.
+ if len(self._await_new_connections) != 0 :
+ awaiting = self._await_new_connections.pop(0)
+ awaiting.set_result(conn)
+ else :
+ # put it in _new_connections queue
+ if (
+ self.max_new_connections != None
+ and
+ len(self._new_connections) >= self.max_new_connections
+ ) :
+ # too many connections pending, reject
+ conn.close()
+ else :
+ self._new_connections.append(conn)
+ #end if
+ #end if
+ #end new_connection
+
+ #begin attach_asyncio
+ assert self.loop == None, "already attached to an event loop"
+ assert self._new_connection_function == None, "already set a new-connection function"
+ self._new_connections = []
+ self._await_new_connections = []
+ self.set_new_connection_function(new_connection, None)
+ _loop_attach(self, loop, None)
+ #end attach_asyncio
+
+ async def await_new_connection(self, timeout = DBUS.TIMEOUT_INFINITE) :
+ "retrieves the next new Connection, if there is one available, otherwise" \
+ " suspends the current coroutine for up to the specified timeout duration" \
+ " while waiting for one to appear. Returns None if there is no new connection" \
+ " within that time."
+ assert self.loop != None, "no event loop to attach coroutine to" + if len(self._new_connections) != 0 : + result = self._new_connections.pop(0) + else : + if not self.is_connected : + raise BrokenPipeError("Server has been disconnected") + #end if + if timeout == 0 : + # might as well short-circuit the whole waiting process + result = None + else : + awaiting = self.loop.create_future() + self._await_new_connections.append(awaiting) + if timeout == DBUS.TIMEOUT_INFINITE : + timeout = None + else : + if timeout == DBUS.TIMEOUT_USE_DEFAULT : + timeout = DBUSX.DEFAULT_TIMEOUT + #end if + #end if + await self.wait \ + ( + (awaiting,), + timeout = timeout + ) + # ignore done & pending results because they + # don’t match up with future I’m waiting for + if awaiting.done() : + result = awaiting.result() + else : + self._await_new_connections.pop(self._await_new_connections.index(awaiting)) + result = None + #end if + #end if + #end if + if result != None and self.autoattach_new_connections : + result.attach_asyncio(self.loop) + #end if + return \ + result + #end await_new_connection + + def iter_connections_async(self, stop_on = None, timeout = DBUS.TIMEOUT_INFINITE) : + "wrapper around await_new_connection() to allow use with an async-for" \ + " statement. Lets you write\n" \ + "\n" \ + " async for conn in «server».iter_connections_async(«timeout») :" \ + " «accept conn»\n" \ + " #end for\n" \ + "\n" \ + "to receive and process incoming connections in a loop. stop_on is an optional set of" \ + " STOP_ON.xxx values indicating the conditions under which the iterator will" \ + " raise StopAsyncIteration to terminate the loop." 
+ assert self.loop != None, "no event loop to attach coroutine to" + if stop_on == None : + stop_on = frozenset() + elif ( + not isinstance(stop_on, (set, frozenset)) + or + not all(isinstance(elt, STOP_ON) for elt in stop_on) + ) : + raise TypeError("stop_on must be None or set of STOP_ON") + #end if + return \ + _SrvAiter(self, stop_on, timeout) + #end iter_connections_async + +#end Server + +class _SrvAiter : + # internal class for use by Server.iter_connections_async (above). + + def __init__(self, srv, stop_on, timeout) : + self.srv = srv + self.stop_on = stop_on + self.timeout = timeout + #end __init__ + + def __aiter__(self) : + # I’m my own iterator. + return \ + self + #end __aiter__ + + async def __anext__(self) : + stop_iter = False + try : + result = await self.srv.await_new_connection(self.timeout) + if result == None and STOP_ON.TIMEOUT in self.stop_on : + stop_iter = True + #end if + except BrokenPipeError : + if STOP_ON.CLOSED not in self.stop_on : + raise + #end if + stop_iter = True + #end try + if stop_iter : + raise StopAsyncIteration("Server.iter_connections_async terminating") + #end if + return \ + result + #end __anext__ + +#end _SrvAiter + +class PreallocatedSend : + "wrapper around a DBusPreallocatedSend object. Do not instantiate directly;" \ + " get from Connection.preallocate_send method." 
+ # + + __slots__ = ("__weakref__", "_dbobj", "_w_parent", "_sent") # to forestall typos + + _instances = WeakValueDictionary() + + def __new__(celf, _dbobj, _parent) : + self = celf._instances.get(_dbobj) + if self == None : + self = super().__new__(celf) + self._dbobj = _dbobj + self._w_parent = weak_ref(_parent) + self._sent = False + celf._instances[_dbobj] = self + else : + assert self._w_parent() == _parent + #end if + return \ + self + #end __new__ + + def __del__(self) : + if self._dbobj != None : + parent = self._w_parent() + if parent != None and not self._sent : + dbus.dbus_connection_free_preallocated_send(parent._dbobj, self._dbobj) + #end if + self._dbobj = None + #end if + #end __del__ + + def send(self, message) : + "alternative to Connection.send_preallocated." + if not isinstance(message, Message) : + raise TypeError("message must be a Message") + #end if + assert not self._sent, "preallocated has already been sent" + parent = self._w_parent() + assert parent != None, "parent Connection has gone away" + serial = ct.c_uint() + dbus.dbus_connection_send_preallocated(parent._dbobj, self._dbobj, message._dbobj, ct.byref(serial)) + self._sent = True + return \ + serial.value + #end send + +#end PreallocatedSend + +class Message : + "wrapper around a DBusMessage object. Do not instantiate directly; use one of the" \ + " new_xxx or copy methods, or Connection.pop_message or Connection.borrow_message." 
+ # + + __slots__ = ("__weakref__", "_dbobj", "_conn", "_borrowed") # to forestall typos + + _instances = WeakValueDictionary() + + def __new__(celf, _dbobj) : + self = celf._instances.get(_dbobj) + if self == None : + self = super().__new__(celf) + self._dbobj = _dbobj + self._conn = None + self._borrowed = False + celf._instances[_dbobj] = self + else : + dbus.dbus_message_unref(self._dbobj) + # lose extra reference created by caller + #end if + return \ + self + #end __new__ + + def __del__(self) : + if self._dbobj != None : + assert not self._borrowed, "trying to dispose of borrowed message" + dbus.dbus_message_unref(self._dbobj) + self._dbobj = None + #end if + #end __del__ + + @classmethod + def new(celf, type) : + "type is one of the DBUS.MESSAGE_TYPE_xxx codes. Using one of the type-specific" \ + " calls--new_error, new_method_call, new_method_return, new_signal--is probably" \ + " more convenient." + result = dbus.dbus_message_new(type) + if result == None : + raise CallFailed("dbus_message_new") + #end if + return \ + celf(result) + #end new + + def new_error(self, name, message) : + "creates a new DBUS.MESSAGE_TYPE_ERROR message that is a reply to this Message." + result = dbus.dbus_message_new_error(self._dbobj, name.encode(), (lambda : None, lambda : message.encode())[message != None]()) + if result == None : + raise CallFailed("dbus_message_new_error") + #end if + return \ + type(self)(result) + #end new_error + + # probably not much point trying to use new_error_printf + + @classmethod + def new_method_call(celf, destination, path, iface, method) : + "creates a new DBUS.MESSAGE_TYPE_METHOD_CALL message." 
+ result = dbus.dbus_message_new_method_call \ + ( + (lambda : None, lambda : destination.encode())[destination != None](), + path.encode(), + (lambda : None, lambda : iface.encode())[iface != None](), + method.encode(), + ) + if result == None : + raise CallFailed("dbus_message_new_method_call") + #end if + return \ + celf(result) + #end new_method_call + + def new_method_return(self) : + "creates a new DBUS.MESSAGE_TYPE_METHOD_RETURN that is a reply to this Message." + result = dbus.dbus_message_new_method_return(self._dbobj) + if result == None : + raise CallFailed("dbus_message_new_method_return") + #end if + return \ + type(self)(result) + #end new_method_return + + @classmethod + def new_signal(celf, path, iface, name) : + "creates a new DBUS.MESSAGE_TYPE_SIGNAL message." + result = dbus.dbus_message_new_signal(path.encode(), iface.encode(), name.encode()) + if result == None : + raise CallFailed("dbus_message_new_signal") + #end if + return \ + celf(result) + #end new_signal + + def copy(self) : + "creates a copy of this Message." + result = dbus.dbus_message_copy(self._dbobj) + if result == None : + raise CallFailed("dbus_message_copy") + #end if + return \ + type(self)(result) + #end copy + + @property + def type(self) : + "returns the DBUS.MESSAGE_TYPE_XXX code for this Message." + return \ + dbus.dbus_message_get_type(self._dbobj) + #end type + + # NYI append_args, get_args -- probably not useful, use my + # objects and append_objects convenience methods (below) instead + + class ExtractIter : + "for iterating over the arguments in a Message for reading. Do not" \ + " instantiate directly; get from Message.iter_init or ExtractIter.recurse.\n" \ + "\n" \ + "You can use this as a Python iterator, in a for-loop, passing" \ + " it to the next() built-in function etc. Do not mix such usage with calls to" \ + " the has_next() and next() methods." 
+ + __slots__ = ("_dbobj", "_parent", "_nulliter", "_startiter") # to forestall typos + + def __init__(self, _parent) : + self._dbobj = DBUS.MessageIter() + self._parent = _parent + self._nulliter = False + self._startiter = True + #end __init__ + + @property + def has_next(self) : + return \ + dbus.dbus_message_iter_has_next(self._dbobj) + #end has_next + + def next(self) : + if self._nulliter or not dbus.dbus_message_iter_next(self._dbobj) : + raise StopIteration("end of message iterator") + #end if + self._startiter = False + return \ + self + #end next + + def __iter__(self) : + return \ + self + #end __iter__ + + def __next__(self) : + if self._nulliter : + raise StopIteration("empty message iterator") + else : + if self._startiter : + self._startiter = False + else : + self.next() + #end if + #end if + return \ + self + #end __next__ + + @property + def arg_type(self) : + "the type code for this argument." + return \ + dbus.dbus_message_iter_get_arg_type(self._dbobj) + #end arg_type + + @property + def element_type(self) : + "the contained element type of this argument, assuming it is of a container type." + return \ + dbus.dbus_message_iter_get_element_type(self._dbobj) + #end element_type + + def recurse(self) : + "creates a sub-iterator for recursing into a container argument." + subiter = type(self)(self) + dbus.dbus_message_iter_recurse(self._dbobj, subiter._dbobj) + return \ + subiter + #end recurse + + @property + def signature(self) : + c_result = dbus.dbus_message_iter_get_signature(self._dbobj) + if c_result == None : + raise CallFailed("dbus_message_iter_get_signature") + #end if + result = ct.cast(c_result, ct.c_char_p).value.decode() + dbus.dbus_free(c_result) + return \ + result + #end signature + + @property + def basic(self) : + "returns the argument value, assuming it is of a non-container type." 
+ argtype = self.arg_type + c_result_type = DBUS.basic_to_ctypes[argtype] + c_result = c_result_type() + dbus.dbus_message_iter_get_basic(self._dbobj, ct.byref(c_result)) + if c_result_type == ct.c_char_p : + result = c_result.value.decode() + else : + result = c_result.value + #end if + if argtype in DBUS.basic_subclasses : + result = DBUS.basic_subclasses[argtype](result) + #end if + return \ + result + #end basic + + @property + def object(self) : + "returns the current iterator item as a Python object. Will recursively" \ + " process container objects." + argtype = self.arg_type + if argtype in DBUS.basic_to_ctypes : + result = self.basic + elif argtype == DBUS.TYPE_ARRAY : + if self.element_type == DBUS.TYPE_DICT_ENTRY : + result = {} + subiter = self.recurse() + while True : + entry = next(subiter, None) + if entry == None or entry.arg_type == DBUS.TYPE_INVALID : + # TYPE_INVALID can be returned for an empty dict + break + if entry.arg_type != DBUS.TYPE_DICT_ENTRY : + raise RuntimeError("invalid dict entry type %d" % entry.arg_type) + #end if + key, value = tuple(x.object for x in entry.recurse()) + result[key] = value + #end while + elif type_is_fixed_array_elttype(self.element_type) : + result = self.fixed_array + else : + result = list(x.object for x in self.recurse()) + if len(result) != 0 and result[-1] == None : + # fudge for iterating into an empty array + result = result[:-1] + #end if + #end if + elif argtype == DBUS.TYPE_STRUCT : + result = list(x.object for x in self.recurse()) + elif argtype == DBUS.TYPE_VARIANT : + subiter = self.recurse() + subiter = next(subiter) + result = (DBUS.Signature(subiter.signature), subiter.object) + elif argtype == DBUS.TYPE_INVALID : + # fudge for iterating into an empty array + result = None + else : + raise RuntimeError("unrecognized argtype %d" % argtype) + #end if + return \ + result + #end object + + if hasattr(dbus, "dbus_message_iter_get_element_count") : + + @property + def element_count(self) : + "returns 
the count of contained elements, assuming the current argument" \ + " is of a container type." + return \ + dbus.dbus_message_iter_get_element_count(self._dbobj) + #end element_count + + #end if + + @property + def fixed_array(self) : + "returns the array elements, assuming the current argument is an array" \ + " with a non-container element type." + c_element_type = DBUS.basic_to_ctypes[self.element_type] + c_result = ct.POINTER(c_element_type)() + c_nr_elts = ct.c_int() + subiter = self.recurse() + dbus.dbus_message_iter_get_fixed_array(subiter._dbobj, ct.byref(c_result), ct.byref(c_nr_elts)) + result = [] + for i in range(c_nr_elts.value) : + elt = c_result[i] + if c_element_type == ct.c_char_p : + elt = elt.value.decode() + #end if + result.append(elt) + #end for + return \ + result + #end fixed_array + + #end ExtractIter + + class AppendIter : + "for iterating over the arguments in a Message for appending." \ + " Do not instantiate directly; get from Message.iter_init_append or" \ + " AppendIter.open_container." + + __slots__ = ("_dbobj", "_parent") # to forestall typos + + def __init__(self, _parent) : + self._dbobj = DBUS.MessageIter() + self._parent = _parent + #end __init__ + + def append_basic(self, type, value) : + "appends a single value of a non-container type." + if type in DBUS.int_convert : + value = DBUS.int_convert[type](value) + #end if + c_type = DBUS.basic_to_ctypes[type] + if c_type == ct.c_char_p : + if not isinstance(value, str) : + raise TypeError \ + ( + "expecting type %s, got %s" % (TYPE(type), builtins.type(value).__name__) + ) + #end if + value = value.encode() + #end if + c_value = c_type(value) + if not dbus.dbus_message_iter_append_basic(self._dbobj, type, ct.byref(c_value)) : + raise CallFailed("dbus_message_iter_append_basic") + #end if + return \ + self + #end append_basic + + def append_fixed_array(self, element_type, values) : + "appends an array of elements of a non-container type." 
+ c_elt_type = DBUS.basic_to_ctypes[element_type] + nr_elts = len(values) + c_arr = (nr_elts * c_elt_type)() + for i in range(nr_elts) : + if c_elt_type == ct.c_char_p : + c_arr[i] = values[i].encode() + else : + c_arr[i] = values[i] + #end if + #end for + c_arr_ptr = ct.pointer(c_arr) + if not dbus.dbus_message_iter_append_fixed_array(self._dbobj, element_type, ct.byref(c_arr_ptr), nr_elts) : + raise CallFailed("dbus_message_iter_append_fixed_array") + #end if + return \ + self + #end append_fixed_array + + def open_container(self, type, contained_signature) : + "starts appending an argument of a container type, returning a sub-iterator" \ + " for appending the contents of the argument. Can be called recursively for" \ + " containers of containers etc." + if contained_signature != None : + c_sig = contained_signature.encode() + else : + c_sig = None + #end if + subiter = builtins.type(self)(self) + if not dbus.dbus_message_iter_open_container(self._dbobj, type, c_sig, subiter._dbobj) : + raise CallFailed("dbus_message_iter_open_container") + #end if + return \ + subiter + #end open_container + + def close(self) : + "closes a sub-iterator, indicating the completion of construction" \ + " of a container value." + assert self._parent != None, "cannot close top-level iterator" + if not dbus.dbus_message_iter_close_container(self._parent._dbobj, self._dbobj) : + raise CallFailed("dbus_message_iter_close_container") + #end if + return \ + self._parent + #end close + + def abandon(self) : + "closes a sub-iterator, indicating the abandonment of construction" \ + " of a container value. The Message object is effectively unusable" \ + " after this point and should be discarded." + assert self._parent != None, "cannot abandon top-level iterator" + dbus.dbus_message_iter_abandon_container(self._parent._dbobj, self._dbobj) + return \ + self._parent + #end abandon + + #end AppendIter + + def iter_init(self) : + "creates an iterator for extracting the arguments of the Message." 
+ iter = self.ExtractIter(None) + if dbus.dbus_message_iter_init(self._dbobj, iter._dbobj) == 0 : + iter._nulliter = True + #end if + return \ + iter + #end iter_init + + @property + def objects(self) : + "yields the arguments of the Message as Python objects." + for iter in self.iter_init() : + yield iter.object + #end for + #end objects + + @property + def all_objects(self) : + "all the arguments of the Message as a list of Python objects." + return \ + list(self.objects) + #end all_objects + + def expect_objects(self, signature) : + "expects the arguments of the Message to conform to the given signature," \ + " raising a TypeError if not. If they match, returns them as a list." + signature = unparse_signature(signature) + if self.signature != signature : + raise TypeError("message args don’t match: expected “%s”, got “%s”" % (signature, self.signature)) + #end if + return \ + self.all_objects + #end expect_objects + + def expect_return_objects(self, signature) : + "expects the Message to be of type DBUS.MESSAGE_TYPE_METHOD_RETURN and its" \ + " arguments to conform to the given signature. Raises the appropriate DBusError" \ + " if the Message is of type DBUS.MESSAGE_TYPE_ERROR." + if self.type == DBUS.MESSAGE_TYPE_METHOD_RETURN : + result = self.expect_objects(signature) + elif self.type == DBUS.MESSAGE_TYPE_ERROR : + raise DBusError(self.error_name, self.expect_objects("s")[0]) + else : + raise ValueError("unexpected message type %d" % self.type) + #end if + return \ + result + #end expect_return_objects + + def iter_init_append(self) : + "creates a Message.AppendIter for appending arguments to the Message." + iter = self.AppendIter(None) + dbus.dbus_message_iter_init_append(self._dbobj, iter._dbobj) + return \ + iter + #end iter_init_append + + def append_objects(self, signature, *args) : + "interprets Python values args according to signature and appends" \ + " converted item(s) to the message args." 
+ + def append_sub(siglist, eltlist, appenditer) : + if len(siglist) != len(eltlist) : + raise ValueError \ + ( + "mismatch between signature entries %s and number of sequence elements %s" + % + (repr(siglist), repr(eltlist)) + ) + #end if + for elttype, elt in zip(siglist, eltlist) : + if isinstance(elttype, BasicType) : + appenditer.append_basic(elttype.code.value, elt) + elif isinstance(elttype, DictType) : + if not isinstance(elt, dict) : + raise TypeError("dict expected for %s" % repr(elttype)) + #end if + subiter = appenditer.open_container(DBUS.TYPE_ARRAY, elttype.entry_signature) + for key in sorted(elt) : # might as well insert in some kind of predictable order + value = elt[key] + subsubiter = subiter.open_container(DBUS.TYPE_DICT_ENTRY, None) + append_sub([elttype.keytype, elttype.valuetype], [key, value], subsubiter) + subsubiter.close() + #end for + subiter.close() + elif isinstance(elttype, ArrayType) : + # append 0 or more elements matching elttype.elttype + arrelttype = elttype.elttype + if type_is_fixed_array_elttype(arrelttype.code.value) : + subiter = appenditer.open_container(DBUS.TYPE_ARRAY, arrelttype.signature) + subiter.append_fixed_array(arrelttype.code.value, elt) + subiter.close() + else : + subiter = appenditer.open_container(DBUS.TYPE_ARRAY, arrelttype.signature) + if not isinstance(elt, (tuple, list)) : + raise TypeError("expecting sequence of values for array") + #end if + for subval in elt : + append_sub([arrelttype], [subval], subiter) + #end for + subiter.close() + #end if + elif isinstance(elttype, StructType) : + if not isinstance(elt, (tuple, list)) : + raise TypeError("expecting sequence of values for struct") + #end if + subiter = appenditer.open_container(DBUS.TYPE_STRUCT, None) + append_sub(elttype.elttypes, elt, subiter) + subiter.close() + elif isinstance(elttype, VariantType) : + if not isinstance(elt, (list, tuple)) or len(elt) != 2 : + raise TypeError("sequence of 2 elements expected for variant: %s" % repr(elt)) + #end 
if + actual_type = parse_single_signature(elt[0]) + subiter = appenditer.open_container(DBUS.TYPE_VARIANT, actual_type.signature) + append_sub([actual_type], [elt[1]], subiter) + subiter.close() + else : + raise RuntimeError("unrecognized type %s" % repr(elttype)) + #end if + #end for + #end append_sub + + #begin append_objects + append_sub(parse_signature(signature), args, self.iter_init_append()) + return \ + self + #end append_objects + + @property + def no_reply(self) : + "whether the Message is not expecting a reply." + return \ + dbus.dbus_message_get_no_reply(self._dbobj) != 0 + #end no_reply + + @no_reply.setter + def no_reply(self, no_reply) : + dbus.dbus_message_set_no_reply(self._dbobj, no_reply) + #end no_reply + + @property + def auto_start(self) : + return \ + dbus.dbus_message_get_auto_start(self._dbobj) != 0 + #end auto_start + + @auto_start.setter + def auto_start(self, auto_start) : + dbus.dbus_message_set_auto_start(self._dbobj, auto_start) + #end auto_start + + @property + def path(self) : + "the object path for a DBUS.MESSAGE_TYPE_METHOD_CALL or DBUS.DBUS.MESSAGE_TYPE_SIGNAL" \ + " message." + result = dbus.dbus_message_get_path(self._dbobj) + if result != None : + result = DBUS.ObjectPath(result.decode()) + #end if + return \ + result + #end path + + @path.setter + def path(self, object_path) : + if not dbus.dbus_message_set_path(self._dbobj, (lambda : None, lambda : object_path.encode())[object_path != None]()) : + raise CallFailed("dbus_message_set_path") + #end if + #end path + + @property + def path_decomposed(self) : + "the object path for a DBUS.MESSAGE_TYPE_METHOD_CALL or DBUS.DBUS.MESSAGE_TYPE_SIGNAL" \ + " message, decomposed into a list of the slash-separated components without the slashes." 
+ path = ct.POINTER(ct.c_char_p)() + if not dbus.dbus_message_get_path_decomposed(self._dbobj, ct.byref(path)) : + raise CallFailed("dbus_message_get_path_decomposed") + #end if + if bool(path) : + result = [] + i = 0 + while True : + entry = path[i] + if entry == None : + break + result.append(entry.decode()) + i += 1 + #end while + dbus.dbus_free_string_array(path) + else : + result = None + #end if + return \ + result + #end path_decomposed + + @property + def interface(self) : + "the interface name for a DBUS.MESSAGE_TYPE_METHOD_CALL or DBUS.MESSAGE_TYPE_SIGNAL" \ + " message." + result = dbus.dbus_message_get_interface(self._dbobj) + if result != None : + result = result.decode() + #end if + return \ + result + #end interface + + @interface.setter + def interface(self, iface) : + if not dbus.dbus_message_set_interface(self._dbobj, (lambda : None, lambda : iface.encode())[iface != None]()) : + raise CallFailed("dbus_message_set_interface") + #end if + #end interface + + def has_interface(self, iface) : + return \ + dbus.dbus_message_has_interface(self._dbobj, iface.encode()) != 0 + #end has_interface + + @property + def member(self) : + "the method name for a DBUS.MESSAGE_TYPE_METHOD_CALL message or the signal" \ + " name for DBUS.MESSAGE_TYPE_SIGNAL." + result = dbus.dbus_message_get_member(self._dbobj) + if result != None : + result = result.decode() + #end if + return \ + result + #end member + + @member.setter + def member(self, member) : + if not dbus.dbus_message_set_member(self._dbobj, (lambda : None, lambda : member.encode())[member != None]()) : + raise CallFailed("dbus_message_set_member") + #end if + #end member + + def has_member(self, member) : + return \ + dbus.dbus_message_has_member(self._dbobj, member.encode()) != 0 + #end has_member + + @property + def error_name(self) : + "the error name for a DBUS.MESSAGE_TYPE_ERROR message." 
+ result = dbus.dbus_message_get_error_name(self._dbobj) + if result != None : + result = result.decode() + #end if + return \ + result + #end error_name + + @error_name.setter + def error_name(self, error_name) : + if not dbus.dbus_message_set_error_name(self._dbobj, (lambda : None, lambda : error_name.encode())[error_name != None]()) : + raise CallFailed("dbus_message_set_error_name") + #end if + #end error_name + + @property + def destination(self) : + "the bus name that the message is to be sent to." + result = dbus.dbus_message_get_destination(self._dbobj) + if result != None : + result = result.decode() + #end if + return \ + result + #end destination + + @destination.setter + def destination(self, destination) : + if not dbus.dbus_message_set_destination(self._dbobj, (lambda : None, lambda : destination.encode())[destination != None]()) : + raise CallFailed("dbus_message_set_destination") + #end if + #end destination + + @property + def sender(self) : + result = dbus.dbus_message_get_sender(self._dbobj) + if result != None : + result = result.decode() + #end if + return \ + result + #end sender + + @sender.setter + def sender(self, sender) : + if not dbus.dbus_message_set_sender(self._dbobj, (lambda : None, lambda : sender.encode())[sender != None]()) : + raise CallFailed("dbus_message_set_sender") + #end if + #end sender + + @property + def signature(self) : + result = dbus.dbus_message_get_signature(self._dbobj) + if result != None : + result = DBUS.Signature(result.decode()) + #end if + return \ + result + #end signature + + def is_method_call(self, iface, method) : + return \ + dbus.dbus_message_is_method_call(self._dbobj, iface.encode(), method.encode()) != 0 + #end is_method_call + + def is_signal(self, iface, signal_name) : + return \ + dbus.dbus_message_is_signal(self._dbobj, iface.encode(), signal_name.encode()) != 0 + #end is_signal + + def is_error(self, iface, error_name) : + return \ + dbus.dbus_message_is_error(self._dbobj, error_name.encode()) 
!= 0 + #end is_error + + def has_destination(self, iface, destination) : + return \ + dbus.dbus_message_has_destination(self._dbobj, destination.encode()) != 0 + #end has_destination + + def has_sender(self, iface, sender) : + return \ + dbus.dbus_message_has_sender(self._dbobj, sender.encode()) != 0 + #end has_sender + + def has_signature(self, iface, signature) : + return \ + dbus.dbus_message_has_signature(self._dbobj, signature.encode()) != 0 + #end has_signature + + def set_error(self, error) : + "fills in error if this is an error message, else does nothing. Returns" \ + " whether it was an error message or not." + if not isinstance(error, Error) : + raise TypeError("error must be an Error") + #end if + return \ + dbus.dbus_set_error_from_message(error._dbobj, self._dbobj) != 0 + #end set_error + + @property + def contains_unix_fds(self) : + return \ + dbus.dbus_message_contains_unix_fds(self._dbobj) != 0 + #end contains_unix_fds + + @property + def serial(self) : + "the serial number of the Message, to be referenced in replies." + return \ + dbus.dbus_message_get_serial(self._dbobj) + #end serial + + @serial.setter + def serial(self, serial) : + dbus.dbus_message_set_serial(self._dbobj, serial) + #end serial + + @property + def reply_serial(self) : + "the serial number of the original Message that that this" \ + " DBUS.MESSAGE_TYPE_METHOD_RETURN message is a reply to." 
+ return \ + dbus.dbus_message_get_reply_serial(self._dbobj) + #end reply_serial + + @reply_serial.setter + def reply_serial(self, serial) : + if not dbus.dbus_message_set_reply_serial(self._dbobj, serial) : + raise CallFailed("dbus_message_set_reply_serial") + #end if + #end serial + + def lock(self) : + dbus.dbus_message_lock(self._dbobj) + #end lock + + def return_borrowed(self) : + assert self._borrowed and self._conn != None + dbus.dbus_connection_return_message(self._conn._dbobj, self._dbobj) + self._borrowed = False + #end return_borrowed + + def steal_borrowed(self) : + assert self._borrowed and self._conn != None + dbus.dbus_connection_steal_borrowed_message(self._conn._dbobj, self._dbobj) + self._borrowed = False + return \ + self + #end steal_borrowed + + # TODO: allocate/free data slot -- static methods + # (freeing slot can set passed-in var to -1 on actual free; do I care?) + # TODO: set/get data + + @staticmethod + def type_from_string(type_str) : + "returns a MESSAGE_TYPE_xxx value." + return \ + dbus.dbus_message_type_from_string(type_str.encode()) + #end type_from_string + + @staticmethod + def type_to_string(type) : + "type is a MESSAGE_TYPE_xxx value." + return \ + dbus.dbus_message_type_to_string(type).decode() + #end type_to_string + + def marshal(self) : + "serializes this Message into the wire protocol format and returns a bytes object." + buf = ct.POINTER(ct.c_ubyte)() + nr_bytes = ct.c_int() + if not dbus.dbus_message_marshal(self._dbobj, ct.byref(buf), ct.byref(nr_bytes)) : + raise CallFailed("dbus_message_marshal") + #end if + result = bytearray(nr_bytes.value) + ct.memmove \ + ( + ct.addressof((ct.c_ubyte * nr_bytes.value).from_buffer(result)), + buf, + nr_bytes.value + ) + dbus.dbus_free(buf) + return \ + result + #end marshal + + @classmethod + def demarshal(celf, buf, error = None) : + "deserializes a bytes or array-of-bytes object from the wire protocol" \ + " format into a Message object." 
+ error, my_error = _get_error(error) + if isinstance(buf, bytes) : + baseadr = ct.cast(buf, ct.c_void_p).value + elif isinstance(buf, bytearray) : + baseadr = ct.addressof((ct.c_ubyte * len(buf)).from_buffer(buf)) + elif isinstance(buf, array.array) and buf.typecode == "B" : + baseadr = buf.buffer_info()[0] + else : + raise TypeError("buf is not bytes, bytearray or array.array of bytes") + #end if + msg = dbus.dbus_message_demarshal(baseadr, len(buf), error._dbobj) + my_error.raise_if_set() + if msg != None : + msg = celf(msg) + #end if + return \ + msg + #end demarshal + + @classmethod + def demarshal_bytes_needed(celf, buf) : + "the number of bytes needed to deserialize a bytes or array-of-bytes" \ + " object from the wire protocol format." + if isinstance(buf, bytes) : + baseadr = ct.cast(buf, ct.c_void_p).value + elif isinstance(buf, bytearray) : + baseadr = ct.addressof((ct.c_ubyte * len(buf)).from_buffer(buf)) + elif isinstance(buf, array.array) and buf.typecode == "B" : + baseadr = buf.buffer_info()[0] + else : + raise TypeError("buf is not bytes, bytearray or array.array of bytes") + #end if + return \ + dbus.dbus_message_demarshal_bytes_needed(baseadr, len(buf)) + #end demarshal_bytes_needed + + @property + def interactive_authorization(self) : + return \ + dbus.dbus_message_get_interactive_authorization(self._dbobj) + #end interactive_authorization + + @interactive_authorization.setter + def interactive_authorization(self, allow) : + dbus.dbus_message_set_interactive_authorization(self._dbobj, allow) + #end interactive_authorization + +#end Message + +class PendingCall : + "wrapper around a DBusPendingCall object. This represents a pending reply" \ + " message that hasn’t been received yet. Do not instantiate directly; libdbus" \ + " creates these as the result from calling send_with_reply() on a Message." 
+ # + + __slots__ = \ + ( + "__weakref__", + "_dbobj", + "_w_conn", + "_wrap_notify", + "_wrap_free", + "_awaiting", + ) # to forestall typos + + _instances = WeakValueDictionary() + + def __new__(celf, _dbobj, _conn) : + self = celf._instances.get(_dbobj) + if self == None : + self = super().__new__(celf) + self._dbobj = _dbobj + self._w_conn = weak_ref(_conn) + self._wrap_notify = None + self._wrap_free = None + self._awaiting = None + celf._instances[_dbobj] = self + else : + dbus.dbus_pending_call_unref(self._dbobj) + # lose extra reference created by caller + #end if + return \ + self + #end __new__ + + def __del__(self) : + if self._dbobj != None : + dbus.dbus_pending_call_unref(self._dbobj) + self._dbobj = None + #end if + #end __del__ + + def set_notify(self, function, user_data, free_user_data = None) : + "sets the callback for libdbus to notify you that the pending message" \ + " has become available. Note: it appears to be possible for your notifier" \ + " to be called spuriously before the message is actually available." + + w_self = weak_ref(self) + + def wrap_notify(c_pending, c_user_data) : + function(_wderef(w_self, "pending call"), user_data) + #end _wrap_notify + + def wrap_free(c_user_data) : + free_user_data(user_data) + #end _wrap_free + + #begin set_notify + if function != None : + self._wrap_notify = DBUS.PendingCallNotifyFunction(wrap_notify) + else : + self._wrap_notify = None + #end if + if free_user_data != None : + self._wrap_free = DBUS.FreeFunction(wrap_free) + else : + self._wrap_free = None + #end if + if not dbus.dbus_pending_call_set_notify(self._dbobj, self._wrap_notify, None, self._wrap_free) : + raise CallFailed("dbus_pending_call_set_notify") + #end if + #end set_notify + + def cancel(self) : + "tells libdbus you no longer care about the pending incoming message." + dbus.dbus_pending_call_cancel(self._dbobj) + if self._awaiting != None : + # This probably shouldn’t occur. 
Looking at the source of libdbus, + # it doesn’t keep track of any “cancelled” state for the PendingCall, + # it just detaches it from any notifications about an incoming reply. + self._awaiting.cancel() + #end if + #end cancel + + @property + def completed(self) : + "checks whether the pending message is available." + return \ + dbus.dbus_pending_call_get_completed(self._dbobj) != 0 + #end completed + + def steal_reply(self) : + "retrieves the Message, assuming it is actually available." \ + " You should check PendingCall.completed returns True first." + result = dbus.dbus_pending_call_steal_reply(self._dbobj) + if result != None : + result = Message(result) + #end if + return \ + result + #end steal_reply + + async def await_reply(self) : + "retrieves the Message. If it is not yet available, suspends the" \ + " coroutine (letting the event loop do other things) until it becomes" \ + " available. On a timeout, libdbus will construct and return an error" \ + " return message." + conn = self._w_conn() + assert conn != None, "parent Connection has gone away" + assert conn.loop != None, "no event loop on parent Connection to attach coroutine to" + if self._wrap_notify != None or self._awaiting != None : + raise asyncio.InvalidStateError("there is already a notify set on this PendingCall") + #end if + done = conn.loop.create_future() + self._awaiting = done + + def pending_done(pending, wself) : + if not done.done() : # just in case of self.cancel() being called + self = wself() + # Note it seems to be possible for callback to be triggered spuriously + if self != None and self.completed : + done.set_result(self.steal_reply()) + #end if + #end if + #end pending_done + + self.set_notify(pending_done, weak_ref(self)) + # avoid reference circularity self → pending_done → self + reply = await done + return \ + reply + #end await_reply + + def block(self) : + "blocks the current thread until the pending message has become available." 
+ dbus.dbus_pending_call_block(self._dbobj) + #end block + + # TODO: data slots (static methods), get/set data + +#end PendingCall + +class Error : + "wrapper around a DBusError object. You can create one by calling the init method." + # + + __slots__ = ("_dbobj",) # to forestall typos + + def __init__(self) : + dbobj = DBUS.Error() + dbus.dbus_error_init(dbobj) + self._dbobj = dbobj + #end __init__ + + def __del__(self) : + if self._dbobj != None : + dbus.dbus_error_free(self._dbobj) + self._dbobj = None + #end if + #end __del__ + + @classmethod + def init(celf) : + "for consistency with other classes that don’t want caller to instantiate directly." + return \ + celf() + #end init + + def set(self, name, msg) : + "fills in the error name and message." + dbus.dbus_set_error(self._dbobj, name.encode(), b"%s", msg.encode()) + #end set + + @property + def is_set(self) : + "has the Error been filled in." + return \ + dbus.dbus_error_is_set(self._dbobj) != 0 + #end is_set + + def has_name(self, name) : + "has the Error got the specified name." + return \ + dbus.dbus_error_has_name(self._dbobj, name.encode()) != 0 + #end has_name + + @property + def name(self) : + "the name of the Error, if it has been filled in." + return \ + (lambda : None, lambda : self._dbobj.name.decode())[self._dbobj.name != None]() + #end name + + @property + def message(self) : + "the message string for the Error, if it has been filled in." + return \ + (lambda : None, lambda : self._dbobj.message.decode())[self._dbobj.message != None]() + #end message + + def raise_if_set(self) : + "raises a DBusError exception if this Error has been filled in." + if self.is_set : + raise DBusError(self.name, self.message) + #end if + #end raise_if_set + + def set_from_message(self, message) : + "fills in this Error object from message if it is an error message." \ + " Returns whether it was or not." 
+ if not isinstance(message, Message) : + raise TypeError("message must be a Message") + #end if + return \ + dbus.dbus_set_error_from_message(self._dbobj, message._dbobj) != 0 + #end set_from_message + +#end Error + +class AddressEntries : + "wrapper for arrays of DBusAddressEntry values. Do not instantiate directly;" \ + " get from AddressEntries.parse. This object behaves like an array; you can obtain" \ + " the number of elements with len(), and use array subscripting to access the elements." + # + + __slots__ = ("__weakref__", "_dbobj", "_nrelts") # to forestall typos + + def __init__(self, _dbobj, _nrelts) : + self._dbobj = _dbobj + self._nrelts = _nrelts + #end __init__ + + def __del__(self) : + if self._dbobj != None : + dbus.dbus_address_entries_free(self._dbobj) + self._dbobj = None + #end if + #end __del__ + + class Entry : + "a single AddressEntry. Do not instantiate directly; get from AddressEntries[]." \ + " This object behaves like a dictionary in that you can use keys to get values;" \ + " however, there is no libdbus API to check what keys are present; unrecognized" \ + " keys return a value of None." 
+ + __slots__ = ("_dbobj", "_parent", "_index") # to forestall typos + + def __init__(self, _parent, _index) : + self._dbobj = _parent._dbobj + self._parent = weak_ref(_parent) + self._index = _index + #end __init__ + + @property + def method(self) : + assert self._parent() != None, "AddressEntries object has gone" + result = dbus.dbus_address_entry_get_method(self._dbobj[self._index]) + if result != None : + result = result.decode() + #end if + return \ + result + #end method + + def get_value(self, key) : + assert self._parent() != None, "AddressEntries object has gone" + c_result = dbus.dbus_address_entry_get_value(self._dbobj[self._index], key.encode()) + if c_result != None : + result = c_result.decode() + else : + result = None + #end if + return \ + result + #end get_value + __getitem__ = get_value + + #end Entry + + @classmethod + def parse(celf, address, error = None) : + error, my_error = _get_error(error) + c_result = ct.POINTER(ct.c_void_p)() + nr_elts = ct.c_int() + if not dbus.dbus_parse_address(address.encode(), ct.byref(c_result), ct.byref(nr_elts), error._dbobj) : + c_result.contents = None + nr_elts.value = 0 + #end if + my_error.raise_if_set() + if c_result.contents != None : + result = celf(c_result, nr_elts.value) + else : + result = None + #end if + return \ + result + #end parse + + def __len__(self) : + return \ + self._nrelts + #end __len__ + + def __getitem__(self, index) : + if not isinstance(index, int) or index < 0 or index >= self._nrelts : + raise IndexError("AddressEntries[%d] out of range" % index) + #end if + return \ + type(self).Entry(self, index) + #end __getitem__ + +#end AddressEntries + +def address_escape_value(value) : + c_result = dbus.dbus_address_escape_value(value.encode()) + if c_result == None : + raise CallFailed("dbus_address_escape_value") + #end if + result = ct.cast(c_result, ct.c_char_p).value.decode() + dbus.dbus_free(c_result) + return \ + result +#end address_escape_value + +def address_unescape_value(value, 
error = None) : + error, my_error = _get_error(error) + c_result = dbus.dbus_address_unescape_value(value.encode(), error._dbobj) + my_error.raise_if_set() + if c_result != None : + result = ct.cast(c_result, ct.c_char_p).value.decode() + dbus.dbus_free(c_result) + elif not error.is_set : + raise CallFailed("dbus_address_unescape_value") + else : + result = None + #end if + return \ + result +#end address_unescape_value + +def format_rule(rule) : + "convenience routine to allow a match rule to be expressed as either" \ + " a dict of {key : value} or the usual string \"key='value'\", automatically" \ + " converting the former to the latter." + + def escape_val(val) : + if "," in val : + if "'" in val : + out = "'" + in_quotes = True + for ch in val : + if ch == "'" : + if in_quotes : + out += "'" + in_quotes = False + #end if + out += "\\'" + else : + if not in_quotes : + out += "'" + in_quotes = True + #end if + out += ch + #end if + #end for + if in_quotes : + out += "'" + #end if + else : + out = "'" + val + "'" + #end if + else : + out = "" + for ch in val : + if ch in ("\\", "'") : + out += "\\" + #end if + out += ch + #end for + #end if + return \ + out + #end escape_val + +#begin format_rule + if isinstance(rule, str) : + pass + elif isinstance(rule, dict) : + rule = ",".join("%s=%s" % (k, escape_val(rule[k])) for k in sorted(rule)) + # sort to ensure some kind of consistent ordering, just for + # appearance’s sake + else : + raise TypeError("rule “%s” must be a dict or string" % repr(rule)) + #end if + return \ + rule +#end format_rule + +class _RuleParser : + # internal definitions for rule parsing. + + class PARSE(enum.Enum) : + EXPECT_NAME = 1 + EXPECT_UNQUOTED_VALUE = 2 + EXPECT_ESCAPED = 3 + EXPECT_QUOTED_VALUE = 4 + #end PARSE + + @classmethod + def unformat_rule(celf, rule) : + "converts a match rule string from the standard syntax to a dict of {key : value} entries." 
+ if isinstance(rule, dict) : + pass + elif isinstance(rule, str) : + PARSE = celf.PARSE + parsed = {} + chars = iter(rule) + state = PARSE.EXPECT_NAME + curname = None + curval = None + while True : + ch = next(chars, None) + if ch == None : + if state == PARSE.EXPECT_ESCAPED : + raise SyntaxError("missing character after backslash") + elif state == PARSE.EXPECT_QUOTED_VALUE : + raise SyntaxError("missing closing apostrophe") + else : # state in (PARSE.EXPECT_NAME, PARSE.EXPECT_UNQUOTED_VALUE) + if curname != None : + if curval != None : + if curname in parsed : + raise SyntaxError("duplicated attribute “%s”" % curname) + #end if + parsed[curname] = curval + else : + raise SyntaxError("missing value for attribute “%s”" % curname) + #end if + #end if + #end if + break + #end if + if state == PARSE.EXPECT_ESCAPED : + if ch == "'" : + usech = ch + nextch = None + else : + usech = "\\" + nextch = ch + #end if + ch = usech + if curval == None : + curval = ch + else : + curval += ch + #end if + ch = nextch # None indicates already processed + state = PARSE.EXPECT_UNQUOTED_VALUE + #end if + if ch != None : + if ch == "," and state != PARSE.EXPECT_QUOTED_VALUE : + if state == PARSE.EXPECT_UNQUOTED_VALUE : + if curname in parsed : + raise SyntaxError("duplicated attribute “%s”" % curname) + #end if + if curval == None : + curval = "" + #end if + parsed[curname] = curval + curname = None + curval = None + state = PARSE.EXPECT_NAME + else : + raise SyntaxError("unexpected comma") + #end if + elif ch == "\\" and state != PARSE.EXPECT_QUOTED_VALUE : + if state == PARSE.EXPECT_UNQUOTED_VALUE : + state = PARSE.EXPECT_ESCAPED + else : + raise SyntaxError("unexpected backslash") + #end if + elif ch == "=" and state != PARSE.EXPECT_QUOTED_VALUE : + if curname == None : + raise SyntaxError("empty attribute name") + #end if + if state == PARSE.EXPECT_NAME : + state = PARSE.EXPECT_UNQUOTED_VALUE + else : + raise SyntaxError("unexpected equals sign") + #end if + elif ch == "'" : + if 
state == PARSE.EXPECT_UNQUOTED_VALUE : + state = PARSE.EXPECT_QUOTED_VALUE + elif state == PARSE.EXPECT_QUOTED_VALUE : + state = PARSE.EXPECT_UNQUOTED_VALUE + else : + raise SyntaxError("unexpected apostrophe") + #end if + else : + if state == PARSE.EXPECT_NAME : + if curname == None : + curname = ch + else : + curname += ch + #end if + elif state in (PARSE.EXPECT_QUOTED_VALUE, PARSE.EXPECT_UNQUOTED_VALUE) : + if curval == None : + curval = ch + else : + curval += ch + #end if + else : + raise AssertionError("shouldn’t occur: parse state %s" % repr(state)) + #end if + #end if + #end if + #end while + rule = parsed + else : + raise TypeError("rule “%s” must be a dict or string" % repr(rule)) + #end if + return \ + rule + #end unformat_rule + +#end _RuleParser + +unformat_rule = _RuleParser.unformat_rule +del _RuleParser + +def matches_rule(message, rule, destinations = None) : + "does Message message match against the specified rule." + if not isinstance(message, Message) : + raise TypeError("message must be a Message") + #end if + rule = unformat_rule(rule) + eavesdrop = rule.get("eavesdrop", "false") == "true" + + def match_message_type(expect, actual) : + return \ + actual == Message.type_from_string(expect) + #end match_message_type + + def match_path_namespace(expect, actual) : + return \ + ( + actual != None + and + ( + expect == actual + or + actual.startswith(expect) and (expect == "/" or actual[len(expect)] == "/") + ) + ) + #end match_path_namespace + + def match_dotted_namespace(expect, actual) : + return \ + ( + actual != None + and + ( + expect == actual + or + actual.startswith(expect) and actual[len(expect)] == "." 
+ ) + ) + #end match_dotted_namespace + + def get_nth_arg(msg, n, expect_types) : + msg_signature = parse_signature(msg.signature) + if n >= len(msg_signature) : + raise IndexError("arg nr %d beyond nr args %d" % (n, len(msg_signature))) + #end if + val = msg.all_objects[n] + valtype = msg_signature[n] + if valtype not in expect_types : + if False : + raise TypeError \ + ( + "expecting one of types %s, not %s for arg %d val %s" + % + ((repr(expect_types), repr(valtype), n, repr(val))) + ) + #end if + val = None # never match + #end if + return \ + val + #end get_nth_arg + + def get_arg_0_str(message) : + return \ + get_nth_arg(message, 0, [BasicType(TYPE.STRING)]) + #end get_arg_0_str + + def match_arg_paths(expect, actual) : + return \ + ( + actual != None + and + ( + expect == actual + or + expect.endswith("/") and actual.startswith(expect) + or + actual.endswith("/") and expect.startswith(actual) + ) + ) + #end match_arg_paths + + match_types = \ + ( # note that message attribute value of None will fail to match + # any expected string value, which is exactly what we want + ("type", None, match_message_type, None), + ("sender", None, operator.eq, None), + ("interface", None, operator.eq, None), + ("member", None, operator.eq, None), + ("path", None, operator.eq, None), + ("destination", None, operator.eq, None), + ("path_namespace", "path", match_path_namespace, None), + ("arg0namespace", None, match_dotted_namespace, get_arg_0_str), + # “arg«n»path” handled specially below + ) + +#begin matches_rule + keys_used = set(rule.keys()) - {"eavesdrop"} + matches = \ + ( + eavesdrop + or + destinations == None + or + message.destination == None + or + message.destination in destinations + ) + if matches : + try_matching = iter(match_types) + while True : + try_rule = next(try_matching, None) + if try_rule == None : + break + rulekey, attrname, action, accessor = try_rule + if attrname == None : + attrname = rulekey + #end if + if rulekey in rule : + if accessor != None 
: + val = accessor(message) + else : + val = getattr(message, attrname) + #end if + keys_used.remove(rulekey) + if not action(rule[rulekey], val) : + matches = False + break + #end if + #end if + #end while + #end if + if matches : + try_matching = iter(rule.keys()) + while True : + try_key = next(try_matching, None) + if try_key == None : + break + if try_key.startswith("arg") and not try_key.endswith("namespace") : + argnr = try_key[3:] + is_path = argnr.endswith("path") + if is_path : + argnr = argnr[:-4] + #end if + argnr = int(argnr) + if not (0 <= argnr < 64) : + raise ValueError("argnr %d out of range" % argnr) + #end if + argval = get_nth_arg \ + ( + message, + argnr, + [BasicType(TYPE.STRING)] + ([], [BasicType(TYPE.OBJECT_PATH)])[is_path] + ) + keys_used.remove(try_key) + if not (operator.eq, match_arg_paths)[is_path](rule[try_key], argval) : + matches = False + break + #end if + #end if + #end while + #end if + if matches and len(keys_used) != 0 : + # fixme: not checking for unrecognized rule keys if I didn’t try matching them all + raise KeyError("unrecognized rule keywords: %s" % ", ".join(sorted(keys_used))) + #end if + return \ + matches +#end matches_rule + +class SignatureIter : + "wraps a DBusSignatureIter object. Do not instantiate directly; use the init" \ + " and recurse methods." 
    # <https://dbus.freedesktop.org/doc/api/html/group__DBusSignature.html>

    __slots__ = ("_dbobj", "_signature", "_startiter") # to forestall typos

    @classmethod
    def init(celf, signature) :
        "constructs a SignatureIter positioned at the start of the given signature string."
        self = celf()
        self._signature = ct.c_char_p(signature.encode()) # need to ensure storage stays valid
        dbus.dbus_signature_iter_init(self._dbobj, self._signature)
        return \
            self
    #end init

    def __init__(self) :
        # not for direct use: init() and recurse() fill in the real state
        self._dbobj = DBUS.SignatureIter()
        self._signature = None # caller will set as necessary
        self._startiter = True # so first __next__ yields the initial position
    #end __init__

    def __iter__(self) :
        return \
            self
    #end __iter__

    def __next__(self) :
        # first call returns the initial position; subsequent calls advance
        if self._startiter :
            self._startiter = False
        else :
            self.next()
        #end if
        return \
            self
    #end __next__

    def next(self) :
        "advances to the next complete type, raising StopIteration at the end of the signature."
        if dbus.dbus_signature_iter_next(self._dbobj) == 0 :
            raise StopIteration("end of signature iterator")
        #end if
        self._startiter = False
        return \
            self
    #end next

    def recurse(self) :
        "returns a sub-iterator over the contents of the container type at the current position."
        subiter = type(self)()
        dbus.dbus_signature_iter_recurse(self._dbobj, subiter._dbobj)
        return \
            subiter
    #end recurse

    @property
    def current_type(self) :
        # the type code (DBUS.TYPE_xxx) at the current position
        return \
            dbus.dbus_signature_iter_get_current_type(self._dbobj)
    #end current_type

    @property
    def signature(self) :
        # the signature string for the iterator’s current state
        c_result = dbus.dbus_signature_iter_get_signature(self._dbobj)
        result = ct.cast(c_result, ct.c_char_p).value.decode()
        dbus.dbus_free(c_result) # storage was allocated by libdbus
        return \
            result
    #end signature

    @property
    def parsed_signature(self) :
        # the current position as a single parsed Type instance
        return \
            parse_single_signature(self.signature)
    #end parsed_signature

    @property
    def element_type(self) :
        # for an array at the current position, the type code of its elements
        return \
            dbus.dbus_signature_iter_get_element_type(self._dbobj)
    #end element_type

#end SignatureIter

def signature_validate(signature, error = None) :
    "is signature a valid sequence of zero or more complete types."
    error, my_error = _get_error(error)
    result = dbus.dbus_signature_validate(signature.encode(), error._dbobj) != 0
    my_error.raise_if_set()
    return \
        result
#end signature_validate

def parse_signature(signature) :
    "convenience routine for parsing a signature string into a list of Type()" \
    " instances."

    def process_subsig(sigelt) :
        # recursively converts the single complete type at sigelt’s current
        # position into the corresponding Type instance.
        elttype = sigelt.current_type
        if elttype in DBUS.basic_to_ctypes :
            result = BasicType(TYPE(elttype))
        elif elttype == DBUS.TYPE_ARRAY :
            if sigelt.element_type == DBUS.TYPE_DICT_ENTRY :
                # an array of dict entries is presented as a DictType
                subsig = sigelt.recurse() # into the array
                subsubsig = subsig.recurse() # into the dict entry
                keytype = process_subsig(next(subsubsig))
                valuetype = process_subsig(next(subsubsig))
                result = DictType(keytype, valuetype)
            else :
                subsig = sigelt.recurse()
                result = ArrayType(process_subsig(next(subsig)))
            #end if
        elif elttype == DBUS.TYPE_STRUCT :
            result = []
            subsig = sigelt.recurse()
            for subelt in subsig :
                result.append(process_subsig(subelt))
            #end for
            result = StructType(*result)
        elif elttype == DBUS.TYPE_VARIANT :
            result = VariantType()
        else :
            raise RuntimeError("unrecognized type %s" % bytes((elttype,)))
        #end if
        return \
            result
    #end process_subsig

#begin parse_signature
    if isinstance(signature, (tuple, list)) :
        # already in parsed form: just validate the element types
        if not all(isinstance(t, Type) for t in signature) :
            raise TypeError("signature is list containing non-Type objects")
        #end if
        result = signature
    elif isinstance(signature, Type) :
        result = [signature] # single already-parsed type
    elif isinstance(signature, str) :
        signature_validate(signature) # raises on a malformed signature
        result = []
        if len(signature) != 0 :
            # nonempty: iterate over each complete type in turn
            sigiter = SignatureIter.init(signature)
            for elt in sigiter :
                result.append(process_subsig(elt))
            #end for
        #end if
    else :
        raise TypeError("signature must be list or str")
    #end if
    return \
        result
#end parse_signature

def parse_single_signature(signature) :
    "like parse_signature, but expects exactly one complete type, returning it directly."
    result = parse_signature(signature)
    if len(result) != 1 :
        raise ValueError("only single type expected")
    #end if
return \ + result[0] +#end parse_single_signature + +def unparse_signature(signature) : + "converts a signature from parsed form to string form." + signature = parse_signature(signature) + if not isinstance(signature, (tuple, list)) : + signature = [signature] + #end if + return \ + DBUS.Signature("".join(t.signature for t in signature)) +#end unparse_signature + +def signature_validate_single(signature, error = None) : + "is signature a single valid type." + error, my_error = _get_error(error) + result = dbus.dbus_signature_validate_single(signature.encode(), error._dbobj) != 0 + my_error.raise_if_set() + return \ + result +#end signature_validate_single + +def type_is_valid(typecode) : + return \ + dbus.dbus_type_is_valid(typecode) != 0 +#end type_is_valid + +def type_is_basic(typecode) : + return \ + dbus.dbus_type_is_basic(typecode) != 0 +#end type_is_basic + +def type_is_container(typecode) : + return \ + dbus.dbus_type_is_container(typecode) != 0 +#end type_is_container + +def type_is_fixed(typecode) : + return \ + dbus.dbus_type_is_fixed(typecode) != 0 +#end type_is_fixed + +def type_is_fixed_array_elttype(typecode) : + "is typecode suitable as the element type of a fixed_array." + return \ + type_is_fixed(typecode) and typecode != DBUS.TYPE_UNIX_FD +#end type_is_fixed_array_elttype + +# syntax validation + +def validate_path(path, error = None) : + error, my_error = _get_error(error) + result = dbus.dbus_validate_path(path.encode(), error._dbobj) != 0 + my_error.raise_if_set() + return \ + result +#end validate_path + +def valid_path(path) : + "returns path if valid, raising appropriate exception if not." + validate_path(path) + return \ + path +#end valid_path + +def split_path(path) : + "convenience routine for splitting a path into a list of components." 
    if isinstance(path, (tuple, list)) :
        result = path # assume already split
    elif path == "/" :
        result = [] # the root path has no components
    else :
        if not path.startswith("/") or path.endswith("/") :
            raise DBusError(DBUS.ERROR_INVALID_ARGS, "invalid path %s" % repr(path))
        #end if
        result = path.split("/")[1:] # drop empty component before the leading slash
    #end if
    return \
        result
#end split_path

def unsplit_path(path) :
    "convenience routine for joining a sequence of components back into a path string."
    path = split_path(path) # normalize: accepts either a string or an already-split list
    if len(path) != 0 :
        result = DBUS.ObjectPath("".join("/" + component for component in path))
    else :
        result = "/" # the root path
    #end if
    return \
        result
#end unsplit_path

def validate_interface(name, error = None) :
    "is name a syntactically valid interface name."
    error, my_error = _get_error(error)
    result = dbus.dbus_validate_interface(name.encode(), error._dbobj) != 0
    my_error.raise_if_set()
    return \
        result
#end validate_interface

def valid_interface(name) :
    "returns name if it is a valid interface name, raising appropriate exception if not."
    validate_interface(name)
    return \
        name
#end valid_interface

def validate_member(name, error = None) :
    "is name a syntactically valid member (method/signal) name."
    error, my_error = _get_error(error)
    result = dbus.dbus_validate_member(name.encode(), error._dbobj) != 0
    my_error.raise_if_set()
    return \
        result
#end validate_member

def valid_member(name) :
    "returns name if it is a valid member name, raising appropriate exception if not."
    validate_member(name)
    return \
        name
#end valid_member

def validate_error_name(name, error = None) :
    "is name a syntactically valid error name."
    error, my_error = _get_error(error)
    result = dbus.dbus_validate_error_name(name.encode(), error._dbobj) != 0
    my_error.raise_if_set()
    return \
        result
#end validate_error_name

def valid_error_name(name) :
    "returns name if it is a valid error name, raising appropriate exception if not."
    validate_error_name(name)
    return \
        name
#end valid_error_name

def validate_bus_name(name, error = None) :
    "is name a syntactically valid bus name."
    error, my_error = _get_error(error)
    result = dbus.dbus_validate_bus_name(name.encode(), error._dbobj) != 0
    my_error.raise_if_set()
    return \
        result
#end validate_bus_name

def valid_bus_name(name) :
    "returns name if it is a valid bus name, raising appropriate exception if not."
    validate_bus_name(name)
    return \
        name
#end valid_bus_name

def validate_utf8(alleged_utf8, error = None) :
    "alleged_utf8 must be null-terminated bytes."
    error, my_error = _get_error(error)
    result = dbus.dbus_validate_utf8(alleged_utf8, error._dbobj) != 0
    my_error.raise_if_set()
    return \
        result
#end validate_utf8

def valid_utf8(alleged_utf8) :
    "returns alleged_utf8 if it is a valid utf-8 bytes value, raising" \
    " appropriate exception if not."
    validate_utf8(alleged_utf8)
    return \
        alleged_utf8
#end valid_utf8

#+
# Introspection representation
#-

class _TagCommon :
    # base class providing behaviour common to all the introspection tag
    # wrapper classes: annotation lookup and repr. The name is deleted from
    # the module namespace once the subclasses have been defined.

    def get_annotation(self, name) :
        "returns the value of the annotation with the specified name, or None" \
        " if none could be found"
        annots = iter(self.annotations)
        while True :
            annot = next(annots, None)
            if annot == None :
                result = None # ran out of annotations without finding it
                break
            #end if
            if annot.name == name :
                result = annot.value
                break
            #end if
        #end while
        return \
            result
    #end get_annotation

    @property
    def is_deprecated(self) :
        "is this interface/method/signal etc deprecated."
        return \
            self.get_annotation("org.freedesktop.DBus.Deprecated") == "true"
    #end is_deprecated

    def __repr__(self) :
        # shows the concrete class name and every slot with its value
        celf = type(self)
        return \
            (
                "%s(%s)"
            %
                (
                    celf.__name__,
                    ", ".join
                      (
                        "%s = %s"
                        %
                            (name, repr(getattr(self, name)))
                        for name in celf.__slots__
                      ),
                )
            )
    #end __repr__

#end _TagCommon

class Introspection(_TagCommon) :
    "high-level wrapper for the DBUS.INTERFACE_INTROSPECTABLE interface."
+ + __slots__ = ("name", "interfaces", "nodes", "annotations") + + tag_name = "node" + tag_attrs = ("name",) + tag_attrs_optional = {"name"} + + class DIRECTION(enum.Enum) : + "argument direction." + IN = "in" # client to server + OUT = "out" # server to client + #end DIRECTION + + class ACCESS(enum.Enum) : + "property access." + READ = "read" + WRITE = "write" + READWRITE = "readwrite" + #end ACCESS + + class PROP_CHANGE_NOTIFICATION(enum.Enum) : + "how/if a changed property emits a notification signal." + NEW_VALUE = "true" # notification includes new value + INVALIDATES = "invalidates" # notification does not include new value + CONST = "const" # property shouldn’t change + NONE = "false" # does not notify changes + #end PROP_CHANGE_NOTIFICATION + + class Annotation(_TagCommon) : + + __slots__ = ("name", "value") + tag_name = "annotation" + tag_attrs = ("name", "value") + tag_elts = {} + + def __init__(self, name, value) : + self.name = name + self.value = value + #end __init__ + + #end Annotation + + def _get_annotations(annotations) : + # common validation of annotations arguments. 
+ if not all(isinstance(a, Introspection.Annotation) for a in annotations) : + raise TypeError("annotations must be Annotation instances") + #end if + return \ + annotations + #end _get_annotations + + class Interface(_TagCommon) : + + __slots__ = ("name", "methods", "signals", "properties", "annotations") + tag_name = "interface" + tag_attrs = ("name",) + + class Method(_TagCommon) : + + __slots__ = ("name", "args", "annotations") + tag_name = "method" + tag_attrs = ("name",) + + class Arg(_TagCommon) : + + __slots__ = ("name", "type", "direction", "annotations") + tag_name = "arg" + tag_attrs = ("name", "type", "direction") + tag_attrs_optional = {"name"} + tag_elts = {} + attr_convert = {} # {"direction" : Introspection.DIRECTION} assigned below + + def __init__(self, *, name = None, type, direction, annotations = ()) : + if not isinstance(direction, Introspection.DIRECTION) : + raise TypeError("direction must be an Introspection.DIRECTION.xxx enum") + #end if + self.name = name + self.type = parse_single_signature(type) + self.direction = direction + self.annotations = Introspection._get_annotations(annotations) + #end __init__ + + #end Arg + + tag_elts = {"args" : Arg} + + def __init__(self, name, args = (), annotations = ()) : + if not all(isinstance(a, self.Arg) for a in args) : + raise TypeError("args must be Arg instances") + #end if + self.name = name + self.args = list(args) + self.annotations = Introspection._get_annotations(annotations) + #end __init__ + + @property + def in_signature(self) : + return \ + list(a.type for a in self.args if a.direction == Introspection.DIRECTION.IN) + #end in_signature + + @property + def out_signature(self) : + return \ + list \ + (a.type for a in self.args if a.direction == Introspection.DIRECTION.OUT) + #end out_signature + + @property + def expect_reply(self) : + "will there be replies to this request method." 
+ return \ + self.get_annotation("org.freedesktop.DBus.Method.NoReply") != "true" + #end expect_reply + + #end Method + + class Signal(_TagCommon) : + + __slots__ = ("name", "args", "annotations") + tag_name = "signal" + tag_attrs = ("name",) + + class Arg(_TagCommon) : + + __slots__ = ("name", "type", "direction", "annotations") + tag_name = "arg" + tag_attrs = ("name", "type", "direction") + tag_attrs_optional = {"name", "direction"} + tag_elts = {} + attr_convert = {} # {"direction" : Introspection.DIRECTION} assigned below + + def __init__(self, *, name = None, type, direction = None, annotations = ()) : + if direction != None and direction != Introspection.DIRECTION.OUT : + raise ValueError("direction can only be Introspection.DIRECTION.OUT") + #end if + self.name = name + self.type = parse_single_signature(type) + self.direction = direction + self.annotations = Introspection._get_annotations(annotations) + #end __init__ + + #end Arg + + tag_elts = {"args" : Arg} + + def __init__(self, name, args = (), annotations = ()) : + if not all(isinstance(a, self.Arg) for a in args) : + raise TypeError("args must be Arg instances") + #end if + self.name = name + self.args = list(args) + self.annotations = Introspection._get_annotations(annotations) + #end __init__ + + @property + def in_signature(self) : + return \ + list(a.type for a in self.args) + #end in_signature + + #end Signal + + class Property(_TagCommon) : + + __slots__ = ("name", "type", "access", "annotations") + tag_name = "property" + tag_attrs = ("name", "type", "access") + tag_elts = {} + attr_convert = {} # {"access" : Introspection.ACCESS} assigned below + + def __init__(self, name, type, access, annotations = ()) : + if not isinstance(access, Introspection.ACCESS) : + raise TypeError("access must be an Introspection.ACCESS.xxx enum") + #end if + self.name = name + self.type = parse_single_signature(type) + self.access = access + self.annotations = Introspection._get_annotations(annotations) + #end 
__init__ + + #end Property + + tag_elts = {"methods" : Method, "signals" : Signal, "properties" : Property} + + def __init__(self, name, methods = (), signals = (), properties = (), annotations = ()) : + if not all(isinstance(m, self.Method) for m in methods) : + raise TypeError("methods must be Method instances") + #end if + if not all(isinstance(s, self.Signal) for s in signals) : + raise TypeError("signals must be Signal instances") + #end if + if not all(isinstance(p, self.Property) for p in properties) : + raise TypeError("properties must be Property instances") + #end if + self.name = name + self.methods = list(methods) + self.signals = list(signals) + self.properties = list(properties) + self.annotations = Introspection._get_annotations(annotations) + #end __init__ + + @property + def methods_by_name(self) : + "returns a dict associating all the methods with their names." + return \ + dict((method.name, method) for method in self.methods) + #end methods_by_name + + @property + def signals_by_name(self) : + "returns a dict associating all the signals with their names." + return \ + dict((signal.name, signal) for signal in self.signals) + #end signals_by_name + + @property + def properties_by_name(self) : + "returns a dict associating all the properties with their names." + return \ + dict((prop.name, prop) for prop in self.properties) + #end properties_by_name + + #end Interface + Interface.Method.Arg.attr_convert["direction"] = DIRECTION + Interface.Signal.Arg.attr_convert["direction"] = lambda x : (lambda : None, lambda : Introspection.DIRECTION(x))[x != None]() + Interface.Property.attr_convert["access"] = ACCESS + + class StubInterface(_TagCommon) : + "use this as a replacement for an Interface that you don’t want" \ + " to see expanded, e.g. if it has already been seen." 
+ + __slots__ = ("name", "annotations") + tag_name = "interface" + tag_attrs = ("name",) + tag_elts = {} + + def __init__(self, name) : + self.name = name + self.annotations = () + #end __init__ + + #end StubInterface + + class Node(_TagCommon) : + + __slots__ = ("name", "interfaces", "nodes", "annotations") + tag_name = "node" + tag_attrs = ("name",) + + def __init__(self, name, interfaces = (), nodes = (), annotations = ()) : + if not all(isinstance(i, (Introspection.Interface, Introspection.StubInterface)) for i in interfaces) : + raise TypeError("interfaces must be Interface or StubInterface instances") + #end if + if not all(isinstance(n, Introspection.Node) for n in nodes) : + raise TypeError("nodes must be Node instances") + #end if + self.name = name + self.interfaces = interfaces + self.nodes = nodes + self.annotations = Introspection._get_annotations(annotations) + #end __init__ + + @property + def interfaces_by_name(self) : + "returns a dict associating all the interfaces with their names." + return \ + dict((iface.name, iface) for iface in self.interfaces) + #end interfaces_by_name + + @property + def nodes_by_name(self) : + "returns a dict associating all the child nodes with their names." 
+ return \ + dict((node.name, node) for node in self.nodes) + #end nodes_by_name + + #end Node + Node.tag_elts = {"interfaces" : Interface, "nodes" : Node} + + tag_elts = {"interfaces" : Interface, "nodes" : Node} + + def __init__(self, name = None, interfaces = (), nodes = (), annotations = ()) : + if not all(isinstance(i, self.Interface) for i in interfaces) : + raise TypeError("interfaces must be Interface instances") + #end if + if not all(isinstance(n, self.Node) for n in nodes) : + raise TypeError("nodes must be Node instances") + #end if + self.name = name + self.interfaces = list(interfaces) + self.nodes = list(nodes) + self.annotations = Introspection._get_annotations(annotations) + #end __init__ + + @property + def interfaces_by_name(self) : + "returns a dict associating all the interfaces with their names." + return \ + dict((iface.name, iface) for iface in self.interfaces) + #end interfaces_by_name + + @property + def nodes_by_name(self) : + "returns a dict associating all the nodes with their names." + return \ + dict((node.name, node) for node in self.nodes) + #end nodes_by_name + + @classmethod + def parse(celf, s) : + "generates an Introspection tree from the given XML string description." 
+ + def from_string_elts(celf, attrs, tree) : + elts = dict((k, attrs[k]) for k in attrs) + child_tags = dict \ + ( + (childclass.tag_name, childclass) + for childclass in tuple(celf.tag_elts.values()) + (Introspection.Annotation,) + ) + children = [] + for child in tree : + if child.tag not in child_tags : + raise KeyError("unrecognized tag %s" % child.tag) + #end if + childclass = child_tags[child.tag] + childattrs = {} + for attrname in childclass.tag_attrs : + if hasattr(childclass, "tag_attrs_optional") and attrname in childclass.tag_attrs_optional : + childattrs[attrname] = child.attrib.get(attrname, None) + else : + if attrname not in child.attrib : + raise ValueError("missing %s attribute for %s tag" % (attrname, child.tag)) + #end if + childattrs[attrname] = child.attrib[attrname] + #end if + #end for + if hasattr(childclass, "attr_convert") : + for attr in childclass.attr_convert : + if attr in childattrs : + childattrs[attr] = childclass.attr_convert[attr](childattrs[attr]) + #end if + #end for + #end if + children.append(from_string_elts(childclass, childattrs, child)) + #end for + for child_tag, childclass in tuple(celf.tag_elts.items()) + ((), (("annotations", Introspection.Annotation),))[tree.tag != "annotation"] : + for child in children : + if isinstance(child, childclass) : + if child_tag not in elts : + elts[child_tag] = [] + #end if + elts[child_tag].append(child) + #end if + #end for + #end for + return \ + celf(**elts) + #end from_string_elts + + #begin parse + tree = XMLElementTree.fromstring(s) + assert tree.tag == "node", "root of introspection tree must be tag" + return \ + from_string_elts(Introspection, {}, tree) + #end parse + + def unparse(self, indent_step = 4, max_linelen = 72) : + "returns an XML string description of this Introspection tree." 
+ + out = io.StringIO() + + def to_string(obj, indent) : + tag_name = obj.tag_name + attrs = [] + for attrname in obj.tag_attrs : + attr = getattr(obj, attrname) + if attr != None : + if isinstance(attr, enum.Enum) : + attr = attr.value + elif isinstance(attr, Type) : + attr = unparse_signature(attr) + elif not isinstance(attr, str) : + raise TypeError("unexpected attribute type %s for %s" % (type(attr).__name__, repr(attr))) + #end if + attrs.append("%s=%s" % (attrname, quote_xml_attr(attr))) + #end if + #end for + has_elts = \ + ( + sum + ( + len(getattr(obj, attrname)) + for attrname in + tuple(obj.tag_elts.keys()) + + + ((), ("annotations",)) + [not isinstance(obj, Introspection.Annotation)] + ) + != + 0 + ) + out.write(" " * indent + "<" + tag_name) + if ( + max_linelen != None + and + indent + + + len(tag_name) + + + sum((len(s) + 1) for s in attrs) + + + 2 + + + int(has_elts) + > + max_linelen + ) : + out.write("\n") + for attr in attrs : + out.write(" " * (indent + indent_step)) + out.write(attr) + out.write("\n") + #end for + out.write(" " * indent) + else : + for attr in attrs : + out.write(" ") + out.write(attr) + #end for + #end if + if not has_elts : + out.write("/") + #end if + out.write(">\n") + if has_elts : + for attrname in sorted(obj.tag_elts.keys()) + ["annotations"] : + for elt in getattr(obj, attrname) : + to_string(elt, indent + indent_step) + #end for + #end for + out.write(" " * indent + "\n") + #end if + #end to_string + + #begin unparse + out.write(DBUS.INTROSPECT_1_0_XML_DOCTYPE_DECL_NODE) + out.write("\n") + for elt in self.interfaces : + to_string(elt, indent_step) + #end for + for elt in self.nodes : + to_string(elt, indent_step) + #end for + out.write("\n") + return \ + out.getvalue() + #end unparse + +#end Introspection + +del _TagCommon + +#+ +# Standard interfaces +#- + +standard_interfaces = \ + { + DBUS.INTERFACE_PEER : + # note implementation of this is hard-coded inside libdbus + Introspection.Interface + ( + name = 
DBUS.INTERFACE_PEER, + methods = + [ + Introspection.Interface.Method(name = "Ping"), + Introspection.Interface.Method + ( + name = "GetMachineId", + args = + [ + Introspection.Interface.Method.Arg + ( + name = "machine_uuid", + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.OUT, + ), + ] + ), + ], + ), + DBUS.INTERFACE_LOCAL : + # note implementation of this is hard-coded inside, and specific to, libdbus + Introspection.Interface + ( + name = DBUS.INTERFACE_LOCAL, + signals = + [ + Introspection.Interface.Signal(name = "Disconnected"), + # auto-generated by libdbus with path = DBUS.PATH_LOCAL + # when connection is closed; cannot be explicitly sent by + # clients. Documented here: + # + ], + ), + DBUS.INTERFACE_DBUS : + Introspection.Interface + ( + name = DBUS.INTERFACE_DBUS, + methods = + [ + Introspection.Interface.Method + ( + name = "Hello", + args = + [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.OUT, + ), # returned unique name + ] + ), + Introspection.Interface.Method + ( + name = "RequestName", + args = + [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), # name + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.UINT32), + direction = Introspection.DIRECTION.IN, + ), # flags DBUS.NAME_FLAG_xxx + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.UINT32), + direction = Introspection.DIRECTION.OUT, + ), # result DBUS.REQUEST_NAME_REPLY_xxx + ] + ), + Introspection.Interface.Method + ( + name = "ReleaseName", + args = + [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.UINT32), + direction = Introspection.DIRECTION.OUT, + ), # result DBUS.RELEASE_NAME_REPLY_xxx + ] + ), + Introspection.Interface.Method + ( + name = "StartServiceByName", + 
args = + [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), # name + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.UINT32), + direction = Introspection.DIRECTION.IN, + ), # flags (currently unused) + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.UINT32), + direction = Introspection.DIRECTION.OUT, + ), # result DBUS.START_REPLY_xxx + ] + ), + Introspection.Interface.Method + ( + name = "UpdateActivationEnvironment", + args = + [ + Introspection.Interface.Method.Arg + ( + type = DictType + ( + keytype = BasicType(TYPE.STRING), + valuetype = BasicType(TYPE.STRING) + ), + direction = Introspection.DIRECTION.IN, + ), # environment + ] + ), + Introspection.Interface.Method + ( + name = "NameHasOwner", + args = + [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), # name + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.BOOLEAN), + direction = Introspection.DIRECTION.OUT, + ), + ] + ), + Introspection.Interface.Method + ( + name = "ListNames", + args = + [ + Introspection.Interface.Method.Arg + ( + type = ArrayType(BasicType(TYPE.STRING)), + direction = Introspection.DIRECTION.OUT, + ), + ] + ), + Introspection.Interface.Method + ( + name = "ListActivatableNames", + args = + [ + Introspection.Interface.Method.Arg + ( + type = ArrayType(BasicType(TYPE.STRING)), + direction = Introspection.DIRECTION.OUT, + ), + ] + ), + Introspection.Interface.Method + ( + name = "AddMatch", + args = + [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + ] + ), + Introspection.Interface.Method + ( + name = "RemoveMatch", + args = + [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + ] + ), + Introspection.Interface.Method + ( + name = "GetNameOwner", + args = 
+ [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.OUT, + ), + ] + ), + Introspection.Interface.Method + ( + name = "ListQueuedOwners", + args = + [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + Introspection.Interface.Method.Arg + ( + type = ArrayType(BasicType(TYPE.STRING)), + direction = Introspection.DIRECTION.OUT, + ), + ] + ), + Introspection.Interface.Method + ( + name = "GetConnectionUnixUser", + args = + [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.UINT32), + direction = Introspection.DIRECTION.OUT, + ), + ] + ), + Introspection.Interface.Method + ( + name = "GetConnectionUnixProcessID", + args = + [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.UINT32), + direction = Introspection.DIRECTION.OUT, + ), + ] + ), + Introspection.Interface.Method + ( + name = "GetAdtAuditSessionData", + args = + [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + Introspection.Interface.Method.Arg + ( + type = ArrayType(BasicType(TYPE.BYTE)), + direction = Introspection.DIRECTION.OUT, + ), + ] + ), + Introspection.Interface.Method + ( + name = "GetConnectionSELinuxSecurityContext", + args = + [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + Introspection.Interface.Method.Arg + ( + type = ArrayType(BasicType(TYPE.BYTE)), + direction = Introspection.DIRECTION.OUT, + ), + ] + ), + 
Introspection.Interface.Method + ( + name = "ReloadConfig", + ), + Introspection.Interface.Method + ( + name = "GetId", + args = + [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.OUT, + ), + ] + ), + Introspection.Interface.Method + ( + name = "GetConnectionCredentials", + args = + [ + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + Introspection.Interface.Method.Arg + ( + type = DictType(BasicType(TYPE.STRING), VariantType()), + direction = Introspection.DIRECTION.OUT, + ), + ] + ), + ], + signals = + [ + Introspection.Interface.Signal + ( + name = "NameOwnerChanged", + args = + [ + Introspection.Interface.Signal.Arg + ( + type = BasicType(TYPE.STRING), + ), # bus name + Introspection.Interface.Signal.Arg + ( + type = BasicType(TYPE.STRING), + ), # old owner, empty if none + Introspection.Interface.Signal.Arg + ( + type = BasicType(TYPE.STRING), + ), # new owner, empty if none + ] + ), + Introspection.Interface.Signal + ( + name = "NameLost", # sent to previous owner of name + args = + [ + Introspection.Interface.Signal.Arg + ( + type = BasicType(TYPE.STRING), + ), + ] + ), + Introspection.Interface.Signal + ( + name = "NameAcquired", # sent to new owner of name + args = + [ + Introspection.Interface.Signal.Arg + ( + type = BasicType(TYPE.STRING), + ), + ] + ), + ], + ), + DBUS.INTERFACE_INTROSPECTABLE : + Introspection.Interface + ( + name = DBUS.INTERFACE_INTROSPECTABLE, + methods = + [ + Introspection.Interface.Method + ( + name = "Introspect", + args = + [ + Introspection.Interface.Method.Arg + ( + name = "data", + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.OUT, + ), + ] + ), + ], + ), + DBUS.INTERFACE_PROPERTIES : + Introspection.Interface + ( + name = DBUS.INTERFACE_PROPERTIES, + methods = + [ + Introspection.Interface.Method + ( + name = "Get", + args = + [ + Introspection.Interface.Method.Arg + 
( + name = "interface_name", + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + Introspection.Interface.Method.Arg + ( + name = "property_name", + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + Introspection.Interface.Method.Arg + ( + name = "value", + type = VariantType(), + direction = Introspection.DIRECTION.OUT, + ), + ], + ), + Introspection.Interface.Method + ( + name = "Set", + args = + [ + Introspection.Interface.Method.Arg + ( + name = "interface_name", + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + Introspection.Interface.Method.Arg + ( + name = "property_name", + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + Introspection.Interface.Method.Arg + ( + name = "value", + type = VariantType(), + direction = Introspection.DIRECTION.IN, + ), + ], + ), + Introspection.Interface.Method + ( + name = "GetAll", + args = + [ + Introspection.Interface.Method.Arg + ( + name = "interface_name", + type = BasicType(TYPE.STRING), + direction = Introspection.DIRECTION.IN, + ), + Introspection.Interface.Method.Arg + ( + name = "values", + type = DictType(BasicType(TYPE.STRING), VariantType()), + direction = Introspection.DIRECTION.OUT, + ), + ], + ), + ], + signals = + [ + Introspection.Interface.Signal + ( + name = "PropertiesChanged", + args = + [ + Introspection.Interface.Signal.Arg + ( + name = "interface_name", + type = BasicType(TYPE.STRING), + ), + Introspection.Interface.Signal.Arg + ( + name = "changed_properties", + type = DictType(BasicType(TYPE.STRING), VariantType()), + ), + Introspection.Interface.Signal.Arg + ( + name = "invalidated_properties", + type = ArrayType(BasicType(TYPE.STRING)), + ), + ], + ), + ], + ), + DBUS.INTERFACE_MONITORING : + Introspection.Interface + ( + name = DBUS.INTERFACE_MONITORING, + methods = + [ + Introspection.Interface.Method + ( + name = "BecomeMonitor", + args = + [ + 
Introspection.Interface.Method.Arg + ( + type = ArrayType(BasicType(TYPE.STRING)), + direction = Introspection.DIRECTION.IN, + ), # match rules to add to the connection + Introspection.Interface.Method.Arg + ( + type = BasicType(TYPE.UINT32), + direction = Introspection.DIRECTION.IN, + ), # flags (currently unused) + ], + ), + ], + ), + DBUSX.INTERFACE_OBJECT_MANAGER : + Introspection.Interface + ( + name = DBUSX.INTERFACE_OBJECT_MANAGER, + methods = + [ + Introspection.Interface.Method + ( + name = "GetManagedObjects", + args = + [ + Introspection.Interface.Method.Arg + ( + name = "objpath_interfaces_and_properties", + type = DictType + ( + BasicType(TYPE.OBJECT_PATH), + DictType + ( + BasicType(TYPE.STRING), # interface + DictType(BasicType(TYPE.STRING), VariantType()) + # properties and values + ) + ), + direction = Introspection.DIRECTION.OUT, + ), + ], + ), + ], + signals = + [ + Introspection.Interface.Signal + ( + name = "InterfacesAdded", + args = + [ + Introspection.Interface.Signal.Arg + ( + name = "object_path", + type = BasicType(TYPE.OBJECT_PATH), + ), + Introspection.Interface.Signal.Arg + ( + name = "interfaces_and_properties", + type = DictType + ( + BasicType(TYPE.STRING), # interface added/changed + DictType(BasicType(TYPE.STRING), VariantType()) + # properties and values added + ), + ), + ], + ), + Introspection.Interface.Signal + ( + name = "InterfacesRemoved", + args = + [ + Introspection.Interface.Signal.Arg + ( + name = "object_path", + type = BasicType(TYPE.OBJECT_PATH), + ), + Introspection.Interface.Signal.Arg + ( + name = "interfaces", + type = ArrayType(BasicType(TYPE.STRING)), + # interfaces removed + ), + ], + ), + ], + ), + } + +#+ +# Cleanup +#- + +def _atexit() : + # disable all __del__ methods at process termination to avoid segfaults + for cls in Connection, Server, PreallocatedSend, Message, PendingCall, Error, AddressEntries : + delattr(cls, "__del__") + #end for +#end _atexit +atexit.register(_atexit) +del _atexit diff --git 
a/defaults/dbussy/ravel.py b/defaults/dbussy/ravel.py new file mode 100644 index 0000000..415eace --- /dev/null +++ b/defaults/dbussy/ravel.py @@ -0,0 +1,3723 @@ +""" +Simplified higher-level Python binding for D-Bus on top of dbussy. +Provides a framework for dispatching method and signal calls, and also +for on-the-fly invocation of method calls in the server from the +client using proxy objects, all with the option of running via an +asyncio event loop. +""" +#+ +# Copyright 2017-2020 Lawrence D'Oliveiro . +# Licensed under the GNU Lesser General Public License v2.1 or later. +#- + +import enum +from weakref import \ + ref as weak_ref, \ + WeakValueDictionary +import asyncio +import atexit +import dbussy as dbus +from dbussy import \ + DBUS, \ + DBUSX, \ + Introspection + +#+ +# High-level bus connection +#- + +class ErrorReturn(Exception) : + "Dispatch handlers can raise this to report an error that will be returned" \ + " in a message back to the other end of the connection." + + def __init__(self, name, message) : + self.args = (name, message) + #end __init__ + + def as_error(self) : + "fills in and returns an Error object that reports the specified error name and message." + result = dbus.Error.init() + result.set(self.args[0], self.args[1]) + return \ + result + #end as_error + +#end ErrorReturn + +def _signal_key(fallback, interface, name) : + # constructs a key for the signal-listener dictionary from the + # given args. + return \ + (fallback, interface, name) +#end _signal_key + +def _signal_rule(path, fallback, interface, name) : + # constructs a D-Bus match rule from the given args. 
+ return \ + dbus.format_rule \ + ( + { + "type" : "signal", + ("path", "path_namespace")[fallback] : dbus.unsplit_path(path), + "interface" : interface, + "member" : name, + } + ) +#end _signal_rule + +class _DispatchNode : + + __slots__ = ("children", "signal_listeners") + + class _Interface : + + __slots__ = ("interface", "fallback", "listening") + + def __init__(self, interface, fallback) : + self.interface = interface + self.fallback = fallback + self.listening = set() # of match rule strings + #end __init + + #end _Interface + + def __init__(self) : + self.children = {} # dict of path component => _DispatchNode + self.signal_listeners = {} # dict of _signal_key() => list of functions + #end __init__ + + @property + def is_empty(self) : + return \ + ( + len(self.children) == 0 + and + len(self.signal_listeners) == 0 + ) + #end _is_empty + +#end _DispatchNode + +class _ClientDispatchNode(_DispatchNode) : + + __slots__ = () + + def __init__(self, bus) : + # bus arg ignored, only accepted for compatibility with _ServerDispatchNode + super().__init__() + #end __init__ + +#end _ClientDispatchNode + +class _ServerDispatchNode(_DispatchNode) : + + __slots__ = ("interfaces", "user_data") + + class _UserDataDict(dict) : + # for holding user data, does automatic cleaning up of object + # tree as items are removed. 
+ + __slots__ = ("_ravel_bus",) + + def __init__(self, bus) : + super().__init__() + self._ravel_bus = weak_ref(bus) + #end __init + + def __delitem__(self, key) : + super().__delitem__(key) + if len(self) == 0 : + bus = self._ravel_bus() + assert bus != None, "parent Connection has gone" + bus._trim_dispatch(True) + #end if + #end __delitem__ + + #end _UserDataDict + + def __init__(self, bus) : + super().__init__() + self.interfaces = {} # dict of interface name => _Interface + self.user_data = self._UserDataDict(bus) # for caller use + #end __init__ + + @property + def is_empty(self) : + return \ + ( + super().is_empty + and + len(self.interfaces) == 0 + and + len(self.user_data) == 0 + ) + #end is_empty + +#end _ServerDispatchNode + +class _UserData : + + __slots__ = ("_w_conn",) + + def __init__(self, conn) : + self._w_conn = weak_ref(conn) + #end __init__ + + @property + def conn(self) : + result = self._w_conn() + assert result != None + return \ + result + #end conn + + def __getitem__(self, path) : + node = self.conn._get_dispatch_node(path, True, True) + return \ + node.user_data + #end __getitem__ + +#end _UserData + +class Connection(dbus.TaskKeeper) : + "higher-level wrapper around dbussy.Connection. Do not instantiate directly: use" \ + " the session_bus() and system_bus() calls in this module, or obtain from accepting" \ + " connections on a Server().\n" \ + "\n" \ + "This class provides various functions, some more suited to client-side use and" \ + " some more suitable to the server side. Allows for registering of @interface()" \ + " classes for automatic dispatching of method calls at appropriate points in" \ + " the object hierarchy." 
+ + __slots__ = \ + ( + "connection", + "notify_delay", + "user_data", + "_direct_connect", + "bus_names_acquired", + "bus_names_pending", + "_client_dispatch", + "_server_dispatch", + "_managed_objects", + "_registered_bus_names_listeners", + "_bus_name_acquired_action", + "_bus_name_acquired_action_arg", + "_bus_name_lost_action", + "_bus_name_lost_action_arg", + "_props_changed", + "_objects_added", + "_objects_removed", + ) # to forestall typos + + _instances = WeakValueDictionary() + + def __new__(celf, connection, direct_connect) : + # always return the same Connection for the same dbus.Connection. + if not isinstance(connection, dbus.Connection) : + raise TypeError("connection must be a Connection") + #end if + self = celf._instances.get(connection) + if self == None : + self = super().__new__(celf) + super()._init(self) + self.connection = connection + self.loop = connection.loop + self.notify_delay = 0 + self._client_dispatch = None # for signal listeners + self._server_dispatch = None # for registered classes that field method calls + self._managed_objects = None + self._direct_connect = direct_connect + unique_name = connection.bus_unique_name + if direct_connect : + assert unique_name == None, "connection already registered" + self.bus_names_acquired = None + self.bus_names_pending = None + else : + assert unique_name != None, "connection not yet registered" + self.bus_names_acquired = {unique_name} + self.bus_names_pending = set() + #end if + self._registered_bus_names_listeners = False + self._props_changed = None + self._objects_added = None + self._objects_removed = None + self._bus_name_acquired_action = None + self._bus_name_acquired_action_arg = None + self._bus_name_lost_action = None + self._bus_name_lost_action_arg = None + self.user_data = _UserData(self) + celf._instances[connection] = self + for interface in \ + ( + PeerStub, + IntrospectionHandler, + PropertyHandler, + ) \ + : + self.register \ + ( + path = "/", + interface = interface(), 
+ fallback = True + ) + #end for + else : + assert self._direct_connect == direct_connect + #end if + return \ + self + #end __new__ + + def __del__(self) : + + # Note: if remove_listeners refers directly to outer “self", + # then Connection object is not disposed immediately. Passing + # reference as explicit arg seems to fix this. + def remove_listeners(self, is_server, level, path) : + for node, child in level.children.items() : + remove_listeners(self, is_server, child, path + [node]) + #end for + if not self._direct_connect : + if is_server : + for interface in level.interfaces.values() : + for rulestr in interface.listening : + ignore = dbus.Error.init() + self.connection.bus_remove_match(rulestr, ignore) + #end for + #end for + #end if + for rulekey in level.signal_listeners : + fallback, interface, name = rulekey + ignore = dbus.Error.init() + self.connection.bus_remove_match \ + ( + _signal_rule(path, fallback, interface, name), + ignore + ) + #end for + #end if + #end remove_listeners + + #begin __del__ + if self.connection != None : + if self._server_dispatch != None : + remove_listeners(self, True, self._server_dispatch, []) + #end if + if self._client_dispatch != None : + remove_listeners(self, False, self._client_dispatch, []) + #end if + self.connection = None + #end if + #end __del__ + + def attach_asyncio(self, loop = None) : + "attaches this Connection object to an asyncio event loop. If none is" \ + " specified, the default event loop (as returned from asyncio.get_event_loop()" \ + " is used." + self.connection.attach_asyncio(loop) + self.loop = self.connection.loop + return \ + self + #end attach_asyncio + + @staticmethod + def _bus_name_acquired(conn, msg, w_self) : + # internal callback which keeps track of bus names and dispatches + # to user-specified action. 
+ self = w_self() + assert self != None + assert not self._direct_connect, "shouldn’t be acquiring bus names on direct server connection" + bus_name = msg.expect_objects("s")[0] + self.bus_names_pending.discard(bus_name) + if bus_name not in self.bus_names_acquired : + self.bus_names_acquired.add(bus_name) + if self._bus_name_acquired_action != None : + result = self._bus_name_acquired_action(self, bus_name, self._bus_name_acquired_action_arg) + if asyncio.iscoroutine(result) : + self.create_task(result) + #end if + #end if + #end if + #end _bus_name_acquired + + @staticmethod + def _bus_name_lost(conn, msg, w_self) : + # internal callback which keeps track of bus names and dispatches + # to user-specified action. + self = w_self() + assert self != None + assert not self._direct_connect, "shouldn’t be losing bus names on direct server connection" + bus_name = msg.expect_objects("s")[0] + self.bus_names_pending.discard(bus_name) + if bus_name in self.bus_names_acquired : + self.bus_names_acquired.remove(bus_name) + if self._bus_name_lost_action != None : + result = self._bus_name_lost_action(self, bus_name, self._bus_name_lost_action_arg) + if asyncio.iscoroutine(result) : + self.create_task(result) + #end if + #end if + #end if + #end _bus_name_lost + + def set_bus_name_acquired_action(self, action, action_arg) : + "sets the action (if not None) to be called on receiving a bus-name-acquired" \ + " signal. action is invoked as\n" \ + "\n" \ + " action(conn, bus_name, action_arg)\n" \ + "\n" \ + "where conn is the Connection object and bus_name is the name." + assert not self._direct_connect, "cannot acquire bus names on direct server connection" + self._bus_name_acquired_action = action + self._bus_name_acquired_action_arg = action_arg + #end set_bus_name_acquired_action + + def set_bus_name_lost_action(self, action, action_arg) : + "sets the action (if not None) to be called on receiving a bus-name-lost" \ + " signal. 
action is invoked as\n" \ + "\n" \ + " action(conn, bus_name, action_arg)\n" \ + "\n" \ + "where conn is the Connection object and bus_name is the name." + assert not self._direct_connect, "cannot acquire bus names on direct server connection" + self._bus_name_lost_action = action + self._bus_name_lost_action_arg = action_arg + #end set_bus_name_lost_action + + def request_name(self, bus_name, flags) : + "registers a bus name." + assert not self._direct_connect, "cannot register bus names on direct server connection" + if not self._registered_bus_names_listeners : + self.connection.bus_add_match_action \ + ( + rule = "type=signal,interface=org.freedesktop.DBus,member=NameAcquired", + func = self._bus_name_acquired, + user_data = weak_ref(self) + ) + self.connection.bus_add_match_action \ + ( + rule = "type=signal,interface=org.freedesktop.DBus,member=NameLost", + func = self._bus_name_lost, + user_data = weak_ref(self) + ) + self._registered_bus_names_listeners = True + #end if + return \ + self.connection.bus_request_name(bus_name, flags) + #end request_name + + async def request_name_async(self, bus_name, flags, error = None, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "registers a bus name." 
+ assert not self._direct_connect, "cannot register bus names on direct server connection" + assert self.loop != None, "no event loop to attach coroutine to" + if not self._registered_bus_names_listeners : + self._registered_bus_names_listeners = True # do first in case of reentrant call + await self.connection.bus_add_match_action_async \ + ( + rule = "type=signal,interface=org.freedesktop.DBus,member=NameAcquired", + func = self._bus_name_acquired, + user_data = weak_ref(self) + ) + await self.connection.bus_add_match_action_async \ + ( + rule = "type=signal,interface=org.freedesktop.DBus,member=NameLost", + func = self._bus_name_lost, + user_data = weak_ref(self) + ) + #end if + is_acquired = bus_name in self.bus_names_acquired + is_pending = bus_name in self.bus_names_pending + if not (is_acquired or is_pending) : + self.bus_names_pending.add(bus_name) + result = await self.connection.bus_request_name_async(bus_name, flags, error = error, timeout = timeout) + if error != None and error.is_set or result != DBUS.REQUEST_NAME_REPLY_IN_QUEUE : + self.bus_names_pending.discard(bus_name) + #end if + elif is_pending : + result = DBUS.REQUEST_NAME_REPLY_IN_QUEUE + else : + result = DBUS.REQUEST_NAME_REPLY_ALREADY_OWNER + #end if + return \ + result + #end request_name_async + + def release_name(self, bus_name) : + "releases a registered bus name." + assert not self._direct_connect, "cannot register bus names on direct server connection" + return \ + self.connection.bus_release_name(bus_name) + #end release_name + + async def release_name_async(self, bus_name, error = None, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "releases a registered bus name." 
+ assert not self._direct_connect, "cannot register bus names on direct server connection" + assert self.loop != None, "no event loop to attach coroutine to" + return \ + await self.connection.bus_release_name_async(bus_name, error = error, timeout = timeout) + #end release_name_async + + def _trim_dispatch(self, is_server) : + # removes empty subtrees from the object tree. + + def trim_dispatch_node(level) : + to_delete = set() + for node, child in level.children.items() : + trim_dispatch_node(child) + if child.is_empty : + to_delete.add(node) + #end if + #end for + for node in to_delete : + del level.children[node] + #end for + #end trim_dispatch_node + + #begin _trim_dispatch + dispatch = (self._client_dispatch, self._server_dispatch)[is_server] + if dispatch != None : + trim_dispatch_node(dispatch) + if dispatch.is_empty : + if is_server : + self._server_dispatch = None + else : + self._client_dispatch = None + #end if + #end if + #end if + #end _trim_dispatch + + def _get_dispatch_node(self, path, is_server, create_if) : + # returns the appropriate _DispatchNode entry in the + # client or server dispatch tree (depending on is_server) for + # the specified path, or None if no such and not create_if. 
+ if create_if : + if is_server and self._server_dispatch == None : + self._server_dispatch = _ServerDispatchNode(self) + elif not is_server and self._client_dispatch == None : + self._client_dispatch = _ClientDispatchNode(self) + #end if + #end if + level = (self._client_dispatch, self._server_dispatch)[is_server] + DispatchNode = (_ClientDispatchNode, _ServerDispatchNode)[is_server] + if level != None : + levels = iter(dbus.split_path(path)) + while True : + component = next(levels, None) + if component == None : + break # found if level != None + if component not in level.children : + if not create_if : + level = None + break + #end if + level.children[component] = DispatchNode(self) + #end if + level = level.children[component] + # search another step down the path + #end while + #end if + return \ + level + #end _get_dispatch_node + + def _remove_matches(self, dispatch) : + if not self._direct_connect : + for rulestr in dispatch.listening : + ignore = dbus.Error.init() + self.connection.bus_remove_match(rulestr, ignore) + #end for + #end if + #end _remove_matches + + def register_additional_standard(self, **kwargs) : + "registers additional standard interfaces that are not automatically" \ + " installed at Connection creation time. 
Currently the only one is" \ + " the object-manager interface, registered with\n" \ + "\n" \ + " «conn».register_additional_standard(managed_objects = True)\n" + for key in kwargs : + if kwargs[key] : + if key == "managed_objects" : + if self._managed_objects != None : + raise asyncio.InvalidStateError \ + ( + "object manager interface already registered" + ) + #end if + self.register \ + ( + path = "/", + interface = ManagedObjectsHandler(), + fallback = True + ) + self._managed_objects = {} + else : + raise TypeError("unrecognized argument keyword “%s”" % key) + #end if + #end if + #end for + return \ + self + #end register_additional_standard + + def register(self, path, fallback, interface, replace = True) : + "for server-side use; registers the specified instance of an @interface()" \ + " class for handling method calls on the specified path, and also on subpaths" \ + " if fallback." + if is_interface_instance(interface) : + iface_type = type(interface) + elif is_interface(interface) : + # assume can instantiate without arguments + iface_type = interface + interface = iface_type() + else : + raise TypeError("interface must be an @interface() class or instance thereof") + #end if + if self._server_dispatch == None : + self._server_dispatch = _ServerDispatchNode(self) + self.connection.add_filter(_message_interface_dispatch, weak_ref(self)) + #end if + level = self._server_dispatch + for component in dbus.split_path(path) : + if component not in level.children : + level.children[component] = _ServerDispatchNode(self) + #end if + level = level.children[component] + #end for + interface_name = iface_type._interface_name + if interface_name in level.interfaces : + entry = level.interfaces[interface_name] + existing_kind = type(entry.interface)._interface_kind + if not replace or existing_kind != iface_type._interface_kind : + raise KeyError \ + ( + "already registered an interface named “%s” of kind %s" + % + (interface_name, existing_kind) + ) + #end if + 
self._remove_matches(entry) + #end if + entry = _ServerDispatchNode._Interface(interface, fallback) + if iface_type._interface_kind != INTERFACE.SERVER and not self._direct_connect : + signals = iface_type._interface_signals + for name in signals : + if not iface_type._interface_signals[name]._signal_info["stub"] : + rulestr = _signal_rule(path, fallback, interface_name, name) + self.connection.bus_add_match(rulestr) + entry.listening.add(rulestr) + #end if + #end for + #end for + level.interfaces[interface_name] = entry + #end register + + def unregister(self, path, interface = None) : + "for server-side use; unregisters the specified interface class (or all" \ + " registered interface classes, if None) from handling method calls on path." + if interface != None : + if is_interface_instance(interface) : + interface = type(interface) + elif not is_interface(interface) : + raise TypeError("interface must be None or an @interface() class or instance thereof") + #end if + #end if + if self._server_dispatch != None : + level = self._server_dispatch + levels = iter(dbus.split_path(path)) + while True : + component = next(levels, None) + if component == None : + if interface != None : + interfaces = {interface._interface_name} + else : + interfaces = set(level.interfaces.keys()) + #end if + for iface_name in interfaces : + self._remove_matches(level.interfaces[iface_name]) + del level.interfaces[iface_name] + #end for + break + #end if + if component not in level.children : + break + level = level.children[component] + #end while + self._trim_dispatch(True) + #end if + #end unregister + + def listen_signal(self, path, fallback, interface, name, func) : + "for client-side use; registers a callback which will be invoked when a" \ + " signal is received for the specified path, interface and name." 
+ if not hasattr(func, "_signal_info") : + raise TypeError("callback must have @signal() decorator applied") + #end if + signal_info = func._signal_info + entry = self._get_dispatch_node(path, False, True) + # Should I bother to check it matches a registered interface and + # defined signal therein? + # Also, should I pay any attention to signal_info["name"]? Perhaps + # default if name arg is None? + listeners = entry.signal_listeners + rulekey = _signal_key(fallback, interface, name) + if rulekey not in listeners : + if not self._direct_connect : + self.connection.bus_add_match(_signal_rule(path, fallback, interface, name)) + #end if + listeners[rulekey] = [] + #end if + listeners[rulekey].append(func) + #end listen_signal + + def unlisten_signal(self, path, fallback, interface, name, func) : + "for client-side use; unregisters a previously-registered callback" \ + " which would have been invoked when a signal is received for the" \ + " specified path, interface and name." + entry = self._get_dispatch_node(path, False, False) + if entry != None : + signal_listeners = entry.signal_listeners + rulekey = _signal_key(fallback, interface, name) + if rulekey in signal_listeners : + listeners = signal_listeners[rulekey] + try : + listeners.pop(listeners.index(func)) + except ValueError : + pass + #end try + if len(listeners) == 0 : + if not self._direct_connect : + ignore = dbus.Error.init() + self.connection.bus_remove_match \ + ( + _signal_rule(path, fallback, interface, name), + ignore + ) + #end if + del signal_listeners[rulekey] + # as a note to myself that I will need to call bus_add_match + # if a new listener is added + #end if + #end if + self._trim_dispatch(False) + #end if + #end unlisten_signal + + def listen_propchanged(self, path, fallback, interface, func) : + "special case of Connection.listen_signal specifically for listening" \ + " for properties-changed signals. 
The interface is ignored for now;" \ + " your listener will have to check for matches on this itself." + self.listen_signal \ + ( + path = path, + fallback = fallback, + interface = DBUS.INTERFACE_PROPERTIES, + name = "PropertiesChanged", + func = func, + ) + #end listen_propchanged + + def unlisten_propchanged(self, path, fallback, interface, func) : + "special case of Connection.unlisten_signal specifically for listening" \ + " for properties-changed signals. The interface is ignored for now;" \ + " your listener has to check for matches on this itself." + self.unlisten_signal \ + ( + path = path, + fallback = fallback, + interface = DBUS.INTERFACE_PROPERTIES, + name = "PropertiesChanged", + func = func, + ) + #end unlisten_propchanged + + def listen_objects_added(self, func) : + self.listen_signal \ + ( + path = "/", + fallback = True, + interface = DBUSX.INTERFACE_OBJECT_MANAGER, + name = "InterfacesAdded", + func = func, + ) + #end listen_objects_added + + def unlisten_objects_added(self, func) : + self.unlisten_signal \ + ( + path = "/", + fallback = True, + interface = DBUSX.INTERFACE_OBJECT_MANAGER, + name = "InterfacesAdded", + func = func, + ) + #end unlisten_objects_added + + def listen_objects_removed(self, func) : + self.listen_signal \ + ( + path = "/", + fallback = True, + interface = DBUSX.INTERFACE_OBJECT_MANAGER, + name = "InterfacesRemoved", + func = func, + ) + #end listen_objects_removed + + def unlisten_objects_removed(self, func) : + self.unlisten_signal \ + ( + path = "/", + fallback = True, + interface = DBUSX.INTERFACE_OBJECT_MANAGER, + name = "InterfacesRemoved", + func = func, + ) + #end unlisten_objects_removed + + def get_dispatch_interface(self, path, interface_name) : + "returns the appropriate instance of a previously-registered interface" \ + " class for handling calls to the specified interface name for the" \ + " specified object path, or None if no such." 
+ fallback = None # to begin with + level = self._server_dispatch + if level != None : + levels = iter(dbus.split_path(path)) + while True : + component = next(levels, None) + if ( + interface_name in level.interfaces + and + (level.interfaces[interface_name].fallback or component == None) + ) : + iface = level.interfaces[interface_name].interface + else : + iface = fallback + #end if + if ( + component == None + # reached bottom of path + or + component not in level.children + # no handlers to be found further down path + ) : + break + #end if + fallback = iface + level = level.children[component] + # search another step down the path + #end while + else : + iface = None + #end if + return \ + iface + #end get_dispatch_interface + + def _get_iface_entry(self, path, interface, name, namekind) : + iface = self.get_dispatch_interface(path, interface) + if iface == None : + raise TypeError \ + ( + "no suitable interface %s for object %s" % (interface, dbus.unsplit_path(path)) + ) + #end if + iface_type = type(iface) + if iface_type._interface_kind == (INTERFACE.SERVER, INTERFACE.CLIENT)[namekind == "signal"] : + raise TypeError \ + ( + "cannot send %s call from %s side" + % + (("method", "server"), ("signal", "client"))[namekind == "signal"] + ) + #end if + lookup = getattr \ + ( + iface_type, + { + "method" : "_interface_methods", + "signal" : "_interface_signals", + "property" : "_interface_props", + }[namekind] + ) + if name not in lookup : + raise KeyError \ + ( + "name “%s” is not a %s of interface “%s”" % (name, namekind, interface) + ) + #end if + return \ + lookup[name] + #end _get_iface_entry + + def _notify_props_changed(self) : + # callback that is queued on the event loop to actually send the + # properties-changed notification signals. 
+ if self._props_changed != None : + done = set() + now = self.loop.time() + for key in self._props_changed : + entry = self._props_changed[key] + path, interface = key + if entry["at"] <= now : + self.send_signal \ + ( + path = path, + interface = DBUS.INTERFACE_PROPERTIES, + name = "PropertiesChanged", + args = (interface, entry["changed"], sorted(entry["invalidated"])) + ) + done.add(key) + #end if + #end for + for key in done : + del self._props_changed[key] + #end for + if len(self._props_changed) == 0 : + # all done for now + self._props_changed = None # indicates I am not pending to be called any more + else : + # another notification waiting to be sent later + next_time = min(entry["at"] for entry in self._props_changed.values()) + self.loop.call_at(next_time, self._notify_props_changed) + #end if + #end if + #end _notify_props_changed + + def _get_all_my_props(self, message, path, interface_name) : + # utility wrapper that retrieves all property values for the specified + # object path defined by the specified interface. Returns two results: + # property values, and list of (propname, coroutine) tuples for async + # propgetters. Could raise ErrorReturn if a propgetter does so. 
+ dispatch = self.get_dispatch_interface(path, interface_name)
+ props = type(dispatch)._interface_props
+ propnames = iter(props.keys())
+ properror = None
+ propvalues = {}
+ to_await = []
+ while True :
+ propname = next(propnames, None)
+ if propname == None :
+ break
+ propentry = props[propname]
+ if "getter" in propentry :
+ getter = getattr(dispatch, propentry["getter"].__name__)
+ kwargs = {}
+ for keyword_keyword, value in \
+ (
+ ("name_keyword", lambda : propname),
+ ("connection_keyword", lambda : self.connection),
+ ("message_keyword", lambda : message),
+ ("path_keyword", lambda : path),
+ ("bus_keyword", lambda : self),
+ ) \
+ :
+ if getter._propgetter_info[keyword_keyword] != None :
+ value = value()
+ if value == None :
+ raise ValueError \
+ (
+ "getter for prop “%s” expects a %s arg but"
+ " no value supplied for that"
+ %
+ (propname, keyword_keyword)
+ )
+ #end if
+ kwargs[getter._propgetter_info[keyword_keyword]] = value
+ #end if
+ #end for
+ propvalue = getter(**kwargs)
+ # could raise ErrorReturn
+ if asyncio.iscoroutine(propvalue) :
+ if self.loop == None :
+ raise TypeError \
+ (
+ "not expecting getter for prop “%s” to be coroutine" % propname
+ )
+ #end if
+ to_await.append((propname, propvalue))
+ #end if
+ propvalues[propname] = (propentry["type"], propvalue)
+ #end if
+ #end for
+ return \
+ propvalues, to_await
+ #end _get_all_my_props
+
+ def prop_changed(self, path, interface, propname, proptype, propvalue) :
+ "indicates that a signal should be sent notifying of a change to the specified" \
+ " property of the specified object path in the specified interface. 
propvalue" \ + " is either the new value to be included in the signal, or None to indicate" \ + " that the property has merely become invalidated, and its new value needs" \ + " to be obtained explicitly.\n" \ + "\n" \ + "If there is an event loop attached, then multiple calls to this with different" \ + " properties on the same path and interface can be batched up into a single" \ + " signal notification." + assert (proptype != None) == (propvalue != None), \ + "either specify both of proptype and propvalue, or neither" + if self.loop != None : + queue_task = False + if self._props_changed == None : + self._props_changed = {} + queue_task = True + #end if + key = (dbus.unsplit_path(path), interface) + if key not in self._props_changed : + self._props_changed[key] = \ + { + "at" : self.loop.time() + self.notify_delay, + "changed" : {}, + "invalidated" : set(), + } + #end if + if propvalue != None : + self._props_changed[key]["changed"][propname] = (proptype, propvalue) + else : + self._props_changed[key]["invalidated"].add(propname) + #end if + if queue_task : + if self.notify_delay != 0 : + self.loop.call_later(self.notify_delay, self._notify_props_changed) + else : + self.loop.call_soon(self._notify_props_changed) + #end if + #end if + else : + # cannot batch them up--send message immediately + changed = {} + invalidated = [] + if propvalue != None : + changed[propname] = (proptype, propvalue) + else : + invalidated.append(propname) + #end if + self.send_signal \ + ( + path = path, + interface = DBUS.INTERFACE_PROPERTIES, + name = "PropertiesChanged", + args = (interface, changed, invalidated) + ) + #end if + #end prop_changed + + def _notify_objects_added(self) : + # callback that is queued on the event loop to actually send the + # objects-added notification signals. 
+ if self._objects_added != None : + notify_again = None + if self.loop != None : + now = self.loop.time() + else : + now = None + #end if + paths_to_delete = set() + for path in sorted(self._objects_added.keys()) : + entry = self._objects_added[path] + added = {} + for interface, iface_entry in entry.items() : + when = iface_entry["at"] + if when == None or when <= now : + added[interface] = iface_entry["props"] + else : + if notify_again == None : + notify_again = when + else : + notify_again = min(notify_again, when) + #end if + #end if + #end for + if len(added) != 0 : + self.send_signal \ + ( + path = path, + interface = DBUSX.INTERFACE_OBJECT_MANAGER, + name = "InterfacesAdded", + args = (path, added) + ) + for interface in added : + del entry[interface] + #end for + #end if + if len(entry) == 0 : + paths_to_delete.add(path) + #end if + #end for + for path in paths_to_delete : + del self._objects_added[path] + #end for + if len(self._objects_added) == 0 : + self._objects_added = None + #end if + if notify_again != None : + self.loop.call_later(notify_again - now, self._notify_objects_added) + #end if + #end if + #end _notify_objects_added + + def _notify_objects_removed(self) : + # callback that is queued on the event loop to actually send the + # objects-removed notification signals. 
+ if self._objects_removed != None : + notify_again = None + if self.loop != None : + now = self.loop.time() + else : + now = None + #end if + paths_to_delete = set() + for path in sorted(self._objects_removed.keys()) : + entry = self._objects_removed[path] + removed = set() + for interface in entry : + when = entry[interface]["at"] + if when == None or when <= now : + removed.add(interface) + else : + if notify_again == None : + notify_again = when + else : + notify_again = min(notify_again, when) + #end if + #end if + #end for + if len(removed) != 0 : + self.send_signal \ + ( + path = path, + interface = DBUSX.INTERFACE_OBJECT_MANAGER, + name = "InterfacesRemoved", + args = (path, sorted(removed)) + ) + for interface in removed : + del entry[interface] + #end for + #end if + if len(entry) == 0 : + paths_to_delete.add(path) + #end if + #end for + for path in paths_to_delete : + del self._objects_removed[path] + #end for + if len(self._objects_removed) == 0 : + self._objects_removed = None + #end if + if notify_again != None : + self.loop.call_later(notify_again - now, self._notify_objects_removed) + #end if + #end if + #end _notify_objects_removed + + def find_interfaces_for_object(self, path) : + "returns a dict of interfaces, keyed by name, applicable to the" \ + " given object path." + level = self._server_dispatch + result = {} + if level != None : + levels = iter(dbus.split_path(path)) + while True : + component = next(levels, None) + for interface_name, interface in level.interfaces.items() : + if component == None or interface.fallback : + result[interface_name] = interface.interface + # Note that a fallback entry might be replaced + # by a more specific one further down the path. 
+ #end if + #end for + if ( + component == None + # reached bottom of path + or + component not in level.children + # no handlers to be found further down path + ) : + break + #end if + level = level.children[component] + # search another step down the path + #end while + #end if + return \ + result + #end find_interfaces_for_object + + def object_added(self, path, interfaces_and_props = None) : + "Call this to send an ObjectManager notification about the addition of" \ + " the specified interfaces and property values to the specified object" \ + " path. The ObjectManager interface must already have been registered on" \ + " this Connection, by calling" \ + " «conn».register_additional_standard(managed_objects = True)." + + added_entry = None + to_await = [] + queue_task = False + notify_when = None + + def queue_notify(deferred) : + if queue_task : + if deferred : + delay = notify_when - self.loop.time() + else : + delay = self.notify_delay + #end if + if delay > 0 : + self.loop.call_later(delay, self._notify_objects_added) + else : + self.loop.call_soon(self._notify_objects_added) + #end if + #end if + #end queue_notify + + async def await_propvalues() : + nonlocal queue_task + for path, interface_name, propname, fute in to_await : + propvalue = await fute # don’t trap ErrorReturn + propvalues = added_entry[interface_name]["props"] + propvalues[propname] = (propvalues[propname][0], propvalue) + #end for + if self._objects_added == None : # might have happened in meantime + self._objects_added = {} + queue_task = True + #end if + self._objects_added[path] = added_entry # all prop values now complete + queue_notify(True) + #end await_propvalues + + #begin object_added + path = dbus.unsplit_path(path) + if self._managed_objects == None : + raise RuntimeError("ObjectManager interface needs to be registered on this Connection") + #end if + if interfaces_and_props == None : + # get all applicable interface names, props will be retrieved below + intfs = 
self.find_interfaces_for_object(path) + interfaces_and_props = dict \ + ( + (iface_name, None) + for iface_name in intfs + if len(intfs[iface_name]._interface_props) != 0 + ) + #end if + if path in self._managed_objects : + obj_entry = self._managed_objects[path] + else : + obj_entry = set() + self._managed_objects[path] = obj_entry + #end if + if self._objects_added == None : + self._objects_added = {} + queue_task = True + #end if + if path in self._objects_added : + added_entry = self._objects_added[path] + else : + added_entry = {} + #end if + if self.loop != None : + notify_when = self.loop.time() + self.notify_delay + else : + notify_when = None + #end if + for interface, props in interfaces_and_props.items() : + if props != None : + added_props = {} + for propname, propvalue in props.items() : + if not isinstance(propvalue, (list, tuple)) or len(propvalue) != 2 : + raise TypeError \ + ( + "value for property “%s” must be (type, value) pair" % propname + ) + #end if + proptype = dbus.parse_single_signature(propvalue[0]) + propvalue = proptype.validate(propvalue[1]) + proptype = dbus.unparse_signature(proptype) + added_props[propname] = (proptype, propvalue) + #end for + else : + added_props, await_props = self._get_all_my_props(None, path, interface) + # propgetters should not expect a message arg + for propname, propvalue in await_props : + to_await.append((path, interface, propname, propvalue)) + #end for + #end if + added_entry[interface] = \ + { + "at" : notify_when, + "props" : added_props, + } + obj_entry.add(interface) + #end for + if len(to_await) == 0 : + self._objects_added[path] = added_entry # all prop values complete + #end if + if self.loop != None : + if len(to_await) != 0 : + self.create_task(await_propvalues()) + else : + queue_notify(False) + #end if + else : + # cannot queue, notify immediately + self._notify_objects_added() + #end if + #end object_added + + def object_removed(self, path, interfaces = None) : + "Call this to send an 
ObjectManager notification about the removal of the" \
+ " specified set/sequence of interfaces from the specified object path. The" \
+ " ObjectManager interface must already have been registered on this Connection," \
+ " by calling «conn».register_additional_standard(managed_objects = True)."
+ path = dbus.unsplit_path(path)
+ if self._managed_objects == None :
+ raise RuntimeError("ObjectManager interface not registered on this Connection")
+ #end if
+ queue_task = False
+ if self._objects_removed == None :
+ self._objects_removed = {}
+ queue_task = True
+ #end if
+ if self._objects_added != None :
+ added_entry = self._objects_added.get(path)
+ else :
+ added_entry = None
+ #end if
+ obj_entry = self._managed_objects[path]
+ if path in self._objects_removed :
+ removed_entry = self._objects_removed[path]
+ else :
+ removed_entry = {}
+ self._objects_removed[path] = removed_entry
+ #end if
+ if self.loop != None :
+ when = self.loop.time() + self.notify_delay
+ else :
+ when = None
+ #end if
+ if interfaces == None :
+ intfs = self.find_interfaces_for_object(path)
+ interfaces = set \
+ (
+ iface_name
+ for iface_name in intfs
+ if len(intfs[iface_name]._interface_props) != 0
+ )
+ #end if
+ for interface in interfaces :
+ if added_entry != None and interface in added_entry :
+ # object-added notification was never sent, just cancel
+ # it and don’t send object-removed notification
+ del added_entry[interface]
+ else :
+ removed_entry[interface] = {"at" : when}
+ #end if
+ if self._props_changed != None :
+ props_key = (path, interface)
+ if self._props_changed != None and props_key in self._props_changed :
+ # cancel pending properties-changed notification
+ del self._props_changed[props_key]
+ if len(self._props_changed) == 0 :
+ self._props_changed = None
+ #end if
+ #end if
+ #end if
+ obj_entry.remove(interface)
+ #end for
+ if len(obj_entry) == 0 :
+ del self._managed_objects[path]
+ #end if
+ if added_entry != None and len(added_entry) == 0 :
+ del 
self._objects_added[path] + if len(self._objects_added) == 0 : + self._objects_added = None + #end if + #end if + if len(removed_entry) != 0 : + if self.loop != None : + if queue_task : + if self.notify_delay != 0 : + self.loop.call_later(self.notify_delay, self._notify_objects_removed) + else : + self.loop.call_soon(self._notify_objects_removed) + #end if + #end if + else : + # cannot queue, notify immediately + self._notify_objects_removed() + #end if + #end if + #end object_removed + + def send_signal(self, *, path, interface, name, args) : + "intended for server-side use: sends a signal with the specified" \ + " interface and name from the specified object path. There must" \ + " already be a registered interface instance with that name which" \ + " defines that signal for that path." + call_info = self._get_iface_entry(path, interface, name, "signal")._signal_info + message = dbus.Message.new_signal \ + ( + path = dbus.unsplit_path(path), + iface = interface, + name = name + ) + message.append_objects(call_info["in_signature"], *args) + self.connection.send(message) + #end send_signal + + def introspect(self, destination, path, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "sends an Introspect request to the specified bus name and object path," \ + " and returns the resulting parsed Introspection structure." + message = dbus.Message.new_method_call \ + ( + destination = destination, + path = dbus.unsplit_path(path), + iface = DBUS.INTERFACE_INTROSPECTABLE, + method = "Introspect" + ) + reply = self.connection.send_with_reply_and_block(message, timeout) + return \ + Introspection.parse(reply.expect_return_objects("s")[0]) + #end introspect + + async def introspect_async(self, destination, path, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "sends an Introspect request to the specified bus name and object path," \ + " and returns the resulting parsed Introspection structure." 
+ assert self.loop != None, "no event loop to attach coroutine to" + message = dbus.Message.new_method_call \ + ( + destination = destination, + path = dbus.unsplit_path(path), + iface = DBUS.INTERFACE_INTROSPECTABLE, + method = "Introspect" + ) + reply = await self.connection.send_await_reply(message, timeout) + return \ + Introspection.parse(reply.expect_return_objects("s")[0]) + #end introspect_async + + def __getitem__(self, bus_name) : + "for client-side use; lets you obtain references to bus peers by" \ + " looking up their names in this Connection object as though it" \ + " were a mapping." + return \ + BusPeer(self, bus_name) + #end __getitem__ + + def get_proxy_object(self, bus_name, path) : + "for client-side use; returns a BusPeer.Object instance for communicating" \ + " with a specified server object. You can then call get_interface" \ + " on the result to create an interface object that can be used to" \ + " call any method defined on the server by that interface." + return \ + BusPeer(self, bus_name)[path] + #end get_proxy_object + + def get_proxy_interface(self, destination, path, interface, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "sends an Introspect request to the specified bus name and object path" \ + " (if interface is not an Interface object or the name of one of the standard" \ + " interfaces), and generates a client-side proxy interface for that interface." 
+ if isinstance(interface, Introspection.Interface) : + definition = interface + interface = definition.name + elif isinstance(interface, str) : + if interface in dbus.standard_interfaces : + definition = dbus.standard_interfaces[interface] + else : + introspection = self.introspect(destination, path, timeout) + interfaces = introspection.interfaces_by_name + if interface not in interfaces : + raise dbus.DBusError \ + ( + DBUS.ERROR_UNKNOWN_INTERFACE, + "peer “%s” object “%s” does not understand interface “%s”" + % + (destination, path, interface) + ) + #end if + definition = interfaces[interface] + #end if + else : + raise TypeError("interface must be an Interface or name of one") + #end if + return \ + def_proxy_interface \ + ( + name = interface, + kind = INTERFACE.CLIENT, + introspected = definition, + is_async = False + ) + #end get_proxy_interface + + async def get_proxy_interface_async(self, destination, path, interface, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "sends an Introspect request to the specified bus name and object path" \ + " (if interface is not an Interface object or the name of one of the standard" \ + " interfaces), and generates a client-side proxy interface for that interface." 
+ assert self.loop != None, "no event loop to attach coroutine to" + if isinstance(interface, Introspection.Interface) : + definition = interface + interface = definition.name + elif isinstance(interface, str) : + if interface in dbus.standard_interfaces : + definition = dbus.standard_interfaces[interface] + else : + introspection = await self.introspect_async(destination, path, timeout) + interfaces = introspection.interfaces_by_name + if interface not in interfaces : + raise dbus.DBusError \ + ( + DBUS.ERROR_UNKNOWN_INTERFACE, + "peer “%s” object “%s” does not understand interface “%s”" + % + (destination, path, interface) + ) + #end if + definition = interfaces[interface] + #end if + else : + raise TypeError("interface must be an Interface or name of one") + #end if + return \ + def_proxy_interface \ + ( + name = interface, + kind = INTERFACE.CLIENT, + introspected = definition, + is_async = True + ) + #end get_proxy_interface_async + +#end Connection + +def session_bus(**kwargs) : + "returns a Connection object for the current D-Bus session bus." + return \ + Connection(dbus.Connection.bus_get(DBUS.BUS_SESSION, private = False), False) \ + .register_additional_standard(**kwargs) +#end session_bus + +def system_bus(**kwargs) : + "returns a Connection object for the D-Bus system bus." + return \ + Connection(dbus.Connection.bus_get(DBUS.BUS_SYSTEM, private = False), False) \ + .register_additional_standard(**kwargs) +#end system_bus + +def starter_bus(**kwargs) : + "returns a Connection object for the D-Bus starter bus." + return \ + Connection(dbus.Connection.bus_get(DBUS.BUS_STARTER, private = False), False) \ + .register_additional_standard(**kwargs) +#end starter_bus + +async def session_bus_async(loop = None, **kwargs) : + "returns a Connection object for the current D-Bus session bus." 
+ return \ + Connection \ + ( + await dbus.Connection.bus_get_async(DBUS.BUS_SESSION, private = False, loop = loop), + False + ) \ + .register_additional_standard(**kwargs) +#end session_bus_async + +async def system_bus_async(loop = None, **kwargs) : + "returns a Connection object for the D-Bus system bus." + return \ + Connection \ + ( + await dbus.Connection.bus_get_async(DBUS.BUS_SYSTEM, private = False, loop = loop), + False + ) \ + .register_additional_standard(**kwargs) +#end system_bus_async + +async def starter_bus_async(loop = None, **kwargs) : + "returns a Connection object for the D-Bus starter bus." + return \ + Connection \ + ( + await dbus.Connection.bus_get_async(DBUS.BUS_STARTER, private = False, loop = loop), + False + ) \ + .register_additional_standard(**kwargs) +#end starter_bus_async + +def connect_server(address, private, **kwargs) : + "opens a connection to a server at the specified network address and" \ + " returns a Connection object for the connection." + return \ + Connection(dbus.Connection.open(address, private = private), True) \ + .register_additional_standard(**kwargs) +#end connect_server + +async def connect_server_async(address, private, loop = None, timeout = DBUS.TIMEOUT_INFINITE, **kwargs) : + "opens a connection to a server at the specified network address and" \ + " returns a Connection object for the connection." + return \ + Connection \ + ( + await dbus.Connection.open_async(address, private = private, loop = loop, timeout = timeout), + True + ) \ + .register_additional_standard(**kwargs) +#end connect_server_async + +class Server : + "listens for connections on a particular socket address, separate from" \ + " the D-Bus daemon. Requires asyncio." 
+ + __slots__ = ("server",) + + def __init__(self, address, loop = None) : + self.server = dbus.Server.listen(address) + self.server.attach_asyncio(loop) + #end __init__ + + def __del__(self) : + self.server.disconnect() + #end __del__ + + async def await_new_connection(self, timeout = DBUS.TIMEOUT_INFINITE) : + "waits for a new connection attempt and returns a wrapping Connection object." \ + " If no connection appears within the specified timeout, returns None." + conn = await self.server.await_new_connection(timeout) + if conn != None : + result = Connection(conn, True) + else : + result = None + #end if + return \ + result + #end await_new_connection + +#end Server + +#+ +# Proxy interface objects -- for client-side use +#- + +class BusPeer : + "Intended for client-side use: a proxy for a D-Bus peer. These offer two" \ + " different ways to traverse the bus-name/path/interface hierarchy. Start" \ + " by obtaining a BusPeer object from a Connection:\n" \ + "\n" \ + " peer = conn[«bus_name»]\n" \ + "\n" \ + "Now you can either reference a proxy object by specifying its path:\n" \ + "\n" \ + " obj = peer[«object_path»]\n" \ + "\n" \ + "from which you can get a proxy interface thus:\n" \ + "\n" \ + " iface = obj.get_interface(«iface_name»)\n" \ + "\n" \ + "Or you can get a root proxy interface from the bus peer by" \ + " introspecting some arbitrary (but suitable) reference path:\n" \ + "\n" \ + " iface_root = peer.get_interface(«reference_path», «iface_name»)\n" \ + "\n" \ + "from which you can obtain a proxy interface for a specific object thus:\n" \ + "\n" \ + " iface = iface_root[«object_path»]\n" \ + "\n" \ + "Whichever way you do it, you can now make method calls on the iface object" \ + " which will automatically be communicated as method calls to the peer via" \ + " the D-Bus." + + __slots__ = ("conn", "bus_name") + + class RootProxy : + "abstract base class for identifying root proxy classes." 
+ pass + #end RootProxy + + class Object : + "identifies a proxy object by a bus, a bus name and a path." + + __slots__ = ("conn", "peer", "bus_name", "path") + + class ProxyInterface : + "abstract base class for identifying proxy interface classes." + pass + #end ProxyInterface + + def __init__(self, conn, bus_name, path) : + if not isinstance(conn, Connection) : + raise TypeError("conn must be a Connection") + #end if + dbus.validate_bus_name(bus_name) + self.conn = conn + self.bus_name = bus_name + self.path = path + #end __init__ + + def introspect(self, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "sends an Introspect request to the specified bus name and object path," \ + " and returns the resulting parsed Introspection structure." + return \ + self.conn.introspect \ + ( + destination = self.bus_name, + path = self.path, + timeout = timeout, + ) + #end introspect + + async def introspect_async(self, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "sends an Introspect request to the specified bus name and object path," \ + " and returns the resulting parsed Introspection structure." + return await \ + self.conn.introspect_async \ + ( + destination = self.bus_name, + path = self.path, + timeout = timeout, + ) + #end introspect_async + + def get_interface(self, interface, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "sends an Introspect request to the specified bus name and object path" \ + " (if interface is not one of the standard interfaces), and generates" \ + " a client-side proxy interface for the interface with the specified name." 
+ return \ + self.conn.get_proxy_interface \ + ( + destination = self.bus_name, + path = self.path, + interface = interface, + timeout = timeout + )( + connection = self.conn.connection, + dest = self.bus_name, + timeout = timeout, + )[self.path] + #end get_interface + + async def get_async_interface(self, interface, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "sends an Introspect request to the specified bus name and object path" \ + " (if interface is not one of the standard interfaces), and generates" \ + " a client-side proxy interface for the interface with the specified name." + return \ + (await self.conn.get_proxy_interface_async + ( + destination = self.bus_name, + path = self.path, + interface = interface, + timeout = timeout + ))( + connection = self.conn.connection, + dest = self.bus_name, + timeout = timeout, + )[self.path] + #end get_async_interface + + #end Object + + def __init__(self, conn, bus_name) : + self.conn = conn + self.bus_name = bus_name + #end __init__ + + def __getitem__(self, path) : + "lets you obtain references to objects implemented by this bus peer" \ + " by using their paths as lookup keys in a mapping. Of course, there" \ + " is no guarantee such an object actually exists within the peer." + return \ + type(self).Object(self.conn, self.bus_name, path) + #end __getitem__ + + def introspect(self, path, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "sends an Introspect request to the specified bus name and object path," \ + " and returns the resulting parsed Introspection structure." + return \ + self.conn.introspect \ + ( + destination = self.bus_name, + path = path, + timeout = timeout, + ) + #end introspect + + async def introspect_async(self, path, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "sends an Introspect request to the specified bus name and object path," \ + " and returns the resulting parsed Introspection structure." 
+ return await \ + self.conn.introspect_async \ + ( + destination = self.bus_name, + path = path, + timeout = timeout, + ) + #end introspect_async + + def get_interface(self, path, interface, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "sends an Introspect request to the specified bus name and object path" \ + " (if interface is not one of the standard interfaces), and generates" \ + " a client-side proxy interface for the interface with the specified name." + return \ + self.conn.get_proxy_interface \ + ( + destination = self.bus_name, + path = path, + interface = interface, + timeout = timeout, + )( + connection = self.conn.connection, + dest = self.bus_name, + timeout = timeout, + ) + #end get_interface + + async def get_interface_async(self, path, interface, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + "sends an Introspect request to the specified bus name and object path" \ + " (if interface is not one of the standard interfaces), and generates" \ + " a client-side proxy interface for the interface with the specified name." + return \ + (await self.conn.get_proxy_interface_async + ( + destination = self.bus_name, + path = path, + interface = interface, + timeout = timeout, + ))( + connection = self.conn.connection, + dest = self.bus_name, + timeout = timeout, + ) + #end get_interface_async + +#end BusPeer + +#+ +# Interface-dispatch mechanism +#- + +class INTERFACE(enum.Enum) : + "what kind of @interface() is this:\n" \ + " * CLIENT -- client-side, for sending method calls to server and" \ + " receiving signals from server\n" \ + " * SERVER -- server-side, for receiving method calls from clients and" \ + " sending signals to clients\n" \ + " * CLIENT_AND_SERVER -- this side is both client and server." 
+ + CLIENT = 1 + SERVER = 2 + CLIENT_AND_SERVER = 3 # CLIENT | SERVER +#end INTERFACE + +def _send_method_return(connection, message, sig, args) : + reply = message.new_method_return() + reply.append_objects(sig, *args) + connection.send(reply) +#end _send_method_return + +def _message_interface_dispatch(connection, message, w_bus) : + # installed as message filter on a connection to handle dispatch + # to registered @interface() classes. + + bus = w_bus() + assert bus != None, "parent Connection has gone" + + def dispatch_signal(level, path) : + # Note I ignore handled/not handled status and pass the signal + # to all registered handlers. + if len(path) != 0 and path[0] in level.children : + # call lower-level (more-specific) signal handlers first, + # not that it’s important + dispatch_signal(level.children[path[0]], path[1:]) + #end if + signal_listeners = level.signal_listeners + name = message.member + for fallback in (False, True) : + # again, call more-specific (fallback = False) handlers first, + # not that it’s important + rulekey = _signal_key(fallback, message.interface, name) + if rulekey in signal_listeners and (len(path) == 0 or fallback) : + funcs = signal_listeners[rulekey] + for func in funcs : + try : + call_info = func._signal_info + try : + args = message.expect_objects(call_info["in_signature"]) + except (TypeError, ValueError) : + raise ErrorReturn \ + ( + name = DBUS.ERROR_INVALID_ARGS, + message = "message arguments do not match expected signature" + ) + #end try + kwargs = {} + for keyword_keyword, value in \ + ( + ("connection_keyword", lambda : connection), + ("message_keyword", lambda : message), + ("path_keyword", lambda : message.path), + ("bus_keyword", lambda : bus), + ) \ + : + if call_info[keyword_keyword] != None : + kwargs[call_info[keyword_keyword]] = value() + #end if + #end for + if call_info["args_keyword"] != None : + if call_info["arg_keys"] != None : + args = dict \ + ( + (key, value) + for key, value in 
zip(call_info["arg_keys"], args) + ) + if "args_constructor" in call_info : + args = call_info["args_constructor"](**args) + #end if + #end if + kwargs[call_info["args_keyword"]] = args + args = () + else : + if call_info["arg_keys"] != None : + for key, value in zip(call_info["arg_keys"], args) : + kwargs[key] = value + #end for + args = () + #end if + #end if + result = func(*args, **kwargs) + if asyncio.iscoroutine(result) : + async def await_result(coro) : + # just to gobble any ErrorReturn + try : + await coro + except ErrorReturn : + pass + #end try + #end await_result + bus.create_task(await_result(result)) + elif result != None : + raise ValueError \ + ( + "invalid result from signal handler: %s" % repr(result) + ) + #end if + except ErrorReturn : + pass + #end try + #end for + #end if + #end for + #end dispatch_signal + + def return_result_common(call_info, result) : + # handles list, tuple, dict or Error returned from method handler; + # packs into reply message and sends it off. 
+ if isinstance(result, dbus.Error) : + assert result.is_set, "unset Error object returned from handler" + reply = message.new_error(result.name, result.message) + connection.send(reply) + else : + sig = dbus.parse_signature(call_info["out_signature"]) + if isinstance(result, dict) and call_info["result_keys"] != None : + result = list(result[key] for key in call_info["result_keys"]) + # convert result items to list in right order + elif ( + "result_constructor" in call_info + and + isinstance(result, call_info["result_constructor"]) + ) : + result = list(result) + elif len(sig) == 0 and result == None : + # might as well allow method call to not bother returning an empty result + result = [] + elif not isinstance(result, (tuple, list)) : + raise ValueError("invalid handler result %s" % repr(result)) + #end if + _send_method_return \ + ( + connection = connection, + message = message, + sig = sig, + args = result + ) + #end if + #end return_result_common + +#begin _message_interface_dispatch + result = DBUS.HANDLER_RESULT_NOT_YET_HANDLED # to begin with + if message.type in (DBUS.MESSAGE_TYPE_METHOD_CALL, DBUS.MESSAGE_TYPE_SIGNAL) : + is_method = message.type == DBUS.MESSAGE_TYPE_METHOD_CALL + if not is_method and bus._client_dispatch != None : + dispatch_signal(bus._client_dispatch, dbus.split_path(message.path)) + #end if + iface = bus.get_dispatch_interface(message.path, message.interface) + if iface != None : + method_name = message.member + methods = (iface._interface_signals, iface._interface_methods)[is_method] + if ( + iface._interface_kind != (INTERFACE.SERVER, INTERFACE.CLIENT)[is_method] + and + method_name in methods + ) : + method = methods[method_name] + call_info = getattr(method, ("_signal_info", "_method_info")[is_method]) + if is_method or not call_info["stub"] : + to_return_result = None + try : + try : + args = message.expect_objects(call_info["in_signature"]) + except (TypeError, ValueError) : + raise ErrorReturn \ + ( + name = 
DBUS.ERROR_INVALID_ARGS, + message = "message arguments do not match expected signature" + ) + #end try + kwargs = {} + for keyword_keyword, value in \ + ( + ("connection_keyword", lambda : connection), + ("message_keyword", lambda : message), + ("path_keyword", lambda : message.path), + ("bus_keyword", lambda : bus), + ) \ + : + if call_info[keyword_keyword] != None : + kwargs[call_info[keyword_keyword]] = value() + #end if + #end for + if call_info["args_keyword"] != None : + if call_info["arg_keys"] != None : + args = dict \ + ( + (key, value) + for key, value in zip(call_info["arg_keys"], args) + ) + if "args_constructor" in call_info : + args = call_info["args_constructor"](**args) + #end if + #end if + kwargs[call_info["args_keyword"]] = args + args = () + else : + if call_info["arg_keys"] != None : + for key, value in zip(call_info["arg_keys"], args) : + kwargs[key] = value + #end for + args = () + #end if + #end if + if is_method : + # additional ways of returning method result + if call_info["result_keyword"] != None : + # construct a mutable result object that handler will update in place + to_return_result = [None] * len(call_info["out_signature"]) + if call_info["result_keys"] != None : + to_return_result = dict \ + ( + (key, val) + for key, val in zip(call_info["result_keys"], to_return_result) + ) + if "result_constructor" in call_info : + to_return_result = call_info["result_constructor"](**to_return_result) + #end if + #end if + kwargs[call_info["result_keyword"]] = to_return_result + elif call_info["set_result_keyword"] != None : + # caller wants to return result via callback + def set_result(the_result) : + "Call this to set the args for the reply message." 
+ nonlocal to_return_result + to_return_result = the_result + #end set_result + kwargs[call_info["set_result_keyword"]] = set_result + #end if + #end if + result = method(iface, *args, **kwargs) + except ErrorReturn as err : + result = err.as_error() + #end try + if result == None : + if to_return_result != None or is_method : + # method handler possibly used set_result mechanism + return_result_common(call_info, to_return_result) + #end if + result = DBUS.HANDLER_RESULT_HANDLED + elif asyncio.iscoroutine(result) : + assert bus.loop != None, "no event loop to attach coroutine to" + if is_method : + # wait for result + async def await_result(coro) : + try : + result = await coro + except ErrorReturn as err : + result = err.as_error() + #end try + if result == None and to_return_result != None : + # method handler used set_result mechanism + result = to_return_result + #end if + return_result_common(call_info, result) + #end await_result + bus.create_task(await_result(result)) + else : + bus.create_task(result) + #end if + result = DBUS.HANDLER_RESULT_HANDLED + elif isinstance(result, bool) : + # slightly tricky: interpret True as handled, False as not handled, + # even though DBUS.HANDLER_RESULT_HANDLED is zero and + # DBUS.HANDLER_RESULT_NOT_YET_HANDLED is nonzero. + result = \ + (DBUS.HANDLER_RESULT_NOT_YET_HANDLED, DBUS.HANDLER_RESULT_HANDLED)[result] + elif ( + result + in + ( + DBUS.HANDLER_RESULT_HANDLED, + DBUS.HANDLER_RESULT_NOT_YET_HANDLED, + DBUS.HANDLER_RESULT_NEED_MEMORY, + ) + ) : + pass + else : + return_result_common(call_info, result) + result = DBUS.HANDLER_RESULT_HANDLED + #end if + #end if + #end if + #end if + #end if + dispatch_signal = None # remove reference circularity + return \ + result +#end _message_interface_dispatch + +def def_attr_class(name, attrs) : + "defines a class with read/write attributes with names from the sequence attrs." 
\ + " Objects of this class can be coerced to lists or tuples, and attributes can" \ + " also be accessed by index, like a list." + + class result : + __slots__ = tuple(attrs) + + def __init__(self, **kwargs) : + for name in type(self).__slots__ : + setattr(self, name, None) + #end for + for name in kwargs : + setattr(self, name, kwargs[name]) + #end for + #end __init__ + + def __repr__(self) : + return \ + ( + "%s(%s)" + % + ( + type(self).__name__, + ", ".join + ( + "%s = %s" + % + (name, repr(getattr(self, name))) + for name in type(self).__slots__ + ), + ) + ) + #end __repr__ + + def __len__(self) : + return \ + len(type(self).__slots__) + #end __len__ + + def __getitem__(self, i) : + return \ + getattr(self, type(self).__slots__[i]) + #end __getitem__ + + def __setitem__(self, i, val) : + setattr(self, type(self).__slots__[i], val) + #end __setitem__ + + #end class + +#begin def_attr_class + result.__name__ = name + return \ + result +#end def_attr_class + +def interface \ + ( + kind, *, + name, + property_change_notification = Introspection.PROP_CHANGE_NOTIFICATION.NEW_VALUE, + deprecated = False + ) : + "class decorator creator for defining interface classes. “kind” is an" \ + " INTERFACE.xxx value indicating whether this is for use on the client side" \ + " (send methods, receive signals), server side (receive methods, send signals)" \ + " or both. “name” (required) is the interface name that will be known to D-Bus." \ + " Interface methods and signals should be invocable as\n" \ + "\n" \ + " method(self, ...)\n" \ + "\n" \ + " and definitions should be prefixed with calls to the “@method()” or “@signal()”" \ + " decorator to identify them. The return result can be a DBUS.HANDLER_RESULT_xxx" \ + " code, or None (equivalent to DBUS.HANDLER_RESULT_HANDLED), or a coroutine" \ + " to queue for execution after indicating that the message has been handled. 
Note" \ + " that if you declare the method with “async def”, then the return result seen" \ + " will be such a coroutine." + + if not isinstance(kind, INTERFACE) : + raise TypeError("kind must be an INTERFACE enum value") + #end if + if not isinstance(name, str) : + raise ValueError("name is required") + #end if + dbus.validate_interface(name) + + def decorate(celf) : + if not isinstance(celf, type) : + raise TypeError("only apply decorator to classes.") + #end if + if not isinstance(property_change_notification, Introspection.PROP_CHANGE_NOTIFICATION) : + raise TypeError \ + ( + "property_change_notification must be an Introspection." + "PROP_CHANGE_NOTIFICATION value" + ) + #end if + celf._interface_kind = kind + celf._interface_name = name + celf._interface_property_change_notification = property_change_notification + celf._interface_deprecated = deprecated + celf._interface_methods = \ + dict \ + ( + (f._method_info["name"], f) + for fname in dir(celf) + for f in (getattr(celf, fname),) + if hasattr(f, "_method_info") + ) + celf._interface_signals = \ + dict \ + ( + (f._signal_info["name"], f) + for fname in dir(celf) + for f in (getattr(celf, fname),) + if hasattr(f, "_signal_info") + ) + props = {} + for info_type, meth_type in \ + ( + ("_propgetter_info", "getter"), # do first so setter can check change_notification + ("_propsetter_info", "setter"), + ) \ + : + for fname in dir(celf) : + func = getattr(celf, fname) + if hasattr(func, info_type) : + propinfo = getattr(func, info_type) + propname = propinfo["name"] + if propname not in props : + props[propname] = {"type" : None} + #end if + propentry = props[propname] + if propinfo["type"] != None : + if propentry["type"] != None : + if propentry["type"] != propinfo["type"] : + raise ValueError \ + ( + "disagreement on type for property “%s” between" + " getter and setter: “%s” versus “%s”" + % + ( + propname, + dbus.unparse_signature(propentry["type"]), + dbus.unparse_signature(propinfo["type"]), + ) + ) + 
#end if + else : + propentry["type"] = propinfo["type"] + #end if + #end if + if ( + meth_type == "setter" + and + "getter" in propentry + and + propentry["change_notification"] + == + Introspection.PROP_CHANGE_NOTIFICATION.CONST + ) : + raise ValueError \ + ( + "mustn’t specify @propsetter() for a" + " PROP_CHANGE_NOTIFICATION.CONST property" + ) + #end if + if meth_type == "getter" : + if propinfo["change_notification"] != None : + propentry["change_notification"] = propinfo["change_notification"] + else : + propentry["change_notification"] = property_change_notification + #end if + #end if + propentry[meth_type] = func + #end if + #end for + #end for + celf._interface_props = props + return \ + celf + #end decorate + +#begin interface + return \ + decorate +#end interface + +def is_interface(cłass) : + "is cłass defined as an interface class." + return \ + isinstance(cłass, type) and hasattr(cłass, "_interface_name") +#end is_interface + +def is_interface_instance(obj) : + "is obj an instance of an interface class." + return \ + is_interface(type(obj)) +#end is_interface_instance + +def method \ + (*, + name = None, + in_signature, + out_signature, + args_keyword = None, + arg_keys = None, + arg_attrs = None, + result_keyword = None, + result_keys = None, + result_attrs = None, + connection_keyword = None, + message_keyword = None, + path_keyword = None, + bus_keyword = None, + set_result_keyword = None, + reply = True, + deprecated = False + ) : + "Put a call to this function as a decorator for each method of an @interface()" \ + " class that is to be registered as a method of the interface." \ + " “name” is the name of the method as specified in the D-Bus message; if omitted," \ + " it defaults to the name of the function.\n" \ + "\n" \ + "This is really only useful on the server side. 
On the client side, omit the" \ + " method definition, and even leave out the interface definition and registration" \ + " altogether, unless you want to receive signals from the server; instead, use" \ + " Connection.get_proxy_object() to send method calls to the server." + + in_signature = dbus.parse_signature(in_signature) + out_signature = dbus.parse_signature(out_signature) + for cond, msg in \ + ( + ( + (result_keyword != None or result_keys != None or result_attrs != None) + and + not reply, + "result_keyword, result_keys and result_attrs are" + " meaningless if method does not reply", + ), + (arg_keys != None and arg_attrs != None, "specify arg_keys or arg_attrs, not both"), + (arg_attrs != None and args_keyword == None, "need args_keyword with arg_attrs"), + ( + arg_keys != None and len(arg_keys) != len(in_signature), + "number of arg_keys should match number of items in in_signature", + ), + ( + arg_attrs != None and len(arg_attrs) != len(in_signature), + "number of arg_attrs should match number of items in in_signature", + ), + ( + set_result_keyword != None and result_keyword != None, + "specify set_result_keyword or result_keyword, not both", + ), + ( + result_keys != None and result_attrs != None, + "specify result_keys or result_attrs, not both", + ), + ( + result_attrs != None and result_keyword == None, + "need result_keyword with result_attrs", + ), + ( + result_keys != None and len(result_keys) != len(out_signature), + "number of result_keys should match number of items in out_signature", + ), + ( + result_attrs != None and len(result_attrs) != len(out_signature), + "number of result_attrs should match number of items in out_signature", + ), + ) \ + : + if cond : + raise ValueError(msg) + #end if + #end for + if arg_keys == None : + args_keys = arg_attrs + #end if + if result_keys == None : + result_keys = result_attrs + #end if + + def decorate(func) : + if not callable(func) : + raise TypeError("only apply decorator to callables.") + #end if + 
if name != None : + func_name = name + else : + func_name = func.__name__ + #end if + dbus.validate_member(func_name) + func._method_info = \ + { + "name" : func_name, + "in_signature" : in_signature, + "out_signature" : out_signature, + "args_keyword" : args_keyword, + "arg_keys" : arg_keys, + "result_keyword" : result_keyword, + "result_keys" : result_keys, + "connection_keyword" : connection_keyword, + "message_keyword" : message_keyword, + "path_keyword" : path_keyword, + "bus_keyword" : bus_keyword, + "set_result_keyword" : set_result_keyword, + "reply" : reply, + "deprecated" : deprecated, + } + if arg_attrs != None : + func._method_info["args_constructor"] = def_attr_class("%s_args" % func_name, arg_attrs) + #end if + if result_attrs != None : + func._method_info["result_constructor"] = \ + def_attr_class("%s_result" % func_name, result_attrs) + #end if + return \ + func + #end decorate + +#begin method + return \ + decorate +#end method + +def signal \ + (*, + name = None, + in_signature, + args_keyword = None, + arg_keys = None, + arg_attrs = None, + stub = False, + connection_keyword = None, + message_keyword = None, + path_keyword = None, + bus_keyword = None, + deprecated = False # can signals be deprecated? + ) : + "Put a call to this function as a decorator for each method of an @interface()" \ + " class that is to be registered as a signal of the interface." \ + " “name” is the name of the signal as specified in the D-Bus message; if omitted," \ + " it defaults to the name of the function.\n" \ + "\n" \ + "On the server side, the actual function need only be a dummy, since it is just" \ + " a placeholder for storing the information used by Connection.send_signal()." 
+ + in_signature = dbus.parse_signature(in_signature) + if arg_attrs != None and args_keyword == None : + raise ValueError("need args_keyword with arg_attrs") + #end if + if arg_keys != None and len(arg_keys) != len(in_signature) : + raise ValueError("number of arg_keys should match number of items in in_signature") + #end if + if arg_attrs != None and len(arg_attrs) != len(in_signature) : + raise ValueError("number of arg_attrs should match number of items in in_signature") + #end if + if arg_keys == None : + args_keys = arg_attrs + #end if + + def decorate(func) : + if not callable(func) : + raise TypeError("only apply decorator to callables.") + #end if + if name != None : + func_name = name + else : + func_name = func.__name__ + #end if + dbus.validate_member(func_name) + func._signal_info = \ + { + "name" : func_name, + "in_signature" : in_signature, + "args_keyword" : args_keyword, + "arg_keys" : arg_keys, + "stub" : stub, + "connection_keyword" : connection_keyword, + "message_keyword" : message_keyword, + "path_keyword" : path_keyword, + "bus_keyword" : bus_keyword, + "deprecated" : deprecated, + } + if arg_attrs != None : + func._signal_info["args_constructor"] = def_attr_class("%s_args" % func_name, arg_attrs) + #end if + return \ + func + #end decorate + +#begin signal + return \ + decorate +#end signal + +def def_signal_stub(**kwargs) : + "convenience routine for defining a signal stub function. Instead of\n" \ + "\n" \ + " @signal(«args»)\n" \ + " def stubfunc() : pass\n" \ + "\n" \ + "you can do\n" \ + "\n" \ + " stubfunc = def_signal_stub(«args»)\n" \ + "\n" \ + "passing the same «args» as you would to the @signal() decorator. But note" \ + " that the name arg is no longer optional." + + def stub() : + "This is just a stub, standing in for a signal definition in a" \ + " proxy interface class. It is not meant to be called." + # Lack of formal args should also stymie attempts to invoke as a method. 
+ raise \ + NotImplementedError("attempted call on signal stub") + #end stub + +#begin def_signal_stub + if "name" not in kwargs : + raise KeyError("name arg is mandatory") + #end if + stub.__name__ = kwargs["name"] + return \ + signal(**kwargs)(stub) +#end def_signal_stub + +def propgetter \ + (*, + name, + type, + name_keyword = None, + connection_keyword = None, + message_keyword = None, + path_keyword = None, + bus_keyword = None, + change_notification = None + ) : + "Put a call to this function as a decorator for a method of an @interface()" \ + " class that is to be the getter of the named property." + + def decorate(func) : + if not callable(func) : + raise TypeError("only apply decorator to callables.") + #end if + assert isinstance(name, str), "property name is mandatory" + if ( + change_notification != None + and + not isinstance(change_notification, Introspection.PROP_CHANGE_NOTIFICATION) + ) : + raise TypeError \ + ( + "change_notification must be None or an Introspection." + "PROP_CHANGE_NOTIFICATION value" + ) + #end if + func._propgetter_info = \ + { + "name" : name, + "type" : dbus.parse_single_signature(type), + "name_keyword" : name_keyword, + "connection_keyword" : connection_keyword, + "message_keyword" : message_keyword, + "path_keyword" : path_keyword, + "bus_keyword" : bus_keyword, + "change_notification" : change_notification, + } + return \ + func + #end decorate + +#begin propgetter + return \ + decorate +#end propgetter + +def propsetter \ + (*, + name, + type, + name_keyword = None, + type_keyword = None, + value_keyword, + connection_keyword = None, + message_keyword = None, + path_keyword = None, + bus_keyword = None + ) : + "Put a call to this function as a decorator for a method of an @interface()" \ + " class that is to be the setter of the named property." 
+ + def decorate(func) : + if not callable(func) : + raise TypeError("only apply decorator to callables.") + #end if + assert isinstance(name, str), "property name is mandatory" + func._propsetter_info = \ + { + "name" : name, + "type" : dbus.parse_single_signature(type), + "name_keyword" : name_keyword, + "type_keyword" : type_keyword, + "value_keyword" : value_keyword, + "connection_keyword" : connection_keyword, + "message_keyword" : message_keyword, + "path_keyword" : path_keyword, + "bus_keyword" : bus_keyword, + } + return \ + func + #end decorate + +#begin propsetter + return \ + decorate +#end propsetter + +#+ +# Introspection +#- + +def introspect(interface) : + "returns an Introspection.Interface object that describes the specified" \ + " @interface() class." + if not is_interface(interface) : + raise TypeError("interface must be an @interface()-type class") + #end if + + def add_deprecated(annots, deprecated) : + # common routine for generating “deprecated” annotations. + if deprecated : + annots.append \ + ( + Introspection.Annotation(name = "org.freedesktop.DBus.Deprecated", value = "true") + ) + #end if + #end add_deprecated + +#begin introspect + methods = [] + for name in interface._interface_methods : + method = interface._interface_methods[name] + annots = [] + add_deprecated(annots, method._method_info["deprecated"]) + if not method._method_info["reply"] : + annots.append \ + ( + Introspection.Annotation + ( + name = "org.freedesktop.DBus.Method.NoReply", + value = "true" + ) + ) + #end if + args = [] + for keys_keyword, sig_keyword, direction in \ + ( + ("arg_keys", "in_signature", Introspection.DIRECTION.IN), + ("result_keys", "out_signature", Introspection.DIRECTION.OUT), + ) \ + : + arg_sigs = dbus.parse_signature(method._method_info[sig_keyword]) + arg_names = method._method_info[keys_keyword] + if arg_names == None : + arg_names = [None] * len(arg_sigs) + #end if + for arg_name, arg_sig in zip(arg_names, arg_sigs) : + args.append \ + ( + 
Introspection.Interface.Method.Arg + ( + name = arg_name, + type = arg_sig, + direction = direction + ) + ) + #end for + #end for + methods.append \ + ( + Introspection.Interface.Method + ( + name = name, + args = args, + annotations = annots + ) + ) + #end for + signals = [] + for name in interface._interface_signals : + signal = interface._interface_signals[name] + annots = [] + add_deprecated(annots, signal._signal_info["deprecated"]) + args = [] + arg_sigs = dbus.parse_signature(signal._signal_info["in_signature"]) + arg_names = signal._signal_info["arg_keys"] + if arg_names == None : + arg_names = [None] * len(arg_sigs) + #end if + for arg_name, arg_sig in zip(arg_names, arg_sigs) : + args.append \ + ( + Introspection.Interface.Signal.Arg(name = arg_name, type = arg_sig) + ) + #end for + signals.append \ + ( + Introspection.Interface.Signal + ( + name = name, + args = args, + annotations = annots + ) + ) + #end for + properties = [] + for name in interface._interface_props : + prop = interface._interface_props[name] + annots = [] + if ( + "getter" in prop + and + prop["change_notification"] != interface._interface_property_change_notification + ) : + annots.append \ + ( + Introspection.Annotation + ( + name = "org.freedesktop.DBus.Property.EmitsChangedSignal", + value = prop["change_notification"].value + ) + ) + #end if + properties.append \ + ( + Introspection.Interface.Property + ( + name = name, + type = dbus.parse_single_signature(prop["type"]), + access = + ( + None, + Introspection.ACCESS.READ, + Introspection.ACCESS.WRITE, + Introspection.ACCESS.READWRITE, + )[ + int("getter" in prop) + | + int("setter" in prop) << 1 + ], + annotations = annots + ) + ) + #end for + annots = [] + if ( + interface._interface_property_change_notification + != + Introspection.PROP_CHANGE_NOTIFICATION.NEW_VALUE + ) : + annots.append \ + ( + Introspection.Annotation + ( + name = "org.freedesktop.DBus.Property.EmitsChangedSignal", + value = 
interface._interface_property_change_notification.value + ) + ) + #end if + add_deprecated(annots, interface._interface_deprecated) + return \ + Introspection.Interface \ + ( + name = interface._interface_name, + methods = methods, + signals = signals, + properties = properties, + annotations = annots + ) +#end introspect + +def _append_args(message, call_info, args, kwargs) : + message_args = [None] * len(call_info.in_signature) + if len(args) != 0 : + if len(args) > len(message_args) : + raise ValueError("too many args") + #end if + message_args[:len(args)] = args + #end if + if len(kwargs) != 0 : + arg_positions = {} + idx = 0 + for arg in call_info.args : + if ( + isinstance(arg, Introspection.Interface.Signal.Arg) + or + arg.direction == Introspection.DIRECTION.IN + ) : + arg_positions[arg.name] = idx + idx += 1 + #end if + #end for + for arg_name in kwargs : + if arg_name not in arg_positions : + raise KeyError("no such arg name “%s”" % arg_name) + #end if + pos = arg_positions[arg_name] + if message_args[pos] != None : + raise ValueError("duplicate value for arg %d" % pos) + #end if + message_args[pos] = kwargs[arg_name] + #end for + #end if + missing = set(pos for pos in range(len(message_args)) if message_args[pos] == None) + if len(missing) != 0 : + raise ValueError \ + ( + "too few args specified: missing %s" + % + ", ".join("%d" % pos for pos in sorted(missing)) + ) + #end if + message.append_objects(call_info.in_signature, *message_args) +#end _append_args + +def def_proxy_interface(kind, *, name, introspected, is_async) : + "given an Introspection.Interface object, creates a proxy class that can be" \ + " instantiated by a client to send method-call messages to a server," \ + " or by a server to send signal messages to clients. is_async indicates" \ + " whether method calls are done via coroutines as opposed to blocking" \ + " the thread. 
The resulting class can be instantiated by\n" \ + "\n" \ + " instance = proxy_class(«connection», «dest»)\n" \ + "\n" \ + " where «connection» is a dbussy.Connection object to use for sending and receiving" \ + " the messages, and «dest» is the bus name to which to send the messages. The" \ + " resulting instance is a “root proxy”: a proxy for a particular object path" \ + " is obtained from this by an indexing call like\n" \ + "\n" \ + " obj = instance[«object_path»]\n" \ + "\n" \ + "from which you can make proxy method calls like obj.«method»(«args») and so on." + + if not isinstance(kind, INTERFACE) : + raise TypeError("kind must be an INTERFACE enum value") + #end if + if not isinstance(introspected, Introspection.Interface) : + raise TypeError("introspected must be an Introspection.Interface") + #end if + + class proxy(BusPeer.Object.ProxyInterface) : + # class that will be constructed, to be instantiated for a given connection, + # destination and path. + + # class field _iface_name contains interface name. + __slots__ = ("_parent", "_conn", "_dest", "_path", "_timeout") + + def __init__(self, *, parent, connection, dest, path, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + if is_async : + assert connection.loop != None, "no event loop to attach coroutines to" + #end if + self._parent = parent + self._conn = connection + self._dest = dest + self._path = path + self._timeout = timeout + #end __init__ + + # rest filled in dynamically below. + + #end proxy + + def def_method(intr_method) : + # constructs a method-call method. 
+ + if is_async : + + async def call_method(self, *args, **kwargs) : + message = dbus.Message.new_method_call \ + ( + destination = self._dest, + path = dbus.unsplit_path(self._path), + iface = self._iface_name, + method = intr_method.name + ) + _append_args(message, intr_method, args, kwargs) + if intr_method.expect_reply : + reply = await self._conn.send_await_reply(message, self._timeout) + result = reply.expect_return_objects(intr_method.out_signature) + else : + message.no_reply = True + self._conn.send(message) + result = None + #end if + return \ + result + #end call_method + + else : + + def call_method(self, *args, **kwargs) : + message = dbus.Message.new_method_call \ + ( + destination = self._dest, + path = dbus.unsplit_path(self._path), + iface = self._iface_name, + method = intr_method.name + ) + _append_args(message, intr_method, args, kwargs) + if intr_method.expect_reply : + reply = self._conn.send_with_reply_and_block(message, self._timeout) + result = reply.expect_return_objects(intr_method.out_signature) + else : + message.no_reply = True + self._conn.send(message) + result = None + #end if + return \ + result + #end call_method + + #end if + + #begin def_method + call_method.__name__ = intr_method.name + call_method.__doc__ = \ + ( + "method, %(args)s, %(result)s" + % + { + "args" : + ( + lambda : "no args", + lambda : "args %s" % dbus.unparse_signature(intr_method.in_signature), + )[len(intr_method.in_signature) != 0](), + "result" : + ( + lambda : "no result", + lambda : "result %s" % dbus.unparse_signature(intr_method.out_signature), + )[len(intr_method.out_signature) != 0](), + } + ) + setattr(proxy, intr_method.name, call_method) + #end def_method + + def def_signal(intr_signal) : + # constructs a signal method. These are never async, since messages + # are queued and there is no reply. 
+ + def send_signal(self, *args, **kwargs) : + message = dbus.Message.new_signal \ + ( + path = dbus.unsplit_path(self._path), + iface = self._iface_name, + name = intr_signal.name + ) + _append_args(message, intr_signal, args, kwargs) + self._conn.send(message) + #end send_signal + + #begin def_signal + send_signal.__name__ = intr_signal.name + send_signal.__doc__ = \ + ( + "signal, %(args)s" + % + { + "args" : + ( + lambda : "no args", + lambda : "args %s" % dbus.unparse_signature(intr_signal.in_signature), + )[len(intr_signal.in_signature) != 0](), + } + ) + setattr(proxy, signal.name, send_signal) + #end def_signal + + def def_prop(intr_prop) : + # defines getter and/or setter methods as appropriate for a property. + + if is_async : + + async def get_prop(self) : + message = dbus.Message.new_method_call \ + ( + destination = self._dest, + path = dbus.unsplit_path(self._path), + iface = DBUS.INTERFACE_PROPERTIES, + method = "Get" + ) + message.append_objects("ss", self._iface_name, intr_prop.name) + reply = await self._conn.send_await_reply(message, self._timeout) + return \ + reply.expect_return_objects("v")[0][1] + #end get_prop + + def set_prop(self, value) : + # Unfortunately, Python doesn’t (currently) allow “await” + # on the LHS of an assignment. So to avoid holding up the + # thread, I put a task on the event loop to watch over + # the completion of the send. This means any error is + # going to be reported asynchronously. C’est la vie. 
+ message = dbus.Message.new_method_call \ + ( + destination = self._dest, + path = dbus.unsplit_path(self._path), + iface = DBUS.INTERFACE_PROPERTIES, + method = "Set" + ) + message.append_objects("ssv", self._iface_name, intr_prop.name, (intr_prop.type, value)) + set_prop_pending = self._conn.loop.create_future() + self._parent._set_prop_pending.append(set_prop_pending) + pending = self._conn.send_with_reply(message, self._timeout) + async def sendit() : + reply = await pending.await_reply() + self._parent._set_prop_pending.pop \ + ( + self._parent._set_prop_pending.index(set_prop_pending) + ) + if reply.type == DBUS.MESSAGE_TYPE_METHOD_RETURN : + set_prop_pending.set_result(reply) + elif reply.type == DBUS.MESSAGE_TYPE_ERROR : + failed = dbus.DBusError(reply.error_name, reply.expect_objects("s")[0]) + if self._parent._set_prop_failed == None : + set_prop_pending.set_exception(failed) + self._parent._set_prop_failed = set_prop_pending + else : + # don’t let failures pile up + raise failed + #end if + else : + raise ValueError("unexpected reply type %d" % reply.type) + #end if + #end sendit + + self._conn.create_task(sendit()) + #end set_prop + + else : + + def get_prop(self) : + message = dbus.Message.new_method_call \ + ( + destination = self._dest, + path = dbus.unsplit_path(self._path), + iface = DBUS.INTERFACE_PROPERTIES, + method = "Get" + ) + message.append_objects("ss", self._iface_name, intr_prop.name) + reply = self._conn.send_with_reply_and_block(message, self._timeout) + return \ + reply.expect_return_objects("v")[0][1] + #end get_prop + + def set_prop(self, value) : + message = dbus.Message.new_method_call \ + ( + destination = self._dest, + path = dbus.unsplit_path(self._path), + iface = DBUS.INTERFACE_PROPERTIES, + method = "Set" + ) + message.append_objects("ssv", self._iface_name, intr_prop.name, (intr_prop.type, value)) + reply = self._conn.send_with_reply_and_block(message, self._timeout) + if reply.type == DBUS.MESSAGE_TYPE_METHOD_RETURN : + 
pass + elif reply.type == DBUS.MESSAGE_TYPE_ERROR : + raise dbus.DBusError(reply.error_name, reply.expect_objects("s")[0]) + else : + raise ValueError("unexpected reply type %d" % reply.type) + #end if + #end set_prop + + #end if + + def get_prop_noaccess(self) : + raise dbus.DbusError \ + ( + name = DBUS.ERROR_ACCESS_DENIED, + message = "property “%s” cannot be read" % intro_prop.name + ) + #end get_prop_noaccess + + #begin def_prop + if intr_prop.access == Introspection.ACCESS.WRITE : + get_prop = get_prop_noaccess + #end if + if intr_prop.access == Introspection.ACCESS.READ : + set_prop = None + #end if + prop = property(fget = get_prop, fset = set_prop) + setattr(proxy, intr_prop.name, prop) + #end def_prop + + class proxy_factory(BusPeer.RootProxy) : + # class that will be returned. + + __slots__ = ("connection", "dest", "timeout", "_set_prop_pending", "_set_prop_failed") + # class variables: + # template -- = proxy class (set up above) + # props -- dict of introspected.properties by name + + def __init__(self, *, connection, dest, timeout = DBUS.TIMEOUT_USE_DEFAULT) : + if is_async : + assert connection.loop != None, "no event loop to attach coroutines to" + #end if + self.connection = connection + self.dest = dest + self.timeout = timeout + if is_async : + self._set_prop_pending = [] + else : + self._set_prop_pending = None + #end if + self._set_prop_failed = None + #end __init__ + + def __getitem__(self, path) : + return \ + self.template \ + ( + parent = self, + connection = self.connection, + dest = self.dest, + path = path, + timeout = self.timeout + ) + #end __getitem__ + + if is_async : + + async def set_prop_flush(self) : + "workaround for the fact that prop-setter has to queue a separate" \ + " asynchronous task; caller can await this coroutine to ensure that" \ + " all pending set-property calls have completed." 
+ if not is_async : + raise RuntimeError("not without an event loop") + #end if + if self._set_prop_failed != None : + set_prop_pending = [self._set_prop_failed] + self._set_prop_failed = None + else : + set_prop_pending = self._set_prop_pending + #end if + if len(set_prop_pending) != 0 : + done = (await self.connection.wait(set_prop_pending))[0] + failed = list(e for f in done for e in (f.exception(),) if e != None) + if len(failed) > 1 : + raise RuntimeError \ + ( + "multiple failures to set properties: %s" + % + ", ".join(str(f) for f in failed) + ) + elif len(failed) == 1 : + raise failed[0] + #end if + #end if + #end set_prop_flush + + def set_prop(self, path, propname, newvalue) : + "alternative way of asynchronously setting a new property value:" \ + " returns a Future that can be explicitly awaited." + if propname not in self.props : + raise dbus.DBusError \ + ( + DBUS.ERROR_UNKNOWN_PROPERTY, + message = "no such property “%s”" % propname + ) + #end if + propdef = self.props[propname] + if propdef.access == Introspection.ACCESS.READ : + raise dbus.DBusError \ + ( + DBUS.ERROR_PROPERTY_READ_ONLY, + message = "property “%s” cannot be written" % propdef.name + ) + #end if + message = dbus.Message.new_method_call \ + ( + destination = self.dest, + path = dbus.unsplit_path(path), + iface = DBUS.INTERFACE_PROPERTIES, + method = "Set" + ) + message.append_objects("ssv", self.template._iface_name, propname, (propdef.type, newvalue)) + set_prop_pending = self.connection.loop.create_future() + pending = self.connection.send_with_reply(message, self.timeout) + async def sendit() : + reply = await pending.await_reply() + if reply.type == DBUS.MESSAGE_TYPE_METHOD_RETURN : + set_prop_pending.set_result(None) + elif reply.type == DBUS.MESSAGE_TYPE_ERROR : + set_prop_pending.set_exception \ + ( + dbus.DBusError(reply.error_name, reply.expect_objects("s")[0]) + ) + else : + raise ValueError("unexpected reply type %d" % reply.type) + #end if + #end sendit + 
self.connection.create_task(sendit()) + return \ + set_prop_pending + #end set_prop + + #end if + + #end proxy_factory + +#begin def_proxy_interface + if name != None : + class_name = name + else : + class_name = introspected.name.replace(".", "_") + #end if + proxy.__name__ = class_name + proxy._iface_name = introspected.name + proxy.__doc__ = "for making method calls on the %s interface." % introspected.name + if kind != INTERFACE.SERVER : + for method in introspected.methods : + def_method(method) + #end for + for prop in introspected.properties : + def_prop(prop) + #end for + #end if + if kind != INTERFACE.CLIENT : + for signal in introspected.signals : + def_signal(signal) + #end for + #end if + proxy_factory.__name__ = "%s_factory" % proxy.__name__ + proxy_factory.__doc__ = \ + ( + "proxy factory for a %(kind)s D-Bus interface named %(iname)s. Instantiate as\n" + "\n" + " %(cname)s(connection = «connection»[, dest = «dest»[, timeout = «timeout»]])\n" + "\n" + "where «connection» is the dbussy.Connection instance to use for sending" + " messages and receiving replies, and «dest» is the destination" \ + " bus name for sending method calls (not needed if only sending signals)." + " The resulting «proxy» object can be indexed by object path, as follows:\n" + "\n" + " «proxy»[«path»]\n" + "\n" + "to obtain the actual proxy interface object that can be used to do method" + " calls or signal calls." 
+ % + { + "cname" : class_name, + "iname" : introspected.name, + "kind" : + { + INTERFACE.CLIENT : "client-side", + INTERFACE.SERVER : "server-side", + INTERFACE.CLIENT_AND_SERVER : "client-and-server-side", + }[kind] + } + ) + proxy_factory.template = proxy + proxy_factory.props = dict \ + ( + (prop.name, prop) + for prop in introspected.properties + ) + return \ + proxy_factory +#end def_proxy_interface + +async def set_prop_flush(iface) : + "iface must be either a BusPeer.RootProxy or BusPeer.Object.ProxyInterface" \ + " instance; calls the set_prop_flush() method on the correct root proxy in" \ + " either case." + if isinstance(iface, BusPeer.RootProxy) : + await iface.set_prop_flush() + elif isinstance(iface, BusPeer.Object.ProxyInterface) : + await iface._parent.set_prop_flush() + else : + raise TypeError("iface type %s is not a RootProxy or a ProxyInterface" % type(iface).__name__) + #end if +#end set_prop_flush + +#+ +# Predefined interfaces +#- + +@interface(INTERFACE.CLIENT_AND_SERVER, name = DBUS.INTERFACE_PEER) +class PeerStub : + "This is registered as a fallback at the root of your object tree to get" \ + " automatic introspection of the DBUS.INTERFACE_PEER interface. The" \ + " implementation is hard-coded inside libdbus itself, so the methods" \ + " here will never be called." + + @method \ + ( + name = "Ping", + in_signature = "", + out_signature = "", + ) + def ping(self) : + raise NotImplementedError("How did you get here?") + #end ping + + @method \ + ( + name = "GetMachineId", + in_signature = "", + out_signature = "s", + result_keys = ["machine_uuid"], + ) + def get_machine_id(self) : + raise NotImplementedError("How did you get here?") + #end get_machine_id + +#end PeerStub + +@interface(INTERFACE.CLIENT_AND_SERVER, name = DBUS.INTERFACE_INTROSPECTABLE) +class IntrospectionHandler : + "Register this as a fallback at the root of your object tree to obtain" \ + " automatic introspection of any point in the tree." 
+ + @method \ + ( + name = "Introspect", + in_signature = "", + out_signature = "s", + path_keyword = "path", + message_keyword = "message", + bus_keyword = "bus", + ) + def introspect(self, message, bus, path) : + interfaces = {} + children = None # actually redundant + level = bus._server_dispatch + levels = iter(dbus.split_path(path)) + while True : + component = next(levels, None) + for entry in level.interfaces.values() : + if component == None or entry.fallback : + interface = type(entry.interface) + if interface._interface_kind != INTERFACE.CLIENT : + interfaces[interface._interface_name] = interface + # replace any higher-level entry for same name + #end if + #end if + #end for + if ( + component == None + # reached bottom of path + or + component not in level.children + # no handlers to be found further down path + ) : + children = sorted(level.children.keys()) + break + #end if + level = level.children[component] + # search another step down the path + #end while + introspection = Introspection \ + ( + interfaces = list + ( + introspect(iface) + for iface in sorted(interfaces.values(), key = lambda iface : iface._interface_name) + ), + nodes = list + ( + Introspection.Node(name = child) for child in children + ) + ) + _send_method_return(bus.connection, message, "s", [introspection.unparse()]) + return \ + DBUS.HANDLER_RESULT_HANDLED + #end introspect + +#end IntrospectionHandler + +@interface(INTERFACE.CLIENT_AND_SERVER, name = DBUS.INTERFACE_PROPERTIES) +class PropertyHandler : + "Register this as a fallback at the root of your object tree to provide" \ + " automatic dispatching to any @propgetter() and @propsetter() methods" \ + " defined for registered interfaces appropriate to an object path." 
+ + @method \ + ( + name = "Get", + in_signature = "ss", + out_signature = "v", + args_keyword = "args", + path_keyword = "path", + message_keyword = "message", + bus_keyword = "bus" + ) + def getprop(self, bus, message, path, args) : + interface_name, propname = args + dispatch = bus.get_dispatch_interface(path, interface_name) + props = type(dispatch)._interface_props + if propname in props : + propentry = props[propname] + if "getter" in propentry : + getter = getattr(dispatch, propentry["getter"].__name__) + kwargs = {} + for keyword_keyword, value in \ + ( + ("name_keyword", lambda : propname), + ("connection_keyword", lambda : bus.connection), + ("message_keyword", lambda : message), + ("path_keyword", lambda : path), + ("bus_keyword", lambda : bus), + ) \ + : + if getter._propgetter_info[keyword_keyword] != None : + kwargs[getter._propgetter_info[keyword_keyword]] = value() + #end if + #end for + try : + propvalue = getter(**kwargs) + except ErrorReturn as err : + propvalue = err.as_error() + #end try + if asyncio.iscoroutine(propvalue) : + assert bus.loop != None, "no event loop to attach coroutine to" + async def await_return_value(task) : + try : + propvalue = await task + except ErrorReturn as err : + result = err.as_error() + reply = message.new_error(result.name, result.message) + bus.connection.send(reply) + else : + _send_method_return \ + ( + connection = bus.connection, + message = message, + sig = [dbus.VariantType()], + args = [(propentry["type"], propvalue)] + ) + #end try + #end await_return_value + bus.create_task(await_return_value(propvalue)) + reply = None + elif isinstance(propvalue, dbus.Error) : + assert propvalue.is_set, "unset Error object returned from propgetter" + reply = message.new_error(propvalue.name, propvalue.message) + else : + _send_method_return \ + ( + connection = bus.connection, + message = message, + sig = [dbus.VariantType()], + args = [(propentry["type"], propvalue)] + ) + reply = None + #end if + else : + reply = 
message.new_error \ + ( + name = DBUS.ERROR_ACCESS_DENIED, + message = "property “%s” cannot be read" % propname + ) + #end if + else : + reply = message.new_error \ + ( + name = DBUS.ERROR_UNKNOWN_PROPERTY, + message = "property “%s” cannot be found" % propname + ) + #end if + if reply != None : + bus.connection.send(reply) + #end if + return \ + DBUS.HANDLER_RESULT_HANDLED + #end getprop + + @method \ + ( + name = "Set", + in_signature = "ssv", + out_signature = "", + args_keyword = "args", + path_keyword = "path", + message_keyword = "message", + bus_keyword = "bus" + ) + def setprop(self, bus, message, path, args) : + + def notify_changed() : + # sends property-changed signal if appropriate. + if "getter" in propentry : + notify = propentry["change_notification"] + if notify == Introspection.PROP_CHANGE_NOTIFICATION.NEW_VALUE : + bus.prop_changed(path, interface_name, propname, proptype, propvalue) + elif notify == Introspection.PROP_CHANGE_NOTIFICATION.INVALIDATES : + bus.prop_changed(path, interface_name, propname, None, None) + #end if + #end if + #end notify_changed + + #begin setprop + interface_name, propname, (proptype, propvalue) = args + dispatch = bus.get_dispatch_interface(path, interface_name) + props = type(dispatch)._interface_props + if propname in props : + propentry = props[propname] + if "setter" in propentry : + setter = getattr(dispatch, propentry["setter"].__name__) + try : + if propentry["type"] != None and propentry["type"] != dbus.parse_single_signature(proptype) : + raise ErrorReturn \ + ( + name = DBUS.ERROR_INVALID_ARGS, + message = + "new property type %s does not match expected signature %s" + % + (proptype, dbus.unparse_signature(propentry["type"])) + ) + #end if + kwargs = {} + for keyword_keyword, value in \ + ( + ("name_keyword", lambda : propname), + ("type_keyword", lambda : proptype), + ("value_keyword", lambda : propvalue), + ("connection_keyword", lambda : bus.connection), + ("message_keyword", lambda : message), + 
("path_keyword", lambda : path), + ("bus_keyword", lambda : bus), + ) \ + : + if setter._propsetter_info[keyword_keyword] != None : + kwargs[setter._propsetter_info[keyword_keyword]] = value() + #end if + #end for + setresult = setter(**kwargs) + except ErrorReturn as err : + setresult = err.as_error() + #end try + if asyncio.iscoroutine(setresult) : + assert bus.loop != None, "no event loop to attach coroutine to" + async def wait_set_done() : + await setresult + reply = message.new_method_return() + bus.connection.send(reply) + notify_changed() + #end wait_set_done + bus.create_task(wait_set_done()) + reply = None # for now + elif isinstance(setresult, dbus.Error) : + assert setresult.is_set, "unset Error object returned" + reply = message.new_error(setresult.name, setresult.message) + elif setresult == None : + reply = message.new_method_return() + notify_changed() + else : + raise ValueError("invalid propsetter result %s" % repr(setresult)) + #end if + else : + reply = message.new_error \ + ( + name = DBUS.ERROR_PROPERTY_READ_ONLY, + message = "property “%s” cannot be written" % propname + ) + #end if + else : + reply = message.new_error \ + ( + name = DBUS.ERROR_UNKNOWN_PROPERTY, + message = "property “%s” cannot be found" % propname + ) + #end if + if reply != None : + bus.connection.send(reply) + #end if + return \ + DBUS.HANDLER_RESULT_HANDLED + #end setprop + + @method \ + ( + name = "GetAll", + in_signature = "s", + out_signature = "a{sv}", + args_keyword = "args", + path_keyword = "path", + message_keyword = "message", + bus_keyword = "bus" + ) + def get_all_props(self, bus, message, path, args) : + + properror = None + propvalues = {} + to_await = [] + + def return_result() : + if properror != None : + reply = message.new_error(properror.name, properror.message) + bus.connection.send(reply) + else : + _send_method_return(bus.connection, message, "a{sv}", [propvalues]) + #end if + #end return_result + + async def await_propvalues() : + nonlocal properror 
+ for propname, fute in to_await : + try : + propvalue = await fute + except ErrorReturn as err : + properror = err.as_error() + break + #end try + propvalues[propname] = (propvalues[propname][0], propvalue) + #end for + return_result() + #end await_propvalues + + #begin get_all_props + interface_name, = args + try : + propvalues, to_await = bus._get_all_my_props(message, path, interface_name) + except ErrorReturn as err : + properror = err.as_error() + #end try + if len(to_await) != 0 : + bus.create_task(await_propvalues()) + else : + return_result() + #end if + return \ + DBUS.HANDLER_RESULT_HANDLED + #end get_all_props + + @signal(name = "PropertiesChanged", in_signature = "sa{sv}as", stub = True) + def properties_changed(self) : + "for use with Connection.send_signal." + pass + #end properties_changed + +#end PropertyHandler + +@interface(INTERFACE.CLIENT_AND_SERVER, name = DBUSX.INTERFACE_OBJECT_MANAGER) +class ManagedObjectsHandler : + "Register this as a fallback at the root of your object tree to provide" \ + " handling of the ObjectManager interface." + + @method \ + ( + name = "GetManagedObjects", + in_signature = "", + out_signature = "a{oa{sa{sv}}}", + set_result_keyword = "set_result", + bus_keyword = "bus", + message_keyword = "message", + path_keyword = "base_path", + ) + def get_managed_objects(self, bus, message, base_path, set_result) : + + result = {} + to_await = [] + + async def await_propvalues() : + for path, interface_name, propname, fute in to_await : + propvalue = await fute + # any ErrorReturn raised will be automatically converted + # to error reply by _message_interface_dispatch (above) + propvalues = result[path][interface_name] + propvalues[propname] = (propvalues[propname][0], propvalue) + #end for + set_result([result]) + #end await_propvalues + + #begin get_managed_objects + "returns supported interfaces and current property values for all" \ + " currently-existent managed objects." 
+ for path, interfaces in bus._managed_objects.items() : + if base_path == "/" and path != "/" or path.startswith(base_path + "/") : + obj_entry = {} + for interface_name in interfaces : + obj_entry[interface_name], await_props = \ + bus._get_all_my_props(message, path, interface_name) + for propname, propvalue in await_props : + to_await.append((path, interface_name, propname, propvalue)) + #end for + #end for + result[path] = obj_entry + #end if + #end for + if len(to_await) != 0 : + return_result = await_propvalues() # result will be available when this is done + else : + set_result([result]) + return_result = None + #end if + return \ + return_result + #end get_managed_objects + + @signal(name = "InterfacesAdded", in_signature = "oa{sa{sv}}", stub = True) + def interfaces_added(self) : + "for use with Connection.send_signal." + pass + #end interfaces_added + + @signal(name = "InterfacesRemoved", in_signature = "oas", stub = True) + def interfaces_removed(self) : + "for use with Connection.send_signal." 
+ pass + #end interfaces_removed + +#end ManagedObjectsHandler + +#+ +# Cleanup +#- + +def _atexit() : + # disable all __del__ methods at process termination to avoid unpredictable behaviour + for cls in Connection, Server : + delattr(cls, "__del__") + #end for +#end _atexit +atexit.register(_atexit) +del _atexit diff --git a/defaults/dbussy/setup.cfg b/defaults/dbussy/setup.cfg new file mode 100644 index 0000000..3445449 --- /dev/null +++ b/defaults/dbussy/setup.cfg @@ -0,0 +1,16 @@ +[metadata] +license_file = COPYING +platform = Linux +classifiers = + Programming Language :: Python :: 3.5 + Development Status :: 4 - Beta + Intended Audience :: Developers + License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL) + Operating System :: POSIX :: Linux + Topic :: Software Development :: Libraries + Topic :: Desktop Environment + Topic :: Software Development :: Object Brokering + +[bdist_wheel] +python-tag = py35 +#plat-name = linux diff --git a/defaults/dbussy/setup.py b/defaults/dbussy/setup.py new file mode 100644 index 0000000..31dafa9 --- /dev/null +++ b/defaults/dbussy/setup.py @@ -0,0 +1,52 @@ +#+ +# Setuptools script to install DBussy. Make sure setuptools +# is installed. +# Invoke from the command line in this directory as follows: +# +# python3 setup.py build +# sudo python3 setup.py install +# +# Written by Lawrence D'Oliveiro . +#- + +import sys +import setuptools +from setuptools.command.build_py import \ + build_py as std_build_py + +class my_build_py(std_build_py) : + "customization of build to perform additional validation." 
+ + def run(self) : + try : + exec \ + ( + "async def dummy() :\n" + " pass\n" + "#end dummy\n" + ) + except SyntaxError : + sys.stderr.write("This module requires Python 3.5 or later.\n") + sys.exit(-1) + #end try + super().run() + #end run + +#end my_build_py + +setuptools.setup \ + ( + name = "DBussy", + version = "1.3", + description = "language bindings for libdbus, for Python 3.5 or later", + long_description = "language bindings for libdbus, for Python 3.5 or later", + author = "Lawrence D'Oliveiro", + author_email = "ldo@geek-central.gen.nz", + url = "https://github.com/ldo/dbussy", + license = "LGPL v2.1+", + py_modules = ["dbussy", "ravel"], + cmdclass = + { + "build_py" : my_build_py, + }, + ) diff --git a/defaults/lib/x/__init__.py b/defaults/lib/x/__init__.py new file mode 100644 index 0000000..bf6d8dd --- /dev/null +++ b/defaults/lib/x/__init__.py @@ -0,0 +1,20 @@ +"""Core XML support for Python. + +This package contains four sub-packages: + +dom -- The W3C Document Object Model. This supports DOM Level 1 + + Namespaces. + +parsers -- Python wrappers for XML parsers (currently only supports Expat). + +sax -- The Simple API for XML, developed by XML-Dev, led by David + Megginson and ported to Python by Lars Marius Garshol. This + supports the SAX 2 API. + +etree -- The ElementTree XML library. This is a subset of the full + ElementTree XML release. + +""" + + +__all__ = ["dom", "parsers", "sax", "etree"] diff --git a/defaults/lib/x/dom/NodeFilter.py b/defaults/lib/x/dom/NodeFilter.py new file mode 100644 index 0000000..640e0bf --- /dev/null +++ b/defaults/lib/x/dom/NodeFilter.py @@ -0,0 +1,27 @@ +# This is the Python mapping for interface NodeFilter from +# DOM2-Traversal-Range. It contains only constants. + +class NodeFilter: + """ + This is the DOM2 NodeFilter interface. It contains only constants. 
+ """ + FILTER_ACCEPT = 1 + FILTER_REJECT = 2 + FILTER_SKIP = 3 + + SHOW_ALL = 0xFFFFFFFF + SHOW_ELEMENT = 0x00000001 + SHOW_ATTRIBUTE = 0x00000002 + SHOW_TEXT = 0x00000004 + SHOW_CDATA_SECTION = 0x00000008 + SHOW_ENTITY_REFERENCE = 0x00000010 + SHOW_ENTITY = 0x00000020 + SHOW_PROCESSING_INSTRUCTION = 0x00000040 + SHOW_COMMENT = 0x00000080 + SHOW_DOCUMENT = 0x00000100 + SHOW_DOCUMENT_TYPE = 0x00000200 + SHOW_DOCUMENT_FRAGMENT = 0x00000400 + SHOW_NOTATION = 0x00000800 + + def acceptNode(self, node): + raise NotImplementedError diff --git a/defaults/lib/x/dom/__init__.py b/defaults/lib/x/dom/__init__.py new file mode 100644 index 0000000..97cf9a6 --- /dev/null +++ b/defaults/lib/x/dom/__init__.py @@ -0,0 +1,140 @@ +"""W3C Document Object Model implementation for Python. + +The Python mapping of the Document Object Model is documented in the +Python Library Reference in the section on the xml.dom package. + +This package contains the following modules: + +minidom -- A simple implementation of the Level 1 DOM with namespace + support added (based on the Level 2 specification) and other + minor Level 2 functionality. + +pulldom -- DOM builder supporting on-demand tree-building for selected + subtrees of the document. + +""" + + +class Node: + """Class giving the NodeType constants.""" + __slots__ = () + + # DOM implementations may use this as a base class for their own + # Node implementations. If they don't, the constants defined here + # should still be used as the canonical definitions as they match + # the values given in the W3C recommendation. Client code can + # safely refer to these values in all tests of Node.nodeType + # values. 
+ + ELEMENT_NODE = 1 + ATTRIBUTE_NODE = 2 + TEXT_NODE = 3 + CDATA_SECTION_NODE = 4 + ENTITY_REFERENCE_NODE = 5 + ENTITY_NODE = 6 + PROCESSING_INSTRUCTION_NODE = 7 + COMMENT_NODE = 8 + DOCUMENT_NODE = 9 + DOCUMENT_TYPE_NODE = 10 + DOCUMENT_FRAGMENT_NODE = 11 + NOTATION_NODE = 12 + + +#ExceptionCode +INDEX_SIZE_ERR = 1 +DOMSTRING_SIZE_ERR = 2 +HIERARCHY_REQUEST_ERR = 3 +WRONG_DOCUMENT_ERR = 4 +INVALID_CHARACTER_ERR = 5 +NO_DATA_ALLOWED_ERR = 6 +NO_MODIFICATION_ALLOWED_ERR = 7 +NOT_FOUND_ERR = 8 +NOT_SUPPORTED_ERR = 9 +INUSE_ATTRIBUTE_ERR = 10 +INVALID_STATE_ERR = 11 +SYNTAX_ERR = 12 +INVALID_MODIFICATION_ERR = 13 +NAMESPACE_ERR = 14 +INVALID_ACCESS_ERR = 15 +VALIDATION_ERR = 16 + + +class DOMException(Exception): + """Abstract base class for DOM exceptions. + Exceptions with specific codes are specializations of this class.""" + + def __init__(self, *args, **kw): + if self.__class__ is DOMException: + raise RuntimeError( + "DOMException should not be instantiated directly") + Exception.__init__(self, *args, **kw) + + def _get_code(self): + return self.code + + +class IndexSizeErr(DOMException): + code = INDEX_SIZE_ERR + +class DomstringSizeErr(DOMException): + code = DOMSTRING_SIZE_ERR + +class HierarchyRequestErr(DOMException): + code = HIERARCHY_REQUEST_ERR + +class WrongDocumentErr(DOMException): + code = WRONG_DOCUMENT_ERR + +class InvalidCharacterErr(DOMException): + code = INVALID_CHARACTER_ERR + +class NoDataAllowedErr(DOMException): + code = NO_DATA_ALLOWED_ERR + +class NoModificationAllowedErr(DOMException): + code = NO_MODIFICATION_ALLOWED_ERR + +class NotFoundErr(DOMException): + code = NOT_FOUND_ERR + +class NotSupportedErr(DOMException): + code = NOT_SUPPORTED_ERR + +class InuseAttributeErr(DOMException): + code = INUSE_ATTRIBUTE_ERR + +class InvalidStateErr(DOMException): + code = INVALID_STATE_ERR + +class SyntaxErr(DOMException): + code = SYNTAX_ERR + +class InvalidModificationErr(DOMException): + code = INVALID_MODIFICATION_ERR + +class 
NamespaceErr(DOMException): + code = NAMESPACE_ERR + +class InvalidAccessErr(DOMException): + code = INVALID_ACCESS_ERR + +class ValidationErr(DOMException): + code = VALIDATION_ERR + +class UserDataHandler: + """Class giving the operation constants for UserDataHandler.handle().""" + + # Based on DOM Level 3 (WD 9 April 2002) + + NODE_CLONED = 1 + NODE_IMPORTED = 2 + NODE_DELETED = 3 + NODE_RENAMED = 4 + +XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace" +XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/" +XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml" +EMPTY_NAMESPACE = None +EMPTY_PREFIX = None + +from .domreg import getDOMImplementation, registerDOMImplementation diff --git a/defaults/lib/x/dom/domreg.py b/defaults/lib/x/dom/domreg.py new file mode 100644 index 0000000..69c17ee --- /dev/null +++ b/defaults/lib/x/dom/domreg.py @@ -0,0 +1,99 @@ +"""Registration facilities for DOM. This module should not be used +directly. Instead, the functions getDOMImplementation and +registerDOMImplementation should be imported from xml.dom.""" + +# This is a list of well-known implementations. Well-known names +# should be published by posting to xml-sig@python.org, and are +# subsequently recorded in this file. + +import sys + +well_known_implementations = { + 'minidom':'xml.dom.minidom', + '4DOM': 'xml.dom.DOMImplementation', + } + +# DOM implementations not officially registered should register +# themselves with their + +registered = {} + +def registerDOMImplementation(name, factory): + """registerDOMImplementation(name, factory) + + Register the factory function with the name. The factory function + should return an object which implements the DOMImplementation + interface. The factory function can either return the same object, + or a new one (e.g. 
if that implementation supports some + customization).""" + + registered[name] = factory + +def _good_enough(dom, features): + "_good_enough(dom, features) -> Return 1 if the dom offers the features" + for f,v in features: + if not dom.hasFeature(f,v): + return 0 + return 1 + +def getDOMImplementation(name=None, features=()): + """getDOMImplementation(name = None, features = ()) -> DOM implementation. + + Return a suitable DOM implementation. The name is either + well-known, the module name of a DOM implementation, or None. If + it is not None, imports the corresponding module and returns + DOMImplementation object if the import succeeds. + + If name is not given, consider the available implementations to + find one with the required feature set. If no implementation can + be found, raise an ImportError. The features list must be a sequence + of (feature, version) pairs which are passed to hasFeature.""" + + import os + creator = None + mod = well_known_implementations.get(name) + if mod: + mod = __import__(mod, {}, {}, ['getDOMImplementation']) + return mod.getDOMImplementation() + elif name: + return registered[name]() + elif not sys.flags.ignore_environment and "PYTHON_DOM" in os.environ: + return getDOMImplementation(name = os.environ["PYTHON_DOM"]) + + # User did not specify a name, try implementations in arbitrary + # order, returning the one that has the required features + if isinstance(features, str): + features = _parse_feature_string(features) + for creator in registered.values(): + dom = creator() + if _good_enough(dom, features): + return dom + + for creator in well_known_implementations.keys(): + try: + dom = getDOMImplementation(name = creator) + except Exception: # typically ImportError, or AttributeError + continue + if _good_enough(dom, features): + return dom + + raise ImportError("no suitable DOM implementation found") + +def _parse_feature_string(s): + features = [] + parts = s.split() + i = 0 + length = len(parts) + while i < length: + feature 
= parts[i] + if feature[0] in "0123456789": + raise ValueError("bad feature name: %r" % (feature,)) + i = i + 1 + version = None + if i < length: + v = parts[i] + if v[0] in "0123456789": + i = i + 1 + version = v + features.append((feature, version)) + return tuple(features) diff --git a/defaults/lib/x/dom/expatbuilder.py b/defaults/lib/x/dom/expatbuilder.py new file mode 100644 index 0000000..7dd667b --- /dev/null +++ b/defaults/lib/x/dom/expatbuilder.py @@ -0,0 +1,962 @@ +"""Facility to use the Expat parser to load a minidom instance +from a string or file. + +This avoids all the overhead of SAX and pulldom to gain performance. +""" + +# Warning! +# +# This module is tightly bound to the implementation details of the +# minidom DOM and can't be used with other DOM implementations. This +# is due, in part, to a lack of appropriate methods in the DOM (there is +# no way to create Entity and Notation nodes via the DOM Level 2 +# interface), and for performance. The latter is the cause of some fairly +# cryptic code. +# +# Performance hacks: +# +# - .character_data_handler() has an extra case in which continuing +# data is appended to an existing Text node; this can be a +# speedup since pyexpat can break up character data into multiple +# callbacks even though we set the buffer_text attribute on the +# parser. This also gives us the advantage that we don't need a +# separate normalization pass. +# +# - Determining that a node exists is done using an identity comparison +# with None rather than a truth test; this avoids searching for and +# calling any methods on the node object if it exists. (A rather +# nice speedup is achieved this way as well!) 
+ +from xml.dom import xmlbuilder, minidom, Node +from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE +from xml.parsers import expat +from xml.dom.minidom import _append_child, _set_attribute_node +from xml.dom.NodeFilter import NodeFilter + +TEXT_NODE = Node.TEXT_NODE +CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE +DOCUMENT_NODE = Node.DOCUMENT_NODE + +FILTER_ACCEPT = xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT +FILTER_REJECT = xmlbuilder.DOMBuilderFilter.FILTER_REJECT +FILTER_SKIP = xmlbuilder.DOMBuilderFilter.FILTER_SKIP +FILTER_INTERRUPT = xmlbuilder.DOMBuilderFilter.FILTER_INTERRUPT + +theDOMImplementation = minidom.getDOMImplementation() + +# Expat typename -> TypeInfo +_typeinfo_map = { + "CDATA": minidom.TypeInfo(None, "cdata"), + "ENUM": minidom.TypeInfo(None, "enumeration"), + "ENTITY": minidom.TypeInfo(None, "entity"), + "ENTITIES": minidom.TypeInfo(None, "entities"), + "ID": minidom.TypeInfo(None, "id"), + "IDREF": minidom.TypeInfo(None, "idref"), + "IDREFS": minidom.TypeInfo(None, "idrefs"), + "NMTOKEN": minidom.TypeInfo(None, "nmtoken"), + "NMTOKENS": minidom.TypeInfo(None, "nmtokens"), + } + +class ElementInfo(object): + __slots__ = '_attr_info', '_model', 'tagName' + + def __init__(self, tagName, model=None): + self.tagName = tagName + self._attr_info = [] + self._model = model + + def __getstate__(self): + return self._attr_info, self._model, self.tagName + + def __setstate__(self, state): + self._attr_info, self._model, self.tagName = state + + def getAttributeType(self, aname): + for info in self._attr_info: + if info[1] == aname: + t = info[-2] + if t[0] == "(": + return _typeinfo_map["ENUM"] + else: + return _typeinfo_map[info[-2]] + return minidom._no_type + + def getAttributeTypeNS(self, namespaceURI, localName): + return minidom._no_type + + def isElementContent(self): + if self._model: + type = self._model[0] + return type not in (expat.model.XML_CTYPE_ANY, + expat.model.XML_CTYPE_MIXED) + else: + return False + + def 
isEmpty(self): + if self._model: + return self._model[0] == expat.model.XML_CTYPE_EMPTY + else: + return False + + def isId(self, aname): + for info in self._attr_info: + if info[1] == aname: + return info[-2] == "ID" + return False + + def isIdNS(self, euri, ename, auri, aname): + # not sure this is meaningful + return self.isId((auri, aname)) + +def _intern(builder, s): + return builder._intern_setdefault(s, s) + +def _parse_ns_name(builder, name): + assert ' ' in name + parts = name.split(' ') + intern = builder._intern_setdefault + if len(parts) == 3: + uri, localname, prefix = parts + prefix = intern(prefix, prefix) + qname = "%s:%s" % (prefix, localname) + qname = intern(qname, qname) + localname = intern(localname, localname) + elif len(parts) == 2: + uri, localname = parts + prefix = EMPTY_PREFIX + qname = localname = intern(localname, localname) + else: + raise ValueError("Unsupported syntax: spaces in URIs not supported: %r" % name) + return intern(uri, uri), localname, prefix, qname + + +class ExpatBuilder: + """Document builder that uses Expat to build a ParsedXML.DOM document + instance.""" + + def __init__(self, options=None): + if options is None: + options = xmlbuilder.Options() + self._options = options + if self._options.filter is not None: + self._filter = FilterVisibilityController(self._options.filter) + else: + self._filter = None + # This *really* doesn't do anything in this case, so + # override it with something fast & minimal. 
+ self._finish_start_element = id + self._parser = None + self.reset() + + def createParser(self): + """Create a new parser object.""" + return expat.ParserCreate() + + def getParser(self): + """Return the parser object, creating a new one if needed.""" + if not self._parser: + self._parser = self.createParser() + self._intern_setdefault = self._parser.intern.setdefault + self._parser.buffer_text = True + self._parser.ordered_attributes = True + self._parser.specified_attributes = True + self.install(self._parser) + return self._parser + + def reset(self): + """Free all data structures used during DOM construction.""" + self.document = theDOMImplementation.createDocument( + EMPTY_NAMESPACE, None, None) + self.curNode = self.document + self._elem_info = self.document._elem_info + self._cdata = False + + def install(self, parser): + """Install the callbacks needed to build the DOM into the parser.""" + # This creates circular references! + parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler + parser.StartElementHandler = self.first_element_handler + parser.EndElementHandler = self.end_element_handler + parser.ProcessingInstructionHandler = self.pi_handler + if self._options.entities: + parser.EntityDeclHandler = self.entity_decl_handler + parser.NotationDeclHandler = self.notation_decl_handler + if self._options.comments: + parser.CommentHandler = self.comment_handler + if self._options.cdata_sections: + parser.StartCdataSectionHandler = self.start_cdata_section_handler + parser.EndCdataSectionHandler = self.end_cdata_section_handler + parser.CharacterDataHandler = self.character_data_handler_cdata + else: + parser.CharacterDataHandler = self.character_data_handler + parser.ExternalEntityRefHandler = self.external_entity_ref_handler + parser.XmlDeclHandler = self.xml_decl_handler + parser.ElementDeclHandler = self.element_decl_handler + parser.AttlistDeclHandler = self.attlist_decl_handler + + def parseFile(self, file): + """Parse a document from a file 
object, returning the document + node.""" + parser = self.getParser() + first_buffer = True + try: + while buffer := file.read(16*1024): + parser.Parse(buffer, False) + if first_buffer and self.document.documentElement: + self._setup_subset(buffer) + first_buffer = False + parser.Parse(b"", True) + except ParseEscape: + pass + doc = self.document + self.reset() + self._parser = None + return doc + + def parseString(self, string): + """Parse a document from a string, returning the document node.""" + parser = self.getParser() + try: + parser.Parse(string, True) + self._setup_subset(string) + except ParseEscape: + pass + doc = self.document + self.reset() + self._parser = None + return doc + + def _setup_subset(self, buffer): + """Load the internal subset if there might be one.""" + if self.document.doctype: + extractor = InternalSubsetExtractor() + extractor.parseString(buffer) + subset = extractor.getSubset() + self.document.doctype.internalSubset = subset + + def start_doctype_decl_handler(self, doctypeName, systemId, publicId, + has_internal_subset): + doctype = self.document.implementation.createDocumentType( + doctypeName, publicId, systemId) + doctype.ownerDocument = self.document + _append_child(self.document, doctype) + self.document.doctype = doctype + if self._filter and self._filter.acceptNode(doctype) == FILTER_REJECT: + self.document.doctype = None + del self.document.childNodes[-1] + doctype = None + self._parser.EntityDeclHandler = None + self._parser.NotationDeclHandler = None + if has_internal_subset: + if doctype is not None: + doctype.entities._seq = [] + doctype.notations._seq = [] + self._parser.CommentHandler = None + self._parser.ProcessingInstructionHandler = None + self._parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler + + def end_doctype_decl_handler(self): + if self._options.comments: + self._parser.CommentHandler = self.comment_handler + self._parser.ProcessingInstructionHandler = self.pi_handler + if not (self._elem_info or 
self._filter): + self._finish_end_element = id + + def pi_handler(self, target, data): + node = self.document.createProcessingInstruction(target, data) + _append_child(self.curNode, node) + if self._filter and self._filter.acceptNode(node) == FILTER_REJECT: + self.curNode.removeChild(node) + + def character_data_handler_cdata(self, data): + childNodes = self.curNode.childNodes + if self._cdata: + if ( self._cdata_continue + and childNodes[-1].nodeType == CDATA_SECTION_NODE): + childNodes[-1].appendData(data) + return + node = self.document.createCDATASection(data) + self._cdata_continue = True + elif childNodes and childNodes[-1].nodeType == TEXT_NODE: + node = childNodes[-1] + value = node.data + data + node.data = value + return + else: + node = minidom.Text() + node.data = data + node.ownerDocument = self.document + _append_child(self.curNode, node) + + def character_data_handler(self, data): + childNodes = self.curNode.childNodes + if childNodes and childNodes[-1].nodeType == TEXT_NODE: + node = childNodes[-1] + node.data = node.data + data + return + node = minidom.Text() + node.data = node.data + data + node.ownerDocument = self.document + _append_child(self.curNode, node) + + def entity_decl_handler(self, entityName, is_parameter_entity, value, + base, systemId, publicId, notationName): + if is_parameter_entity: + # we don't care about parameter entities for the DOM + return + if not self._options.entities: + return + node = self.document._create_entity(entityName, publicId, + systemId, notationName) + if value is not None: + # internal entity + # node *should* be readonly, but we'll cheat + child = self.document.createTextNode(value) + node.childNodes.append(child) + self.document.doctype.entities._seq.append(node) + if self._filter and self._filter.acceptNode(node) == FILTER_REJECT: + del self.document.doctype.entities._seq[-1] + + def notation_decl_handler(self, notationName, base, systemId, publicId): + node = self.document._create_notation(notationName, 
publicId, systemId) + self.document.doctype.notations._seq.append(node) + if self._filter and self._filter.acceptNode(node) == FILTER_ACCEPT: + del self.document.doctype.notations._seq[-1] + + def comment_handler(self, data): + node = self.document.createComment(data) + _append_child(self.curNode, node) + if self._filter and self._filter.acceptNode(node) == FILTER_REJECT: + self.curNode.removeChild(node) + + def start_cdata_section_handler(self): + self._cdata = True + self._cdata_continue = False + + def end_cdata_section_handler(self): + self._cdata = False + self._cdata_continue = False + + def external_entity_ref_handler(self, context, base, systemId, publicId): + return 1 + + def first_element_handler(self, name, attributes): + if self._filter is None and not self._elem_info: + self._finish_end_element = id + self.getParser().StartElementHandler = self.start_element_handler + self.start_element_handler(name, attributes) + + def start_element_handler(self, name, attributes): + node = self.document.createElement(name) + _append_child(self.curNode, node) + self.curNode = node + + if attributes: + for i in range(0, len(attributes), 2): + a = minidom.Attr(attributes[i], EMPTY_NAMESPACE, + None, EMPTY_PREFIX) + value = attributes[i+1] + a.value = value + a.ownerDocument = self.document + _set_attribute_node(node, a) + + if node is not self.document.documentElement: + self._finish_start_element(node) + + def _finish_start_element(self, node): + if self._filter: + # To be general, we'd have to call isSameNode(), but this + # is sufficient for minidom: + if node is self.document.documentElement: + return + filt = self._filter.startContainer(node) + if filt == FILTER_REJECT: + # ignore this node & all descendents + Rejecter(self) + elif filt == FILTER_SKIP: + # ignore this node, but make it's children become + # children of the parent node + Skipper(self) + else: + return + self.curNode = node.parentNode + node.parentNode.removeChild(node) + node.unlink() + + # If this 
ever changes, Namespaces.end_element_handler() needs to + # be changed to match. + # + def end_element_handler(self, name): + curNode = self.curNode + self.curNode = curNode.parentNode + self._finish_end_element(curNode) + + def _finish_end_element(self, curNode): + info = self._elem_info.get(curNode.tagName) + if info: + self._handle_white_text_nodes(curNode, info) + if self._filter: + if curNode is self.document.documentElement: + return + if self._filter.acceptNode(curNode) == FILTER_REJECT: + self.curNode.removeChild(curNode) + curNode.unlink() + + def _handle_white_text_nodes(self, node, info): + if (self._options.whitespace_in_element_content + or not info.isElementContent()): + return + + # We have element type information and should remove ignorable + # whitespace; identify for text nodes which contain only + # whitespace. + L = [] + for child in node.childNodes: + if child.nodeType == TEXT_NODE and not child.data.strip(): + L.append(child) + + # Remove ignorable whitespace from the tree. + for child in L: + node.removeChild(child) + + def element_decl_handler(self, name, model): + info = self._elem_info.get(name) + if info is None: + self._elem_info[name] = ElementInfo(name, model) + else: + assert info._model is None + info._model = model + + def attlist_decl_handler(self, elem, name, type, default, required): + info = self._elem_info.get(elem) + if info is None: + info = ElementInfo(elem) + self._elem_info[elem] = info + info._attr_info.append( + [None, name, None, None, default, 0, type, required]) + + def xml_decl_handler(self, version, encoding, standalone): + self.document.version = version + self.document.encoding = encoding + # This is still a little ugly, thanks to the pyexpat API. ;-( + if standalone >= 0: + if standalone: + self.document.standalone = True + else: + self.document.standalone = False + + +# Don't include FILTER_INTERRUPT, since that's checked separately +# where allowed. 
+_ALLOWED_FILTER_RETURNS = (FILTER_ACCEPT, FILTER_REJECT, FILTER_SKIP) + +class FilterVisibilityController(object): + """Wrapper around a DOMBuilderFilter which implements the checks + to make the whatToShow filter attribute work.""" + + __slots__ = 'filter', + + def __init__(self, filter): + self.filter = filter + + def startContainer(self, node): + mask = self._nodetype_mask[node.nodeType] + if self.filter.whatToShow & mask: + val = self.filter.startContainer(node) + if val == FILTER_INTERRUPT: + raise ParseEscape + if val not in _ALLOWED_FILTER_RETURNS: + raise ValueError( + "startContainer() returned illegal value: " + repr(val)) + return val + else: + return FILTER_ACCEPT + + def acceptNode(self, node): + mask = self._nodetype_mask[node.nodeType] + if self.filter.whatToShow & mask: + val = self.filter.acceptNode(node) + if val == FILTER_INTERRUPT: + raise ParseEscape + if val == FILTER_SKIP: + # move all child nodes to the parent, and remove this node + parent = node.parentNode + for child in node.childNodes[:]: + parent.appendChild(child) + # node is handled by the caller + return FILTER_REJECT + if val not in _ALLOWED_FILTER_RETURNS: + raise ValueError( + "acceptNode() returned illegal value: " + repr(val)) + return val + else: + return FILTER_ACCEPT + + _nodetype_mask = { + Node.ELEMENT_NODE: NodeFilter.SHOW_ELEMENT, + Node.ATTRIBUTE_NODE: NodeFilter.SHOW_ATTRIBUTE, + Node.TEXT_NODE: NodeFilter.SHOW_TEXT, + Node.CDATA_SECTION_NODE: NodeFilter.SHOW_CDATA_SECTION, + Node.ENTITY_REFERENCE_NODE: NodeFilter.SHOW_ENTITY_REFERENCE, + Node.ENTITY_NODE: NodeFilter.SHOW_ENTITY, + Node.PROCESSING_INSTRUCTION_NODE: NodeFilter.SHOW_PROCESSING_INSTRUCTION, + Node.COMMENT_NODE: NodeFilter.SHOW_COMMENT, + Node.DOCUMENT_NODE: NodeFilter.SHOW_DOCUMENT, + Node.DOCUMENT_TYPE_NODE: NodeFilter.SHOW_DOCUMENT_TYPE, + Node.DOCUMENT_FRAGMENT_NODE: NodeFilter.SHOW_DOCUMENT_FRAGMENT, + Node.NOTATION_NODE: NodeFilter.SHOW_NOTATION, + } + + +class FilterCrutch(object): + __slots__ = 
'_builder', '_level', '_old_start', '_old_end' + + def __init__(self, builder): + self._level = 0 + self._builder = builder + parser = builder._parser + self._old_start = parser.StartElementHandler + self._old_end = parser.EndElementHandler + parser.StartElementHandler = self.start_element_handler + parser.EndElementHandler = self.end_element_handler + +class Rejecter(FilterCrutch): + __slots__ = () + + def __init__(self, builder): + FilterCrutch.__init__(self, builder) + parser = builder._parser + for name in ("ProcessingInstructionHandler", + "CommentHandler", + "CharacterDataHandler", + "StartCdataSectionHandler", + "EndCdataSectionHandler", + "ExternalEntityRefHandler", + ): + setattr(parser, name, None) + + def start_element_handler(self, *args): + self._level = self._level + 1 + + def end_element_handler(self, *args): + if self._level == 0: + # restore the old handlers + parser = self._builder._parser + self._builder.install(parser) + parser.StartElementHandler = self._old_start + parser.EndElementHandler = self._old_end + else: + self._level = self._level - 1 + +class Skipper(FilterCrutch): + __slots__ = () + + def start_element_handler(self, *args): + node = self._builder.curNode + self._old_start(*args) + if self._builder.curNode is not node: + self._level = self._level + 1 + + def end_element_handler(self, *args): + if self._level == 0: + # We're popping back out of the node we're skipping, so we + # shouldn't need to do anything but reset the handlers. + self._builder._parser.StartElementHandler = self._old_start + self._builder._parser.EndElementHandler = self._old_end + self._builder = None + else: + self._level = self._level - 1 + self._old_end(*args) + + +# framework document used by the fragment builder. +# Takes a string for the doctype, subset string, and namespace attrs string. 
+ +_FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID = \ + "http://xml.python.org/entities/fragment-builder/internal" + +_FRAGMENT_BUILDER_TEMPLATE = ( + '''\ + +%%s +]> +&fragment-builder-internal;''' + % _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID) + + +class FragmentBuilder(ExpatBuilder): + """Builder which constructs document fragments given XML source + text and a context node. + + The context node is expected to provide information about the + namespace declarations which are in scope at the start of the + fragment. + """ + + def __init__(self, context, options=None): + if context.nodeType == DOCUMENT_NODE: + self.originalDocument = context + self.context = context + else: + self.originalDocument = context.ownerDocument + self.context = context + ExpatBuilder.__init__(self, options) + + def reset(self): + ExpatBuilder.reset(self) + self.fragment = None + + def parseFile(self, file): + """Parse a document fragment from a file object, returning the + fragment node.""" + return self.parseString(file.read()) + + def parseString(self, string): + """Parse a document fragment from a string, returning the + fragment node.""" + self._source = string + parser = self.getParser() + doctype = self.originalDocument.doctype + ident = "" + if doctype: + subset = doctype.internalSubset or self._getDeclarations() + if doctype.publicId: + ident = ('PUBLIC "%s" "%s"' + % (doctype.publicId, doctype.systemId)) + elif doctype.systemId: + ident = 'SYSTEM "%s"' % doctype.systemId + else: + subset = "" + nsattrs = self._getNSattrs() # get ns decls from node's ancestors + document = _FRAGMENT_BUILDER_TEMPLATE % (ident, subset, nsattrs) + try: + parser.Parse(document, True) + except: + self.reset() + raise + fragment = self.fragment + self.reset() +## self._parser = None + return fragment + + def _getDeclarations(self): + """Re-create the internal subset from the DocumentType node. + + This is only needed if we don't already have the + internalSubset as a string. 
+ """ + doctype = self.context.ownerDocument.doctype + s = "" + if doctype: + for i in range(doctype.notations.length): + notation = doctype.notations.item(i) + if s: + s = s + "\n " + s = "%s' \ + % (s, notation.publicId, notation.systemId) + else: + s = '%s SYSTEM "%s">' % (s, notation.systemId) + for i in range(doctype.entities.length): + entity = doctype.entities.item(i) + if s: + s = s + "\n " + s = "%s" + return s + + def _getNSattrs(self): + return "" + + def external_entity_ref_handler(self, context, base, systemId, publicId): + if systemId == _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID: + # this entref is the one that we made to put the subtree + # in; all of our given input is parsed in here. + old_document = self.document + old_cur_node = self.curNode + parser = self._parser.ExternalEntityParserCreate(context) + # put the real document back, parse into the fragment to return + self.document = self.originalDocument + self.fragment = self.document.createDocumentFragment() + self.curNode = self.fragment + try: + parser.Parse(self._source, True) + finally: + self.curNode = old_cur_node + self.document = old_document + self._source = None + return -1 + else: + return ExpatBuilder.external_entity_ref_handler( + self, context, base, systemId, publicId) + + +class Namespaces: + """Mix-in class for builders; adds support for namespaces.""" + + def _initNamespaces(self): + # list of (prefix, uri) ns declarations. Namespace attrs are + # constructed from this and added to the element's attrs. 
+ self._ns_ordered_prefixes = [] + + def createParser(self): + """Create a new namespace-handling parser.""" + parser = expat.ParserCreate(namespace_separator=" ") + parser.namespace_prefixes = True + return parser + + def install(self, parser): + """Insert the namespace-handlers onto the parser.""" + ExpatBuilder.install(self, parser) + if self._options.namespace_declarations: + parser.StartNamespaceDeclHandler = ( + self.start_namespace_decl_handler) + + def start_namespace_decl_handler(self, prefix, uri): + """Push this namespace declaration on our storage.""" + self._ns_ordered_prefixes.append((prefix, uri)) + + def start_element_handler(self, name, attributes): + if ' ' in name: + uri, localname, prefix, qname = _parse_ns_name(self, name) + else: + uri = EMPTY_NAMESPACE + qname = name + localname = None + prefix = EMPTY_PREFIX + node = minidom.Element(qname, uri, prefix, localname) + node.ownerDocument = self.document + _append_child(self.curNode, node) + self.curNode = node + + if self._ns_ordered_prefixes: + for prefix, uri in self._ns_ordered_prefixes: + if prefix: + a = minidom.Attr(_intern(self, 'xmlns:' + prefix), + XMLNS_NAMESPACE, prefix, "xmlns") + else: + a = minidom.Attr("xmlns", XMLNS_NAMESPACE, + "xmlns", EMPTY_PREFIX) + a.value = uri + a.ownerDocument = self.document + _set_attribute_node(node, a) + del self._ns_ordered_prefixes[:] + + if attributes: + node._ensure_attributes() + _attrs = node._attrs + _attrsNS = node._attrsNS + for i in range(0, len(attributes), 2): + aname = attributes[i] + value = attributes[i+1] + if ' ' in aname: + uri, localname, prefix, qname = _parse_ns_name(self, aname) + a = minidom.Attr(qname, uri, localname, prefix) + _attrs[qname] = a + _attrsNS[(uri, localname)] = a + else: + a = minidom.Attr(aname, EMPTY_NAMESPACE, + aname, EMPTY_PREFIX) + _attrs[aname] = a + _attrsNS[(EMPTY_NAMESPACE, aname)] = a + a.ownerDocument = self.document + a.value = value + a.ownerElement = node + + if __debug__: + # This only adds some 
asserts to the original + # end_element_handler(), so we only define this when -O is not + # used. If changing one, be sure to check the other to see if + # it needs to be changed as well. + # + def end_element_handler(self, name): + curNode = self.curNode + if ' ' in name: + uri, localname, prefix, qname = _parse_ns_name(self, name) + assert (curNode.namespaceURI == uri + and curNode.localName == localname + and curNode.prefix == prefix), \ + "element stack messed up! (namespace)" + else: + assert curNode.nodeName == name, \ + "element stack messed up - bad nodeName" + assert curNode.namespaceURI == EMPTY_NAMESPACE, \ + "element stack messed up - bad namespaceURI" + self.curNode = curNode.parentNode + self._finish_end_element(curNode) + + +class ExpatBuilderNS(Namespaces, ExpatBuilder): + """Document builder that supports namespaces.""" + + def reset(self): + ExpatBuilder.reset(self) + self._initNamespaces() + + +class FragmentBuilderNS(Namespaces, FragmentBuilder): + """Fragment builder that supports namespaces.""" + + def reset(self): + FragmentBuilder.reset(self) + self._initNamespaces() + + def _getNSattrs(self): + """Return string of namespace attributes from this element and + ancestors.""" + # XXX This needs to be re-written to walk the ancestors of the + # context to build up the namespace information from + # declarations, elements, and attributes found in context. + # Otherwise we have to store a bunch more data on the DOM + # (though that *might* be more reliable -- not clear). 
+ attrs = "" + context = self.context + L = [] + while context: + if hasattr(context, '_ns_prefix_uri'): + for prefix, uri in context._ns_prefix_uri.items(): + # add every new NS decl from context to L and attrs string + if prefix in L: + continue + L.append(prefix) + if prefix: + declname = "xmlns:" + prefix + else: + declname = "xmlns" + if attrs: + attrs = "%s\n %s='%s'" % (attrs, declname, uri) + else: + attrs = " %s='%s'" % (declname, uri) + context = context.parentNode + return attrs + + +class ParseEscape(Exception): + """Exception raised to short-circuit parsing in InternalSubsetExtractor.""" + pass + +class InternalSubsetExtractor(ExpatBuilder): + """XML processor which can rip out the internal document type subset.""" + + subset = None + + def getSubset(self): + """Return the internal subset as a string.""" + return self.subset + + def parseFile(self, file): + try: + ExpatBuilder.parseFile(self, file) + except ParseEscape: + pass + + def parseString(self, string): + try: + ExpatBuilder.parseString(self, string) + except ParseEscape: + pass + + def install(self, parser): + parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler + parser.StartElementHandler = self.start_element_handler + + def start_doctype_decl_handler(self, name, publicId, systemId, + has_internal_subset): + if has_internal_subset: + parser = self.getParser() + self.subset = [] + parser.DefaultHandler = self.subset.append + parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler + else: + raise ParseEscape() + + def end_doctype_decl_handler(self): + s = ''.join(self.subset).replace('\r\n', '\n').replace('\r', '\n') + self.subset = s + raise ParseEscape() + + def start_element_handler(self, name, attrs): + raise ParseEscape() + + +def parse(file, namespaces=True): + """Parse a document, returning the resulting Document node. + + 'file' may be either a file name or an open file object. 
+ """ + if namespaces: + builder = ExpatBuilderNS() + else: + builder = ExpatBuilder() + + if isinstance(file, str): + with open(file, 'rb') as fp: + result = builder.parseFile(fp) + else: + result = builder.parseFile(file) + return result + + +def parseString(string, namespaces=True): + """Parse a document from a string, returning the resulting + Document node. + """ + if namespaces: + builder = ExpatBuilderNS() + else: + builder = ExpatBuilder() + return builder.parseString(string) + + +def parseFragment(file, context, namespaces=True): + """Parse a fragment of a document, given the context from which it + was originally extracted. context should be the parent of the + node(s) which are in the fragment. + + 'file' may be either a file name or an open file object. + """ + if namespaces: + builder = FragmentBuilderNS(context) + else: + builder = FragmentBuilder(context) + + if isinstance(file, str): + with open(file, 'rb') as fp: + result = builder.parseFile(fp) + else: + result = builder.parseFile(file) + return result + + +def parseFragmentString(string, context, namespaces=True): + """Parse a fragment of a document from a string, given the context + from which it was originally extracted. context should be the + parent of the node(s) which are in the fragment. + """ + if namespaces: + builder = FragmentBuilderNS(context) + else: + builder = FragmentBuilder(context) + return builder.parseString(string) + + +def makeBuilder(options): + """Create a builder based on an Options object.""" + if options.namespaces: + return ExpatBuilderNS(options) + else: + return ExpatBuilder(options) diff --git a/defaults/lib/x/dom/minicompat.py b/defaults/lib/x/dom/minicompat.py new file mode 100644 index 0000000..5d6fae9 --- /dev/null +++ b/defaults/lib/x/dom/minicompat.py @@ -0,0 +1,109 @@ +"""Python version compatibility support for minidom. + +This module contains internal implementation details and +should not be imported; use xml.dom.minidom instead. 
+""" + +# This module should only be imported using "import *". +# +# The following names are defined: +# +# NodeList -- lightest possible NodeList implementation +# +# EmptyNodeList -- lightest possible NodeList that is guaranteed to +# remain empty (immutable) +# +# StringTypes -- tuple of defined string types +# +# defproperty -- function used in conjunction with GetattrMagic; +# using these together is needed to make them work +# as efficiently as possible in both Python 2.2+ +# and older versions. For example: +# +# class MyClass(GetattrMagic): +# def _get_myattr(self): +# return something +# +# defproperty(MyClass, "myattr", +# "return some value") +# +# For Python 2.2 and newer, this will construct a +# property object on the class, which avoids +# needing to override __getattr__(). It will only +# work for read-only attributes. +# +# For older versions of Python, inheriting from +# GetattrMagic will use the traditional +# __getattr__() hackery to achieve the same effect, +# but less efficiently. +# +# defproperty() should be used for each version of +# the relevant _get_() function. 
+ +__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"] + +import xml.dom + +StringTypes = (str,) + + +class NodeList(list): + __slots__ = () + + def item(self, index): + if 0 <= index < len(self): + return self[index] + + def _get_length(self): + return len(self) + + def _set_length(self, value): + raise xml.dom.NoModificationAllowedErr( + "attempt to modify read-only attribute 'length'") + + length = property(_get_length, _set_length, + doc="The number of nodes in the NodeList.") + + # For backward compatibility + def __setstate__(self, state): + if state is None: + state = [] + self[:] = state + + +class EmptyNodeList(tuple): + __slots__ = () + + def __add__(self, other): + NL = NodeList() + NL.extend(other) + return NL + + def __radd__(self, other): + NL = NodeList() + NL.extend(other) + return NL + + def item(self, index): + return None + + def _get_length(self): + return 0 + + def _set_length(self, value): + raise xml.dom.NoModificationAllowedErr( + "attempt to modify read-only attribute 'length'") + + length = property(_get_length, _set_length, + doc="The number of nodes in the NodeList.") + + +def defproperty(klass, name, doc): + get = getattr(klass, ("_get_" + name)) + def set(self, value, name=name): + raise xml.dom.NoModificationAllowedErr( + "attempt to modify read-only attribute " + repr(name)) + assert not hasattr(klass, "_set_" + name), \ + "expected not to find _set_" + name + prop = property(get, set, doc=doc) + setattr(klass, name, prop) diff --git a/defaults/lib/x/dom/minidom.py b/defaults/lib/x/dom/minidom.py new file mode 100644 index 0000000..ef8a159 --- /dev/null +++ b/defaults/lib/x/dom/minidom.py @@ -0,0 +1,2013 @@ +"""Simple implementation of the Level 1 DOM. + +Namespaces and other minor Level 2 features are also supported. + +parse("foo.xml") + +parseString("") + +Todo: +===== + * convenience methods for getting elements and text. 
+ * more testing + * bring some of the writer and linearizer code into conformance with this + interface + * SAX 2 namespaces +""" + +import io +import xml.dom + +from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg +from xml.dom.minicompat import * +from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS + +# This is used by the ID-cache invalidation checks; the list isn't +# actually complete, since the nodes being checked will never be the +# DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is +# the node being added or removed, not the node being modified.) +# +_nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE, + xml.dom.Node.ENTITY_REFERENCE_NODE) + + +class Node(xml.dom.Node): + namespaceURI = None # this is non-null only for elements and attributes + parentNode = None + ownerDocument = None + nextSibling = None + previousSibling = None + + prefix = EMPTY_PREFIX # non-null only for NS elements and attributes + + def __bool__(self): + return True + + def toxml(self, encoding=None, standalone=None): + return self.toprettyxml("", "", encoding, standalone) + + def toprettyxml(self, indent="\t", newl="\n", encoding=None, + standalone=None): + if encoding is None: + writer = io.StringIO() + else: + writer = io.TextIOWrapper(io.BytesIO(), + encoding=encoding, + errors="xmlcharrefreplace", + newline='\n') + if self.nodeType == Node.DOCUMENT_NODE: + # Can pass encoding only to document, to put it into XML header + self.writexml(writer, "", indent, newl, encoding, standalone) + else: + self.writexml(writer, "", indent, newl) + if encoding is None: + return writer.getvalue() + else: + return writer.detach().getvalue() + + def hasChildNodes(self): + return bool(self.childNodes) + + def _get_childNodes(self): + return self.childNodes + + def _get_firstChild(self): + if self.childNodes: + return self.childNodes[0] + + def _get_lastChild(self): + if self.childNodes: + return self.childNodes[-1] + + def insertBefore(self, 
newChild, refChild): + if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE: + for c in tuple(newChild.childNodes): + self.insertBefore(c, refChild) + ### The DOM does not clearly specify what to return in this case + return newChild + if newChild.nodeType not in self._child_node_types: + raise xml.dom.HierarchyRequestErr( + "%s cannot be child of %s" % (repr(newChild), repr(self))) + if newChild.parentNode is not None: + newChild.parentNode.removeChild(newChild) + if refChild is None: + self.appendChild(newChild) + else: + try: + index = self.childNodes.index(refChild) + except ValueError: + raise xml.dom.NotFoundErr() + if newChild.nodeType in _nodeTypes_with_children: + _clear_id_cache(self) + self.childNodes.insert(index, newChild) + newChild.nextSibling = refChild + refChild.previousSibling = newChild + if index: + node = self.childNodes[index-1] + node.nextSibling = newChild + newChild.previousSibling = node + else: + newChild.previousSibling = None + newChild.parentNode = self + return newChild + + def appendChild(self, node): + if node.nodeType == self.DOCUMENT_FRAGMENT_NODE: + for c in tuple(node.childNodes): + self.appendChild(c) + ### The DOM does not clearly specify what to return in this case + return node + if node.nodeType not in self._child_node_types: + raise xml.dom.HierarchyRequestErr( + "%s cannot be child of %s" % (repr(node), repr(self))) + elif node.nodeType in _nodeTypes_with_children: + _clear_id_cache(self) + if node.parentNode is not None: + node.parentNode.removeChild(node) + _append_child(self, node) + node.nextSibling = None + return node + + def replaceChild(self, newChild, oldChild): + if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE: + refChild = oldChild.nextSibling + self.removeChild(oldChild) + return self.insertBefore(newChild, refChild) + if newChild.nodeType not in self._child_node_types: + raise xml.dom.HierarchyRequestErr( + "%s cannot be child of %s" % (repr(newChild), repr(self))) + if newChild is oldChild: + return + if 
newChild.parentNode is not None: + newChild.parentNode.removeChild(newChild) + try: + index = self.childNodes.index(oldChild) + except ValueError: + raise xml.dom.NotFoundErr() + self.childNodes[index] = newChild + newChild.parentNode = self + oldChild.parentNode = None + if (newChild.nodeType in _nodeTypes_with_children + or oldChild.nodeType in _nodeTypes_with_children): + _clear_id_cache(self) + newChild.nextSibling = oldChild.nextSibling + newChild.previousSibling = oldChild.previousSibling + oldChild.nextSibling = None + oldChild.previousSibling = None + if newChild.previousSibling: + newChild.previousSibling.nextSibling = newChild + if newChild.nextSibling: + newChild.nextSibling.previousSibling = newChild + return oldChild + + def removeChild(self, oldChild): + try: + self.childNodes.remove(oldChild) + except ValueError: + raise xml.dom.NotFoundErr() + if oldChild.nextSibling is not None: + oldChild.nextSibling.previousSibling = oldChild.previousSibling + if oldChild.previousSibling is not None: + oldChild.previousSibling.nextSibling = oldChild.nextSibling + oldChild.nextSibling = oldChild.previousSibling = None + if oldChild.nodeType in _nodeTypes_with_children: + _clear_id_cache(self) + + oldChild.parentNode = None + return oldChild + + def normalize(self): + L = [] + for child in self.childNodes: + if child.nodeType == Node.TEXT_NODE: + if not child.data: + # empty text node; discard + if L: + L[-1].nextSibling = child.nextSibling + if child.nextSibling: + child.nextSibling.previousSibling = child.previousSibling + child.unlink() + elif L and L[-1].nodeType == child.nodeType: + # collapse text node + node = L[-1] + node.data = node.data + child.data + node.nextSibling = child.nextSibling + if child.nextSibling: + child.nextSibling.previousSibling = node + child.unlink() + else: + L.append(child) + else: + L.append(child) + if child.nodeType == Node.ELEMENT_NODE: + child.normalize() + self.childNodes[:] = L + + def cloneNode(self, deep): + return 
_clone_node(self, deep, self.ownerDocument or self) + + def isSupported(self, feature, version): + return self.ownerDocument.implementation.hasFeature(feature, version) + + def _get_localName(self): + # Overridden in Element and Attr where localName can be Non-Null + return None + + # Node interfaces from Level 3 (WD 9 April 2002) + + def isSameNode(self, other): + return self is other + + def getInterface(self, feature): + if self.isSupported(feature, None): + return self + else: + return None + + # The "user data" functions use a dictionary that is only present + # if some user data has been set, so be careful not to assume it + # exists. + + def getUserData(self, key): + try: + return self._user_data[key][0] + except (AttributeError, KeyError): + return None + + def setUserData(self, key, data, handler): + old = None + try: + d = self._user_data + except AttributeError: + d = {} + self._user_data = d + if key in d: + old = d[key][0] + if data is None: + # ignore handlers passed for None + handler = None + if old is not None: + del d[key] + else: + d[key] = (data, handler) + return old + + def _call_user_data_handler(self, operation, src, dst): + if hasattr(self, "_user_data"): + for key, (data, handler) in list(self._user_data.items()): + if handler is not None: + handler.handle(operation, key, data, src, dst) + + # minidom-specific API: + + def unlink(self): + self.parentNode = self.ownerDocument = None + if self.childNodes: + for child in self.childNodes: + child.unlink() + self.childNodes = NodeList() + self.previousSibling = None + self.nextSibling = None + + # A Node is its own context manager, to ensure that an unlink() call occurs. + # This is similar to how a file object works. 
+ def __enter__(self): + return self + + def __exit__(self, et, ev, tb): + self.unlink() + +defproperty(Node, "firstChild", doc="First child node, or None.") +defproperty(Node, "lastChild", doc="Last child node, or None.") +defproperty(Node, "localName", doc="Namespace-local name of this node.") + + +def _append_child(self, node): + # fast path with less checks; usable by DOM builders if careful + childNodes = self.childNodes + if childNodes: + last = childNodes[-1] + node.previousSibling = last + last.nextSibling = node + childNodes.append(node) + node.parentNode = self + +def _in_document(node): + # return True iff node is part of a document tree + while node is not None: + if node.nodeType == Node.DOCUMENT_NODE: + return True + node = node.parentNode + return False + +def _write_data(writer, data): + "Writes datachars to writer." + if data: + data = data.replace("&", "&").replace("<", "<"). \ + replace("\"", """).replace(">", ">") + writer.write(data) + +def _get_elements_by_tagName_helper(parent, name, rc): + for node in parent.childNodes: + if node.nodeType == Node.ELEMENT_NODE and \ + (name == "*" or node.tagName == name): + rc.append(node) + _get_elements_by_tagName_helper(node, name, rc) + return rc + +def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc): + for node in parent.childNodes: + if node.nodeType == Node.ELEMENT_NODE: + if ((localName == "*" or node.localName == localName) and + (nsURI == "*" or node.namespaceURI == nsURI)): + rc.append(node) + _get_elements_by_tagName_ns_helper(node, nsURI, localName, rc) + return rc + +class DocumentFragment(Node): + nodeType = Node.DOCUMENT_FRAGMENT_NODE + nodeName = "#document-fragment" + nodeValue = None + attributes = None + parentNode = None + _child_node_types = (Node.ELEMENT_NODE, + Node.TEXT_NODE, + Node.CDATA_SECTION_NODE, + Node.ENTITY_REFERENCE_NODE, + Node.PROCESSING_INSTRUCTION_NODE, + Node.COMMENT_NODE, + Node.NOTATION_NODE) + + def __init__(self): + self.childNodes = NodeList() + + 
+class Attr(Node): + __slots__=('_name', '_value', 'namespaceURI', + '_prefix', 'childNodes', '_localName', 'ownerDocument', 'ownerElement') + nodeType = Node.ATTRIBUTE_NODE + attributes = None + specified = False + _is_id = False + + _child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE) + + def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None, + prefix=None): + self.ownerElement = None + self._name = qName + self.namespaceURI = namespaceURI + self._prefix = prefix + if localName is not None: + self._localName = localName + self.childNodes = NodeList() + + # Add the single child node that represents the value of the attr + self.childNodes.append(Text()) + + # nodeValue and value are set elsewhere + + def _get_localName(self): + try: + return self._localName + except AttributeError: + return self.nodeName.split(":", 1)[-1] + + def _get_specified(self): + return self.specified + + def _get_name(self): + return self._name + + def _set_name(self, value): + self._name = value + if self.ownerElement is not None: + _clear_id_cache(self.ownerElement) + + nodeName = name = property(_get_name, _set_name) + + def _get_value(self): + return self._value + + def _set_value(self, value): + self._value = value + self.childNodes[0].data = value + if self.ownerElement is not None: + _clear_id_cache(self.ownerElement) + self.childNodes[0].data = value + + nodeValue = value = property(_get_value, _set_value) + + def _get_prefix(self): + return self._prefix + + def _set_prefix(self, prefix): + nsuri = self.namespaceURI + if prefix == "xmlns": + if nsuri and nsuri != XMLNS_NAMESPACE: + raise xml.dom.NamespaceErr( + "illegal use of 'xmlns' prefix for the wrong namespace") + self._prefix = prefix + if prefix is None: + newName = self.localName + else: + newName = "%s:%s" % (prefix, self.localName) + if self.ownerElement: + _clear_id_cache(self.ownerElement) + self.name = newName + + prefix = property(_get_prefix, _set_prefix) + + def unlink(self): + # This 
implementation does not call the base implementation + # since most of that is not needed, and the expense of the + # method call is not warranted. We duplicate the removal of + # children, but that's all we needed from the base class. + elem = self.ownerElement + if elem is not None: + del elem._attrs[self.nodeName] + del elem._attrsNS[(self.namespaceURI, self.localName)] + if self._is_id: + self._is_id = False + elem._magic_id_nodes -= 1 + self.ownerDocument._magic_id_count -= 1 + for child in self.childNodes: + child.unlink() + del self.childNodes[:] + + def _get_isId(self): + if self._is_id: + return True + doc = self.ownerDocument + elem = self.ownerElement + if doc is None or elem is None: + return False + + info = doc._get_elem_info(elem) + if info is None: + return False + if self.namespaceURI: + return info.isIdNS(self.namespaceURI, self.localName) + else: + return info.isId(self.nodeName) + + def _get_schemaType(self): + doc = self.ownerDocument + elem = self.ownerElement + if doc is None or elem is None: + return _no_type + + info = doc._get_elem_info(elem) + if info is None: + return _no_type + if self.namespaceURI: + return info.getAttributeTypeNS(self.namespaceURI, self.localName) + else: + return info.getAttributeType(self.nodeName) + +defproperty(Attr, "isId", doc="True if this attribute is an ID.") +defproperty(Attr, "localName", doc="Namespace-local name of this attribute.") +defproperty(Attr, "schemaType", doc="Schema type for this attribute.") + + +class NamedNodeMap(object): + """The attribute list is a transient interface to the underlying + dictionaries. Mutations here will change the underlying element's + dictionary. + + Ordering is imposed artificially and does not reflect the order of + attributes as found in an input document. 
+ """ + + __slots__ = ('_attrs', '_attrsNS', '_ownerElement') + + def __init__(self, attrs, attrsNS, ownerElement): + self._attrs = attrs + self._attrsNS = attrsNS + self._ownerElement = ownerElement + + def _get_length(self): + return len(self._attrs) + + def item(self, index): + try: + return self[list(self._attrs.keys())[index]] + except IndexError: + return None + + def items(self): + L = [] + for node in self._attrs.values(): + L.append((node.nodeName, node.value)) + return L + + def itemsNS(self): + L = [] + for node in self._attrs.values(): + L.append(((node.namespaceURI, node.localName), node.value)) + return L + + def __contains__(self, key): + if isinstance(key, str): + return key in self._attrs + else: + return key in self._attrsNS + + def keys(self): + return self._attrs.keys() + + def keysNS(self): + return self._attrsNS.keys() + + def values(self): + return self._attrs.values() + + def get(self, name, value=None): + return self._attrs.get(name, value) + + __len__ = _get_length + + def _cmp(self, other): + if self._attrs is getattr(other, "_attrs", None): + return 0 + else: + return (id(self) > id(other)) - (id(self) < id(other)) + + def __eq__(self, other): + return self._cmp(other) == 0 + + def __ge__(self, other): + return self._cmp(other) >= 0 + + def __gt__(self, other): + return self._cmp(other) > 0 + + def __le__(self, other): + return self._cmp(other) <= 0 + + def __lt__(self, other): + return self._cmp(other) < 0 + + def __getitem__(self, attname_or_tuple): + if isinstance(attname_or_tuple, tuple): + return self._attrsNS[attname_or_tuple] + else: + return self._attrs[attname_or_tuple] + + # same as set + def __setitem__(self, attname, value): + if isinstance(value, str): + try: + node = self._attrs[attname] + except KeyError: + node = Attr(attname) + node.ownerDocument = self._ownerElement.ownerDocument + self.setNamedItem(node) + node.value = value + else: + if not isinstance(value, Attr): + raise TypeError("value must be a string or Attr 
object") + node = value + self.setNamedItem(node) + + def getNamedItem(self, name): + try: + return self._attrs[name] + except KeyError: + return None + + def getNamedItemNS(self, namespaceURI, localName): + try: + return self._attrsNS[(namespaceURI, localName)] + except KeyError: + return None + + def removeNamedItem(self, name): + n = self.getNamedItem(name) + if n is not None: + _clear_id_cache(self._ownerElement) + del self._attrs[n.nodeName] + del self._attrsNS[(n.namespaceURI, n.localName)] + if hasattr(n, 'ownerElement'): + n.ownerElement = None + return n + else: + raise xml.dom.NotFoundErr() + + def removeNamedItemNS(self, namespaceURI, localName): + n = self.getNamedItemNS(namespaceURI, localName) + if n is not None: + _clear_id_cache(self._ownerElement) + del self._attrsNS[(n.namespaceURI, n.localName)] + del self._attrs[n.nodeName] + if hasattr(n, 'ownerElement'): + n.ownerElement = None + return n + else: + raise xml.dom.NotFoundErr() + + def setNamedItem(self, node): + if not isinstance(node, Attr): + raise xml.dom.HierarchyRequestErr( + "%s cannot be child of %s" % (repr(node), repr(self))) + old = self._attrs.get(node.name) + if old: + old.unlink() + self._attrs[node.name] = node + self._attrsNS[(node.namespaceURI, node.localName)] = node + node.ownerElement = self._ownerElement + _clear_id_cache(node.ownerElement) + return old + + def setNamedItemNS(self, node): + return self.setNamedItem(node) + + def __delitem__(self, attname_or_tuple): + node = self[attname_or_tuple] + _clear_id_cache(node.ownerElement) + node.unlink() + + def __getstate__(self): + return self._attrs, self._attrsNS, self._ownerElement + + def __setstate__(self, state): + self._attrs, self._attrsNS, self._ownerElement = state + +defproperty(NamedNodeMap, "length", + doc="Number of nodes in the NamedNodeMap.") + +AttributeList = NamedNodeMap + + +class TypeInfo(object): + __slots__ = 'namespace', 'name' + + def __init__(self, namespace, name): + self.namespace = namespace + 
self.name = name + + def __repr__(self): + if self.namespace: + return "<%s %r (from %r)>" % (self.__class__.__name__, self.name, + self.namespace) + else: + return "<%s %r>" % (self.__class__.__name__, self.name) + + def _get_name(self): + return self.name + + def _get_namespace(self): + return self.namespace + +_no_type = TypeInfo(None, None) + +class Element(Node): + __slots__=('ownerDocument', 'parentNode', 'tagName', 'nodeName', 'prefix', + 'namespaceURI', '_localName', 'childNodes', '_attrs', '_attrsNS', + 'nextSibling', 'previousSibling') + nodeType = Node.ELEMENT_NODE + nodeValue = None + schemaType = _no_type + + _magic_id_nodes = 0 + + _child_node_types = (Node.ELEMENT_NODE, + Node.PROCESSING_INSTRUCTION_NODE, + Node.COMMENT_NODE, + Node.TEXT_NODE, + Node.CDATA_SECTION_NODE, + Node.ENTITY_REFERENCE_NODE) + + def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None, + localName=None): + self.parentNode = None + self.tagName = self.nodeName = tagName + self.prefix = prefix + self.namespaceURI = namespaceURI + self.childNodes = NodeList() + self.nextSibling = self.previousSibling = None + + # Attribute dictionaries are lazily created + # attributes are double-indexed: + # tagName -> Attribute + # URI,localName -> Attribute + # in the future: consider lazy generation + # of attribute objects this is too tricky + # for now because of headaches with + # namespaces. + self._attrs = None + self._attrsNS = None + + def _ensure_attributes(self): + if self._attrs is None: + self._attrs = {} + self._attrsNS = {} + + def _get_localName(self): + try: + return self._localName + except AttributeError: + return self.tagName.split(":", 1)[-1] + + def _get_tagName(self): + return self.tagName + + def unlink(self): + if self._attrs is not None: + for attr in list(self._attrs.values()): + attr.unlink() + self._attrs = None + self._attrsNS = None + Node.unlink(self) + + def getAttribute(self, attname): + """Returns the value of the specified attribute. 
+ + Returns the value of the element's attribute named attname as + a string. An empty string is returned if the element does not + have such an attribute. Note that an empty string may also be + returned as an explicitly given attribute value, use the + hasAttribute method to distinguish these two cases. + """ + if self._attrs is None: + return "" + try: + return self._attrs[attname].value + except KeyError: + return "" + + def getAttributeNS(self, namespaceURI, localName): + if self._attrsNS is None: + return "" + try: + return self._attrsNS[(namespaceURI, localName)].value + except KeyError: + return "" + + def setAttribute(self, attname, value): + attr = self.getAttributeNode(attname) + if attr is None: + attr = Attr(attname) + attr.value = value # also sets nodeValue + attr.ownerDocument = self.ownerDocument + self.setAttributeNode(attr) + elif value != attr.value: + attr.value = value + if attr.isId: + _clear_id_cache(self) + + def setAttributeNS(self, namespaceURI, qualifiedName, value): + prefix, localname = _nssplit(qualifiedName) + attr = self.getAttributeNodeNS(namespaceURI, localname) + if attr is None: + attr = Attr(qualifiedName, namespaceURI, localname, prefix) + attr.value = value + attr.ownerDocument = self.ownerDocument + self.setAttributeNode(attr) + else: + if value != attr.value: + attr.value = value + if attr.isId: + _clear_id_cache(self) + if attr.prefix != prefix: + attr.prefix = prefix + attr.nodeName = qualifiedName + + def getAttributeNode(self, attrname): + if self._attrs is None: + return None + return self._attrs.get(attrname) + + def getAttributeNodeNS(self, namespaceURI, localName): + if self._attrsNS is None: + return None + return self._attrsNS.get((namespaceURI, localName)) + + def setAttributeNode(self, attr): + if attr.ownerElement not in (None, self): + raise xml.dom.InuseAttributeErr("attribute node already owned") + self._ensure_attributes() + old1 = self._attrs.get(attr.name, None) + if old1 is not None: + 
self.removeAttributeNode(old1) + old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None) + if old2 is not None and old2 is not old1: + self.removeAttributeNode(old2) + _set_attribute_node(self, attr) + + if old1 is not attr: + # It might have already been part of this node, in which case + # it doesn't represent a change, and should not be returned. + return old1 + if old2 is not attr: + return old2 + + setAttributeNodeNS = setAttributeNode + + def removeAttribute(self, name): + if self._attrsNS is None: + raise xml.dom.NotFoundErr() + try: + attr = self._attrs[name] + except KeyError: + raise xml.dom.NotFoundErr() + self.removeAttributeNode(attr) + + def removeAttributeNS(self, namespaceURI, localName): + if self._attrsNS is None: + raise xml.dom.NotFoundErr() + try: + attr = self._attrsNS[(namespaceURI, localName)] + except KeyError: + raise xml.dom.NotFoundErr() + self.removeAttributeNode(attr) + + def removeAttributeNode(self, node): + if node is None: + raise xml.dom.NotFoundErr() + try: + self._attrs[node.name] + except KeyError: + raise xml.dom.NotFoundErr() + _clear_id_cache(self) + node.unlink() + # Restore this since the node is still useful and otherwise + # unlinked + node.ownerDocument = self.ownerDocument + return node + + removeAttributeNodeNS = removeAttributeNode + + def hasAttribute(self, name): + """Checks whether the element has an attribute with the specified name. + + Returns True if the element has an attribute with the specified name. + Otherwise, returns False. + """ + if self._attrs is None: + return False + return name in self._attrs + + def hasAttributeNS(self, namespaceURI, localName): + if self._attrsNS is None: + return False + return (namespaceURI, localName) in self._attrsNS + + def getElementsByTagName(self, name): + """Returns all descendant elements with the given tag name. + + Returns the list of all descendant elements (not direct children + only) with the specified tag name. 
+ """ + return _get_elements_by_tagName_helper(self, name, NodeList()) + + def getElementsByTagNameNS(self, namespaceURI, localName): + return _get_elements_by_tagName_ns_helper( + self, namespaceURI, localName, NodeList()) + + def __repr__(self): + return "" % (self.tagName, id(self)) + + def writexml(self, writer, indent="", addindent="", newl=""): + """Write an XML element to a file-like object + + Write the element to the writer object that must provide + a write method (e.g. a file or StringIO object). + """ + # indent = current indentation + # addindent = indentation to add to higher levels + # newl = newline string + writer.write(indent+"<" + self.tagName) + + attrs = self._get_attributes() + + for a_name in attrs.keys(): + writer.write(" %s=\"" % a_name) + _write_data(writer, attrs[a_name].value) + writer.write("\"") + if self.childNodes: + writer.write(">") + if (len(self.childNodes) == 1 and + self.childNodes[0].nodeType in ( + Node.TEXT_NODE, Node.CDATA_SECTION_NODE)): + self.childNodes[0].writexml(writer, '', '', '') + else: + writer.write(newl) + for node in self.childNodes: + node.writexml(writer, indent+addindent, addindent, newl) + writer.write(indent) + writer.write("%s" % (self.tagName, newl)) + else: + writer.write("/>%s"%(newl)) + + def _get_attributes(self): + self._ensure_attributes() + return NamedNodeMap(self._attrs, self._attrsNS, self) + + def hasAttributes(self): + if self._attrs: + return True + else: + return False + + # DOM Level 3 attributes, based on the 22 Oct 2002 draft + + def setIdAttribute(self, name): + idAttr = self.getAttributeNode(name) + self.setIdAttributeNode(idAttr) + + def setIdAttributeNS(self, namespaceURI, localName): + idAttr = self.getAttributeNodeNS(namespaceURI, localName) + self.setIdAttributeNode(idAttr) + + def setIdAttributeNode(self, idAttr): + if idAttr is None or not self.isSameNode(idAttr.ownerElement): + raise xml.dom.NotFoundErr() + if _get_containing_entref(self) is not None: + raise 
xml.dom.NoModificationAllowedErr() + if not idAttr._is_id: + idAttr._is_id = True + self._magic_id_nodes += 1 + self.ownerDocument._magic_id_count += 1 + _clear_id_cache(self) + +defproperty(Element, "attributes", + doc="NamedNodeMap of attributes on the element.") +defproperty(Element, "localName", + doc="Namespace-local name of this element.") + + +def _set_attribute_node(element, attr): + _clear_id_cache(element) + element._ensure_attributes() + element._attrs[attr.name] = attr + element._attrsNS[(attr.namespaceURI, attr.localName)] = attr + + # This creates a circular reference, but Element.unlink() + # breaks the cycle since the references to the attribute + # dictionaries are tossed. + attr.ownerElement = element + +class Childless: + """Mixin that makes childless-ness easy to implement and avoids + the complexity of the Node methods that deal with children. + """ + __slots__ = () + + attributes = None + childNodes = EmptyNodeList() + firstChild = None + lastChild = None + + def _get_firstChild(self): + return None + + def _get_lastChild(self): + return None + + def appendChild(self, node): + raise xml.dom.HierarchyRequestErr( + self.nodeName + " nodes cannot have children") + + def hasChildNodes(self): + return False + + def insertBefore(self, newChild, refChild): + raise xml.dom.HierarchyRequestErr( + self.nodeName + " nodes do not have children") + + def removeChild(self, oldChild): + raise xml.dom.NotFoundErr( + self.nodeName + " nodes do not have children") + + def normalize(self): + # For childless nodes, normalize() has nothing to do. 
+ pass + + def replaceChild(self, newChild, oldChild): + raise xml.dom.HierarchyRequestErr( + self.nodeName + " nodes do not have children") + + +class ProcessingInstruction(Childless, Node): + nodeType = Node.PROCESSING_INSTRUCTION_NODE + __slots__ = ('target', 'data') + + def __init__(self, target, data): + self.target = target + self.data = data + + # nodeValue is an alias for data + def _get_nodeValue(self): + return self.data + def _set_nodeValue(self, value): + self.data = value + nodeValue = property(_get_nodeValue, _set_nodeValue) + + # nodeName is an alias for target + def _get_nodeName(self): + return self.target + def _set_nodeName(self, value): + self.target = value + nodeName = property(_get_nodeName, _set_nodeName) + + def writexml(self, writer, indent="", addindent="", newl=""): + writer.write("%s%s" % (indent,self.target, self.data, newl)) + + +class CharacterData(Childless, Node): + __slots__=('_data', 'ownerDocument','parentNode', 'previousSibling', 'nextSibling') + + def __init__(self): + self.ownerDocument = self.parentNode = None + self.previousSibling = self.nextSibling = None + self._data = '' + Node.__init__(self) + + def _get_length(self): + return len(self.data) + __len__ = _get_length + + def _get_data(self): + return self._data + def _set_data(self, data): + self._data = data + + data = nodeValue = property(_get_data, _set_data) + + def __repr__(self): + data = self.data + if len(data) > 10: + dotdotdot = "..." 
+ else: + dotdotdot = "" + return '' % ( + self.__class__.__name__, data[0:10], dotdotdot) + + def substringData(self, offset, count): + if offset < 0: + raise xml.dom.IndexSizeErr("offset cannot be negative") + if offset >= len(self.data): + raise xml.dom.IndexSizeErr("offset cannot be beyond end of data") + if count < 0: + raise xml.dom.IndexSizeErr("count cannot be negative") + return self.data[offset:offset+count] + + def appendData(self, arg): + self.data = self.data + arg + + def insertData(self, offset, arg): + if offset < 0: + raise xml.dom.IndexSizeErr("offset cannot be negative") + if offset >= len(self.data): + raise xml.dom.IndexSizeErr("offset cannot be beyond end of data") + if arg: + self.data = "%s%s%s" % ( + self.data[:offset], arg, self.data[offset:]) + + def deleteData(self, offset, count): + if offset < 0: + raise xml.dom.IndexSizeErr("offset cannot be negative") + if offset >= len(self.data): + raise xml.dom.IndexSizeErr("offset cannot be beyond end of data") + if count < 0: + raise xml.dom.IndexSizeErr("count cannot be negative") + if count: + self.data = self.data[:offset] + self.data[offset+count:] + + def replaceData(self, offset, count, arg): + if offset < 0: + raise xml.dom.IndexSizeErr("offset cannot be negative") + if offset >= len(self.data): + raise xml.dom.IndexSizeErr("offset cannot be beyond end of data") + if count < 0: + raise xml.dom.IndexSizeErr("count cannot be negative") + if count: + self.data = "%s%s%s" % ( + self.data[:offset], arg, self.data[offset+count:]) + +defproperty(CharacterData, "length", doc="Length of the string data.") + + +class Text(CharacterData): + __slots__ = () + + nodeType = Node.TEXT_NODE + nodeName = "#text" + attributes = None + + def splitText(self, offset): + if offset < 0 or offset > len(self.data): + raise xml.dom.IndexSizeErr("illegal offset value") + newText = self.__class__() + newText.data = self.data[offset:] + newText.ownerDocument = self.ownerDocument + next = self.nextSibling + if 
self.parentNode and self in self.parentNode.childNodes: + if next is None: + self.parentNode.appendChild(newText) + else: + self.parentNode.insertBefore(newText, next) + self.data = self.data[:offset] + return newText + + def writexml(self, writer, indent="", addindent="", newl=""): + _write_data(writer, "%s%s%s" % (indent, self.data, newl)) + + # DOM Level 3 (WD 9 April 2002) + + def _get_wholeText(self): + L = [self.data] + n = self.previousSibling + while n is not None: + if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): + L.insert(0, n.data) + n = n.previousSibling + else: + break + n = self.nextSibling + while n is not None: + if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): + L.append(n.data) + n = n.nextSibling + else: + break + return ''.join(L) + + def replaceWholeText(self, content): + # XXX This needs to be seriously changed if minidom ever + # supports EntityReference nodes. + parent = self.parentNode + n = self.previousSibling + while n is not None: + if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): + next = n.previousSibling + parent.removeChild(n) + n = next + else: + break + n = self.nextSibling + if not content: + parent.removeChild(self) + while n is not None: + if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): + next = n.nextSibling + parent.removeChild(n) + n = next + else: + break + if content: + self.data = content + return self + else: + return None + + def _get_isWhitespaceInElementContent(self): + if self.data.strip(): + return False + elem = _get_containing_element(self) + if elem is None: + return False + info = self.ownerDocument._get_elem_info(elem) + if info is None: + return False + else: + return info.isElementContent() + +defproperty(Text, "isWhitespaceInElementContent", + doc="True iff this text node contains only whitespace" + " and is in element content.") +defproperty(Text, "wholeText", + doc="The text of all logically-adjacent text nodes.") + + +def _get_containing_element(node): + c = 
node.parentNode + while c is not None: + if c.nodeType == Node.ELEMENT_NODE: + return c + c = c.parentNode + return None + +def _get_containing_entref(node): + c = node.parentNode + while c is not None: + if c.nodeType == Node.ENTITY_REFERENCE_NODE: + return c + c = c.parentNode + return None + + +class Comment(CharacterData): + nodeType = Node.COMMENT_NODE + nodeName = "#comment" + + def __init__(self, data): + CharacterData.__init__(self) + self._data = data + + def writexml(self, writer, indent="", addindent="", newl=""): + if "--" in self.data: + raise ValueError("'--' is not allowed in a comment node") + writer.write("%s%s" % (indent, self.data, newl)) + + +class CDATASection(Text): + __slots__ = () + + nodeType = Node.CDATA_SECTION_NODE + nodeName = "#cdata-section" + + def writexml(self, writer, indent="", addindent="", newl=""): + if self.data.find("]]>") >= 0: + raise ValueError("']]>' not allowed in a CDATA section") + writer.write("" % self.data) + + +class ReadOnlySequentialNamedNodeMap(object): + __slots__ = '_seq', + + def __init__(self, seq=()): + # seq should be a list or tuple + self._seq = seq + + def __len__(self): + return len(self._seq) + + def _get_length(self): + return len(self._seq) + + def getNamedItem(self, name): + for n in self._seq: + if n.nodeName == name: + return n + + def getNamedItemNS(self, namespaceURI, localName): + for n in self._seq: + if n.namespaceURI == namespaceURI and n.localName == localName: + return n + + def __getitem__(self, name_or_tuple): + if isinstance(name_or_tuple, tuple): + node = self.getNamedItemNS(*name_or_tuple) + else: + node = self.getNamedItem(name_or_tuple) + if node is None: + raise KeyError(name_or_tuple) + return node + + def item(self, index): + if index < 0: + return None + try: + return self._seq[index] + except IndexError: + return None + + def removeNamedItem(self, name): + raise xml.dom.NoModificationAllowedErr( + "NamedNodeMap instance is read-only") + + def removeNamedItemNS(self, 
namespaceURI, localName): + raise xml.dom.NoModificationAllowedErr( + "NamedNodeMap instance is read-only") + + def setNamedItem(self, node): + raise xml.dom.NoModificationAllowedErr( + "NamedNodeMap instance is read-only") + + def setNamedItemNS(self, node): + raise xml.dom.NoModificationAllowedErr( + "NamedNodeMap instance is read-only") + + def __getstate__(self): + return [self._seq] + + def __setstate__(self, state): + self._seq = state[0] + +defproperty(ReadOnlySequentialNamedNodeMap, "length", + doc="Number of entries in the NamedNodeMap.") + + +class Identified: + """Mix-in class that supports the publicId and systemId attributes.""" + + __slots__ = 'publicId', 'systemId' + + def _identified_mixin_init(self, publicId, systemId): + self.publicId = publicId + self.systemId = systemId + + def _get_publicId(self): + return self.publicId + + def _get_systemId(self): + return self.systemId + +class DocumentType(Identified, Childless, Node): + nodeType = Node.DOCUMENT_TYPE_NODE + nodeValue = None + name = None + publicId = None + systemId = None + internalSubset = None + + def __init__(self, qualifiedName): + self.entities = ReadOnlySequentialNamedNodeMap() + self.notations = ReadOnlySequentialNamedNodeMap() + if qualifiedName: + prefix, localname = _nssplit(qualifiedName) + self.name = localname + self.nodeName = self.name + + def _get_internalSubset(self): + return self.internalSubset + + def cloneNode(self, deep): + if self.ownerDocument is None: + # it's ok + clone = DocumentType(None) + clone.name = self.name + clone.nodeName = self.name + operation = xml.dom.UserDataHandler.NODE_CLONED + if deep: + clone.entities._seq = [] + clone.notations._seq = [] + for n in self.notations._seq: + notation = Notation(n.nodeName, n.publicId, n.systemId) + clone.notations._seq.append(notation) + n._call_user_data_handler(operation, n, notation) + for e in self.entities._seq: + entity = Entity(e.nodeName, e.publicId, e.systemId, + e.notationName) + entity.actualEncoding = 
e.actualEncoding + entity.encoding = e.encoding + entity.version = e.version + clone.entities._seq.append(entity) + e._call_user_data_handler(operation, e, entity) + self._call_user_data_handler(operation, self, clone) + return clone + else: + return None + + def writexml(self, writer, indent="", addindent="", newl=""): + writer.write(""+newl) + +class Entity(Identified, Node): + attributes = None + nodeType = Node.ENTITY_NODE + nodeValue = None + + actualEncoding = None + encoding = None + version = None + + def __init__(self, name, publicId, systemId, notation): + self.nodeName = name + self.notationName = notation + self.childNodes = NodeList() + self._identified_mixin_init(publicId, systemId) + + def _get_actualEncoding(self): + return self.actualEncoding + + def _get_encoding(self): + return self.encoding + + def _get_version(self): + return self.version + + def appendChild(self, newChild): + raise xml.dom.HierarchyRequestErr( + "cannot append children to an entity node") + + def insertBefore(self, newChild, refChild): + raise xml.dom.HierarchyRequestErr( + "cannot insert children below an entity node") + + def removeChild(self, oldChild): + raise xml.dom.HierarchyRequestErr( + "cannot remove children from an entity node") + + def replaceChild(self, newChild, oldChild): + raise xml.dom.HierarchyRequestErr( + "cannot replace children of an entity node") + +class Notation(Identified, Childless, Node): + nodeType = Node.NOTATION_NODE + nodeValue = None + + def __init__(self, name, publicId, systemId): + self.nodeName = name + self._identified_mixin_init(publicId, systemId) + + +class DOMImplementation(DOMImplementationLS): + _features = [("core", "1.0"), + ("core", "2.0"), + ("core", None), + ("xml", "1.0"), + ("xml", "2.0"), + ("xml", None), + ("ls-load", "3.0"), + ("ls-load", None), + ] + + def hasFeature(self, feature, version): + if version == "": + version = None + return (feature.lower(), version) in self._features + + def createDocument(self, namespaceURI, 
qualifiedName, doctype): + if doctype and doctype.parentNode is not None: + raise xml.dom.WrongDocumentErr( + "doctype object owned by another DOM tree") + doc = self._create_document() + + add_root_element = not (namespaceURI is None + and qualifiedName is None + and doctype is None) + + if not qualifiedName and add_root_element: + # The spec is unclear what to raise here; SyntaxErr + # would be the other obvious candidate. Since Xerces raises + # InvalidCharacterErr, and since SyntaxErr is not listed + # for createDocument, that seems to be the better choice. + # XXX: need to check for illegal characters here and in + # createElement. + + # DOM Level III clears this up when talking about the return value + # of this function. If namespaceURI, qName and DocType are + # Null the document is returned without a document element + # Otherwise if doctype or namespaceURI are not None + # Then we go back to the above problem + raise xml.dom.InvalidCharacterErr("Element with no name") + + if add_root_element: + prefix, localname = _nssplit(qualifiedName) + if prefix == "xml" \ + and namespaceURI != "http://www.w3.org/XML/1998/namespace": + raise xml.dom.NamespaceErr("illegal use of 'xml' prefix") + if prefix and not namespaceURI: + raise xml.dom.NamespaceErr( + "illegal use of prefix without namespaces") + element = doc.createElementNS(namespaceURI, qualifiedName) + if doctype: + doc.appendChild(doctype) + doc.appendChild(element) + + if doctype: + doctype.parentNode = doctype.ownerDocument = doc + + doc.doctype = doctype + doc.implementation = self + return doc + + def createDocumentType(self, qualifiedName, publicId, systemId): + doctype = DocumentType(qualifiedName) + doctype.publicId = publicId + doctype.systemId = systemId + return doctype + + # DOM Level 3 (WD 9 April 2002) + + def getInterface(self, feature): + if self.hasFeature(feature, None): + return self + else: + return None + + # internal + def _create_document(self): + return Document() + +class 
ElementInfo(object): + """Object that represents content-model information for an element. + + This implementation is not expected to be used in practice; DOM + builders should provide implementations which do the right thing + using information available to it. + + """ + + __slots__ = 'tagName', + + def __init__(self, name): + self.tagName = name + + def getAttributeType(self, aname): + return _no_type + + def getAttributeTypeNS(self, namespaceURI, localName): + return _no_type + + def isElementContent(self): + return False + + def isEmpty(self): + """Returns true iff this element is declared to have an EMPTY + content model.""" + return False + + def isId(self, aname): + """Returns true iff the named attribute is a DTD-style ID.""" + return False + + def isIdNS(self, namespaceURI, localName): + """Returns true iff the identified attribute is a DTD-style ID.""" + return False + + def __getstate__(self): + return self.tagName + + def __setstate__(self, state): + self.tagName = state + +def _clear_id_cache(node): + if node.nodeType == Node.DOCUMENT_NODE: + node._id_cache.clear() + node._id_search_stack = None + elif _in_document(node): + node.ownerDocument._id_cache.clear() + node.ownerDocument._id_search_stack= None + +class Document(Node, DocumentLS): + __slots__ = ('_elem_info', 'doctype', + '_id_search_stack', 'childNodes', '_id_cache') + _child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE, + Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE) + + implementation = DOMImplementation() + nodeType = Node.DOCUMENT_NODE + nodeName = "#document" + nodeValue = None + attributes = None + parentNode = None + previousSibling = nextSibling = None + + + # Document attributes from Level 3 (WD 9 April 2002) + + actualEncoding = None + encoding = None + standalone = None + version = None + strictErrorChecking = False + errorHandler = None + documentURI = None + + _magic_id_count = 0 + + def __init__(self): + self.doctype = None + self.childNodes = NodeList() + # 
mapping of (namespaceURI, localName) -> ElementInfo + # and tagName -> ElementInfo + self._elem_info = {} + self._id_cache = {} + self._id_search_stack = None + + def _get_elem_info(self, element): + if element.namespaceURI: + key = element.namespaceURI, element.localName + else: + key = element.tagName + return self._elem_info.get(key) + + def _get_actualEncoding(self): + return self.actualEncoding + + def _get_doctype(self): + return self.doctype + + def _get_documentURI(self): + return self.documentURI + + def _get_encoding(self): + return self.encoding + + def _get_errorHandler(self): + return self.errorHandler + + def _get_standalone(self): + return self.standalone + + def _get_strictErrorChecking(self): + return self.strictErrorChecking + + def _get_version(self): + return self.version + + def appendChild(self, node): + if node.nodeType not in self._child_node_types: + raise xml.dom.HierarchyRequestErr( + "%s cannot be child of %s" % (repr(node), repr(self))) + if node.parentNode is not None: + # This needs to be done before the next test since this + # may *be* the document element, in which case it should + # end up re-ordered to the end. 
+ node.parentNode.removeChild(node) + + if node.nodeType == Node.ELEMENT_NODE \ + and self._get_documentElement(): + raise xml.dom.HierarchyRequestErr( + "two document elements disallowed") + return Node.appendChild(self, node) + + def removeChild(self, oldChild): + try: + self.childNodes.remove(oldChild) + except ValueError: + raise xml.dom.NotFoundErr() + oldChild.nextSibling = oldChild.previousSibling = None + oldChild.parentNode = None + if self.documentElement is oldChild: + self.documentElement = None + + return oldChild + + def _get_documentElement(self): + for node in self.childNodes: + if node.nodeType == Node.ELEMENT_NODE: + return node + + def unlink(self): + if self.doctype is not None: + self.doctype.unlink() + self.doctype = None + Node.unlink(self) + + def cloneNode(self, deep): + if not deep: + return None + clone = self.implementation.createDocument(None, None, None) + clone.encoding = self.encoding + clone.standalone = self.standalone + clone.version = self.version + for n in self.childNodes: + childclone = _clone_node(n, deep, clone) + assert childclone.ownerDocument.isSameNode(clone) + clone.childNodes.append(childclone) + if childclone.nodeType == Node.DOCUMENT_NODE: + assert clone.documentElement is None + elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE: + assert clone.doctype is None + clone.doctype = childclone + childclone.parentNode = clone + self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED, + self, clone) + return clone + + def createDocumentFragment(self): + d = DocumentFragment() + d.ownerDocument = self + return d + + def createElement(self, tagName): + e = Element(tagName) + e.ownerDocument = self + return e + + def createTextNode(self, data): + if not isinstance(data, str): + raise TypeError("node contents must be a string") + t = Text() + t.data = data + t.ownerDocument = self + return t + + def createCDATASection(self, data): + if not isinstance(data, str): + raise TypeError("node contents must be a string") + c 
= CDATASection() + c.data = data + c.ownerDocument = self + return c + + def createComment(self, data): + c = Comment(data) + c.ownerDocument = self + return c + + def createProcessingInstruction(self, target, data): + p = ProcessingInstruction(target, data) + p.ownerDocument = self + return p + + def createAttribute(self, qName): + a = Attr(qName) + a.ownerDocument = self + a.value = "" + return a + + def createElementNS(self, namespaceURI, qualifiedName): + prefix, localName = _nssplit(qualifiedName) + e = Element(qualifiedName, namespaceURI, prefix) + e.ownerDocument = self + return e + + def createAttributeNS(self, namespaceURI, qualifiedName): + prefix, localName = _nssplit(qualifiedName) + a = Attr(qualifiedName, namespaceURI, localName, prefix) + a.ownerDocument = self + a.value = "" + return a + + # A couple of implementation-specific helpers to create node types + # not supported by the W3C DOM specs: + + def _create_entity(self, name, publicId, systemId, notationName): + e = Entity(name, publicId, systemId, notationName) + e.ownerDocument = self + return e + + def _create_notation(self, name, publicId, systemId): + n = Notation(name, publicId, systemId) + n.ownerDocument = self + return n + + def getElementById(self, id): + if id in self._id_cache: + return self._id_cache[id] + if not (self._elem_info or self._magic_id_count): + return None + + stack = self._id_search_stack + if stack is None: + # we never searched before, or the cache has been cleared + stack = [self.documentElement] + self._id_search_stack = stack + elif not stack: + # Previous search was completed and cache is still valid; + # no matching node. 
+ return None + + result = None + while stack: + node = stack.pop() + # add child elements to stack for continued searching + stack.extend([child for child in node.childNodes + if child.nodeType in _nodeTypes_with_children]) + # check this node + info = self._get_elem_info(node) + if info: + # We have to process all ID attributes before + # returning in order to get all the attributes set to + # be IDs using Element.setIdAttribute*(). + for attr in node.attributes.values(): + if attr.namespaceURI: + if info.isIdNS(attr.namespaceURI, attr.localName): + self._id_cache[attr.value] = node + if attr.value == id: + result = node + elif not node._magic_id_nodes: + break + elif info.isId(attr.name): + self._id_cache[attr.value] = node + if attr.value == id: + result = node + elif not node._magic_id_nodes: + break + elif attr._is_id: + self._id_cache[attr.value] = node + if attr.value == id: + result = node + elif node._magic_id_nodes == 1: + break + elif node._magic_id_nodes: + for attr in node.attributes.values(): + if attr._is_id: + self._id_cache[attr.value] = node + if attr.value == id: + result = node + if result is not None: + break + return result + + def getElementsByTagName(self, name): + return _get_elements_by_tagName_helper(self, name, NodeList()) + + def getElementsByTagNameNS(self, namespaceURI, localName): + return _get_elements_by_tagName_ns_helper( + self, namespaceURI, localName, NodeList()) + + def isSupported(self, feature, version): + return self.implementation.hasFeature(feature, version) + + def importNode(self, node, deep): + if node.nodeType == Node.DOCUMENT_NODE: + raise xml.dom.NotSupportedErr("cannot import document nodes") + elif node.nodeType == Node.DOCUMENT_TYPE_NODE: + raise xml.dom.NotSupportedErr("cannot import document type nodes") + return _clone_node(node, deep, self) + + def writexml(self, writer, indent="", addindent="", newl="", encoding=None, + standalone=None): + declarations = [] + + if encoding: + 
declarations.append(f'encoding="{encoding}"') + if standalone is not None: + declarations.append(f'standalone="{"yes" if standalone else "no"}"') + + writer.write(f'{newl}') + + for node in self.childNodes: + node.writexml(writer, indent, addindent, newl) + + # DOM Level 3 (WD 9 April 2002) + + def renameNode(self, n, namespaceURI, name): + if n.ownerDocument is not self: + raise xml.dom.WrongDocumentErr( + "cannot rename nodes from other documents;\n" + "expected %s,\nfound %s" % (self, n.ownerDocument)) + if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE): + raise xml.dom.NotSupportedErr( + "renameNode() only applies to element and attribute nodes") + if namespaceURI != EMPTY_NAMESPACE: + if ':' in name: + prefix, localName = name.split(':', 1) + if ( prefix == "xmlns" + and namespaceURI != xml.dom.XMLNS_NAMESPACE): + raise xml.dom.NamespaceErr( + "illegal use of 'xmlns' prefix") + else: + if ( name == "xmlns" + and namespaceURI != xml.dom.XMLNS_NAMESPACE + and n.nodeType == Node.ATTRIBUTE_NODE): + raise xml.dom.NamespaceErr( + "illegal use of the 'xmlns' attribute") + prefix = None + localName = name + else: + prefix = None + localName = None + if n.nodeType == Node.ATTRIBUTE_NODE: + element = n.ownerElement + if element is not None: + is_id = n._is_id + element.removeAttributeNode(n) + else: + element = None + n.prefix = prefix + n._localName = localName + n.namespaceURI = namespaceURI + n.nodeName = name + if n.nodeType == Node.ELEMENT_NODE: + n.tagName = name + else: + # attribute node + n.name = name + if element is not None: + element.setAttributeNode(n) + if is_id: + element.setIdAttributeNode(n) + # It's not clear from a semantic perspective whether we should + # call the user data handlers for the NODE_RENAMED event since + # we're re-using the existing node. The draft spec has been + # interpreted as meaning "no, don't call the handler unless a + # new node is created." 
+ return n + +defproperty(Document, "documentElement", + doc="Top-level element of this document.") + + +def _clone_node(node, deep, newOwnerDocument): + """ + Clone a node and give it the new owner document. + Called by Node.cloneNode and Document.importNode + """ + if node.ownerDocument.isSameNode(newOwnerDocument): + operation = xml.dom.UserDataHandler.NODE_CLONED + else: + operation = xml.dom.UserDataHandler.NODE_IMPORTED + if node.nodeType == Node.ELEMENT_NODE: + clone = newOwnerDocument.createElementNS(node.namespaceURI, + node.nodeName) + for attr in node.attributes.values(): + clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value) + a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName) + a.specified = attr.specified + + if deep: + for child in node.childNodes: + c = _clone_node(child, deep, newOwnerDocument) + clone.appendChild(c) + + elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE: + clone = newOwnerDocument.createDocumentFragment() + if deep: + for child in node.childNodes: + c = _clone_node(child, deep, newOwnerDocument) + clone.appendChild(c) + + elif node.nodeType == Node.TEXT_NODE: + clone = newOwnerDocument.createTextNode(node.data) + elif node.nodeType == Node.CDATA_SECTION_NODE: + clone = newOwnerDocument.createCDATASection(node.data) + elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE: + clone = newOwnerDocument.createProcessingInstruction(node.target, + node.data) + elif node.nodeType == Node.COMMENT_NODE: + clone = newOwnerDocument.createComment(node.data) + elif node.nodeType == Node.ATTRIBUTE_NODE: + clone = newOwnerDocument.createAttributeNS(node.namespaceURI, + node.nodeName) + clone.specified = True + clone.value = node.value + elif node.nodeType == Node.DOCUMENT_TYPE_NODE: + assert node.ownerDocument is not newOwnerDocument + operation = xml.dom.UserDataHandler.NODE_IMPORTED + clone = newOwnerDocument.implementation.createDocumentType( + node.name, node.publicId, node.systemId) + clone.ownerDocument = 
newOwnerDocument + if deep: + clone.entities._seq = [] + clone.notations._seq = [] + for n in node.notations._seq: + notation = Notation(n.nodeName, n.publicId, n.systemId) + notation.ownerDocument = newOwnerDocument + clone.notations._seq.append(notation) + if hasattr(n, '_call_user_data_handler'): + n._call_user_data_handler(operation, n, notation) + for e in node.entities._seq: + entity = Entity(e.nodeName, e.publicId, e.systemId, + e.notationName) + entity.actualEncoding = e.actualEncoding + entity.encoding = e.encoding + entity.version = e.version + entity.ownerDocument = newOwnerDocument + clone.entities._seq.append(entity) + if hasattr(e, '_call_user_data_handler'): + e._call_user_data_handler(operation, e, entity) + else: + # Note the cloning of Document and DocumentType nodes is + # implementation specific. minidom handles those cases + # directly in the cloneNode() methods. + raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node)) + + # Check for _call_user_data_handler() since this could conceivably + # used with other DOM implementations (one of the FourThought + # DOMs, perhaps?). 
+ if hasattr(node, '_call_user_data_handler'): + node._call_user_data_handler(operation, node, clone) + return clone + + +def _nssplit(qualifiedName): + fields = qualifiedName.split(':', 1) + if len(fields) == 2: + return fields + else: + return (None, fields[0]) + + +def _do_pulldom_parse(func, args, kwargs): + events = func(*args, **kwargs) + toktype, rootNode = events.getEvent() + events.expandNode(rootNode) + events.clear() + return rootNode + +def parse(file, parser=None, bufsize=None): + """Parse a file into a DOM by filename or file object.""" + if parser is None and not bufsize: + from xml.dom import expatbuilder + return expatbuilder.parse(file) + else: + from xml.dom import pulldom + return _do_pulldom_parse(pulldom.parse, (file,), + {'parser': parser, 'bufsize': bufsize}) + +def parseString(string, parser=None): + """Parse a file into a DOM from a string.""" + if parser is None: + from xml.dom import expatbuilder + return expatbuilder.parseString(string) + else: + from xml.dom import pulldom + return _do_pulldom_parse(pulldom.parseString, (string,), + {'parser': parser}) + +def getDOMImplementation(features=None): + if features: + if isinstance(features, str): + features = domreg._parse_feature_string(features) + for f, v in features: + if not Document.implementation.hasFeature(f, v): + return None + return Document.implementation diff --git a/defaults/lib/x/dom/pulldom.py b/defaults/lib/x/dom/pulldom.py new file mode 100644 index 0000000..913141c --- /dev/null +++ b/defaults/lib/x/dom/pulldom.py @@ -0,0 +1,336 @@ +import xml.sax +import xml.sax.handler + +START_ELEMENT = "START_ELEMENT" +END_ELEMENT = "END_ELEMENT" +COMMENT = "COMMENT" +START_DOCUMENT = "START_DOCUMENT" +END_DOCUMENT = "END_DOCUMENT" +PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION" +IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE" +CHARACTERS = "CHARACTERS" + +class PullDOM(xml.sax.ContentHandler): + _locator = None + document = None + + def __init__(self, documentFactory=None): + from 
xml.dom import XML_NAMESPACE + self.documentFactory = documentFactory + self.firstEvent = [None, None] + self.lastEvent = self.firstEvent + self.elementStack = [] + self.push = self.elementStack.append + try: + self.pop = self.elementStack.pop + except AttributeError: + # use class' pop instead + pass + self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts + self._current_context = self._ns_contexts[-1] + self.pending_events = [] + + def pop(self): + result = self.elementStack[-1] + del self.elementStack[-1] + return result + + def setDocumentLocator(self, locator): + self._locator = locator + + def startPrefixMapping(self, prefix, uri): + if not hasattr(self, '_xmlns_attrs'): + self._xmlns_attrs = [] + self._xmlns_attrs.append((prefix or 'xmlns', uri)) + self._ns_contexts.append(self._current_context.copy()) + self._current_context[uri] = prefix or None + + def endPrefixMapping(self, prefix): + self._current_context = self._ns_contexts.pop() + + def startElementNS(self, name, tagName , attrs): + # Retrieve xml namespace declaration attributes. + xmlns_uri = 'http://www.w3.org/2000/xmlns/' + xmlns_attrs = getattr(self, '_xmlns_attrs', None) + if xmlns_attrs is not None: + for aname, value in xmlns_attrs: + attrs._attrs[(xmlns_uri, aname)] = value + self._xmlns_attrs = [] + uri, localname = name + if uri: + # When using namespaces, the reader may or may not + # provide us with the original name. If not, create + # *a* valid tagName from the current context. 
+ if tagName is None: + prefix = self._current_context[uri] + if prefix: + tagName = prefix + ":" + localname + else: + tagName = localname + if self.document: + node = self.document.createElementNS(uri, tagName) + else: + node = self.buildDocument(uri, tagName) + else: + # When the tagname is not prefixed, it just appears as + # localname + if self.document: + node = self.document.createElement(localname) + else: + node = self.buildDocument(None, localname) + + for aname,value in attrs.items(): + a_uri, a_localname = aname + if a_uri == xmlns_uri: + if a_localname == 'xmlns': + qname = a_localname + else: + qname = 'xmlns:' + a_localname + attr = self.document.createAttributeNS(a_uri, qname) + node.setAttributeNodeNS(attr) + elif a_uri: + prefix = self._current_context[a_uri] + if prefix: + qname = prefix + ":" + a_localname + else: + qname = a_localname + attr = self.document.createAttributeNS(a_uri, qname) + node.setAttributeNodeNS(attr) + else: + attr = self.document.createAttribute(a_localname) + node.setAttributeNode(attr) + attr.value = value + + self.lastEvent[1] = [(START_ELEMENT, node), None] + self.lastEvent = self.lastEvent[1] + self.push(node) + + def endElementNS(self, name, tagName): + self.lastEvent[1] = [(END_ELEMENT, self.pop()), None] + self.lastEvent = self.lastEvent[1] + + def startElement(self, name, attrs): + if self.document: + node = self.document.createElement(name) + else: + node = self.buildDocument(None, name) + + for aname,value in attrs.items(): + attr = self.document.createAttribute(aname) + attr.value = value + node.setAttributeNode(attr) + + self.lastEvent[1] = [(START_ELEMENT, node), None] + self.lastEvent = self.lastEvent[1] + self.push(node) + + def endElement(self, name): + self.lastEvent[1] = [(END_ELEMENT, self.pop()), None] + self.lastEvent = self.lastEvent[1] + + def comment(self, s): + if self.document: + node = self.document.createComment(s) + self.lastEvent[1] = [(COMMENT, node), None] + self.lastEvent = 
self.lastEvent[1] + else: + event = [(COMMENT, s), None] + self.pending_events.append(event) + + def processingInstruction(self, target, data): + if self.document: + node = self.document.createProcessingInstruction(target, data) + self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None] + self.lastEvent = self.lastEvent[1] + else: + event = [(PROCESSING_INSTRUCTION, target, data), None] + self.pending_events.append(event) + + def ignorableWhitespace(self, chars): + node = self.document.createTextNode(chars) + self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None] + self.lastEvent = self.lastEvent[1] + + def characters(self, chars): + node = self.document.createTextNode(chars) + self.lastEvent[1] = [(CHARACTERS, node), None] + self.lastEvent = self.lastEvent[1] + + def startDocument(self): + if self.documentFactory is None: + import xml.dom.minidom + self.documentFactory = xml.dom.minidom.Document.implementation + + def buildDocument(self, uri, tagname): + # Can't do that in startDocument, since we need the tagname + # XXX: obtain DocumentType + node = self.documentFactory.createDocument(uri, tagname, None) + self.document = node + self.lastEvent[1] = [(START_DOCUMENT, node), None] + self.lastEvent = self.lastEvent[1] + self.push(node) + # Put everything we have seen so far into the document + for e in self.pending_events: + if e[0][0] == PROCESSING_INSTRUCTION: + _,target,data = e[0] + n = self.document.createProcessingInstruction(target, data) + e[0] = (PROCESSING_INSTRUCTION, n) + elif e[0][0] == COMMENT: + n = self.document.createComment(e[0][1]) + e[0] = (COMMENT, n) + else: + raise AssertionError("Unknown pending event ",e[0][0]) + self.lastEvent[1] = e + self.lastEvent = e + self.pending_events = None + return node.firstChild + + def endDocument(self): + self.lastEvent[1] = [(END_DOCUMENT, self.document), None] + self.pop() + + def clear(self): + "clear(): Explicitly release parsing structures" + self.document = None + +class ErrorHandler: + def 
warning(self, exception): + print(exception) + def error(self, exception): + raise exception + def fatalError(self, exception): + raise exception + +class DOMEventStream: + def __init__(self, stream, parser, bufsize): + self.stream = stream + self.parser = parser + self.bufsize = bufsize + if not hasattr(self.parser, 'feed'): + self.getEvent = self._slurp + self.reset() + + def reset(self): + self.pulldom = PullDOM() + # This content handler relies on namespace support + self.parser.setFeature(xml.sax.handler.feature_namespaces, 1) + self.parser.setContentHandler(self.pulldom) + + def __next__(self): + rc = self.getEvent() + if rc: + return rc + raise StopIteration + + def __iter__(self): + return self + + def expandNode(self, node): + event = self.getEvent() + parents = [node] + while event: + token, cur_node = event + if cur_node is node: + return + if token != END_ELEMENT: + parents[-1].appendChild(cur_node) + if token == START_ELEMENT: + parents.append(cur_node) + elif token == END_ELEMENT: + del parents[-1] + event = self.getEvent() + + def getEvent(self): + # use IncrementalParser interface, so we get the desired + # pull effect + if not self.pulldom.firstEvent[1]: + self.pulldom.lastEvent = self.pulldom.firstEvent + while not self.pulldom.firstEvent[1]: + buf = self.stream.read(self.bufsize) + if not buf: + self.parser.close() + return None + self.parser.feed(buf) + rc = self.pulldom.firstEvent[1][0] + self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1] + return rc + + def _slurp(self): + """ Fallback replacement for getEvent() using the + standard SAX2 interface, which means we slurp the + SAX events into memory (no performance gain, but + we are compatible to all SAX parsers). + """ + self.parser.parse(self.stream) + self.getEvent = self._emit + return self._emit() + + def _emit(self): + """ Fallback replacement for getEvent() that emits + the events that _slurp() read previously. 
+ """ + rc = self.pulldom.firstEvent[1][0] + self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1] + return rc + + def clear(self): + """clear(): Explicitly release parsing objects""" + self.pulldom.clear() + del self.pulldom + self.parser = None + self.stream = None + +class SAX2DOM(PullDOM): + + def startElementNS(self, name, tagName , attrs): + PullDOM.startElementNS(self, name, tagName, attrs) + curNode = self.elementStack[-1] + parentNode = self.elementStack[-2] + parentNode.appendChild(curNode) + + def startElement(self, name, attrs): + PullDOM.startElement(self, name, attrs) + curNode = self.elementStack[-1] + parentNode = self.elementStack[-2] + parentNode.appendChild(curNode) + + def processingInstruction(self, target, data): + PullDOM.processingInstruction(self, target, data) + node = self.lastEvent[0][1] + parentNode = self.elementStack[-1] + parentNode.appendChild(node) + + def ignorableWhitespace(self, chars): + PullDOM.ignorableWhitespace(self, chars) + node = self.lastEvent[0][1] + parentNode = self.elementStack[-1] + parentNode.appendChild(node) + + def characters(self, chars): + PullDOM.characters(self, chars) + node = self.lastEvent[0][1] + parentNode = self.elementStack[-1] + parentNode.appendChild(node) + + +default_bufsize = (2 ** 14) - 20 + +def parse(stream_or_string, parser=None, bufsize=None): + if bufsize is None: + bufsize = default_bufsize + if isinstance(stream_or_string, str): + stream = open(stream_or_string, 'rb') + else: + stream = stream_or_string + if not parser: + parser = xml.sax.make_parser() + return DOMEventStream(stream, parser, bufsize) + +def parseString(string, parser=None): + from io import StringIO + + bufsize = len(string) + buf = StringIO(string) + if not parser: + parser = xml.sax.make_parser() + return DOMEventStream(buf, parser, bufsize) diff --git a/defaults/lib/x/dom/xmlbuilder.py b/defaults/lib/x/dom/xmlbuilder.py new file mode 100644 index 0000000..8a20026 --- /dev/null +++ 
b/defaults/lib/x/dom/xmlbuilder.py @@ -0,0 +1,387 @@ +"""Implementation of the DOM Level 3 'LS-Load' feature.""" + +import copy +import xml.dom + +from xml.dom.NodeFilter import NodeFilter + + +__all__ = ["DOMBuilder", "DOMEntityResolver", "DOMInputSource"] + + +class Options: + """Features object that has variables set for each DOMBuilder feature. + + The DOMBuilder class uses an instance of this class to pass settings to + the ExpatBuilder class. + """ + + # Note that the DOMBuilder class in LoadSave constrains which of these + # values can be set using the DOM Level 3 LoadSave feature. + + namespaces = 1 + namespace_declarations = True + validation = False + external_parameter_entities = True + external_general_entities = True + external_dtd_subset = True + validate_if_schema = False + validate = False + datatype_normalization = False + create_entity_ref_nodes = True + entities = True + whitespace_in_element_content = True + cdata_sections = True + comments = True + charset_overrides_xml_encoding = True + infoset = False + supported_mediatypes_only = False + + errorHandler = None + filter = None + + +class DOMBuilder: + entityResolver = None + errorHandler = None + filter = None + + ACTION_REPLACE = 1 + ACTION_APPEND_AS_CHILDREN = 2 + ACTION_INSERT_AFTER = 3 + ACTION_INSERT_BEFORE = 4 + + _legal_actions = (ACTION_REPLACE, ACTION_APPEND_AS_CHILDREN, + ACTION_INSERT_AFTER, ACTION_INSERT_BEFORE) + + def __init__(self): + self._options = Options() + + def _get_entityResolver(self): + return self.entityResolver + def _set_entityResolver(self, entityResolver): + self.entityResolver = entityResolver + + def _get_errorHandler(self): + return self.errorHandler + def _set_errorHandler(self, errorHandler): + self.errorHandler = errorHandler + + def _get_filter(self): + return self.filter + def _set_filter(self, filter): + self.filter = filter + + def setFeature(self, name, state): + if self.supportsFeature(name): + state = state and 1 or 0 + try: + settings = 
self._settings[(_name_xform(name), state)] + except KeyError: + raise xml.dom.NotSupportedErr( + "unsupported feature: %r" % (name,)) from None + else: + for name, value in settings: + setattr(self._options, name, value) + else: + raise xml.dom.NotFoundErr("unknown feature: " + repr(name)) + + def supportsFeature(self, name): + return hasattr(self._options, _name_xform(name)) + + def canSetFeature(self, name, state): + key = (_name_xform(name), state and 1 or 0) + return key in self._settings + + # This dictionary maps from (feature,value) to a list of + # (option,value) pairs that should be set on the Options object. + # If a (feature,value) setting is not in this dictionary, it is + # not supported by the DOMBuilder. + # + _settings = { + ("namespace_declarations", 0): [ + ("namespace_declarations", 0)], + ("namespace_declarations", 1): [ + ("namespace_declarations", 1)], + ("validation", 0): [ + ("validation", 0)], + ("external_general_entities", 0): [ + ("external_general_entities", 0)], + ("external_general_entities", 1): [ + ("external_general_entities", 1)], + ("external_parameter_entities", 0): [ + ("external_parameter_entities", 0)], + ("external_parameter_entities", 1): [ + ("external_parameter_entities", 1)], + ("validate_if_schema", 0): [ + ("validate_if_schema", 0)], + ("create_entity_ref_nodes", 0): [ + ("create_entity_ref_nodes", 0)], + ("create_entity_ref_nodes", 1): [ + ("create_entity_ref_nodes", 1)], + ("entities", 0): [ + ("create_entity_ref_nodes", 0), + ("entities", 0)], + ("entities", 1): [ + ("entities", 1)], + ("whitespace_in_element_content", 0): [ + ("whitespace_in_element_content", 0)], + ("whitespace_in_element_content", 1): [ + ("whitespace_in_element_content", 1)], + ("cdata_sections", 0): [ + ("cdata_sections", 0)], + ("cdata_sections", 1): [ + ("cdata_sections", 1)], + ("comments", 0): [ + ("comments", 0)], + ("comments", 1): [ + ("comments", 1)], + ("charset_overrides_xml_encoding", 0): [ + ("charset_overrides_xml_encoding", 0)], + 
("charset_overrides_xml_encoding", 1): [ + ("charset_overrides_xml_encoding", 1)], + ("infoset", 0): [], + ("infoset", 1): [ + ("namespace_declarations", 0), + ("validate_if_schema", 0), + ("create_entity_ref_nodes", 0), + ("entities", 0), + ("cdata_sections", 0), + ("datatype_normalization", 1), + ("whitespace_in_element_content", 1), + ("comments", 1), + ("charset_overrides_xml_encoding", 1)], + ("supported_mediatypes_only", 0): [ + ("supported_mediatypes_only", 0)], + ("namespaces", 0): [ + ("namespaces", 0)], + ("namespaces", 1): [ + ("namespaces", 1)], + } + + def getFeature(self, name): + xname = _name_xform(name) + try: + return getattr(self._options, xname) + except AttributeError: + if name == "infoset": + options = self._options + return (options.datatype_normalization + and options.whitespace_in_element_content + and options.comments + and options.charset_overrides_xml_encoding + and not (options.namespace_declarations + or options.validate_if_schema + or options.create_entity_ref_nodes + or options.entities + or options.cdata_sections)) + raise xml.dom.NotFoundErr("feature %s not known" % repr(name)) + + def parseURI(self, uri): + if self.entityResolver: + input = self.entityResolver.resolveEntity(None, uri) + else: + input = DOMEntityResolver().resolveEntity(None, uri) + return self.parse(input) + + def parse(self, input): + options = copy.copy(self._options) + options.filter = self.filter + options.errorHandler = self.errorHandler + fp = input.byteStream + if fp is None and options.systemId: + import urllib.request + fp = urllib.request.urlopen(input.systemId) + return self._parse_bytestream(fp, options) + + def parseWithContext(self, input, cnode, action): + if action not in self._legal_actions: + raise ValueError("not a legal action") + raise NotImplementedError("Haven't written this yet...") + + def _parse_bytestream(self, stream, options): + import xml.dom.expatbuilder + builder = xml.dom.expatbuilder.makeBuilder(options) + return 
builder.parseFile(stream) + + +def _name_xform(name): + return name.lower().replace('-', '_') + + +class DOMEntityResolver(object): + __slots__ = '_opener', + + def resolveEntity(self, publicId, systemId): + assert systemId is not None + source = DOMInputSource() + source.publicId = publicId + source.systemId = systemId + source.byteStream = self._get_opener().open(systemId) + + # determine the encoding if the transport provided it + source.encoding = self._guess_media_encoding(source) + + # determine the base URI is we can + import posixpath, urllib.parse + parts = urllib.parse.urlparse(systemId) + scheme, netloc, path, params, query, fragment = parts + # XXX should we check the scheme here as well? + if path and not path.endswith("/"): + path = posixpath.dirname(path) + "/" + parts = scheme, netloc, path, params, query, fragment + source.baseURI = urllib.parse.urlunparse(parts) + + return source + + def _get_opener(self): + try: + return self._opener + except AttributeError: + self._opener = self._create_opener() + return self._opener + + def _create_opener(self): + import urllib.request + return urllib.request.build_opener() + + def _guess_media_encoding(self, source): + info = source.byteStream.info() + if "Content-Type" in info: + for param in info.getplist(): + if param.startswith("charset="): + return param.split("=", 1)[1].lower() + + +class DOMInputSource(object): + __slots__ = ('byteStream', 'characterStream', 'stringData', + 'encoding', 'publicId', 'systemId', 'baseURI') + + def __init__(self): + self.byteStream = None + self.characterStream = None + self.stringData = None + self.encoding = None + self.publicId = None + self.systemId = None + self.baseURI = None + + def _get_byteStream(self): + return self.byteStream + def _set_byteStream(self, byteStream): + self.byteStream = byteStream + + def _get_characterStream(self): + return self.characterStream + def _set_characterStream(self, characterStream): + self.characterStream = characterStream + + def 
_get_stringData(self): + return self.stringData + def _set_stringData(self, data): + self.stringData = data + + def _get_encoding(self): + return self.encoding + def _set_encoding(self, encoding): + self.encoding = encoding + + def _get_publicId(self): + return self.publicId + def _set_publicId(self, publicId): + self.publicId = publicId + + def _get_systemId(self): + return self.systemId + def _set_systemId(self, systemId): + self.systemId = systemId + + def _get_baseURI(self): + return self.baseURI + def _set_baseURI(self, uri): + self.baseURI = uri + + +class DOMBuilderFilter: + """Element filter which can be used to tailor construction of + a DOM instance. + """ + + # There's really no need for this class; concrete implementations + # should just implement the endElement() and startElement() + # methods as appropriate. Using this makes it easy to only + # implement one of them. + + FILTER_ACCEPT = 1 + FILTER_REJECT = 2 + FILTER_SKIP = 3 + FILTER_INTERRUPT = 4 + + whatToShow = NodeFilter.SHOW_ALL + + def _get_whatToShow(self): + return self.whatToShow + + def acceptNode(self, element): + return self.FILTER_ACCEPT + + def startContainer(self, element): + return self.FILTER_ACCEPT + +del NodeFilter + + +class DocumentLS: + """Mixin to create documents that conform to the load/save spec.""" + + async_ = False + + def _get_async(self): + return False + + def _set_async(self, flag): + if flag: + raise xml.dom.NotSupportedErr( + "asynchronous document loading is not supported") + + def abort(self): + # What does it mean to "clear" a document? Does the + # documentElement disappear? 
+ raise NotImplementedError( + "haven't figured out what this means yet") + + def load(self, uri): + raise NotImplementedError("haven't written this yet") + + def loadXML(self, source): + raise NotImplementedError("haven't written this yet") + + def saveXML(self, snode): + if snode is None: + snode = self + elif snode.ownerDocument is not self: + raise xml.dom.WrongDocumentErr() + return snode.toxml() + + +class DOMImplementationLS: + MODE_SYNCHRONOUS = 1 + MODE_ASYNCHRONOUS = 2 + + def createDOMBuilder(self, mode, schemaType): + if schemaType is not None: + raise xml.dom.NotSupportedErr( + "schemaType not yet supported") + if mode == self.MODE_SYNCHRONOUS: + return DOMBuilder() + if mode == self.MODE_ASYNCHRONOUS: + raise xml.dom.NotSupportedErr( + "asynchronous builders are not supported") + raise ValueError("unknown value for mode") + + def createDOMWriter(self): + raise NotImplementedError( + "the writer interface hasn't been written yet!") + + def createDOMInputSource(self): + return DOMInputSource() diff --git a/defaults/lib/x/etree/ElementInclude.py b/defaults/lib/x/etree/ElementInclude.py new file mode 100644 index 0000000..40a9b22 --- /dev/null +++ b/defaults/lib/x/etree/ElementInclude.py @@ -0,0 +1,185 @@ +# +# ElementTree +# $Id: ElementInclude.py 3375 2008-02-13 08:05:08Z fredrik $ +# +# limited xinclude support for element trees +# +# history: +# 2003-08-15 fl created +# 2003-11-14 fl fixed default loader +# +# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved. 
+# +# fredrik@pythonware.com +# http://www.pythonware.com +# +# -------------------------------------------------------------------- +# The ElementTree toolkit is +# +# Copyright (c) 1999-2008 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# -------------------------------------------------------------------- + +# Licensed to PSF under a Contributor Agreement. +# See https://www.python.org/psf/license for licensing details. + +## +# Limited XInclude support for the ElementTree package. +## + +import copy +from . import ElementTree +from urllib.parse import urljoin + +XINCLUDE = "{http://www.w3.org/2001/XInclude}" + +XINCLUDE_INCLUDE = XINCLUDE + "include" +XINCLUDE_FALLBACK = XINCLUDE + "fallback" + +# For security reasons, the inclusion depth is limited to this read-only value by default. 
+DEFAULT_MAX_INCLUSION_DEPTH = 6 + + +## +# Fatal include error. + +class FatalIncludeError(SyntaxError): + pass + + +class LimitedRecursiveIncludeError(FatalIncludeError): + pass + + +## +# Default loader. This loader reads an included resource from disk. +# +# @param href Resource reference. +# @param parse Parse mode. Either "xml" or "text". +# @param encoding Optional text encoding (UTF-8 by default for "text"). +# @return The expanded resource. If the parse mode is "xml", this +# is an ElementTree instance. If the parse mode is "text", this +# is a Unicode string. If the loader fails, it can return None +# or raise an OSError exception. +# @throws OSError If the loader fails to load the resource. + +def default_loader(href, parse, encoding=None): + if parse == "xml": + with open(href, 'rb') as file: + data = ElementTree.parse(file).getroot() + else: + if not encoding: + encoding = 'UTF-8' + with open(href, 'r', encoding=encoding) as file: + data = file.read() + return data + +## +# Expand XInclude directives. +# +# @param elem Root element. +# @param loader Optional resource loader. If omitted, it defaults +# to {@link default_loader}. If given, it should be a callable +# that implements the same interface as default_loader. +# @param base_url The base URL of the original file, to resolve +# relative include file references. +# @param max_depth The maximum number of recursive inclusions. +# Limited to reduce the risk of malicious content explosion. +# Pass a negative value to disable the limitation. +# @throws LimitedRecursiveIncludeError If the {@link max_depth} was exceeded. +# @throws FatalIncludeError If the function fails to include a given +# resource, or if the tree contains malformed XInclude elements. +# @throws IOError If the function fails to load a given resource. 
+# @returns the node or its replacement if it was an XInclude node + +def include(elem, loader=None, base_url=None, + max_depth=DEFAULT_MAX_INCLUSION_DEPTH): + if max_depth is None: + max_depth = -1 + elif max_depth < 0: + raise ValueError("expected non-negative depth or None for 'max_depth', got %r" % max_depth) + + if hasattr(elem, 'getroot'): + elem = elem.getroot() + if loader is None: + loader = default_loader + + _include(elem, loader, base_url, max_depth, set()) + + +def _include(elem, loader, base_url, max_depth, _parent_hrefs): + # look for xinclude elements + i = 0 + while i < len(elem): + e = elem[i] + if e.tag == XINCLUDE_INCLUDE: + # process xinclude directive + href = e.get("href") + if base_url: + href = urljoin(base_url, href) + parse = e.get("parse", "xml") + if parse == "xml": + if href in _parent_hrefs: + raise FatalIncludeError("recursive include of %s" % href) + if max_depth == 0: + raise LimitedRecursiveIncludeError( + "maximum xinclude depth reached when including file %s" % href) + _parent_hrefs.add(href) + node = loader(href, parse) + if node is None: + raise FatalIncludeError( + "cannot load %r as %r" % (href, parse) + ) + node = copy.copy(node) # FIXME: this makes little sense with recursive includes + _include(node, loader, href, max_depth - 1, _parent_hrefs) + _parent_hrefs.remove(href) + if e.tail: + node.tail = (node.tail or "") + e.tail + elem[i] = node + elif parse == "text": + text = loader(href, parse, e.get("encoding")) + if text is None: + raise FatalIncludeError( + "cannot load %r as %r" % (href, parse) + ) + if e.tail: + text += e.tail + if i: + node = elem[i-1] + node.tail = (node.tail or "") + text + else: + elem.text = (elem.text or "") + text + del elem[i] + continue + else: + raise FatalIncludeError( + "unknown parse type in xi:include tag (%r)" % parse + ) + elif e.tag == XINCLUDE_FALLBACK: + raise FatalIncludeError( + "xi:fallback tag must be child of xi:include (%r)" % e.tag + ) + else: + _include(e, loader, base_url, 
max_depth, _parent_hrefs) + i += 1 diff --git a/defaults/lib/x/etree/ElementPath.py b/defaults/lib/x/etree/ElementPath.py new file mode 100644 index 0000000..dc6bd28 --- /dev/null +++ b/defaults/lib/x/etree/ElementPath.py @@ -0,0 +1,423 @@ +# +# ElementTree +# $Id: ElementPath.py 3375 2008-02-13 08:05:08Z fredrik $ +# +# limited xpath support for element trees +# +# history: +# 2003-05-23 fl created +# 2003-05-28 fl added support for // etc +# 2003-08-27 fl fixed parsing of periods in element names +# 2007-09-10 fl new selection engine +# 2007-09-12 fl fixed parent selector +# 2007-09-13 fl added iterfind; changed findall to return a list +# 2007-11-30 fl added namespaces support +# 2009-10-30 fl added child element value filter +# +# Copyright (c) 2003-2009 by Fredrik Lundh. All rights reserved. +# +# fredrik@pythonware.com +# http://www.pythonware.com +# +# -------------------------------------------------------------------- +# The ElementTree toolkit is +# +# Copyright (c) 1999-2009 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. 
IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# -------------------------------------------------------------------- + +# Licensed to PSF under a Contributor Agreement. +# See https://www.python.org/psf/license for licensing details. + +## +# Implementation module for XPath support. There's usually no reason +# to import this module directly; the ElementTree does this for +# you, if needed. +## + +import re + +xpath_tokenizer_re = re.compile( + r"(" + r"'[^']*'|\"[^\"]*\"|" + r"::|" + r"//?|" + r"\.\.|" + r"\(\)|" + r"!=|" + r"[/.*:\[\]\(\)@=])|" + r"((?:\{[^}]+\})?[^/\[\]\(\)@!=\s]+)|" + r"\s+" + ) + +def xpath_tokenizer(pattern, namespaces=None): + default_namespace = namespaces.get('') if namespaces else None + parsing_attribute = False + for token in xpath_tokenizer_re.findall(pattern): + ttype, tag = token + if tag and tag[0] != "{": + if ":" in tag: + prefix, uri = tag.split(":", 1) + try: + if not namespaces: + raise KeyError + yield ttype, "{%s}%s" % (namespaces[prefix], uri) + except KeyError: + raise SyntaxError("prefix %r not found in prefix map" % prefix) from None + elif default_namespace and not parsing_attribute: + yield ttype, "{%s}%s" % (default_namespace, tag) + else: + yield token + parsing_attribute = False + else: + yield token + parsing_attribute = ttype == '@' + + +def get_parent_map(context): + parent_map = context.parent_map + if parent_map is None: + context.parent_map = parent_map = {} + for p in context.root.iter(): + for e in p: + parent_map[e] = p + return parent_map + + +def _is_wildcard_tag(tag): + return tag[:3] == '{*}' or tag[-2:] == '}*' + + +def _prepare_tag(tag): + _isinstance, _str = isinstance, str + if tag == '{*}*': + # Same as 
'*', but no comments or processing instructions. + # It can be a surprise that '*' includes those, but there is no + # justification for '{*}*' doing the same. + def select(context, result): + for elem in result: + if _isinstance(elem.tag, _str): + yield elem + elif tag == '{}*': + # Any tag that is not in a namespace. + def select(context, result): + for elem in result: + el_tag = elem.tag + if _isinstance(el_tag, _str) and el_tag[0] != '{': + yield elem + elif tag[:3] == '{*}': + # The tag in any (or no) namespace. + suffix = tag[2:] # '}name' + no_ns = slice(-len(suffix), None) + tag = tag[3:] + def select(context, result): + for elem in result: + el_tag = elem.tag + if el_tag == tag or _isinstance(el_tag, _str) and el_tag[no_ns] == suffix: + yield elem + elif tag[-2:] == '}*': + # Any tag in the given namespace. + ns = tag[:-1] + ns_only = slice(None, len(ns)) + def select(context, result): + for elem in result: + el_tag = elem.tag + if _isinstance(el_tag, _str) and el_tag[ns_only] == ns: + yield elem + else: + raise RuntimeError(f"internal parser error, got {tag}") + return select + + +def prepare_child(next, token): + tag = token[1] + if _is_wildcard_tag(tag): + select_tag = _prepare_tag(tag) + def select(context, result): + def select_child(result): + for elem in result: + yield from elem + return select_tag(context, select_child(result)) + else: + if tag[:2] == '{}': + tag = tag[2:] # '{}tag' == 'tag' + def select(context, result): + for elem in result: + for e in elem: + if e.tag == tag: + yield e + return select + +def prepare_star(next, token): + def select(context, result): + for elem in result: + yield from elem + return select + +def prepare_self(next, token): + def select(context, result): + yield from result + return select + +def prepare_descendant(next, token): + try: + token = next() + except StopIteration: + return + if token[0] == "*": + tag = "*" + elif not token[0]: + tag = token[1] + else: + raise SyntaxError("invalid descendant") + + if 
_is_wildcard_tag(tag): + select_tag = _prepare_tag(tag) + def select(context, result): + def select_child(result): + for elem in result: + for e in elem.iter(): + if e is not elem: + yield e + return select_tag(context, select_child(result)) + else: + if tag[:2] == '{}': + tag = tag[2:] # '{}tag' == 'tag' + def select(context, result): + for elem in result: + for e in elem.iter(tag): + if e is not elem: + yield e + return select + +def prepare_parent(next, token): + def select(context, result): + # FIXME: raise error if .. is applied at toplevel? + parent_map = get_parent_map(context) + result_map = {} + for elem in result: + if elem in parent_map: + parent = parent_map[elem] + if parent not in result_map: + result_map[parent] = None + yield parent + return select + +def prepare_predicate(next, token): + # FIXME: replace with real parser!!! refs: + # http://javascript.crockford.com/tdop/tdop.html + signature = [] + predicate = [] + while 1: + try: + token = next() + except StopIteration: + return + if token[0] == "]": + break + if token == ('', ''): + # ignore whitespace + continue + if token[0] and token[0][:1] in "'\"": + token = "'", token[0][1:-1] + signature.append(token[0] or "-") + predicate.append(token[1]) + signature = "".join(signature) + # use signature to determine predicate type + if signature == "@-": + # [@attribute] predicate + key = predicate[1] + def select(context, result): + for elem in result: + if elem.get(key) is not None: + yield elem + return select + if signature == "@-='" or signature == "@-!='": + # [@attribute='value'] or [@attribute!='value'] + key = predicate[1] + value = predicate[-1] + def select(context, result): + for elem in result: + if elem.get(key) == value: + yield elem + def select_negated(context, result): + for elem in result: + if (attr_value := elem.get(key)) is not None and attr_value != value: + yield elem + return select_negated if '!=' in signature else select + if signature == "-" and not re.match(r"\-?\d+$", 
predicate[0]): + # [tag] + tag = predicate[0] + def select(context, result): + for elem in result: + if elem.find(tag) is not None: + yield elem + return select + if signature == ".='" or signature == ".!='" or ( + (signature == "-='" or signature == "-!='") + and not re.match(r"\-?\d+$", predicate[0])): + # [.='value'] or [tag='value'] or [.!='value'] or [tag!='value'] + tag = predicate[0] + value = predicate[-1] + if tag: + def select(context, result): + for elem in result: + for e in elem.findall(tag): + if "".join(e.itertext()) == value: + yield elem + break + def select_negated(context, result): + for elem in result: + for e in elem.iterfind(tag): + if "".join(e.itertext()) != value: + yield elem + break + else: + def select(context, result): + for elem in result: + if "".join(elem.itertext()) == value: + yield elem + def select_negated(context, result): + for elem in result: + if "".join(elem.itertext()) != value: + yield elem + return select_negated if '!=' in signature else select + if signature == "-" or signature == "-()" or signature == "-()-": + # [index] or [last()] or [last()-index] + if signature == "-": + # [index] + index = int(predicate[0]) - 1 + if index < 0: + raise SyntaxError("XPath position >= 1 expected") + else: + if predicate[0] != "last": + raise SyntaxError("unsupported function") + if signature == "-()-": + try: + index = int(predicate[2]) - 1 + except ValueError: + raise SyntaxError("unsupported expression") + if index > -2: + raise SyntaxError("XPath offset from last() must be negative") + else: + index = -1 + def select(context, result): + parent_map = get_parent_map(context) + for elem in result: + try: + parent = parent_map[elem] + # FIXME: what if the selector is "*" ? 
+ elems = list(parent.findall(elem.tag)) + if elems[index] is elem: + yield elem + except (IndexError, KeyError): + pass + return select + raise SyntaxError("invalid predicate") + +ops = { + "": prepare_child, + "*": prepare_star, + ".": prepare_self, + "..": prepare_parent, + "//": prepare_descendant, + "[": prepare_predicate, + } + +_cache = {} + +class _SelectorContext: + parent_map = None + def __init__(self, root): + self.root = root + +# -------------------------------------------------------------------- + +## +# Generate all matching objects. + +def iterfind(elem, path, namespaces=None): + # compile selector pattern + if path[-1:] == "/": + path = path + "*" # implicit all (FIXME: keep this?) + + cache_key = (path,) + if namespaces: + cache_key += tuple(sorted(namespaces.items())) + + try: + selector = _cache[cache_key] + except KeyError: + if len(_cache) > 100: + _cache.clear() + if path[:1] == "/": + raise SyntaxError("cannot use absolute path on element") + next = iter(xpath_tokenizer(path, namespaces)).__next__ + try: + token = next() + except StopIteration: + return + selector = [] + while 1: + try: + selector.append(ops[token[0]](next, token)) + except StopIteration: + raise SyntaxError("invalid path") from None + try: + token = next() + if token[0] == "/": + token = next() + except StopIteration: + break + _cache[cache_key] = selector + # execute selector pattern + result = [elem] + context = _SelectorContext(elem) + for select in selector: + result = select(context, result) + return result + +## +# Find first matching object. + +def find(elem, path, namespaces=None): + return next(iterfind(elem, path, namespaces), None) + +## +# Find all matching objects. + +def findall(elem, path, namespaces=None): + return list(iterfind(elem, path, namespaces)) + +## +# Find text for first matching object. 
+ +def findtext(elem, path, default=None, namespaces=None): + try: + elem = next(iterfind(elem, path, namespaces)) + if elem.text is None: + return "" + return elem.text + except StopIteration: + return default diff --git a/defaults/lib/x/etree/ElementTree.py b/defaults/lib/x/etree/ElementTree.py new file mode 100644 index 0000000..df5d519 --- /dev/null +++ b/defaults/lib/x/etree/ElementTree.py @@ -0,0 +1,2063 @@ +"""Lightweight XML support for Python. + + XML is an inherently hierarchical data format, and the most natural way to + represent it is with a tree. This module has two classes for this purpose: + + 1. ElementTree represents the whole XML document as a tree and + + 2. Element represents a single node in this tree. + + Interactions with the whole document (reading and writing to/from files) are + usually done on the ElementTree level. Interactions with a single XML element + and its sub-elements are done on the Element level. + + Element is a flexible container object designed to store hierarchical data + structures in memory. It can be described as a cross between a list and a + dictionary. Each Element has a number of properties associated with it: + + 'tag' - a string containing the element's name. + + 'attributes' - a Python dictionary storing the element's attributes. + + 'text' - a string containing the element's text content. + + 'tail' - an optional string containing text after the element's end tag. + + And a number of child elements stored in a Python sequence. + + To create an element instance, use the Element constructor, + or the SubElement factory function. + + You can also use the ElementTree class to wrap an element structure + and convert it to and from XML. + +""" + +#--------------------------------------------------------------------- +# Licensed to PSF under a Contributor Agreement. +# See https://www.python.org/psf/license for licensing details. +# +# ElementTree +# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved. 
+# +# fredrik@pythonware.com +# http://www.pythonware.com +# -------------------------------------------------------------------- +# The ElementTree toolkit is +# +# Copyright (c) 1999-2008 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. 
+# -------------------------------------------------------------------- + +__all__ = [ + # public symbols + "Comment", + "dump", + "Element", "ElementTree", + "fromstring", "fromstringlist", + "indent", "iselement", "iterparse", + "parse", "ParseError", + "PI", "ProcessingInstruction", + "QName", + "SubElement", + "tostring", "tostringlist", + "TreeBuilder", + "VERSION", + "XML", "XMLID", + "XMLParser", "XMLPullParser", + "register_namespace", + "canonicalize", "C14NWriterTarget", + ] + +VERSION = "1.3.0" + +import sys +import re +import warnings +import io +import collections +import collections.abc +import contextlib + +from . import ElementPath + + +class ParseError(SyntaxError): + """An error when parsing an XML document. + + In addition to its exception value, a ParseError contains + two extra attributes: + 'code' - the specific exception code + 'position' - the line and column of the error + + """ + pass + +# -------------------------------------------------------------------- + + +def iselement(element): + """Return True if *element* appears to be an Element.""" + return hasattr(element, 'tag') + + +class Element: + """An XML element. + + This class is the reference implementation of the Element interface. + + An element's length is its number of subelements. That means if you + want to check if an element is truly empty, you should check BOTH + its length AND its text attribute. + + The element tag, attribute names, and attribute values can be either + bytes or strings. + + *tag* is the element name. *attrib* is an optional dictionary containing + element attributes. *extra* are additional element attributes given as + keyword arguments. + + Example form: + text...tail + + """ + + tag = None + """The element's name.""" + + attrib = None + """Dictionary of the element's attributes.""" + + text = None + """ + Text before first subelement. This is either a string or the value None. 
+ Note that if there is no text, this attribute may be either + None or the empty string, depending on the parser. + + """ + + tail = None + """ + Text after this element's end tag, but before the next sibling element's + start tag. This is either a string or the value None. Note that if there + was no text, this attribute may be either None or an empty string, + depending on the parser. + + """ + + def __init__(self, tag, attrib={}, **extra): + if not isinstance(attrib, dict): + raise TypeError("attrib must be dict, not %s" % ( + attrib.__class__.__name__,)) + self.tag = tag + self.attrib = {**attrib, **extra} + self._children = [] + + def __repr__(self): + return "<%s %r at %#x>" % (self.__class__.__name__, self.tag, id(self)) + + def makeelement(self, tag, attrib): + """Create a new element with the same type. + + *tag* is a string containing the element name. + *attrib* is a dictionary containing the element attributes. + + Do not call this method, use the SubElement factory function instead. + + """ + return self.__class__(tag, attrib) + + def __copy__(self): + elem = self.makeelement(self.tag, self.attrib) + elem.text = self.text + elem.tail = self.tail + elem[:] = self + return elem + + def __len__(self): + return len(self._children) + + def __bool__(self): + warnings.warn( + "The behavior of this method will change in future versions. " + "Use specific 'len(elem)' or 'elem is not None' test instead.", + FutureWarning, stacklevel=2 + ) + return len(self._children) != 0 # emulate old behaviour, for now + + def __getitem__(self, index): + return self._children[index] + + def __setitem__(self, index, element): + if isinstance(index, slice): + for elt in element: + self._assert_is_element(elt) + else: + self._assert_is_element(element) + self._children[index] = element + + def __delitem__(self, index): + del self._children[index] + + def append(self, subelement): + """Add *subelement* to the end of this element. 
+ + The new element will appear in document order after the last existing + subelement (or directly after the text, if it's the first subelement), + but before the end tag for this element. + + """ + self._assert_is_element(subelement) + self._children.append(subelement) + + def extend(self, elements): + """Append subelements from a sequence. + + *elements* is a sequence with zero or more elements. + + """ + for element in elements: + self._assert_is_element(element) + self._children.append(element) + + def insert(self, index, subelement): + """Insert *subelement* at position *index*.""" + self._assert_is_element(subelement) + self._children.insert(index, subelement) + + def _assert_is_element(self, e): + # Need to refer to the actual Python implementation, not the + # shadowing C implementation. + if not isinstance(e, _Element_Py): + raise TypeError('expected an Element, not %s' % type(e).__name__) + + def remove(self, subelement): + """Remove matching subelement. + + Unlike the find methods, this method compares elements based on + identity, NOT ON tag value or contents. To remove subelements by + other means, the easiest way is to use a list comprehension to + select what elements to keep, and then use slice assignment to update + the parent element. + + ValueError is raised if a matching element could not be found. + + """ + # assert iselement(element) + self._children.remove(subelement) + + def find(self, path, namespaces=None): + """Find first matching element by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + """ + return ElementPath.find(self, path, namespaces) + + def findtext(self, path, default=None, namespaces=None): + """Find text for first matching element by tag name or path. 
+ + *path* is a string having either an element tag or an XPath, + *default* is the value to return if the element was not found, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return text content of first matching element, or default value if + none was found. Note that if an element is found having no text + content, the empty string is returned. + + """ + return ElementPath.findtext(self, path, default, namespaces) + + def findall(self, path, namespaces=None): + """Find all matching subelements by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Returns list containing all matching elements in document order. + + """ + return ElementPath.findall(self, path, namespaces) + + def iterfind(self, path, namespaces=None): + """Find all matching subelements by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return an iterable yielding all matching elements in document order. + + """ + return ElementPath.iterfind(self, path, namespaces) + + def clear(self): + """Reset element. + + This function removes all subelements, clears all attributes, and sets + the text and tail attributes to None. + + """ + self.attrib.clear() + self._children = [] + self.text = self.tail = None + + def get(self, key, default=None): + """Get element attribute. + + Equivalent to attrib.get, but some implementations may handle this a + bit more efficiently. *key* is what attribute to look for, and + *default* is what to return if the attribute was not found. + + Returns a string containing the attribute value, or the default if + attribute was not found. + + """ + return self.attrib.get(key, default) + + def set(self, key, value): + """Set element attribute. 
+ + Equivalent to attrib[key] = value, but some implementations may handle + this a bit more efficiently. *key* is what attribute to set, and + *value* is the attribute value to set it to. + + """ + self.attrib[key] = value + + def keys(self): + """Get list of attribute names. + + Names are returned in an arbitrary order, just like an ordinary + Python dict. Equivalent to attrib.keys() + + """ + return self.attrib.keys() + + def items(self): + """Get element attributes as a sequence. + + The attributes are returned in arbitrary order. Equivalent to + attrib.items(). + + Return a list of (name, value) tuples. + + """ + return self.attrib.items() + + def iter(self, tag=None): + """Create tree iterator. + + The iterator loops over the element and all subelements in document + order, returning all elements with a matching tag. + + If the tree structure is modified during iteration, new or removed + elements may or may not be included. To get a stable set, use the + list() function on the iterator, and loop over the resulting list. + + *tag* is what tags to look for (default is to return all elements) + + Return an iterator containing all the matching elements. + + """ + if tag == "*": + tag = None + if tag is None or self.tag == tag: + yield self + for e in self._children: + yield from e.iter(tag) + + def itertext(self): + """Create text iterator. + + The iterator loops over the element and all subelements in document + order, returning all inner text. + + """ + tag = self.tag + if not isinstance(tag, str) and tag is not None: + return + t = self.text + if t: + yield t + for e in self: + yield from e.itertext() + t = e.tail + if t: + yield t + + +def SubElement(parent, tag, attrib={}, **extra): + """Subelement factory which creates an element instance, and appends it + to an existing parent. + + The element tag, attribute names, and attribute values can be either + bytes or Unicode strings. 
+ + *parent* is the parent element, *tag* is the subelements name, *attrib* is + an optional directory containing element attributes, *extra* are + additional attributes given as keyword arguments. + + """ + attrib = {**attrib, **extra} + element = parent.makeelement(tag, attrib) + parent.append(element) + return element + + +def Comment(text=None): + """Comment element factory. + + This function creates a special element which the standard serializer + serializes as an XML comment. + + *text* is a string containing the comment string. + + """ + element = Element(Comment) + element.text = text + return element + + +def ProcessingInstruction(target, text=None): + """Processing Instruction element factory. + + This function creates a special element which the standard serializer + serializes as an XML comment. + + *target* is a string containing the processing instruction, *text* is a + string containing the processing instruction contents, if any. + + """ + element = Element(ProcessingInstruction) + element.text = target + if text: + element.text = element.text + " " + text + return element + +PI = ProcessingInstruction + + +class QName: + """Qualified name wrapper. + + This class can be used to wrap a QName attribute value in order to get + proper namespace handing on output. + + *text_or_uri* is a string containing the QName value either in the form + {uri}local, or if the tag argument is given, the URI part of a QName. + + *tag* is an optional argument which if given, will make the first + argument (text_or_uri) be interpreted as a URI, and this argument (tag) + be interpreted as a local name. 
+ + """ + def __init__(self, text_or_uri, tag=None): + if tag: + text_or_uri = "{%s}%s" % (text_or_uri, tag) + self.text = text_or_uri + def __str__(self): + return self.text + def __repr__(self): + return '<%s %r>' % (self.__class__.__name__, self.text) + def __hash__(self): + return hash(self.text) + def __le__(self, other): + if isinstance(other, QName): + return self.text <= other.text + return self.text <= other + def __lt__(self, other): + if isinstance(other, QName): + return self.text < other.text + return self.text < other + def __ge__(self, other): + if isinstance(other, QName): + return self.text >= other.text + return self.text >= other + def __gt__(self, other): + if isinstance(other, QName): + return self.text > other.text + return self.text > other + def __eq__(self, other): + if isinstance(other, QName): + return self.text == other.text + return self.text == other + +# -------------------------------------------------------------------- + + +class ElementTree: + """An XML element hierarchy. + + This class also provides support for serialization to and from + standard XML. + + *element* is an optional root element node, + *file* is an optional file handle or file name of an XML file whose + contents will be used to initialize the tree with. + + """ + def __init__(self, element=None, file=None): + # assert element is None or iselement(element) + self._root = element # first node + if file: + self.parse(file) + + def getroot(self): + """Return root element of this tree.""" + return self._root + + def _setroot(self, element): + """Replace root element of this tree. + + This will discard the current contents of the tree and replace it + with the given element. Use with care! + + """ + # assert iselement(element) + self._root = element + + def parse(self, source, parser=None): + """Load external XML document into element tree. + + *source* is a file name or file object, *parser* is an optional parser + instance that defaults to XMLParser. 
+ + ParseError is raised if the parser fails to parse the document. + + Returns the root element of the given source document. + + """ + close_source = False + if not hasattr(source, "read"): + source = open(source, "rb") + close_source = True + try: + if parser is None: + # If no parser was specified, create a default XMLParser + parser = XMLParser() + if hasattr(parser, '_parse_whole'): + # The default XMLParser, when it comes from an accelerator, + # can define an internal _parse_whole API for efficiency. + # It can be used to parse the whole source without feeding + # it with chunks. + self._root = parser._parse_whole(source) + return self._root + while data := source.read(65536): + parser.feed(data) + self._root = parser.close() + return self._root + finally: + if close_source: + source.close() + + def iter(self, tag=None): + """Create and return tree iterator for the root element. + + The iterator loops over all elements in this tree, in document order. + + *tag* is a string with the tag name to iterate over + (default is to return all elements). + + """ + # assert self._root is not None + return self._root.iter(tag) + + def find(self, path, namespaces=None): + """Find first matching element by tag name or path. + + Same as getroot().find(path), which is Element.find() + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + """ + # assert self._root is not None + if path[:1] == "/": + path = "." + path + warnings.warn( + "This search is broken in 1.3 and earlier, and will be " + "fixed in a future version. If you rely on the current " + "behaviour, change it to %r" % path, + FutureWarning, stacklevel=2 + ) + return self._root.find(path, namespaces) + + def findtext(self, path, default=None, namespaces=None): + """Find first matching element by tag name or path. 
+ + Same as getroot().findtext(path), which is Element.findtext() + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + """ + # assert self._root is not None + if path[:1] == "/": + path = "." + path + warnings.warn( + "This search is broken in 1.3 and earlier, and will be " + "fixed in a future version. If you rely on the current " + "behaviour, change it to %r" % path, + FutureWarning, stacklevel=2 + ) + return self._root.findtext(path, default, namespaces) + + def findall(self, path, namespaces=None): + """Find all matching subelements by tag name or path. + + Same as getroot().findall(path), which is Element.findall(). + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return list containing all matching elements in document order. + + """ + # assert self._root is not None + if path[:1] == "/": + path = "." + path + warnings.warn( + "This search is broken in 1.3 and earlier, and will be " + "fixed in a future version. If you rely on the current " + "behaviour, change it to %r" % path, + FutureWarning, stacklevel=2 + ) + return self._root.findall(path, namespaces) + + def iterfind(self, path, namespaces=None): + """Find all matching subelements by tag name or path. + + Same as getroot().iterfind(path), which is element.iterfind() + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return an iterable yielding all matching elements in document order. + + """ + # assert self._root is not None + if path[:1] == "/": + path = "." + path + warnings.warn( + "This search is broken in 1.3 and earlier, and will be " + "fixed in a future version. 
If you rely on the current "
+ "behaviour, change it to %r" % path,
+ FutureWarning, stacklevel=2
+ )
+ return self._root.iterfind(path, namespaces)
+
+ def write(self, file_or_filename,
+ encoding=None,
+ xml_declaration=None,
+ default_namespace=None,
+ method=None, *,
+ short_empty_elements=True):
+ """Write element tree to a file as XML.
+
+ Arguments:
+ *file_or_filename* -- file name or a file object opened for writing
+
+ *encoding* -- the output encoding (default: US-ASCII)
+
+ *xml_declaration* -- bool indicating if an XML declaration should be
+ added to the output. If None, an XML declaration
+ is added if encoding IS NOT either of:
+ US-ASCII, UTF-8, or Unicode
+
+ *default_namespace* -- sets the default XML namespace (for "xmlns")
+
+ *method* -- either "xml" (default), "html, "text", or "c14n"
+
+ *short_empty_elements* -- controls the formatting of elements
+ that contain no content. If True (default)
+ they are emitted as a single self-closed
+ tag, otherwise they are emitted as a pair
+ of start/end tags
+
+ """
+ if not method:
+ method = "xml"
+ elif method not in _serialize:
+ raise ValueError("unknown method %r" % method)
+ if not encoding:
+ if method == "c14n":
+ encoding = "utf-8"
+ else:
+ encoding = "us-ascii"
+ with _get_writer(file_or_filename, encoding) as (write, declared_encoding):
+ if method == "xml" and (xml_declaration or
+ (xml_declaration is None and
+ encoding.lower() != "unicode" and
+ declared_encoding.lower() not in ("utf-8", "us-ascii"))):
+ write("<?xml version='1.0' encoding='%s'?>\n" % (
+ declared_encoding,))
+ if method == "text":
+ _serialize_text(write, self._root)
+ else:
+ qnames, namespaces = _namespaces(self._root, default_namespace)
+ serialize = _serialize[method]
+ serialize(write, self._root, qnames, namespaces,
+ short_empty_elements=short_empty_elements)
+
+ def write_c14n(self, file):
+ # lxml.etree compatibility.
use output method instead + return self.write(file, method="c14n") + +# -------------------------------------------------------------------- +# serialization support + +@contextlib.contextmanager +def _get_writer(file_or_filename, encoding): + # returns text write method and release all resources after using + try: + write = file_or_filename.write + except AttributeError: + # file_or_filename is a file name + if encoding.lower() == "unicode": + encoding="utf-8" + with open(file_or_filename, "w", encoding=encoding, + errors="xmlcharrefreplace") as file: + yield file.write, encoding + else: + # file_or_filename is a file-like object + # encoding determines if it is a text or binary writer + if encoding.lower() == "unicode": + # use a text writer as is + yield write, getattr(file_or_filename, "encoding", None) or "utf-8" + else: + # wrap a binary writer with TextIOWrapper + with contextlib.ExitStack() as stack: + if isinstance(file_or_filename, io.BufferedIOBase): + file = file_or_filename + elif isinstance(file_or_filename, io.RawIOBase): + file = io.BufferedWriter(file_or_filename) + # Keep the original file open when the BufferedWriter is + # destroyed + stack.callback(file.detach) + else: + # This is to handle passed objects that aren't in the + # IOBase hierarchy, but just have a write method + file = io.BufferedIOBase() + file.writable = lambda: True + file.write = write + try: + # TextIOWrapper uses this methods to determine + # if BOM (for UTF-16, etc) should be added + file.seekable = file_or_filename.seekable + file.tell = file_or_filename.tell + except AttributeError: + pass + file = io.TextIOWrapper(file, + encoding=encoding, + errors="xmlcharrefreplace", + newline="\n") + # Keep the original file open when the TextIOWrapper is + # destroyed + stack.callback(file.detach) + yield file.write, encoding + +def _namespaces(elem, default_namespace=None): + # identify namespaces used in this tree + + # maps qnames to *encoded* prefix:local names + qnames = {None: 
None}
+
+ # maps uri:s to prefixes
+ namespaces = {}
+ if default_namespace:
+ namespaces[default_namespace] = ""
+
+ def add_qname(qname):
+ # calculate serialized qname representation
+ try:
+ if qname[:1] == "{":
+ uri, tag = qname[1:].rsplit("}", 1)
+ prefix = namespaces.get(uri)
+ if prefix is None:
+ prefix = _namespace_map.get(uri)
+ if prefix is None:
+ prefix = "ns%d" % len(namespaces)
+ if prefix != "xml":
+ namespaces[uri] = prefix
+ if prefix:
+ qnames[qname] = "%s:%s" % (prefix, tag)
+ else:
+ qnames[qname] = tag # default element
+ else:
+ if default_namespace:
+ # FIXME: can this be handled in XML 1.0?
+ raise ValueError(
+ "cannot use non-qualified names with "
+ "default_namespace option"
+ )
+ qnames[qname] = qname
+ except TypeError:
+ _raise_serialization_error(qname)
+
+ # populate qname and namespaces table
+ for elem in elem.iter():
+ tag = elem.tag
+ if isinstance(tag, QName):
+ if tag.text not in qnames:
+ add_qname(tag.text)
+ elif isinstance(tag, str):
+ if tag not in qnames:
+ add_qname(tag)
+ elif tag is not None and tag is not Comment and tag is not PI:
+ _raise_serialization_error(tag)
+ for key, value in elem.items():
+ if isinstance(key, QName):
+ key = key.text
+ if key not in qnames:
+ add_qname(key)
+ if isinstance(value, QName) and value.text not in qnames:
+ add_qname(value.text)
+ text = elem.text
+ if isinstance(text, QName) and text.text not in qnames:
+ add_qname(text.text)
+ return qnames, namespaces
+
+ def _serialize_xml(write, elem, qnames, namespaces,
+ short_empty_elements, **kwargs):
+ tag = elem.tag
+ text = elem.text
+ if tag is Comment:
+ write("<!--%s-->" % text)
+ elif tag is ProcessingInstruction:
+ write("<?%s?>" % text)
+ else:
+ tag = qnames[tag]
+ if tag is None:
+ if text:
+ write(_escape_cdata(text))
+ for e in elem:
+ _serialize_xml(write, e, qnames, None,
+ short_empty_elements=short_empty_elements)
+ else:
+ write("<" + tag)
+ items = list(elem.items())
+ if items or namespaces:
+ if namespaces:
+ for v, k in
sorted(namespaces.items(),
+ key=lambda x: x[1]): # sort on prefix
+ if k:
+ k = ":" + k
+ write(" xmlns%s=\"%s\"" % (
+ k,
+ _escape_attrib(v)
+ ))
+ for k, v in items:
+ if isinstance(k, QName):
+ k = k.text
+ if isinstance(v, QName):
+ v = qnames[v.text]
+ else:
+ v = _escape_attrib(v)
+ write(" %s=\"%s\"" % (qnames[k], v))
+ if text or len(elem) or not short_empty_elements:
+ write(">")
+ if text:
+ write(_escape_cdata(text))
+ for e in elem:
+ _serialize_xml(write, e, qnames, None,
+ short_empty_elements=short_empty_elements)
+ write("</" + tag + ">")
+ else:
+ write(" />")
+ if elem.tail:
+ write(_escape_cdata(elem.tail))
+
+ HTML_EMPTY = {"area", "base", "basefont", "br", "col", "embed", "frame", "hr",
+ "img", "input", "isindex", "link", "meta", "param", "source",
+ "track", "wbr"}
+
+ def _serialize_html(write, elem, qnames, namespaces, **kwargs):
+ tag = elem.tag
+ text = elem.text
+ if tag is Comment:
+ write("<!--%s-->" % _escape_cdata(text))
+ elif tag is ProcessingInstruction:
+ write("<?%s?>" % _escape_cdata(text))
+ else:
+ tag = qnames[tag]
+ if tag is None:
+ if text:
+ write(_escape_cdata(text))
+ for e in elem:
+ _serialize_html(write, e, qnames, None)
+ else:
+ write("<" + tag)
+ items = list(elem.items())
+ if items or namespaces:
+ if namespaces:
+ for v, k in sorted(namespaces.items(),
+ key=lambda x: x[1]): # sort on prefix
+ if k:
+ k = ":" + k
+ write(" xmlns%s=\"%s\"" % (
+ k,
+ _escape_attrib(v)
+ ))
+ for k, v in items:
+ if isinstance(k, QName):
+ k = k.text
+ if isinstance(v, QName):
+ v = qnames[v.text]
+ else:
+ v = _escape_attrib_html(v)
+ # FIXME: handle boolean attributes
+ write(" %s=\"%s\"" % (qnames[k], v))
+ write(">")
+ ltag = tag.lower()
+ if text:
+ if ltag == "script" or ltag == "style":
+ write(text)
+ else:
+ write(_escape_cdata(text))
+ for e in elem:
+ _serialize_html(write, e, qnames, None)
+ if ltag not in HTML_EMPTY:
+ write("</" + tag + ">")
+ if elem.tail:
+ write(_escape_cdata(elem.tail))
+
+ def _serialize_text(write, elem):
+ for part in elem.itertext():
+ write(part) + if elem.tail: + write(elem.tail) + +_serialize = { + "xml": _serialize_xml, + "html": _serialize_html, + "text": _serialize_text, +# this optional method is imported at the end of the module +# "c14n": _serialize_c14n, +} + + +def register_namespace(prefix, uri): + """Register a namespace prefix. + + The registry is global, and any existing mapping for either the + given prefix or the namespace URI will be removed. + + *prefix* is the namespace prefix, *uri* is a namespace uri. Tags and + attributes in this namespace will be serialized with prefix if possible. + + ValueError is raised if prefix is reserved or is invalid. + + """ + if re.match(r"ns\d+$", prefix): + raise ValueError("Prefix format reserved for internal use") + for k, v in list(_namespace_map.items()): + if k == uri or v == prefix: + del _namespace_map[k] + _namespace_map[uri] = prefix + +_namespace_map = { + # "well-known" namespace prefixes + "http://www.w3.org/XML/1998/namespace": "xml", + "http://www.w3.org/1999/xhtml": "html", + "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf", + "http://schemas.xmlsoap.org/wsdl/": "wsdl", + # xml schema + "http://www.w3.org/2001/XMLSchema": "xs", + "http://www.w3.org/2001/XMLSchema-instance": "xsi", + # dublin core + "http://purl.org/dc/elements/1.1/": "dc", +} +# For tests and troubleshooting +register_namespace._namespace_map = _namespace_map + +def _raise_serialization_error(text): + raise TypeError( + "cannot serialize %r (type %s)" % (text, type(text).__name__) + ) + +def _escape_cdata(text): + # escape character data + try: + # it's worth avoiding do-nothing calls for strings that are + # shorter than 500 characters, or so. assume that's, by far, + # the most common case in most applications. 
+
+ if "&" in text:
+ text = text.replace("&", "&amp;")
+ if "<" in text:
+ text = text.replace("<", "&lt;")
+ if ">" in text:
+ text = text.replace(">", "&gt;")
+ return text
+ except (TypeError, AttributeError):
+ _raise_serialization_error(text)
+
+ def _escape_attrib(text):
+ # escape attribute value
+ try:
+ if "&" in text:
+ text = text.replace("&", "&amp;")
+ if "<" in text:
+ text = text.replace("<", "&lt;")
+ if ">" in text:
+ text = text.replace(">", "&gt;")
+ if "\"" in text:
+ text = text.replace("\"", "&quot;")
+ # Although section 2.11 of the XML specification states that CR or
+ # CR LN should be replaced with just LN, it applies only to EOLNs
+ # which take part of organizing file into lines. Within attributes,
+ # we are replacing these with entity numbers, so they do not count.
+ # http://www.w3.org/TR/REC-xml/#sec-line-ends
+ # The current solution, contained in following six lines, was
+ # discussed in issue 17582 and 39011.
+ if "\r" in text:
+ text = text.replace("\r", "&#13;")
+ if "\n" in text:
+ text = text.replace("\n", "&#10;")
+ if "\t" in text:
+ text = text.replace("\t", "&#09;")
+ return text
+ except (TypeError, AttributeError):
+ _raise_serialization_error(text)
+
+ def _escape_attrib_html(text):
+ # escape attribute value
+ try:
+ if "&" in text:
+ text = text.replace("&", "&amp;")
+ if ">" in text:
+ text = text.replace(">", "&gt;")
+ if "\"" in text:
+ text = text.replace("\"", "&quot;")
+ return text
+ except (TypeError, AttributeError):
+ _raise_serialization_error(text)
+
+ # --------------------------------------------------------------------
+
+ def tostring(element, encoding=None, method=None, *,
+ xml_declaration=None, default_namespace=None,
+ short_empty_elements=True):
+ """Generate string representation of XML element.
+
+ All subelements are included. If encoding is "unicode", a string
+ is returned. Otherwise a bytestring is returned.
+ + *element* is an Element instance, *encoding* is an optional output + encoding defaulting to US-ASCII, *method* is an optional output which can + be one of "xml" (default), "html", "text" or "c14n", *default_namespace* + sets the default XML namespace (for "xmlns"). + + Returns an (optionally) encoded string containing the XML data. + + """ + stream = io.StringIO() if encoding == 'unicode' else io.BytesIO() + ElementTree(element).write(stream, encoding, + xml_declaration=xml_declaration, + default_namespace=default_namespace, + method=method, + short_empty_elements=short_empty_elements) + return stream.getvalue() + +class _ListDataStream(io.BufferedIOBase): + """An auxiliary stream accumulating into a list reference.""" + def __init__(self, lst): + self.lst = lst + + def writable(self): + return True + + def seekable(self): + return True + + def write(self, b): + self.lst.append(b) + + def tell(self): + return len(self.lst) + +def tostringlist(element, encoding=None, method=None, *, + xml_declaration=None, default_namespace=None, + short_empty_elements=True): + lst = [] + stream = _ListDataStream(lst) + ElementTree(element).write(stream, encoding, + xml_declaration=xml_declaration, + default_namespace=default_namespace, + method=method, + short_empty_elements=short_empty_elements) + return lst + + +def dump(elem): + """Write element tree or element structure to sys.stdout. + + This function should be used for debugging only. + + *elem* is either an ElementTree, or a single Element. The exact output + format is implementation dependent. In this version, it's written as an + ordinary XML file. + + """ + # debugging + if not isinstance(elem, ElementTree): + elem = ElementTree(elem) + elem.write(sys.stdout, encoding="unicode") + tail = elem.getroot().tail + if not tail or tail[-1] != "\n": + sys.stdout.write("\n") + + +def indent(tree, space=" ", level=0): + """Indent an XML document by inserting newlines and indentation space + after elements. 
+ + *tree* is the ElementTree or Element to modify. The (root) element + itself will not be changed, but the tail text of all elements in its + subtree will be adapted. + + *space* is the whitespace to insert for each indentation level, two + space characters by default. + + *level* is the initial indentation level. Setting this to a higher + value than 0 can be used for indenting subtrees that are more deeply + nested inside of a document. + """ + if isinstance(tree, ElementTree): + tree = tree.getroot() + if level < 0: + raise ValueError(f"Initial indentation level must be >= 0, got {level}") + if not len(tree): + return + + # Reduce the memory consumption by reusing indentation strings. + indentations = ["\n" + level * space] + + def _indent_children(elem, level): + # Start a new indentation level for the first child. + child_level = level + 1 + try: + child_indentation = indentations[child_level] + except IndexError: + child_indentation = indentations[level] + space + indentations.append(child_indentation) + + if not elem.text or not elem.text.strip(): + elem.text = child_indentation + + for child in elem: + if len(child): + _indent_children(child, child_level) + if not child.tail or not child.tail.strip(): + child.tail = child_indentation + + # Dedent after the last child by overwriting the previous indentation. + if not child.tail.strip(): + child.tail = indentations[level] + + _indent_children(tree, 0) + + +# -------------------------------------------------------------------- +# parsing + + +def parse(source, parser=None): + """Parse XML document into element tree. + + *source* is a filename or file object containing XML data, + *parser* is an optional parser instance defaulting to XMLParser. + + Return an ElementTree instance. + + """ + tree = ElementTree() + tree.parse(source, parser) + return tree + + +def iterparse(source, events=None, parser=None): + """Incrementally parse XML document into ElementTree. 
+ + This class also reports what's going on to the user based on the + *events* it is initialized with. The supported events are the strings + "start", "end", "start-ns" and "end-ns" (the "ns" events are used to get + detailed namespace information). If *events* is omitted, only + "end" events are reported. + + *source* is a filename or file object containing XML data, *events* is + a list of events to report back, *parser* is an optional parser instance. + + Returns an iterator providing (event, elem) pairs. + + """ + # Use the internal, undocumented _parser argument for now; When the + # parser argument of iterparse is removed, this can be killed. + pullparser = XMLPullParser(events=events, _parser=parser) + + def iterator(source): + close_source = False + try: + if not hasattr(source, "read"): + source = open(source, "rb") + close_source = True + yield None + while True: + yield from pullparser.read_events() + # load event buffer + data = source.read(16 * 1024) + if not data: + break + pullparser.feed(data) + root = pullparser._close_and_return_root() + yield from pullparser.read_events() + it.root = root + finally: + if close_source: + source.close() + + class IterParseIterator(collections.abc.Iterator): + __next__ = iterator(source).__next__ + it = IterParseIterator() + it.root = None + del iterator, IterParseIterator + + next(it) + return it + + +class XMLPullParser: + + def __init__(self, events=None, *, _parser=None): + # The _parser argument is for internal use only and must not be relied + # upon in user code. It will be removed in a future release. + # See https://bugs.python.org/issue17741 for more details. 
+ + self._events_queue = collections.deque() + self._parser = _parser or XMLParser(target=TreeBuilder()) + # wire up the parser for event reporting + if events is None: + events = ("end",) + self._parser._setevents(self._events_queue, events) + + def feed(self, data): + """Feed encoded data to parser.""" + if self._parser is None: + raise ValueError("feed() called after end of stream") + if data: + try: + self._parser.feed(data) + except SyntaxError as exc: + self._events_queue.append(exc) + + def _close_and_return_root(self): + # iterparse needs this to set its root attribute properly :( + root = self._parser.close() + self._parser = None + return root + + def close(self): + """Finish feeding data to parser. + + Unlike XMLParser, does not return the root element. Use + read_events() to consume elements from XMLPullParser. + """ + self._close_and_return_root() + + def read_events(self): + """Return an iterator over currently available (event, elem) pairs. + + Events are consumed from the internal event queue as they are + retrieved from the iterator. + """ + events = self._events_queue + while events: + event = events.popleft() + if isinstance(event, Exception): + raise event + else: + yield event + + +def XML(text, parser=None): + """Parse XML document from string constant. + + This function can be used to embed "XML Literals" in Python code. + + *text* is a string containing XML data, *parser* is an + optional parser instance, defaulting to the standard XMLParser. + + Returns an Element instance. + + """ + if not parser: + parser = XMLParser(target=TreeBuilder()) + parser.feed(text) + return parser.close() + + +def XMLID(text, parser=None): + """Parse XML document from string constant for its IDs. + + *text* is a string containing XML data, *parser* is an + optional parser instance, defaulting to the standard XMLParser. + + Returns an (Element, dict) tuple, in which the + dict maps element id:s to elements. 
+ + """ + if not parser: + parser = XMLParser(target=TreeBuilder()) + parser.feed(text) + tree = parser.close() + ids = {} + for elem in tree.iter(): + id = elem.get("id") + if id: + ids[id] = elem + return tree, ids + +# Parse XML document from string constant. Alias for XML(). +fromstring = XML + +def fromstringlist(sequence, parser=None): + """Parse XML document from sequence of string fragments. + + *sequence* is a list of other sequence, *parser* is an optional parser + instance, defaulting to the standard XMLParser. + + Returns an Element instance. + + """ + if not parser: + parser = XMLParser(target=TreeBuilder()) + for text in sequence: + parser.feed(text) + return parser.close() + +# -------------------------------------------------------------------- + + +class TreeBuilder: + """Generic element structure builder. + + This builder converts a sequence of start, data, and end method + calls to a well-formed element structure. + + You can use this class to build an element structure using a custom XML + parser, or a parser for some other XML-like format. + + *element_factory* is an optional element factory which is called + to create new Element instances, as necessary. + + *comment_factory* is a factory to create comments to be used instead of + the standard factory. If *insert_comments* is false (the default), + comments will not be inserted into the tree. + + *pi_factory* is a factory to create processing instructions to be used + instead of the standard factory. If *insert_pis* is false (the default), + processing instructions will not be inserted into the tree. 
+ """ + def __init__(self, element_factory=None, *, + comment_factory=None, pi_factory=None, + insert_comments=False, insert_pis=False): + self._data = [] # data collector + self._elem = [] # element stack + self._last = None # last element + self._root = None # root element + self._tail = None # true if we're after an end tag + if comment_factory is None: + comment_factory = Comment + self._comment_factory = comment_factory + self.insert_comments = insert_comments + if pi_factory is None: + pi_factory = ProcessingInstruction + self._pi_factory = pi_factory + self.insert_pis = insert_pis + if element_factory is None: + element_factory = Element + self._factory = element_factory + + def close(self): + """Flush builder buffers and return toplevel document Element.""" + assert len(self._elem) == 0, "missing end tags" + assert self._root is not None, "missing toplevel element" + return self._root + + def _flush(self): + if self._data: + if self._last is not None: + text = "".join(self._data) + if self._tail: + assert self._last.tail is None, "internal error (tail)" + self._last.tail = text + else: + assert self._last.text is None, "internal error (text)" + self._last.text = text + self._data = [] + + def data(self, data): + """Add text to current element.""" + self._data.append(data) + + def start(self, tag, attrs): + """Open new element and return it. + + *tag* is the element name, *attrs* is a dict containing element + attributes. + + """ + self._flush() + self._last = elem = self._factory(tag, attrs) + if self._elem: + self._elem[-1].append(elem) + elif self._root is None: + self._root = elem + self._elem.append(elem) + self._tail = 0 + return elem + + def end(self, tag): + """Close and return current Element. + + *tag* is the element name. 
+ + """ + self._flush() + self._last = self._elem.pop() + assert self._last.tag == tag,\ + "end tag mismatch (expected %s, got %s)" % ( + self._last.tag, tag) + self._tail = 1 + return self._last + + def comment(self, text): + """Create a comment using the comment_factory. + + *text* is the text of the comment. + """ + return self._handle_single( + self._comment_factory, self.insert_comments, text) + + def pi(self, target, text=None): + """Create a processing instruction using the pi_factory. + + *target* is the target name of the processing instruction. + *text* is the data of the processing instruction, or ''. + """ + return self._handle_single( + self._pi_factory, self.insert_pis, target, text) + + def _handle_single(self, factory, insert, *args): + elem = factory(*args) + if insert: + self._flush() + self._last = elem + if self._elem: + self._elem[-1].append(elem) + self._tail = 1 + return elem + + +# also see ElementTree and TreeBuilder +class XMLParser: + """Element structure builder for XML source data based on the expat parser. 
+ + *target* is an optional target object which defaults to an instance of the + standard TreeBuilder class, *encoding* is an optional encoding string + which if given, overrides the encoding specified in the XML file: + http://www.iana.org/assignments/character-sets + + """ + + def __init__(self, *, target=None, encoding=None): + try: + from xml.parsers import expat + except ImportError: + try: + import pyexpat as expat + except ImportError: + raise ImportError( + "No module named expat; use SimpleXMLTreeBuilder instead" + ) + parser = expat.ParserCreate(encoding, "}") + if target is None: + target = TreeBuilder() + # underscored names are provided for compatibility only + self.parser = self._parser = parser + self.target = self._target = target + self._error = expat.error + self._names = {} # name memo cache + # main callbacks + parser.DefaultHandlerExpand = self._default + if hasattr(target, 'start'): + parser.StartElementHandler = self._start + if hasattr(target, 'end'): + parser.EndElementHandler = self._end + if hasattr(target, 'start_ns'): + parser.StartNamespaceDeclHandler = self._start_ns + if hasattr(target, 'end_ns'): + parser.EndNamespaceDeclHandler = self._end_ns + if hasattr(target, 'data'): + parser.CharacterDataHandler = target.data + # miscellaneous callbacks + if hasattr(target, 'comment'): + parser.CommentHandler = target.comment + if hasattr(target, 'pi'): + parser.ProcessingInstructionHandler = target.pi + # Configure pyexpat: buffering, new-style attribute handling. + parser.buffer_text = 1 + parser.ordered_attributes = 1 + self._doctype = None + self.entity = {} + try: + self.version = "Expat %d.%d.%d" % expat.version_info + except AttributeError: + pass # unknown + + def _setevents(self, events_queue, events_to_report): + # Internal API for XMLPullParser + # events_to_report: a list of events to report during parsing (same as + # the *events* of XMLPullParser's constructor. 
+ # events_queue: a list of actual parsing events that will be populated + # by the underlying parser. + # + parser = self._parser + append = events_queue.append + for event_name in events_to_report: + if event_name == "start": + parser.ordered_attributes = 1 + def handler(tag, attrib_in, event=event_name, append=append, + start=self._start): + append((event, start(tag, attrib_in))) + parser.StartElementHandler = handler + elif event_name == "end": + def handler(tag, event=event_name, append=append, + end=self._end): + append((event, end(tag))) + parser.EndElementHandler = handler + elif event_name == "start-ns": + # TreeBuilder does not implement .start_ns() + if hasattr(self.target, "start_ns"): + def handler(prefix, uri, event=event_name, append=append, + start_ns=self._start_ns): + append((event, start_ns(prefix, uri))) + else: + def handler(prefix, uri, event=event_name, append=append): + append((event, (prefix or '', uri or ''))) + parser.StartNamespaceDeclHandler = handler + elif event_name == "end-ns": + # TreeBuilder does not implement .end_ns() + if hasattr(self.target, "end_ns"): + def handler(prefix, event=event_name, append=append, + end_ns=self._end_ns): + append((event, end_ns(prefix))) + else: + def handler(prefix, event=event_name, append=append): + append((event, None)) + parser.EndNamespaceDeclHandler = handler + elif event_name == 'comment': + def handler(text, event=event_name, append=append, self=self): + append((event, self.target.comment(text))) + parser.CommentHandler = handler + elif event_name == 'pi': + def handler(pi_target, data, event=event_name, append=append, + self=self): + append((event, self.target.pi(pi_target, data))) + parser.ProcessingInstructionHandler = handler + else: + raise ValueError("unknown event %r" % event_name) + + def _raiseerror(self, value): + err = ParseError(value) + err.code = value.code + err.position = value.lineno, value.offset + raise err + + def _fixname(self, key): + # expand qname, and convert name 
string to ascii, if possible + try: + name = self._names[key] + except KeyError: + name = key + if "}" in name: + name = "{" + name + self._names[key] = name + return name + + def _start_ns(self, prefix, uri): + return self.target.start_ns(prefix or '', uri or '') + + def _end_ns(self, prefix): + return self.target.end_ns(prefix or '') + + def _start(self, tag, attr_list): + # Handler for expat's StartElementHandler. Since ordered_attributes + # is set, the attributes are reported as a list of alternating + # attribute name,value. + fixname = self._fixname + tag = fixname(tag) + attrib = {} + if attr_list: + for i in range(0, len(attr_list), 2): + attrib[fixname(attr_list[i])] = attr_list[i+1] + return self.target.start(tag, attrib) + + def _end(self, tag): + return self.target.end(self._fixname(tag)) + + def _default(self, text): + prefix = text[:1] + if prefix == "&": + # deal with undefined entities + try: + data_handler = self.target.data + except AttributeError: + return + try: + data_handler(self.entity[text[1:-1]]) + except KeyError: + from xml.parsers import expat + err = expat.error( + "undefined entity %s: line %d, column %d" % + (text, self.parser.ErrorLineNumber, + self.parser.ErrorColumnNumber) + ) + err.code = 11 # XML_ERROR_UNDEFINED_ENTITY + err.lineno = self.parser.ErrorLineNumber + err.offset = self.parser.ErrorColumnNumber + raise err + elif prefix == "<" and text[:9] == "": + self._doctype = None + return + text = text.strip() + if not text: + return + self._doctype.append(text) + n = len(self._doctype) + if n > 2: + type = self._doctype[1] + if type == "PUBLIC" and n == 4: + name, type, pubid, system = self._doctype + if pubid: + pubid = pubid[1:-1] + elif type == "SYSTEM" and n == 3: + name, type, system = self._doctype + pubid = None + else: + return + if hasattr(self.target, "doctype"): + self.target.doctype(name, pubid, system[1:-1]) + elif hasattr(self, "doctype"): + warnings.warn( + "The doctype() method of XMLParser is ignored. 
" + "Define doctype() method on the TreeBuilder target.", + RuntimeWarning) + + self._doctype = None + + def feed(self, data): + """Feed encoded data to parser.""" + try: + self.parser.Parse(data, False) + except self._error as v: + self._raiseerror(v) + + def close(self): + """Finish feeding data to parser and return element structure.""" + try: + self.parser.Parse(b"", True) # end of data + except self._error as v: + self._raiseerror(v) + try: + close_handler = self.target.close + except AttributeError: + pass + else: + return close_handler() + finally: + # get rid of circular references + del self.parser, self._parser + del self.target, self._target + + +# -------------------------------------------------------------------- +# C14N 2.0 + +def canonicalize(xml_data=None, *, out=None, from_file=None, **options): + """Convert XML to its C14N 2.0 serialised form. + + If *out* is provided, it must be a file or file-like object that receives + the serialised canonical XML output (text, not bytes) through its ``.write()`` + method. To write to a file, open it in text mode with encoding "utf-8". + If *out* is not provided, this function returns the output as text string. + + Either *xml_data* (an XML string) or *from_file* (a file path or + file-like object) must be provided as input. + + The configuration options are the same as for the ``C14NWriterTarget``. + """ + if xml_data is None and from_file is None: + raise ValueError("Either 'xml_data' or 'from_file' must be provided as input") + sio = None + if out is None: + sio = out = io.StringIO() + + parser = XMLParser(target=C14NWriterTarget(out.write, **options)) + + if xml_data is not None: + parser.feed(xml_data) + parser.close() + elif from_file is not None: + parse(from_file, parser=parser) + + return sio.getvalue() if sio is not None else None + + +_looks_like_prefix_name = re.compile(r'^\w+:\w+$', re.UNICODE).match + + +class C14NWriterTarget: + """ + Canonicalization writer target for the XMLParser. 
+ + Serialises parse events to XML C14N 2.0. + + The *write* function is used for writing out the resulting data stream + as text (not bytes). To write to a file, open it in text mode with encoding + "utf-8" and pass its ``.write`` method. + + Configuration options: + + - *with_comments*: set to true to include comments + - *strip_text*: set to true to strip whitespace before and after text content + - *rewrite_prefixes*: set to true to replace namespace prefixes by "n{number}" + - *qname_aware_tags*: a set of qname aware tag names in which prefixes + should be replaced in text content + - *qname_aware_attrs*: a set of qname aware attribute names in which prefixes + should be replaced in text content + - *exclude_attrs*: a set of attribute names that should not be serialised + - *exclude_tags*: a set of tag names that should not be serialised + """ + def __init__(self, write, *, + with_comments=False, strip_text=False, rewrite_prefixes=False, + qname_aware_tags=None, qname_aware_attrs=None, + exclude_attrs=None, exclude_tags=None): + self._write = write + self._data = [] + self._with_comments = with_comments + self._strip_text = strip_text + self._exclude_attrs = set(exclude_attrs) if exclude_attrs else None + self._exclude_tags = set(exclude_tags) if exclude_tags else None + + self._rewrite_prefixes = rewrite_prefixes + if qname_aware_tags: + self._qname_aware_tags = set(qname_aware_tags) + else: + self._qname_aware_tags = None + if qname_aware_attrs: + self._find_qname_aware_attrs = set(qname_aware_attrs).intersection + else: + self._find_qname_aware_attrs = None + + # Stack with globally and newly declared namespaces as (uri, prefix) pairs. + self._declared_ns_stack = [[ + ("http://www.w3.org/XML/1998/namespace", "xml"), + ]] + # Stack with user declared namespace prefixes as (uri, prefix) pairs. 
+ self._ns_stack = [] + if not rewrite_prefixes: + self._ns_stack.append(list(_namespace_map.items())) + self._ns_stack.append([]) + self._prefix_map = {} + self._preserve_space = [False] + self._pending_start = None + self._root_seen = False + self._root_done = False + self._ignored_depth = 0 + + def _iter_namespaces(self, ns_stack, _reversed=reversed): + for namespaces in _reversed(ns_stack): + if namespaces: # almost no element declares new namespaces + yield from namespaces + + def _resolve_prefix_name(self, prefixed_name): + prefix, name = prefixed_name.split(':', 1) + for uri, p in self._iter_namespaces(self._ns_stack): + if p == prefix: + return f'{{{uri}}}{name}' + raise ValueError(f'Prefix {prefix} of QName "{prefixed_name}" is not declared in scope') + + def _qname(self, qname, uri=None): + if uri is None: + uri, tag = qname[1:].rsplit('}', 1) if qname[:1] == '{' else ('', qname) + else: + tag = qname + + prefixes_seen = set() + for u, prefix in self._iter_namespaces(self._declared_ns_stack): + if u == uri and prefix not in prefixes_seen: + return f'{prefix}:{tag}' if prefix else tag, tag, uri + prefixes_seen.add(prefix) + + # Not declared yet => add new declaration. + if self._rewrite_prefixes: + if uri in self._prefix_map: + prefix = self._prefix_map[uri] + else: + prefix = self._prefix_map[uri] = f'n{len(self._prefix_map)}' + self._declared_ns_stack[-1].append((uri, prefix)) + return f'{prefix}:{tag}', tag, uri + + if not uri and '' not in prefixes_seen: + # No default namespace declared => no prefix needed. + return tag, tag, uri + + for u, prefix in self._iter_namespaces(self._ns_stack): + if u == uri: + self._declared_ns_stack[-1].append((uri, prefix)) + return f'{prefix}:{tag}' if prefix else tag, tag, uri + + if not uri: + # As soon as a default namespace is defined, + # anything that has no namespace (and thus, no prefix) goes there. 
+ return tag, tag, uri + + raise ValueError(f'Namespace "{uri}" is not declared in scope') + + def data(self, data): + if not self._ignored_depth: + self._data.append(data) + + def _flush(self, _join_text=''.join): + data = _join_text(self._data) + del self._data[:] + if self._strip_text and not self._preserve_space[-1]: + data = data.strip() + if self._pending_start is not None: + args, self._pending_start = self._pending_start, None + qname_text = data if data and _looks_like_prefix_name(data) else None + self._start(*args, qname_text) + if qname_text is not None: + return + if data and self._root_seen: + self._write(_escape_cdata_c14n(data)) + + def start_ns(self, prefix, uri): + if self._ignored_depth: + return + # we may have to resolve qnames in text content + if self._data: + self._flush() + self._ns_stack[-1].append((uri, prefix)) + + def start(self, tag, attrs): + if self._exclude_tags is not None and ( + self._ignored_depth or tag in self._exclude_tags): + self._ignored_depth += 1 + return + if self._data: + self._flush() + + new_namespaces = [] + self._declared_ns_stack.append(new_namespaces) + + if self._qname_aware_tags is not None and tag in self._qname_aware_tags: + # Need to parse text first to see if it requires a prefix declaration. + self._pending_start = (tag, attrs, new_namespaces) + return + self._start(tag, attrs, new_namespaces) + + def _start(self, tag, attrs, new_namespaces, qname_text=None): + if self._exclude_attrs is not None and attrs: + attrs = {k: v for k, v in attrs.items() if k not in self._exclude_attrs} + + qnames = {tag, *attrs} + resolved_names = {} + + # Resolve prefixes in attribute and tag text. 
+ if qname_text is not None: + qname = resolved_names[qname_text] = self._resolve_prefix_name(qname_text) + qnames.add(qname) + if self._find_qname_aware_attrs is not None and attrs: + qattrs = self._find_qname_aware_attrs(attrs) + if qattrs: + for attr_name in qattrs: + value = attrs[attr_name] + if _looks_like_prefix_name(value): + qname = resolved_names[value] = self._resolve_prefix_name(value) + qnames.add(qname) + else: + qattrs = None + else: + qattrs = None + + # Assign prefixes in lexicographical order of used URIs. + parse_qname = self._qname + parsed_qnames = {n: parse_qname(n) for n in sorted( + qnames, key=lambda n: n.split('}', 1))} + + # Write namespace declarations in prefix order ... + if new_namespaces: + attr_list = [ + ('xmlns:' + prefix if prefix else 'xmlns', uri) + for uri, prefix in new_namespaces + ] + attr_list.sort() + else: + # almost always empty + attr_list = [] + + # ... followed by attributes in URI+name order + if attrs: + for k, v in sorted(attrs.items()): + if qattrs is not None and k in qattrs and v in resolved_names: + v = parsed_qnames[resolved_names[v]][0] + attr_qname, attr_name, uri = parsed_qnames[k] + # No prefix for attributes in default ('') namespace. + attr_list.append((attr_qname if uri else attr_name, v)) + + # Honour xml:space attributes. + space_behaviour = attrs.get('{http://www.w3.org/XML/1998/namespace}space') + self._preserve_space.append( + space_behaviour == 'preserve' if space_behaviour + else self._preserve_space[-1]) + + # Write the tag. + write = self._write + write('<' + parsed_qnames[tag][0]) + if attr_list: + write(''.join([f' {k}="{_escape_attrib_c14n(v)}"' for k, v in attr_list])) + write('>') + + # Write the resolved qname text content. 
+ if qname_text is not None: + write(_escape_cdata_c14n(parsed_qnames[resolved_names[qname_text]][0])) + + self._root_seen = True + self._ns_stack.append([]) + + def end(self, tag): + if self._ignored_depth: + self._ignored_depth -= 1 + return + if self._data: + self._flush() + self._write(f'') + self._preserve_space.pop() + self._root_done = len(self._preserve_space) == 1 + self._declared_ns_stack.pop() + self._ns_stack.pop() + + def comment(self, text): + if not self._with_comments: + return + if self._ignored_depth: + return + if self._root_done: + self._write('\n') + elif self._root_seen and self._data: + self._flush() + self._write(f'') + if not self._root_seen: + self._write('\n') + + def pi(self, target, data): + if self._ignored_depth: + return + if self._root_done: + self._write('\n') + elif self._root_seen and self._data: + self._flush() + self._write( + f'' if data else f'') + if not self._root_seen: + self._write('\n') + + +def _escape_cdata_c14n(text): + # escape character data + try: + # it's worth avoiding do-nothing calls for strings that are + # shorter than 500 character, or so. assume that's, by far, + # the most common case in most applications. 
+ if '&' in text: + text = text.replace('&', '&') + if '<' in text: + text = text.replace('<', '<') + if '>' in text: + text = text.replace('>', '>') + if '\r' in text: + text = text.replace('\r', ' ') + return text + except (TypeError, AttributeError): + _raise_serialization_error(text) + + +def _escape_attrib_c14n(text): + # escape attribute value + try: + if '&' in text: + text = text.replace('&', '&') + if '<' in text: + text = text.replace('<', '<') + if '"' in text: + text = text.replace('"', '"') + if '\t' in text: + text = text.replace('\t', ' ') + if '\n' in text: + text = text.replace('\n', ' ') + if '\r' in text: + text = text.replace('\r', ' ') + return text + except (TypeError, AttributeError): + _raise_serialization_error(text) + + +# -------------------------------------------------------------------- + +# Import the C accelerators +try: + # Element is going to be shadowed by the C implementation. We need to keep + # the Python version of it accessible for some "creative" by external code + # (see tests) + _Element_Py = Element + + # Element, SubElement, ParseError, TreeBuilder, XMLParser, _set_factories + from _elementtree import * + from _elementtree import _set_factories +except ImportError: + pass +else: + _set_factories(Comment, ProcessingInstruction) diff --git a/defaults/lib/x/etree/__init__.py b/defaults/lib/x/etree/__init__.py new file mode 100644 index 0000000..e2ec534 --- /dev/null +++ b/defaults/lib/x/etree/__init__.py @@ -0,0 +1,33 @@ +# $Id: __init__.py 3375 2008-02-13 08:05:08Z fredrik $ +# elementtree package + +# -------------------------------------------------------------------- +# The ElementTree toolkit is +# +# Copyright (c) 1999-2008 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its 
associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# -------------------------------------------------------------------- + +# Licensed to PSF under a Contributor Agreement. +# See https://www.python.org/psf/license for licensing details. diff --git a/defaults/lib/x/etree/cElementTree.py b/defaults/lib/x/etree/cElementTree.py new file mode 100644 index 0000000..368e679 --- /dev/null +++ b/defaults/lib/x/etree/cElementTree.py @@ -0,0 +1,3 @@ +# Deprecated alias for xml.etree.ElementTree + +from xml.etree.ElementTree import * diff --git a/defaults/lib/x/parsers/__init__.py b/defaults/lib/x/parsers/__init__.py new file mode 100644 index 0000000..eb314a3 --- /dev/null +++ b/defaults/lib/x/parsers/__init__.py @@ -0,0 +1,8 @@ +"""Python interfaces to XML parsers. + +This package contains one module: + +expat -- Python wrapper for James Clark's Expat parser, with namespace + support. 
+ +""" diff --git a/defaults/lib/x/parsers/expat.py b/defaults/lib/x/parsers/expat.py new file mode 100644 index 0000000..bcbe9fb --- /dev/null +++ b/defaults/lib/x/parsers/expat.py @@ -0,0 +1,8 @@ +"""Interface to the Expat non-validating XML parser.""" +import sys + +from pyexpat import * + +# provide pyexpat submodules as xml.parsers.expat submodules +sys.modules['xml.parsers.expat.model'] = model +sys.modules['xml.parsers.expat.errors'] = errors diff --git a/defaults/lib/x/sax/__init__.py b/defaults/lib/x/sax/__init__.py new file mode 100644 index 0000000..b657310 --- /dev/null +++ b/defaults/lib/x/sax/__init__.py @@ -0,0 +1,94 @@ +"""Simple API for XML (SAX) implementation for Python. + +This module provides an implementation of the SAX 2 interface; +information about the Java version of the interface can be found at +http://www.megginson.com/SAX/. The Python version of the interface is +documented at <...>. + +This package contains the following modules: + +handler -- Base classes and constants which define the SAX 2 API for + the 'client-side' of SAX for Python. + +saxutils -- Implementation of the convenience classes commonly used to + work with SAX. + +xmlreader -- Base classes and constants which define the SAX 2 API for + the parsers used with SAX for Python. + +expatreader -- Driver that allows use of the Expat parser with SAX. 
+""" + +from .xmlreader import InputSource +from .handler import ContentHandler, ErrorHandler +from ._exceptions import SAXException, SAXNotRecognizedException, \ + SAXParseException, SAXNotSupportedException, \ + SAXReaderNotAvailable + + +def parse(source, handler, errorHandler=ErrorHandler()): + parser = make_parser() + parser.setContentHandler(handler) + parser.setErrorHandler(errorHandler) + parser.parse(source) + +def parseString(string, handler, errorHandler=ErrorHandler()): + import io + if errorHandler is None: + errorHandler = ErrorHandler() + parser = make_parser() + parser.setContentHandler(handler) + parser.setErrorHandler(errorHandler) + + inpsrc = InputSource() + if isinstance(string, str): + inpsrc.setCharacterStream(io.StringIO(string)) + else: + inpsrc.setByteStream(io.BytesIO(string)) + parser.parse(inpsrc) + +# this is the parser list used by the make_parser function if no +# alternatives are given as parameters to the function + +default_parser_list = ["xml.sax.expatreader"] + +# tell modulefinder that importing sax potentially imports expatreader +_false = 0 +if _false: + import xml.sax.expatreader + +import os, sys +if not sys.flags.ignore_environment and "PY_SAX_PARSER" in os.environ: + default_parser_list = os.environ["PY_SAX_PARSER"].split(",") +del os, sys + + +def make_parser(parser_list=()): + """Creates and returns a SAX parser. + + Creates the first parser it is able to instantiate of the ones + given in the iterable created by chaining parser_list and + default_parser_list. 
The iterables must contain the names of Python + modules containing both a SAX parser and a create_parser function.""" + + for parser_name in list(parser_list) + default_parser_list: + try: + return _create_parser(parser_name) + except ImportError: + import sys + if parser_name in sys.modules: + # The parser module was found, but importing it + # failed unexpectedly, pass this exception through + raise + except SAXReaderNotAvailable: + # The parser module detected that it won't work properly, + # so try the next one + pass + + raise SAXReaderNotAvailable("No parsers found", None) + +# --- Internal utility methods used by make_parser + +def _create_parser(parser_name): + drv_module = __import__(parser_name,{},{},['create_parser']) + return drv_module.create_parser() diff --git a/defaults/lib/x/sax/_exceptions.py b/defaults/lib/x/sax/_exceptions.py new file mode 100644 index 0000000..f292dc3 --- /dev/null +++ b/defaults/lib/x/sax/_exceptions.py @@ -0,0 +1,127 @@ +"""Different kinds of SAX Exceptions""" + +# ===== SAXEXCEPTION ===== + +class SAXException(Exception): + """Encapsulate an XML error or warning. This class can contain + basic error or warning information from either the XML parser or + the application: you can subclass it to provide additional + functionality, or to add localization. Note that although you will + receive a SAXException as the argument to the handlers in the + ErrorHandler interface, you are not actually required to raise + the exception; instead, you can simply read the information in + it.""" + + def __init__(self, msg, exception=None): + """Creates an exception. The message is required, but the exception + is optional.""" + self._msg = msg + self._exception = exception + Exception.__init__(self, msg) + + def getMessage(self): + "Return a message for this exception." + return self._msg + + def getException(self): + "Return the embedded exception, or None if there was none." 
+ return self._exception + + def __str__(self): + "Create a string representation of the exception." + return self._msg + + def __getitem__(self, ix): + """Avoids weird error messages if someone does exception[ix] by + mistake, since Exception has __getitem__ defined.""" + raise AttributeError("__getitem__") + + +# ===== SAXPARSEEXCEPTION ===== + +class SAXParseException(SAXException): + """Encapsulate an XML parse error or warning. + + This exception will include information for locating the error in + the original XML document. Note that although the application will + receive a SAXParseException as the argument to the handlers in the + ErrorHandler interface, the application is not actually required + to raise the exception; instead, it can simply read the + information in it and take a different action. + + Since this exception is a subclass of SAXException, it inherits + the ability to wrap another exception.""" + + def __init__(self, msg, exception, locator): + "Creates the exception. The exception parameter is allowed to be None." + SAXException.__init__(self, msg, exception) + self._locator = locator + + # We need to cache this stuff at construction time. + # If this exception is raised, the objects through which we must + # traverse to get this information may be deleted by the time + # it gets caught. + self._systemId = self._locator.getSystemId() + self._colnum = self._locator.getColumnNumber() + self._linenum = self._locator.getLineNumber() + + def getColumnNumber(self): + """The column number of the end of the text where the exception + occurred.""" + return self._colnum + + def getLineNumber(self): + "The line number of the end of the text where the exception occurred." + return self._linenum + + def getPublicId(self): + "Get the public identifier of the entity where the exception occurred." + return self._locator.getPublicId() + + def getSystemId(self): + "Get the system identifier of the entity where the exception occurred." 
+ return self._systemId + + def __str__(self): + "Create a string representation of the exception." + sysid = self.getSystemId() + if sysid is None: + sysid = "" + linenum = self.getLineNumber() + if linenum is None: + linenum = "?" + colnum = self.getColumnNumber() + if colnum is None: + colnum = "?" + return "%s:%s:%s: %s" % (sysid, linenum, colnum, self._msg) + + +# ===== SAXNOTRECOGNIZEDEXCEPTION ===== + +class SAXNotRecognizedException(SAXException): + """Exception class for an unrecognized identifier. + + An XMLReader will raise this exception when it is confronted with an + unrecognized feature or property. SAX applications and extensions may + use this class for similar purposes.""" + + +# ===== SAXNOTSUPPORTEDEXCEPTION ===== + +class SAXNotSupportedException(SAXException): + """Exception class for an unsupported operation. + + An XMLReader will raise this exception when a service it cannot + perform is requested (specifically setting a state or value). SAX + applications and extensions may use this class for similar + purposes.""" + +# ===== SAXNOTSUPPORTEDEXCEPTION ===== + +class SAXReaderNotAvailable(SAXNotSupportedException): + """Exception class for a missing driver. + + An XMLReader module (driver) should raise this exception when it + is first imported, e.g. when a support module cannot be imported. + It also may be raised during parsing, e.g. if executing an external + program is not permitted.""" diff --git a/defaults/lib/x/sax/expatreader.py b/defaults/lib/x/sax/expatreader.py new file mode 100644 index 0000000..b9ad526 --- /dev/null +++ b/defaults/lib/x/sax/expatreader.py @@ -0,0 +1,440 @@ +""" +SAX driver for the pyexpat C module. This driver works with +pyexpat.__version__ == '2.22'. 
+""" + +version = "0.20" + +from xml.sax._exceptions import * +from xml.sax.handler import feature_validation, feature_namespaces +from xml.sax.handler import feature_namespace_prefixes +from xml.sax.handler import feature_external_ges, feature_external_pes +from xml.sax.handler import feature_string_interning +from xml.sax.handler import property_xml_string, property_interning_dict + +try: + from xml.parsers import expat +except ImportError: + raise SAXReaderNotAvailable("expat not supported", None) +else: + if not hasattr(expat, "ParserCreate"): + raise SAXReaderNotAvailable("expat not supported", None) +from xml.sax import xmlreader, saxutils, handler + +AttributesImpl = xmlreader.AttributesImpl +AttributesNSImpl = xmlreader.AttributesNSImpl + +# If we're using a sufficiently recent version of Python, we can use +# weak references to avoid cycles between the parser and content +# handler, otherwise we'll just have to pretend. +try: + import _weakref +except ImportError: + def _mkproxy(o): + return o +else: + import weakref + _mkproxy = weakref.proxy + del weakref, _weakref + +class _ClosedParser: + pass + +# --- ExpatLocator + +class ExpatLocator(xmlreader.Locator): + """Locator for use with the ExpatParser class. + + This uses a weak reference to the parser object to avoid creating + a circular reference between the parser and the content handler. 
+ """ + def __init__(self, parser): + self._ref = _mkproxy(parser) + + def getColumnNumber(self): + parser = self._ref + if parser._parser is None: + return None + return parser._parser.ErrorColumnNumber + + def getLineNumber(self): + parser = self._ref + if parser._parser is None: + return 1 + return parser._parser.ErrorLineNumber + + def getPublicId(self): + parser = self._ref + if parser is None: + return None + return parser._source.getPublicId() + + def getSystemId(self): + parser = self._ref + if parser is None: + return None + return parser._source.getSystemId() + + +# --- ExpatParser + +class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator): + """SAX driver for the pyexpat C module.""" + + def __init__(self, namespaceHandling=0, bufsize=2**16-20): + xmlreader.IncrementalParser.__init__(self, bufsize) + self._source = xmlreader.InputSource() + self._parser = None + self._namespaces = namespaceHandling + self._lex_handler_prop = None + self._parsing = False + self._entity_stack = [] + self._external_ges = 0 + self._interning = None + + # XMLReader methods + + def parse(self, source): + "Parse an XML document from a URL or an InputSource." 
+ source = saxutils.prepare_input_source(source) + + self._source = source + try: + self.reset() + self._cont_handler.setDocumentLocator(ExpatLocator(self)) + xmlreader.IncrementalParser.parse(self, source) + except: + # bpo-30264: Close the source on error to not leak resources: + # xml.sax.parse() doesn't give access to the underlying parser + # to the caller + self._close_source() + raise + + def prepareParser(self, source): + if source.getSystemId() is not None: + self._parser.SetBase(source.getSystemId()) + + # Redefined setContentHandler to allow changing handlers during parsing + + def setContentHandler(self, handler): + xmlreader.IncrementalParser.setContentHandler(self, handler) + if self._parsing: + self._reset_cont_handler() + + def getFeature(self, name): + if name == feature_namespaces: + return self._namespaces + elif name == feature_string_interning: + return self._interning is not None + elif name in (feature_validation, feature_external_pes, + feature_namespace_prefixes): + return 0 + elif name == feature_external_ges: + return self._external_ges + raise SAXNotRecognizedException("Feature '%s' not recognized" % name) + + def setFeature(self, name, state): + if self._parsing: + raise SAXNotSupportedException("Cannot set features while parsing") + + if name == feature_namespaces: + self._namespaces = state + elif name == feature_external_ges: + self._external_ges = state + elif name == feature_string_interning: + if state: + if self._interning is None: + self._interning = {} + else: + self._interning = None + elif name == feature_validation: + if state: + raise SAXNotSupportedException( + "expat does not support validation") + elif name == feature_external_pes: + if state: + raise SAXNotSupportedException( + "expat does not read external parameter entities") + elif name == feature_namespace_prefixes: + if state: + raise SAXNotSupportedException( + "expat does not report namespace prefixes") + else: + raise SAXNotRecognizedException( + "Feature '%s' 
not recognized" % name) + + def getProperty(self, name): + if name == handler.property_lexical_handler: + return self._lex_handler_prop + elif name == property_interning_dict: + return self._interning + elif name == property_xml_string: + if self._parser: + if hasattr(self._parser, "GetInputContext"): + return self._parser.GetInputContext() + else: + raise SAXNotRecognizedException( + "This version of expat does not support getting" + " the XML string") + else: + raise SAXNotSupportedException( + "XML string cannot be returned when not parsing") + raise SAXNotRecognizedException("Property '%s' not recognized" % name) + + def setProperty(self, name, value): + if name == handler.property_lexical_handler: + self._lex_handler_prop = value + if self._parsing: + self._reset_lex_handler_prop() + elif name == property_interning_dict: + self._interning = value + elif name == property_xml_string: + raise SAXNotSupportedException("Property '%s' cannot be set" % + name) + else: + raise SAXNotRecognizedException("Property '%s' not recognized" % + name) + + # IncrementalParser methods + + def feed(self, data, isFinal=False): + if not self._parsing: + self.reset() + self._parsing = True + self._cont_handler.startDocument() + + try: + # The isFinal parameter is internal to the expat reader. + # If it is set to true, expat will check validity of the entire + # document. When feeding chunks, they are not normally final - + # except when invoked from close. + self._parser.Parse(data, isFinal) + except expat.error as e: + exc = SAXParseException(expat.ErrorString(e.code), e, self) + # FIXME: when to invoke error()? 
+ self._err_handler.fatalError(exc) + + def _close_source(self): + source = self._source + try: + file = source.getCharacterStream() + if file is not None: + file.close() + finally: + file = source.getByteStream() + if file is not None: + file.close() + + def close(self): + if (self._entity_stack or self._parser is None or + isinstance(self._parser, _ClosedParser)): + # If we are completing an external entity, do nothing here + return + try: + self.feed(b"", isFinal=True) + self._cont_handler.endDocument() + self._parsing = False + # break cycle created by expat handlers pointing to our methods + self._parser = None + finally: + self._parsing = False + if self._parser is not None: + # Keep ErrorColumnNumber and ErrorLineNumber after closing. + parser = _ClosedParser() + parser.ErrorColumnNumber = self._parser.ErrorColumnNumber + parser.ErrorLineNumber = self._parser.ErrorLineNumber + self._parser = parser + self._close_source() + + def _reset_cont_handler(self): + self._parser.ProcessingInstructionHandler = \ + self._cont_handler.processingInstruction + self._parser.CharacterDataHandler = self._cont_handler.characters + + def _reset_lex_handler_prop(self): + lex = self._lex_handler_prop + parser = self._parser + if lex is None: + parser.CommentHandler = None + parser.StartCdataSectionHandler = None + parser.EndCdataSectionHandler = None + parser.StartDoctypeDeclHandler = None + parser.EndDoctypeDeclHandler = None + else: + parser.CommentHandler = lex.comment + parser.StartCdataSectionHandler = lex.startCDATA + parser.EndCdataSectionHandler = lex.endCDATA + parser.StartDoctypeDeclHandler = self.start_doctype_decl + parser.EndDoctypeDeclHandler = lex.endDTD + + def reset(self): + if self._namespaces: + self._parser = expat.ParserCreate(self._source.getEncoding(), " ", + intern=self._interning) + self._parser.namespace_prefixes = 1 + self._parser.StartElementHandler = self.start_element_ns + self._parser.EndElementHandler = self.end_element_ns + else: + self._parser = 
expat.ParserCreate(self._source.getEncoding(), + intern = self._interning) + self._parser.StartElementHandler = self.start_element + self._parser.EndElementHandler = self.end_element + + self._reset_cont_handler() + self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl + self._parser.NotationDeclHandler = self.notation_decl + self._parser.StartNamespaceDeclHandler = self.start_namespace_decl + self._parser.EndNamespaceDeclHandler = self.end_namespace_decl + + self._decl_handler_prop = None + if self._lex_handler_prop: + self._reset_lex_handler_prop() +# self._parser.DefaultHandler = +# self._parser.DefaultHandlerExpand = +# self._parser.NotStandaloneHandler = + self._parser.ExternalEntityRefHandler = self.external_entity_ref + try: + self._parser.SkippedEntityHandler = self.skipped_entity_handler + except AttributeError: + # This pyexpat does not support SkippedEntity + pass + self._parser.SetParamEntityParsing( + expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE) + + self._parsing = False + self._entity_stack = [] + + # Locator methods + + def getColumnNumber(self): + if self._parser is None: + return None + return self._parser.ErrorColumnNumber + + def getLineNumber(self): + if self._parser is None: + return 1 + return self._parser.ErrorLineNumber + + def getPublicId(self): + return self._source.getPublicId() + + def getSystemId(self): + return self._source.getSystemId() + + # event handlers + def start_element(self, name, attrs): + self._cont_handler.startElement(name, AttributesImpl(attrs)) + + def end_element(self, name): + self._cont_handler.endElement(name) + + def start_element_ns(self, name, attrs): + pair = name.split() + if len(pair) == 1: + # no namespace + pair = (None, name) + elif len(pair) == 3: + pair = pair[0], pair[1] + else: + # default namespace + pair = tuple(pair) + + newattrs = {} + qnames = {} + for (aname, value) in attrs.items(): + parts = aname.split() + length = len(parts) + if length == 1: + # no namespace + qname = aname + 
apair = (None, aname) + elif length == 3: + qname = "%s:%s" % (parts[2], parts[1]) + apair = parts[0], parts[1] + else: + # default namespace + qname = parts[1] + apair = tuple(parts) + + newattrs[apair] = value + qnames[apair] = qname + + self._cont_handler.startElementNS(pair, None, + AttributesNSImpl(newattrs, qnames)) + + def end_element_ns(self, name): + pair = name.split() + if len(pair) == 1: + pair = (None, name) + elif len(pair) == 3: + pair = pair[0], pair[1] + else: + pair = tuple(pair) + + self._cont_handler.endElementNS(pair, None) + + # this is not used (call directly to ContentHandler) + def processing_instruction(self, target, data): + self._cont_handler.processingInstruction(target, data) + + # this is not used (call directly to ContentHandler) + def character_data(self, data): + self._cont_handler.characters(data) + + def start_namespace_decl(self, prefix, uri): + self._cont_handler.startPrefixMapping(prefix, uri) + + def end_namespace_decl(self, prefix): + self._cont_handler.endPrefixMapping(prefix) + + def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): + self._lex_handler_prop.startDTD(name, pubid, sysid) + + def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): + self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name) + + def notation_decl(self, name, base, sysid, pubid): + self._dtd_handler.notationDecl(name, pubid, sysid) + + def external_entity_ref(self, context, base, sysid, pubid): + if not self._external_ges: + return 1 + + source = self._ent_handler.resolveEntity(pubid, sysid) + source = saxutils.prepare_input_source(source, + self._source.getSystemId() or + "") + + self._entity_stack.append((self._parser, self._source)) + self._parser = self._parser.ExternalEntityParserCreate(context) + self._source = source + + try: + xmlreader.IncrementalParser.parse(self, source) + except: + return 0 # FIXME: save error info here? 
+ + (self._parser, self._source) = self._entity_stack[-1] + del self._entity_stack[-1] + return 1 + + def skipped_entity_handler(self, name, is_pe): + if is_pe: + # The SAX spec requires to report skipped PEs with a '%' + name = '%'+name + self._cont_handler.skippedEntity(name) + +# --- + +def create_parser(*args, **kwargs): + return ExpatParser(*args, **kwargs) + +# --- + +if __name__ == "__main__": + import xml.sax.saxutils + p = create_parser() + p.setContentHandler(xml.sax.saxutils.XMLGenerator()) + p.setErrorHandler(xml.sax.ErrorHandler()) + p.parse("http://www.ibiblio.org/xml/examples/shakespeare/hamlet.xml") diff --git a/defaults/lib/x/sax/handler.py b/defaults/lib/x/sax/handler.py new file mode 100644 index 0000000..e8d417e --- /dev/null +++ b/defaults/lib/x/sax/handler.py @@ -0,0 +1,387 @@ +""" +This module contains the core classes of version 2.0 of SAX for Python. +This file provides only default classes with absolutely minimum +functionality, from which drivers and applications can be subclassed. + +Many of these classes are empty and are included only as documentation +of the interfaces. + +$Id$ +""" + +version = '2.0beta' + +#============================================================================ +# +# HANDLER INTERFACES +# +#============================================================================ + +# ===== ERRORHANDLER ===== + +class ErrorHandler: + """Basic interface for SAX error handlers. + + If you create an object that implements this interface, then + register the object with your XMLReader, the parser will call the + methods in your object to report all warnings and errors. There + are three levels of errors available: warnings, (possibly) + recoverable errors, and unrecoverable errors. All methods take a + SAXParseException as the only parameter.""" + + def error(self, exception): + "Handle a recoverable error." + raise exception + + def fatalError(self, exception): + "Handle a non-recoverable error." 
+ raise exception + + def warning(self, exception): + "Handle a warning." + print(exception) + + +# ===== CONTENTHANDLER ===== + +class ContentHandler: + """Interface for receiving logical document content events. + + This is the main callback interface in SAX, and the one most + important to applications. The order of events in this interface + mirrors the order of the information in the document.""" + + def __init__(self): + self._locator = None + + def setDocumentLocator(self, locator): + """Called by the parser to give the application a locator for + locating the origin of document events. + + SAX parsers are strongly encouraged (though not absolutely + required) to supply a locator: if it does so, it must supply + the locator to the application by invoking this method before + invoking any of the other methods in the DocumentHandler + interface. + + The locator allows the application to determine the end + position of any document-related event, even if the parser is + not reporting an error. Typically, the application will use + this information for reporting its own errors (such as + character content that does not match an application's + business rules). The information returned by the locator is + probably not sufficient for use with a search engine. + + Note that the locator will return correct information only + during the invocation of the events in this interface. The + application should not attempt to use it at any other time.""" + self._locator = locator + + def startDocument(self): + """Receive notification of the beginning of a document. + + The SAX parser will invoke this method only once, before any + other methods in this interface or in DTDHandler (except for + setDocumentLocator).""" + + def endDocument(self): + """Receive notification of the end of a document. + + The SAX parser will invoke this method only once, and it will + be the last method invoked during the parse. 
The parser shall + not invoke this method until it has either abandoned parsing + (because of an unrecoverable error) or reached the end of + input.""" + + def startPrefixMapping(self, prefix, uri): + """Begin the scope of a prefix-URI Namespace mapping. + + The information from this event is not necessary for normal + Namespace processing: the SAX XML reader will automatically + replace prefixes for element and attribute names when the + http://xml.org/sax/features/namespaces feature is true (the + default). + + There are cases, however, when applications need to use + prefixes in character data or in attribute values, where they + cannot safely be expanded automatically; the + start/endPrefixMapping event supplies the information to the + application to expand prefixes in those contexts itself, if + necessary. + + Note that start/endPrefixMapping events are not guaranteed to + be properly nested relative to each-other: all + startPrefixMapping events will occur before the corresponding + startElement event, and all endPrefixMapping events will occur + after the corresponding endElement event, but their order is + not guaranteed.""" + + def endPrefixMapping(self, prefix): + """End the scope of a prefix-URI mapping. + + See startPrefixMapping for details. This event will always + occur after the corresponding endElement event, but the order + of endPrefixMapping events is not otherwise guaranteed.""" + + def startElement(self, name, attrs): + """Signals the start of an element in non-namespace mode. + + The name parameter contains the raw XML 1.0 name of the + element type as a string and the attrs parameter holds an + instance of the Attributes class containing the attributes of + the element.""" + + def endElement(self, name): + """Signals the end of an element in non-namespace mode. 
+ + The name parameter contains the name of the element type, just + as with the startElement event.""" + + def startElementNS(self, name, qname, attrs): + """Signals the start of an element in namespace mode. + + The name parameter contains the name of the element type as a + (uri, localname) tuple, the qname parameter the raw XML 1.0 + name used in the source document, and the attrs parameter + holds an instance of the Attributes class containing the + attributes of the element. + + The uri part of the name tuple is None for elements which have + no namespace.""" + + def endElementNS(self, name, qname): + """Signals the end of an element in namespace mode. + + The name parameter contains the name of the element type, just + as with the startElementNS event.""" + + def characters(self, content): + """Receive notification of character data. + + The Parser will call this method to report each chunk of + character data. SAX parsers may return all contiguous + character data in a single chunk, or they may split it into + several chunks; however, all of the characters in any single + event must come from the same external entity so that the + Locator provides useful information.""" + + def ignorableWhitespace(self, whitespace): + """Receive notification of ignorable whitespace in element content. + + Validating Parsers must use this method to report each chunk + of ignorable whitespace (see the W3C XML 1.0 recommendation, + section 2.10): non-validating parsers may also use this method + if they are capable of parsing and using content models. + + SAX parsers may return all contiguous whitespace in a single + chunk, or they may split it into several chunks; however, all + of the characters in any single event must come from the same + external entity, so that the Locator provides useful + information.""" + + def processingInstruction(self, target, data): + """Receive notification of a processing instruction. 
+ + The Parser will invoke this method once for each processing + instruction found: note that processing instructions may occur + before or after the main document element. + + A SAX parser should never report an XML declaration (XML 1.0, + section 2.8) or a text declaration (XML 1.0, section 4.3.1) + using this method.""" + + def skippedEntity(self, name): + """Receive notification of a skipped entity. + + The Parser will invoke this method once for each entity + skipped. Non-validating processors may skip entities if they + have not seen the declarations (because, for example, the + entity was declared in an external DTD subset). All processors + may skip external entities, depending on the values of the + http://xml.org/sax/features/external-general-entities and the + http://xml.org/sax/features/external-parameter-entities + properties.""" + + +# ===== DTDHandler ===== + +class DTDHandler: + """Handle DTD events. + + This interface specifies only those DTD events required for basic + parsing (unparsed entities and attributes).""" + + def notationDecl(self, name, publicId, systemId): + "Handle a notation declaration event." + + def unparsedEntityDecl(self, name, publicId, systemId, ndata): + "Handle an unparsed entity declaration event." + + +# ===== ENTITYRESOLVER ===== + +class EntityResolver: + """Basic interface for resolving entities. If you create an object + implementing this interface, then register the object with your + Parser, the parser will call the method in your object to + resolve all external entities. 
Note that DefaultHandler implements + this interface with the default behaviour.""" + + def resolveEntity(self, publicId, systemId): + """Resolve the system identifier of an entity and return either + the system identifier to read from as a string, or an InputSource + to read from.""" + return systemId + + +#============================================================================ +# +# CORE FEATURES +# +#============================================================================ + +feature_namespaces = "http://xml.org/sax/features/namespaces" +# true: Perform Namespace processing (default). +# false: Optionally do not perform Namespace processing +# (implies namespace-prefixes). +# access: (parsing) read-only; (not parsing) read/write + +feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes" +# true: Report the original prefixed names and attributes used for Namespace +# declarations. +# false: Do not report attributes used for Namespace declarations, and +# optionally do not report original prefixed names (default). +# access: (parsing) read-only; (not parsing) read/write + +feature_string_interning = "http://xml.org/sax/features/string-interning" +# true: All element names, prefixes, attribute names, Namespace URIs, and +# local names are interned using the built-in intern function. +# false: Names are not necessarily interned, although they may be (default). +# access: (parsing) read-only; (not parsing) read/write + +feature_validation = "http://xml.org/sax/features/validation" +# true: Report all validation errors (implies external-general-entities and +# external-parameter-entities). +# false: Do not report validation errors. +# access: (parsing) read-only; (not parsing) read/write + +feature_external_ges = "http://xml.org/sax/features/external-general-entities" +# true: Include all external general (text) entities. +# false: Do not include external general entities. 
+# access: (parsing) read-only; (not parsing) read/write + +feature_external_pes = "http://xml.org/sax/features/external-parameter-entities" +# true: Include all external parameter entities, including the external +# DTD subset. +# false: Do not include any external parameter entities, even the external +# DTD subset. +# access: (parsing) read-only; (not parsing) read/write + +all_features = [feature_namespaces, + feature_namespace_prefixes, + feature_string_interning, + feature_validation, + feature_external_ges, + feature_external_pes] + + +#============================================================================ +# +# CORE PROPERTIES +# +#============================================================================ + +property_lexical_handler = "http://xml.org/sax/properties/lexical-handler" +# data type: xml.sax.sax2lib.LexicalHandler +# description: An optional extension handler for lexical events like comments. +# access: read/write + +property_declaration_handler = "http://xml.org/sax/properties/declaration-handler" +# data type: xml.sax.sax2lib.DeclHandler +# description: An optional extension handler for DTD-related events other +# than notations and unparsed entities. +# access: read/write + +property_dom_node = "http://xml.org/sax/properties/dom-node" +# data type: org.w3c.dom.Node +# description: When parsing, the current DOM node being visited if this is +# a DOM iterator; when not parsing, the root DOM node for +# iteration. +# access: (parsing) read-only; (not parsing) read/write + +property_xml_string = "http://xml.org/sax/properties/xml-string" +# data type: String +# description: The literal string of characters that was the source for +# the current event. +# access: read-only + +property_encoding = "http://www.python.org/sax/properties/encoding" +# data type: String +# description: The name of the encoding to assume for input data. +# access: write: set the encoding, e.g. established by a higher-level +# protocol. 
May change during parsing (e.g. after +# processing a META tag) +# read: return the current encoding (possibly established through +# auto-detection. +# initial value: UTF-8 +# + +property_interning_dict = "http://www.python.org/sax/properties/interning-dict" +# data type: Dictionary +# description: The dictionary used to intern common strings in the document +# access: write: Request that the parser uses a specific dictionary, to +# allow interning across different documents +# read: return the current interning dictionary, or None +# + +all_properties = [property_lexical_handler, + property_dom_node, + property_declaration_handler, + property_xml_string, + property_encoding, + property_interning_dict] + + +class LexicalHandler: + """Optional SAX2 handler for lexical events. + + This handler is used to obtain lexical information about an XML + document, that is, information about how the document was encoded + (as opposed to what it contains, which is reported to the + ContentHandler), such as comments and CDATA marked section + boundaries. + + To set the LexicalHandler of an XMLReader, use the setProperty + method with the property identifier + 'http://xml.org/sax/properties/lexical-handler'.""" + + def comment(self, content): + """Reports a comment anywhere in the document (including the + DTD and outside the document element). + + content is a string that holds the contents of the comment.""" + + def startDTD(self, name, public_id, system_id): + """Report the start of the DTD declarations, if the document + has an associated DTD. + + A startEntity event will be reported before declaration events + from the external DTD subset are reported, and this can be + used to infer from which subset DTD declarations derive. 
+ + name is the name of the document element type, public_id the + public identifier of the DTD (or None if none were supplied) + and system_id the system identfier of the external subset (or + None if none were supplied).""" + + def endDTD(self): + """Signals the end of DTD declarations.""" + + def startCDATA(self): + """Reports the beginning of a CDATA marked section. + + The contents of the CDATA marked section will be reported + through the characters event.""" + + def endCDATA(self): + """Reports the end of a CDATA marked section.""" diff --git a/defaults/lib/x/sax/saxutils.py b/defaults/lib/x/sax/saxutils.py new file mode 100644 index 0000000..c1612ea --- /dev/null +++ b/defaults/lib/x/sax/saxutils.py @@ -0,0 +1,369 @@ +"""\ +A library of useful helper classes to the SAX classes, for the +convenience of application and driver writers. +""" + +import os, urllib.parse, urllib.request +import io +import codecs +from . import handler +from . import xmlreader + +def __dict_replace(s, d): + """Replace substrings of a string using a dictionary.""" + for key, value in d.items(): + s = s.replace(key, value) + return s + +def escape(data, entities={}): + """Escape &, <, and > in a string of data. + + You can escape other strings of data by passing a dictionary as + the optional entities parameter. The keys and values must all be + strings; each key will be replaced with its corresponding value. + """ + + # must do ampersand first + data = data.replace("&", "&") + data = data.replace(">", ">") + data = data.replace("<", "<") + if entities: + data = __dict_replace(data, entities) + return data + +def unescape(data, entities={}): + """Unescape &, <, and > in a string of data. + + You can unescape other strings of data by passing a dictionary as + the optional entities parameter. The keys and values must all be + strings; each key will be replaced with its corresponding value. 
+ """ + data = data.replace("<", "<") + data = data.replace(">", ">") + if entities: + data = __dict_replace(data, entities) + # must do ampersand last + return data.replace("&", "&") + +def quoteattr(data, entities={}): + """Escape and quote an attribute value. + + Escape &, <, and > in a string of data, then quote it for use as + an attribute value. The \" character will be escaped as well, if + necessary. + + You can escape other strings of data by passing a dictionary as + the optional entities parameter. The keys and values must all be + strings; each key will be replaced with its corresponding value. + """ + entities = {**entities, '\n': ' ', '\r': ' ', '\t':' '} + data = escape(data, entities) + if '"' in data: + if "'" in data: + data = '"%s"' % data.replace('"', """) + else: + data = "'%s'" % data + else: + data = '"%s"' % data + return data + + +def _gettextwriter(out, encoding): + if out is None: + import sys + return sys.stdout + + if isinstance(out, io.TextIOBase): + # use a text writer as is + return out + + if isinstance(out, (codecs.StreamWriter, codecs.StreamReaderWriter)): + # use a codecs stream writer as is + return out + + # wrap a binary writer with TextIOWrapper + if isinstance(out, io.RawIOBase): + # Keep the original file open when the TextIOWrapper is + # destroyed + class _wrapper: + __class__ = out.__class__ + def __getattr__(self, name): + return getattr(out, name) + buffer = _wrapper() + buffer.close = lambda: None + else: + # This is to handle passed objects that aren't in the + # IOBase hierarchy, but just have a write method + buffer = io.BufferedIOBase() + buffer.writable = lambda: True + buffer.write = out.write + try: + # TextIOWrapper uses this methods to determine + # if BOM (for UTF-16, etc) should be added + buffer.seekable = out.seekable + buffer.tell = out.tell + except AttributeError: + pass + return io.TextIOWrapper(buffer, encoding=encoding, + errors='xmlcharrefreplace', + newline='\n', + write_through=True) + +class 
XMLGenerator(handler.ContentHandler): + + def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False): + handler.ContentHandler.__init__(self) + out = _gettextwriter(out, encoding) + self._write = out.write + self._flush = out.flush + self._ns_contexts = [{}] # contains uri -> prefix dicts + self._current_context = self._ns_contexts[-1] + self._undeclared_ns_maps = [] + self._encoding = encoding + self._short_empty_elements = short_empty_elements + self._pending_start_element = False + + def _qname(self, name): + """Builds a qualified name from a (ns_url, localname) pair""" + if name[0]: + # Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is + # bound by definition to http://www.w3.org/XML/1998/namespace. It + # does not need to be declared and will not usually be found in + # self._current_context. + if 'http://www.w3.org/XML/1998/namespace' == name[0]: + return 'xml:' + name[1] + # The name is in a non-empty namespace + prefix = self._current_context[name[0]] + if prefix: + # If it is not the default namespace, prepend the prefix + return prefix + ":" + name[1] + # Return the unqualified name + return name[1] + + def _finish_pending_start_element(self,endElement=False): + if self._pending_start_element: + self._write('>') + self._pending_start_element = False + + # ContentHandler methods + + def startDocument(self): + self._write('\n' % + self._encoding) + + def endDocument(self): + self._flush() + + def startPrefixMapping(self, prefix, uri): + self._ns_contexts.append(self._current_context.copy()) + self._current_context[uri] = prefix + self._undeclared_ns_maps.append((prefix, uri)) + + def endPrefixMapping(self, prefix): + self._current_context = self._ns_contexts[-1] + del self._ns_contexts[-1] + + def startElement(self, name, attrs): + self._finish_pending_start_element() + self._write('<' + name) + for (name, value) in attrs.items(): + self._write(' %s=%s' % (name, quoteattr(value))) + if self._short_empty_elements: + 
self._pending_start_element = True + else: + self._write(">") + + def endElement(self, name): + if self._pending_start_element: + self._write('/>') + self._pending_start_element = False + else: + self._write('' % name) + + def startElementNS(self, name, qname, attrs): + self._finish_pending_start_element() + self._write('<' + self._qname(name)) + + for prefix, uri in self._undeclared_ns_maps: + if prefix: + self._write(' xmlns:%s="%s"' % (prefix, uri)) + else: + self._write(' xmlns="%s"' % uri) + self._undeclared_ns_maps = [] + + for (name, value) in attrs.items(): + self._write(' %s=%s' % (self._qname(name), quoteattr(value))) + if self._short_empty_elements: + self._pending_start_element = True + else: + self._write(">") + + def endElementNS(self, name, qname): + if self._pending_start_element: + self._write('/>') + self._pending_start_element = False + else: + self._write('' % self._qname(name)) + + def characters(self, content): + if content: + self._finish_pending_start_element() + if not isinstance(content, str): + content = str(content, self._encoding) + self._write(escape(content)) + + def ignorableWhitespace(self, content): + if content: + self._finish_pending_start_element() + if not isinstance(content, str): + content = str(content, self._encoding) + self._write(content) + + def processingInstruction(self, target, data): + self._finish_pending_start_element() + self._write('' % (target, data)) + + +class XMLFilterBase(xmlreader.XMLReader): + """This class is designed to sit between an XMLReader and the + client application's event handlers. 
By default, it does nothing + but pass requests up to the reader and events on to the handlers + unmodified, but subclasses can override specific methods to modify + the event stream or the configuration requests as they pass + through.""" + + def __init__(self, parent = None): + xmlreader.XMLReader.__init__(self) + self._parent = parent + + # ErrorHandler methods + + def error(self, exception): + self._err_handler.error(exception) + + def fatalError(self, exception): + self._err_handler.fatalError(exception) + + def warning(self, exception): + self._err_handler.warning(exception) + + # ContentHandler methods + + def setDocumentLocator(self, locator): + self._cont_handler.setDocumentLocator(locator) + + def startDocument(self): + self._cont_handler.startDocument() + + def endDocument(self): + self._cont_handler.endDocument() + + def startPrefixMapping(self, prefix, uri): + self._cont_handler.startPrefixMapping(prefix, uri) + + def endPrefixMapping(self, prefix): + self._cont_handler.endPrefixMapping(prefix) + + def startElement(self, name, attrs): + self._cont_handler.startElement(name, attrs) + + def endElement(self, name): + self._cont_handler.endElement(name) + + def startElementNS(self, name, qname, attrs): + self._cont_handler.startElementNS(name, qname, attrs) + + def endElementNS(self, name, qname): + self._cont_handler.endElementNS(name, qname) + + def characters(self, content): + self._cont_handler.characters(content) + + def ignorableWhitespace(self, chars): + self._cont_handler.ignorableWhitespace(chars) + + def processingInstruction(self, target, data): + self._cont_handler.processingInstruction(target, data) + + def skippedEntity(self, name): + self._cont_handler.skippedEntity(name) + + # DTDHandler methods + + def notationDecl(self, name, publicId, systemId): + self._dtd_handler.notationDecl(name, publicId, systemId) + + def unparsedEntityDecl(self, name, publicId, systemId, ndata): + self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, 
ndata) + + # EntityResolver methods + + def resolveEntity(self, publicId, systemId): + return self._ent_handler.resolveEntity(publicId, systemId) + + # XMLReader methods + + def parse(self, source): + self._parent.setContentHandler(self) + self._parent.setErrorHandler(self) + self._parent.setEntityResolver(self) + self._parent.setDTDHandler(self) + self._parent.parse(source) + + def setLocale(self, locale): + self._parent.setLocale(locale) + + def getFeature(self, name): + return self._parent.getFeature(name) + + def setFeature(self, name, state): + self._parent.setFeature(name, state) + + def getProperty(self, name): + return self._parent.getProperty(name) + + def setProperty(self, name, value): + self._parent.setProperty(name, value) + + # XMLFilter methods + + def getParent(self): + return self._parent + + def setParent(self, parent): + self._parent = parent + +# --- Utility functions + +def prepare_input_source(source, base=""): + """This function takes an InputSource and an optional base URL and + returns a fully resolved InputSource object ready for reading.""" + + if isinstance(source, os.PathLike): + source = os.fspath(source) + if isinstance(source, str): + source = xmlreader.InputSource(source) + elif hasattr(source, "read"): + f = source + source = xmlreader.InputSource() + if isinstance(f.read(0), str): + source.setCharacterStream(f) + else: + source.setByteStream(f) + if hasattr(f, "name") and isinstance(f.name, str): + source.setSystemId(f.name) + + if source.getCharacterStream() is None and source.getByteStream() is None: + sysid = source.getSystemId() + basehead = os.path.dirname(os.path.normpath(base)) + sysidfilename = os.path.join(basehead, sysid) + if os.path.isfile(sysidfilename): + source.setSystemId(sysidfilename) + f = open(sysidfilename, "rb") + else: + source.setSystemId(urllib.parse.urljoin(base, sysid)) + f = urllib.request.urlopen(source.getSystemId()) + + source.setByteStream(f) + + return source diff --git 
a/defaults/lib/x/sax/xmlreader.py b/defaults/lib/x/sax/xmlreader.py new file mode 100644 index 0000000..e906121 --- /dev/null +++ b/defaults/lib/x/sax/xmlreader.py @@ -0,0 +1,378 @@ +"""An XML Reader is the SAX 2 name for an XML parser. XML Parsers +should be based on this code. """ + +from . import handler + +from ._exceptions import SAXNotSupportedException, SAXNotRecognizedException + + +# ===== XMLREADER ===== + +class XMLReader: + """Interface for reading an XML document using callbacks. + + XMLReader is the interface that an XML parser's SAX2 driver must + implement. This interface allows an application to set and query + features and properties in the parser, to register event handlers + for document processing, and to initiate a document parse. + + All SAX interfaces are assumed to be synchronous: the parse + methods must not return until parsing is complete, and readers + must wait for an event-handler callback to return before reporting + the next event.""" + + def __init__(self): + self._cont_handler = handler.ContentHandler() + self._dtd_handler = handler.DTDHandler() + self._ent_handler = handler.EntityResolver() + self._err_handler = handler.ErrorHandler() + + def parse(self, source): + "Parse an XML document from a system identifier or an InputSource." + raise NotImplementedError("This method must be implemented!") + + def getContentHandler(self): + "Returns the current ContentHandler." + return self._cont_handler + + def setContentHandler(self, handler): + "Registers a new object to receive document content events." + self._cont_handler = handler + + def getDTDHandler(self): + "Returns the current DTD handler." + return self._dtd_handler + + def setDTDHandler(self, handler): + "Register an object to receive basic DTD-related events." + self._dtd_handler = handler + + def getEntityResolver(self): + "Returns the current EntityResolver." 
+ return self._ent_handler + + def setEntityResolver(self, resolver): + "Register an object to resolve external entities." + self._ent_handler = resolver + + def getErrorHandler(self): + "Returns the current ErrorHandler." + return self._err_handler + + def setErrorHandler(self, handler): + "Register an object to receive error-message events." + self._err_handler = handler + + def setLocale(self, locale): + """Allow an application to set the locale for errors and warnings. + + SAX parsers are not required to provide localization for errors + and warnings; if they cannot support the requested locale, + however, they must raise a SAX exception. Applications may + request a locale change in the middle of a parse.""" + raise SAXNotSupportedException("Locale support not implemented") + + def getFeature(self, name): + "Looks up and returns the state of a SAX2 feature." + raise SAXNotRecognizedException("Feature '%s' not recognized" % name) + + def setFeature(self, name, state): + "Sets the state of a SAX2 feature." + raise SAXNotRecognizedException("Feature '%s' not recognized" % name) + + def getProperty(self, name): + "Looks up and returns the value of a SAX2 property." + raise SAXNotRecognizedException("Property '%s' not recognized" % name) + + def setProperty(self, name, value): + "Sets the value of a SAX2 property." + raise SAXNotRecognizedException("Property '%s' not recognized" % name) + +class IncrementalParser(XMLReader): + """This interface adds three extra methods to the XMLReader + interface that allow XML parsers to support incremental + parsing. Support for this interface is optional, since not all + underlying XML parsers support this functionality. + + When the parser is instantiated it is ready to begin accepting + data from the feed method immediately. After parsing has been + finished with a call to close the reset method must be called to + make the parser ready to accept new data, either from feed or + using the parse method. 
+ + Note that these methods must _not_ be called during parsing, that + is, after parse has been called and before it returns. + + By default, the class also implements the parse method of the XMLReader + interface using the feed, close and reset methods of the + IncrementalParser interface as a convenience to SAX 2.0 driver + writers.""" + + def __init__(self, bufsize=2**16): + self._bufsize = bufsize + XMLReader.__init__(self) + + def parse(self, source): + from . import saxutils + source = saxutils.prepare_input_source(source) + + self.prepareParser(source) + file = source.getCharacterStream() + if file is None: + file = source.getByteStream() + while buffer := file.read(self._bufsize): + self.feed(buffer) + self.close() + + def feed(self, data): + """This method gives the raw XML data in the data parameter to + the parser and makes it parse the data, emitting the + corresponding events. It is allowed for XML constructs to be + split across several calls to feed. + + feed may raise SAXException.""" + raise NotImplementedError("This method must be implemented!") + + def prepareParser(self, source): + """This method is called by the parse implementation to allow + the SAX 2.0 driver to prepare itself for parsing.""" + raise NotImplementedError("prepareParser must be overridden!") + + def close(self): + """This method is called when the entire XML document has been + passed to the parser through the feed method, to notify the + parser that there are no more data. This allows the parser to + do the final checks on the document and empty the internal + data buffer. + + The parser will not be ready to parse another document until + the reset method has been called. + + close may raise SAXException.""" + raise NotImplementedError("This method must be implemented!") + + def reset(self): + """This method is called after close has been called to reset + the parser so that it is ready to parse new documents. 
The + results of calling parse or feed after close without calling + reset are undefined.""" + raise NotImplementedError("This method must be implemented!") + +# ===== LOCATOR ===== + +class Locator: + """Interface for associating a SAX event with a document + location. A locator object will return valid results only during + calls to DocumentHandler methods; at any other time, the + results are unpredictable.""" + + def getColumnNumber(self): + "Return the column number where the current event ends." + return -1 + + def getLineNumber(self): + "Return the line number where the current event ends." + return -1 + + def getPublicId(self): + "Return the public identifier for the current event." + return None + + def getSystemId(self): + "Return the system identifier for the current event." + return None + +# ===== INPUTSOURCE ===== + +class InputSource: + """Encapsulation of the information needed by the XMLReader to + read entities. + + This class may include information about the public identifier, + system identifier, byte stream (possibly with character encoding + information) and/or the character stream of an entity. + + Applications will create objects of this class for use in the + XMLReader.parse method and for returning from + EntityResolver.resolveEntity. + + An InputSource belongs to the application, the XMLReader is not + allowed to modify InputSource objects passed to it from the + application, although it may make copies and modify those.""" + + def __init__(self, system_id = None): + self.__system_id = system_id + self.__public_id = None + self.__encoding = None + self.__bytefile = None + self.__charfile = None + + def setPublicId(self, public_id): + "Sets the public identifier of this InputSource." + self.__public_id = public_id + + def getPublicId(self): + "Returns the public identifier of this InputSource." + return self.__public_id + + def setSystemId(self, system_id): + "Sets the system identifier of this InputSource." 
+ self.__system_id = system_id + + def getSystemId(self): + "Returns the system identifier of this InputSource." + return self.__system_id + + def setEncoding(self, encoding): + """Sets the character encoding of this InputSource. + + The encoding must be a string acceptable for an XML encoding + declaration (see section 4.3.3 of the XML recommendation). + + The encoding attribute of the InputSource is ignored if the + InputSource also contains a character stream.""" + self.__encoding = encoding + + def getEncoding(self): + "Get the character encoding of this InputSource." + return self.__encoding + + def setByteStream(self, bytefile): + """Set the byte stream (a Python file-like object which does + not perform byte-to-character conversion) for this input + source. + + The SAX parser will ignore this if there is also a character + stream specified, but it will use a byte stream in preference + to opening a URI connection itself. + + If the application knows the character encoding of the byte + stream, it should set it with the setEncoding method.""" + self.__bytefile = bytefile + + def getByteStream(self): + """Get the byte stream for this input source. + + The getEncoding method will return the character encoding for + this byte stream, or None if unknown.""" + return self.__bytefile + + def setCharacterStream(self, charfile): + """Set the character stream for this input source. (The stream + must be a Python 2.0 Unicode-wrapped file-like that performs + conversion to Unicode strings.) + + If there is a character stream specified, the SAX parser will + ignore any byte stream and will not attempt to open a URI + connection to the system identifier.""" + self.__charfile = charfile + + def getCharacterStream(self): + "Get the character stream for this input source." + return self.__charfile + +# ===== ATTRIBUTESIMPL ===== + +class AttributesImpl: + + def __init__(self, attrs): + """Non-NS-aware implementation. 
+ + attrs should be of the form {name : value}.""" + self._attrs = attrs + + def getLength(self): + return len(self._attrs) + + def getType(self, name): + return "CDATA" + + def getValue(self, name): + return self._attrs[name] + + def getValueByQName(self, name): + return self._attrs[name] + + def getNameByQName(self, name): + if name not in self._attrs: + raise KeyError(name) + return name + + def getQNameByName(self, name): + if name not in self._attrs: + raise KeyError(name) + return name + + def getNames(self): + return list(self._attrs.keys()) + + def getQNames(self): + return list(self._attrs.keys()) + + def __len__(self): + return len(self._attrs) + + def __getitem__(self, name): + return self._attrs[name] + + def keys(self): + return list(self._attrs.keys()) + + def __contains__(self, name): + return name in self._attrs + + def get(self, name, alternative=None): + return self._attrs.get(name, alternative) + + def copy(self): + return self.__class__(self._attrs) + + def items(self): + return list(self._attrs.items()) + + def values(self): + return list(self._attrs.values()) + +# ===== ATTRIBUTESNSIMPL ===== + +class AttributesNSImpl(AttributesImpl): + + def __init__(self, attrs, qnames): + """NS-aware implementation. + + attrs should be of the form {(ns_uri, lname): value, ...}. 
+ qnames of the form {(ns_uri, lname): qname, ...}.""" + self._attrs = attrs + self._qnames = qnames + + def getValueByQName(self, name): + for (nsname, qname) in self._qnames.items(): + if qname == name: + return self._attrs[nsname] + + raise KeyError(name) + + def getNameByQName(self, name): + for (nsname, qname) in self._qnames.items(): + if qname == name: + return nsname + + raise KeyError(name) + + def getQNameByName(self, name): + return self._qnames[name] + + def getQNames(self): + return list(self._qnames.values()) + + def copy(self): + return self.__class__(self._attrs, self._qnames) + + +def _test(): + XMLReader() + IncrementalParser() + Locator() + +if __name__ == "__main__": + _test() diff --git a/main.py b/main.py index 6a6e9c8..a134046 100644 --- a/main.py +++ b/main.py @@ -5,10 +5,27 @@ import random import socket import ssl +import sys import aiohttp import certifi from aiohttp import ClientSession, TCPConnector import decky_plugin +import importlib.util +from pathlib import Path + +def get_plugin_dir(): + from pathlib import Path + return Path(__file__).parent.resolve() + +def add_plugin_to_path(): + import sys + + plugin_dir = get_plugin_dir() + directories = [["./"], ["dbussy"], ["defaults", "dbussy"], ["defaults", "lib"], ["lib"]] + for dir in directories: + sys.path.append(str(plugin_dir.joinpath(*dir))) +add_plugin_to_path() +import ravel CONFIG_PATH = os.path.join(decky_plugin.DECKY_PLUGIN_SETTINGS_DIR, 'config.json') ANIMATIONS_PATH = os.path.join(decky_plugin.DECKY_PLUGIN_RUNTIME_DIR, 'animations') @@ -23,6 +40,7 @@ VIDEO_TYPES = ['boot', 'suspend', 'throbber'] VIDEO_TARGETS = ['boot', 'suspend', 'suspend'] + REQUEST_RETRIES = 5 ssl_ctx = ssl.create_default_context(cafile=certifi.where()) @@ -34,6 +52,9 @@ unloaded = False + + + async def get_steamdeckrepo(): try: for _ in range(REQUEST_RETRIES): @@ -109,7 +130,8 @@ async def load_config(): 'custom_animations': [], 'custom_sets': [], 'shuffle_exclusions': [], - 'force_ipv4': False + 
'force_ipv4': False, + 'randomize_sleep': '' } async def save_new(): @@ -292,7 +314,8 @@ async def getState(self): 'suspend': config['suspend'], 'throbber': config['throbber'], 'shuffle_exclusions': config['shuffle_exclusions'], - 'force_ipv4': config['force_ipv4'] + 'force_ipv4': config['force_ipv4'], + 'randomize_sleep': config['randomize_sleep'] } } except Exception as e: @@ -443,8 +466,42 @@ async def randomize(self, shuffle): decky_plugin.logger.error('Failed to randomize animations', exc_info=e) raise e + @ravel.signal(name="PrepareForSleep", in_signature="b", args_keyword="args") + def handle_sleep_signal(args): + sleeping = args[0] + if sleeping: + if config['randomize_sleep'] == 'all': + randomize_all() + decky_plugin.logger.info('Deck went to sleep! Randomizing all animations...') + if config['randomize_sleep'] == 'set': + randomize_current_set() + decky_plugin.logger.info('Deck went to sleep! Randomizing animation set...') + + apply_animations() + + else: + decky_plugin.logger.info('wakey') + + + + + async def _main(self): decky_plugin.logger.info('Initializing...') + try: + loop = asyncio.get_event_loop() + self.bus = ravel.system_bus() + self.bus.attach_asyncio(loop) + + self.bus.listen_signal( + path="/org/freedesktop/login1", + fallback=False, + interface="org.freedesktop.login1.Manager", + name="PrepareForSleep", + func=self.handle_sleep_signal + ) + except Exception as e: + decky_plugin.logger.error('Failed to make bus object', exc_info=e) try: os.makedirs(ANIMATIONS_PATH, exist_ok=True) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 6a6ccf4..958b542 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1,148 +1,101 @@ -lockfileVersion: '6.0' +lockfileVersion: '9.0' settings: autoInstallPeers: true excludeLinksFromLockfile: false -dependencies: - decky-frontend-lib: - specifier: ^3.25.0 - version: 3.25.0 - moment: - specifier: ^2.29.4 - version: 2.29.4 - react-icons: - specifier: ^4.4.0 - version: 4.4.0 - -devDependencies: - 
'@rollup/plugin-commonjs': - specifier: ^21.1.0 - version: 21.1.0(rollup@2.79.1) - '@rollup/plugin-json': - specifier: ^4.1.0 - version: 4.1.0(rollup@2.79.1) - '@rollup/plugin-node-resolve': - specifier: ^13.3.0 - version: 13.3.0(rollup@2.79.1) - '@rollup/plugin-replace': - specifier: ^4.0.0 - version: 4.0.0(rollup@2.79.1) - '@rollup/plugin-typescript': - specifier: ^8.5.0 - version: 8.5.0(rollup@2.79.1)(tslib@2.4.0)(typescript@4.8.4) - '@types/react': - specifier: 16.14.0 - version: 16.14.0 - '@types/webpack': - specifier: ^5.28.0 - version: 5.28.0 - rollup: - specifier: ^2.79.1 - version: 2.79.1 - rollup-plugin-import-assets: - specifier: ^1.1.1 - version: 1.1.1(rollup@2.79.1) - shx: - specifier: ^0.3.4 - version: 0.3.4 - tslib: - specifier: ^2.4.0 - version: 2.4.0 - typescript: - specifier: ^4.8.4 - version: 4.8.4 +importers: + + .: + dependencies: + decky-frontend-lib: + specifier: ^3.25.0 + version: 3.25.0 + moment: + specifier: ^2.29.4 + version: 2.30.1 + react-icons: + specifier: ^4.4.0 + version: 4.12.0(react@19.2.0) + devDependencies: + '@rollup/plugin-commonjs': + specifier: ^21.1.0 + version: 21.1.0(rollup@2.79.2) + '@rollup/plugin-json': + specifier: ^4.1.0 + version: 4.1.0(rollup@2.79.2) + '@rollup/plugin-node-resolve': + specifier: ^13.3.0 + version: 13.3.0(rollup@2.79.2) + '@rollup/plugin-replace': + specifier: ^4.0.0 + version: 4.0.0(rollup@2.79.2) + '@rollup/plugin-typescript': + specifier: ^8.5.0 + version: 8.5.0(rollup@2.79.2)(tslib@2.8.1)(typescript@4.9.5) + '@types/react': + specifier: 16.14.0 + version: 16.14.0 + '@types/webpack': + specifier: ^5.28.0 + version: 5.28.5 + rollup: + specifier: ^2.79.1 + version: 2.79.2 + rollup-plugin-import-assets: + specifier: ^1.1.1 + version: 1.1.1(rollup@2.79.2) + shx: + specifier: ^0.3.4 + version: 0.3.4 + tslib: + specifier: ^2.4.0 + version: 2.8.1 + typescript: + specifier: ^4.8.4 + version: 4.9.5 packages: - /@jridgewell/gen-mapping@0.3.2: - resolution: {integrity: 
sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==} - engines: {node: '>=6.0.0'} - dependencies: - '@jridgewell/set-array': 1.1.2 - '@jridgewell/sourcemap-codec': 1.4.14 - '@jridgewell/trace-mapping': 0.3.16 - dev: true + '@jridgewell/gen-mapping@0.3.13': + resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==} - /@jridgewell/resolve-uri@3.1.0: - resolution: {integrity: sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==} + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} engines: {node: '>=6.0.0'} - dev: true - /@jridgewell/set-array@1.1.2: - resolution: {integrity: sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==} - engines: {node: '>=6.0.0'} - dev: true + '@jridgewell/source-map@0.3.11': + resolution: {integrity: sha512-ZMp1V8ZFcPG5dIWnQLr3NSI1MiCU7UETdS/A0G8V/XWHvJv3ZsFqutJn1Y5RPmAPX6F3BiE397OqveU/9NCuIA==} - /@jridgewell/source-map@0.3.2: - resolution: {integrity: sha512-m7O9o2uR8k2ObDysZYzdfhb08VuEml5oWGiosa1VdaPZ/A6QyPkAJuwN0Q1lhULOf6B7MtQmHENS743hWtCrgw==} - dependencies: - '@jridgewell/gen-mapping': 0.3.2 - '@jridgewell/trace-mapping': 0.3.16 - dev: true + '@jridgewell/sourcemap-codec@1.5.5': + resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} - /@jridgewell/sourcemap-codec@1.4.14: - resolution: {integrity: sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==} - dev: true + '@jridgewell/trace-mapping@0.3.31': + resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} - /@jridgewell/trace-mapping@0.3.16: - resolution: {integrity: 
sha512-LCQ+NeThyJ4k1W2d+vIKdxuSt9R3pQSZ4P92m7EakaYuXcVWbHuT5bjNcqLd4Rdgi6xYWYDvBJZJLZSLanjDcA==} - dependencies: - '@jridgewell/resolve-uri': 3.1.0 - '@jridgewell/sourcemap-codec': 1.4.14 - dev: true - - /@rollup/plugin-commonjs@21.1.0(rollup@2.79.1): + '@rollup/plugin-commonjs@21.1.0': resolution: {integrity: sha512-6ZtHx3VHIp2ReNNDxHjuUml6ur+WcQ28N1yHgCQwsbNkQg2suhxGMDQGJOn/KuDxKtd1xuZP5xSTwBA4GQ8hbA==} engines: {node: '>= 8.0.0'} peerDependencies: rollup: ^2.38.3 - dependencies: - '@rollup/pluginutils': 3.1.0(rollup@2.79.1) - commondir: 1.0.1 - estree-walker: 2.0.2 - glob: 7.2.3 - is-reference: 1.2.1 - magic-string: 0.25.9 - resolve: 1.22.1 - rollup: 2.79.1 - dev: true - /@rollup/plugin-json@4.1.0(rollup@2.79.1): + '@rollup/plugin-json@4.1.0': resolution: {integrity: sha512-yfLbTdNS6amI/2OpmbiBoW12vngr5NW2jCJVZSBEz+H5KfUJZ2M7sDjk0U6GOOdCWFVScShte29o9NezJ53TPw==} peerDependencies: rollup: ^1.20.0 || ^2.0.0 - dependencies: - '@rollup/pluginutils': 3.1.0(rollup@2.79.1) - rollup: 2.79.1 - dev: true - /@rollup/plugin-node-resolve@13.3.0(rollup@2.79.1): + '@rollup/plugin-node-resolve@13.3.0': resolution: {integrity: sha512-Lus8rbUo1eEcnS4yTFKLZrVumLPY+YayBdWXgFSHYhTT2iJbMhoaaBL3xl5NCdeRytErGr8tZ0L71BMRmnlwSw==} engines: {node: '>= 10.0.0'} peerDependencies: rollup: ^2.42.0 - dependencies: - '@rollup/pluginutils': 3.1.0(rollup@2.79.1) - '@types/resolve': 1.17.1 - deepmerge: 4.2.2 - is-builtin-module: 3.2.0 - is-module: 1.0.0 - resolve: 1.22.1 - rollup: 2.79.1 - dev: true - /@rollup/plugin-replace@4.0.0(rollup@2.79.1): + '@rollup/plugin-replace@4.0.0': resolution: {integrity: sha512-+rumQFiaNac9y64OHtkHGmdjm7us9bo1PlbgQfdihQtuNxzjpaB064HbRnewUOggLQxVCCyINfStkgmBeQpv1g==} peerDependencies: rollup: ^1.20.0 || ^2.0.0 - dependencies: - '@rollup/pluginutils': 3.1.0(rollup@2.79.1) - magic-string: 0.25.9 - rollup: 2.79.1 - dev: true - /@rollup/plugin-typescript@8.5.0(rollup@2.79.1)(tslib@2.4.0)(typescript@4.8.4): + '@rollup/plugin-typescript@8.5.0': resolution: {integrity: 
sha512-wMv1/scv0m/rXx21wD2IsBbJFba8wGF3ErJIr6IKRfRj49S85Lszbxb4DCo8iILpluTjk2GAAu9CoZt4G3ppgQ==} engines: {node: '>=8.0.0'} peerDependencies: @@ -152,680 +105,415 @@ packages: peerDependenciesMeta: tslib: optional: true - dependencies: - '@rollup/pluginutils': 3.1.0(rollup@2.79.1) - resolve: 1.22.1 - rollup: 2.79.1 - tslib: 2.4.0 - typescript: 4.8.4 - dev: true - /@rollup/pluginutils@3.1.0(rollup@2.79.1): + '@rollup/pluginutils@3.1.0': resolution: {integrity: sha512-GksZ6pr6TpIjHm8h9lSQ8pi8BE9VeubNT0OMJ3B5uZJ8pz73NPiqOtCog/x2/QzM1ENChPKxMDhiQuRHsqc+lg==} engines: {node: '>= 8.0.0'} peerDependencies: rollup: ^1.20.0||^2.0.0 - dependencies: - '@types/estree': 0.0.39 - estree-walker: 1.0.1 - picomatch: 2.3.1 - rollup: 2.79.1 - dev: true - /@types/eslint-scope@3.7.4: - resolution: {integrity: sha512-9K4zoImiZc3HlIp6AVUDE4CWYx22a+lhSZMYNpbjW04+YF0KWj4pJXnEMjdnFTiQibFFmElcsasJXDbdI/EPhA==} - dependencies: - '@types/eslint': 8.4.6 - '@types/estree': 0.0.51 - dev: true + '@types/eslint-scope@3.7.7': + resolution: {integrity: sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==} - /@types/eslint@8.4.6: - resolution: {integrity: sha512-/fqTbjxyFUaYNO7VcW5g+4npmqVACz1bB7RTHYuLj+PRjw9hrCwrUXVQFpChUS0JsyEFvMZ7U/PfmvWgxJhI9g==} - dependencies: - '@types/estree': 0.0.51 - '@types/json-schema': 7.0.11 - dev: true + '@types/eslint@9.6.1': + resolution: {integrity: sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==} - /@types/estree@0.0.39: + '@types/estree@0.0.39': resolution: {integrity: sha512-EYNwp3bU+98cpU4lAWYYL7Zz+2gryWH1qbdDTidVd6hkiR6weksdbMadyXKXNPEkQFhXM+hVO9ZygomHXp+AIw==} - dev: true - /@types/estree@0.0.51: - resolution: {integrity: sha512-CuPgU6f3eT/XgKKPqKd/gLZV1Xmvf1a2R5POBOGQa6uv82xpls89HU5zKeVoyR8XzHd1RGNOlQlvUe3CFkjWNQ==} - dev: true + '@types/estree@1.0.8': + resolution: {integrity: 
sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} - /@types/estree@1.0.0: - resolution: {integrity: sha512-WulqXMDUTYAXCjZnk6JtIHPigp55cVtDgDrO2gHRwhyJto21+1zbVCtOYB2L1F9w4qCQ0rOGWBnBe0FNTiEJIQ==} - dev: true + '@types/json-schema@7.0.15': + resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} - /@types/json-schema@7.0.11: - resolution: {integrity: sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==} - dev: true + '@types/node@24.10.0': + resolution: {integrity: sha512-qzQZRBqkFsYyaSWXuEHc2WR9c0a0CXwiE5FWUvn7ZM+vdy1uZLfCunD38UzhuB7YN/J11ndbDBcTmOdxJo9Q7A==} - /@types/node@18.8.3: - resolution: {integrity: sha512-0os9vz6BpGwxGe9LOhgP/ncvYN5Tx1fNcd2TM3rD/aCGBkysb+ZWpXEocG24h6ZzOi13+VB8HndAQFezsSOw1w==} - dev: true + '@types/prop-types@15.7.15': + resolution: {integrity: sha512-F6bEyamV9jKGAFBEmlQnesRPGOQqS2+Uwi0Em15xenOxHaf2hv6L8YCVn3rPdPJOiJfPiCnLIRyvwVaqMY3MIw==} - /@types/prop-types@15.7.5: - resolution: {integrity: sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==} - dev: true - - /@types/react@16.14.0: + '@types/react@16.14.0': resolution: {integrity: sha512-jJjHo1uOe+NENRIBvF46tJimUvPnmbQ41Ax0pEm7pRvhPg+wuj8VMOHHiMvaGmZRzRrCtm7KnL5OOE/6kHPK8w==} - dependencies: - '@types/prop-types': 15.7.5 - csstype: 3.1.1 - dev: true - /@types/resolve@1.17.1: + '@types/resolve@1.17.1': resolution: {integrity: sha512-yy7HuzQhj0dhGpD8RLXSZWEkLsV9ibvxvi6EiJ3bkqLAO1RGo0WbkWQiwpRlSFymTJRz0d3k5LM3kkx8ArDbLw==} - dependencies: - '@types/node': 18.8.3 - dev: true - /@types/webpack@5.28.0: - resolution: {integrity: sha512-8cP0CzcxUiFuA9xGJkfeVpqmWTk9nx6CWwamRGCj95ph1SmlRRk9KlCZ6avhCbZd4L68LvYT6l1kpdEnQXrF8w==} - dependencies: - '@types/node': 18.8.3 - tapable: 2.2.1 - webpack: 5.74.0 - transitivePeerDependencies: - - '@swc/core' - - esbuild - - uglify-js - - webpack-cli - dev: true + 
'@types/webpack@5.28.5': + resolution: {integrity: sha512-wR87cgvxj3p6D0Crt1r5avwqffqPXUkNlnQ1mjU93G7gCuFjufZR4I6j8cz5g1F1tTYpfOOFvly+cmIQwL9wvw==} - /@webassemblyjs/ast@1.11.1: - resolution: {integrity: sha512-ukBh14qFLjxTQNTXocdyksN5QdM28S1CxHt2rdskFyL+xFV7VremuBLVbmCePj+URalXBENx/9Lm7lnhihtCSw==} - dependencies: - '@webassemblyjs/helper-numbers': 1.11.1 - '@webassemblyjs/helper-wasm-bytecode': 1.11.1 - dev: true + '@webassemblyjs/ast@1.14.1': + resolution: {integrity: sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==} - /@webassemblyjs/floating-point-hex-parser@1.11.1: - resolution: {integrity: sha512-iGRfyc5Bq+NnNuX8b5hwBrRjzf0ocrJPI6GWFodBFzmFnyvrQ83SHKhmilCU/8Jv67i4GJZBMhEzltxzcNagtQ==} - dev: true + '@webassemblyjs/floating-point-hex-parser@1.13.2': + resolution: {integrity: sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==} - /@webassemblyjs/helper-api-error@1.11.1: - resolution: {integrity: sha512-RlhS8CBCXfRUR/cwo2ho9bkheSXG0+NwooXcc3PAILALf2QLdFyj7KGsKRbVc95hZnhnERon4kW/D3SZpp6Tcg==} - dev: true + '@webassemblyjs/helper-api-error@1.13.2': + resolution: {integrity: sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==} - /@webassemblyjs/helper-buffer@1.11.1: - resolution: {integrity: sha512-gwikF65aDNeeXa8JxXa2BAk+REjSyhrNC9ZwdT0f8jc4dQQeDQ7G4m0f2QCLPJiMTTO6wfDmRmj/pW0PsUvIcA==} - dev: true + '@webassemblyjs/helper-buffer@1.14.1': + resolution: {integrity: sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==} - /@webassemblyjs/helper-numbers@1.11.1: - resolution: {integrity: sha512-vDkbxiB8zfnPdNK9Rajcey5C0w+QJugEglN0of+kmO8l7lDb77AnlKYQF7aarZuCrv+l0UvqL+68gSDr3k9LPQ==} - dependencies: - '@webassemblyjs/floating-point-hex-parser': 1.11.1 - '@webassemblyjs/helper-api-error': 1.11.1 - '@xtuc/long': 4.2.2 - dev: true + '@webassemblyjs/helper-numbers@1.13.2': + resolution: {integrity: 
sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==} - /@webassemblyjs/helper-wasm-bytecode@1.11.1: - resolution: {integrity: sha512-PvpoOGiJwXeTrSf/qfudJhwlvDQxFgelbMqtq52WWiXC6Xgg1IREdngmPN3bs4RoO83PnL/nFrxucXj1+BX62Q==} - dev: true + '@webassemblyjs/helper-wasm-bytecode@1.13.2': + resolution: {integrity: sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==} - /@webassemblyjs/helper-wasm-section@1.11.1: - resolution: {integrity: sha512-10P9No29rYX1j7F3EVPX3JvGPQPae+AomuSTPiF9eBQeChHI6iqjMIwR9JmOJXwpnn/oVGDk7I5IlskuMwU/pg==} - dependencies: - '@webassemblyjs/ast': 1.11.1 - '@webassemblyjs/helper-buffer': 1.11.1 - '@webassemblyjs/helper-wasm-bytecode': 1.11.1 - '@webassemblyjs/wasm-gen': 1.11.1 - dev: true + '@webassemblyjs/helper-wasm-section@1.14.1': + resolution: {integrity: sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==} - /@webassemblyjs/ieee754@1.11.1: - resolution: {integrity: sha512-hJ87QIPtAMKbFq6CGTkZYJivEwZDbQUgYd3qKSadTNOhVY7p+gfP6Sr0lLRVTaG1JjFj+r3YchoqRYxNH3M0GQ==} - dependencies: - '@xtuc/ieee754': 1.2.0 - dev: true + '@webassemblyjs/ieee754@1.13.2': + resolution: {integrity: sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==} - /@webassemblyjs/leb128@1.11.1: - resolution: {integrity: sha512-BJ2P0hNZ0u+Th1YZXJpzW6miwqQUGcIHT1G/sf72gLVD9DZ5AdYTqPNbHZh6K1M5VmKvFXwGSWZADz+qBWxeRw==} - dependencies: - '@xtuc/long': 4.2.2 - dev: true - - /@webassemblyjs/utf8@1.11.1: - resolution: {integrity: sha512-9kqcxAEdMhiwQkHpkNiorZzqpGrodQQ2IGrHHxCy+Ozng0ofyMA0lTqiLkVs1uzTRejX+/O0EOT7KxqVPuXosQ==} - dev: true - - /@webassemblyjs/wasm-edit@1.11.1: - resolution: {integrity: sha512-g+RsupUC1aTHfR8CDgnsVRVZFJqdkFHpsHMfJuWQzWU3tvnLC07UqHICfP+4XyL2tnr1amvl1Sdp06TnYCmVkA==} - dependencies: - '@webassemblyjs/ast': 1.11.1 - '@webassemblyjs/helper-buffer': 1.11.1 - 
'@webassemblyjs/helper-wasm-bytecode': 1.11.1 - '@webassemblyjs/helper-wasm-section': 1.11.1 - '@webassemblyjs/wasm-gen': 1.11.1 - '@webassemblyjs/wasm-opt': 1.11.1 - '@webassemblyjs/wasm-parser': 1.11.1 - '@webassemblyjs/wast-printer': 1.11.1 - dev: true - - /@webassemblyjs/wasm-gen@1.11.1: - resolution: {integrity: sha512-F7QqKXwwNlMmsulj6+O7r4mmtAlCWfO/0HdgOxSklZfQcDu0TpLiD1mRt/zF25Bk59FIjEuGAIyn5ei4yMfLhA==} - dependencies: - '@webassemblyjs/ast': 1.11.1 - '@webassemblyjs/helper-wasm-bytecode': 1.11.1 - '@webassemblyjs/ieee754': 1.11.1 - '@webassemblyjs/leb128': 1.11.1 - '@webassemblyjs/utf8': 1.11.1 - dev: true - - /@webassemblyjs/wasm-opt@1.11.1: - resolution: {integrity: sha512-VqnkNqnZlU5EB64pp1l7hdm3hmQw7Vgqa0KF/KCNO9sIpI6Fk6brDEiX+iCOYrvMuBWDws0NkTOxYEb85XQHHw==} - dependencies: - '@webassemblyjs/ast': 1.11.1 - '@webassemblyjs/helper-buffer': 1.11.1 - '@webassemblyjs/wasm-gen': 1.11.1 - '@webassemblyjs/wasm-parser': 1.11.1 - dev: true - - /@webassemblyjs/wasm-parser@1.11.1: - resolution: {integrity: sha512-rrBujw+dJu32gYB7/Lup6UhdkPx9S9SnobZzRVL7VcBH9Bt9bCBLEuX/YXOOtBsOZ4NQrRykKhffRWHvigQvOA==} - dependencies: - '@webassemblyjs/ast': 1.11.1 - '@webassemblyjs/helper-api-error': 1.11.1 - '@webassemblyjs/helper-wasm-bytecode': 1.11.1 - '@webassemblyjs/ieee754': 1.11.1 - '@webassemblyjs/leb128': 1.11.1 - '@webassemblyjs/utf8': 1.11.1 - dev: true - - /@webassemblyjs/wast-printer@1.11.1: - resolution: {integrity: sha512-IQboUWM4eKzWW+N/jij2sRatKMh99QEelo3Eb2q0qXkvPRISAj8Qxtmw5itwqK+TTkBuUIE45AxYPToqPtL5gg==} - dependencies: - '@webassemblyjs/ast': 1.11.1 - '@xtuc/long': 4.2.2 - dev: true + '@webassemblyjs/leb128@1.13.2': + resolution: {integrity: sha512-Lde1oNoIdzVzdkNEAWZ1dZ5orIbff80YPdHx20mrHwHrVNNTjNr8E3xz9BdpcGqRQbAEa+fkrCb+fRFTl/6sQw==} + + '@webassemblyjs/utf8@1.13.2': + resolution: {integrity: sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ==} + + '@webassemblyjs/wasm-edit@1.14.1': + resolution: {integrity: 
sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ==} + + '@webassemblyjs/wasm-gen@1.14.1': + resolution: {integrity: sha512-AmomSIjP8ZbfGQhumkNvgC33AY7qtMCXnN6bL2u2Js4gVCg8fp735aEiMSBbDR7UQIj90n4wKAFUSEd0QN2Ukg==} + + '@webassemblyjs/wasm-opt@1.14.1': + resolution: {integrity: sha512-PTcKLUNvBqnY2U6E5bdOQcSM+oVP/PmrDY9NzowJjislEjwP/C4an2303MCVS2Mg9d3AJpIGdUFIQQWbPds0Sw==} + + '@webassemblyjs/wasm-parser@1.14.1': + resolution: {integrity: sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==} - /@xtuc/ieee754@1.2.0: + '@webassemblyjs/wast-printer@1.14.1': + resolution: {integrity: sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw==} + + '@xtuc/ieee754@1.2.0': resolution: {integrity: sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==} - dev: true - /@xtuc/long@4.2.2: + '@xtuc/long@4.2.2': resolution: {integrity: sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==} - dev: true - /acorn-import-assertions@1.8.0(acorn@8.8.0): - resolution: {integrity: sha512-m7VZ3jwz4eK6A4Vtt8Ew1/mNbP24u0FhdyfA7fSvnJR6LMdfOYnmuIrrJAgrYfYJ10F/otaHTtrtrtmHdMNzEw==} + acorn-import-phases@1.0.4: + resolution: {integrity: sha512-wKmbr/DDiIXzEOiWrTTUcDm24kQ2vGfZQvM2fwg2vXqR5uW6aapr7ObPtj1th32b9u90/Pf4AItvdTh42fBmVQ==} + engines: {node: '>=10.13.0'} peerDependencies: - acorn: ^8 - dependencies: - acorn: 8.8.0 - dev: true + acorn: ^8.14.0 - /acorn@8.8.0: - resolution: {integrity: sha512-QOxyigPVrpZ2GXT+PFyZTl6TtOFc5egxHIP9IlQ+RbupQuX4RkT/Bee4/kQuC02Xkzg84JcT7oLYtDIQxp+v7w==} + acorn@8.15.0: + resolution: {integrity: sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==} engines: {node: '>=0.4.0'} hasBin: true - dev: true - /ajv-keywords@3.5.2(ajv@6.12.6): - resolution: {integrity: 
sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==} + ajv-formats@2.1.1: + resolution: {integrity: sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==} peerDependencies: - ajv: ^6.9.1 - dependencies: - ajv: 6.12.6 - dev: true + ajv: ^8.0.0 + peerDependenciesMeta: + ajv: + optional: true - /ajv@6.12.6: - resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} - dependencies: - fast-deep-equal: 3.1.3 - fast-json-stable-stringify: 2.1.0 - json-schema-traverse: 0.4.1 - uri-js: 4.4.1 - dev: true + ajv-keywords@5.1.0: + resolution: {integrity: sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==} + peerDependencies: + ajv: ^8.8.2 + + ajv@8.17.1: + resolution: {integrity: sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==} - /balanced-match@1.0.2: + balanced-match@1.0.2: resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} - dev: true - /brace-expansion@1.1.11: - resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} - dependencies: - balanced-match: 1.0.2 - concat-map: 0.0.1 - dev: true + baseline-browser-mapping@2.8.25: + resolution: {integrity: sha512-2NovHVesVF5TXefsGX1yzx1xgr7+m9JQenvz6FQY3qd+YXkKkYiv+vTCc7OriP9mcDZpTC5mAOYN4ocd29+erA==} + hasBin: true + + brace-expansion@1.1.12: + resolution: {integrity: sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==} - /browserslist@4.21.4: - resolution: {integrity: sha512-CBHJJdDmgjl3daYjN5Cp5kbTf1mUhZoS+beLklHIvkOWscs83YAhLlF3Wsh/lciQYAcbBJgTOD44VtG31ZM4Hw==} + browserslist@4.27.0: + resolution: {integrity: sha512-AXVQwdhot1eqLihwasPElhX2tAZiBjWdJ9i/Zcj2S6QYIjkx62OKSfnobkriB81C3l4w0rVy3Nt4jaTBltYEpw==} engines: 
{node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} hasBin: true - dependencies: - caniuse-lite: 1.0.30001418 - electron-to-chromium: 1.4.276 - node-releases: 2.0.6 - update-browserslist-db: 1.0.10(browserslist@4.21.4) - dev: true - /buffer-from@1.1.2: + buffer-from@1.1.2: resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} - dev: true - /builtin-modules@3.3.0: + builtin-modules@3.3.0: resolution: {integrity: sha512-zhaCDicdLuWN5UbN5IMnFqNMhNfo919sH85y2/ea+5Yg9TsTkeZxpL+JLbp6cgYFS4sRLp3YV4S6yDuqVWHYOw==} engines: {node: '>=6'} - dev: true - /caniuse-lite@1.0.30001418: - resolution: {integrity: sha512-oIs7+JL3K9JRQ3jPZjlH6qyYDp+nBTCais7hjh0s+fuBwufc7uZ7hPYMXrDOJhV360KGMTcczMRObk0/iMqZRg==} - dev: true + caniuse-lite@1.0.30001753: + resolution: {integrity: sha512-Bj5H35MD/ebaOV4iDLqPEtiliTN29qkGtEHCwawWn4cYm+bPJM2NsaP30vtZcnERClMzp52J4+aw2UNbK4o+zw==} - /chrome-trace-event@1.0.3: - resolution: {integrity: sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==} + chrome-trace-event@1.0.4: + resolution: {integrity: sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==} engines: {node: '>=6.0'} - dev: true - /commander@2.20.3: + commander@2.20.3: resolution: {integrity: sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==} - dev: true - /commondir@1.0.1: + commondir@1.0.1: resolution: {integrity: sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==} - dev: true - /concat-map@0.0.1: + concat-map@0.0.1: resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} - dev: true - /csstype@3.1.1: - resolution: {integrity: sha512-DJR/VvkAvSZW9bTouZue2sSxDwdTN92uHjqeKVm+0dAqdfNykRzQ95tay8aXMBAAPpUiq4Qcug2L7neoRh2Egw==} - dev: true + csstype@3.1.3: + resolution: {integrity: 
sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==} - /decky-frontend-lib@3.25.0: + decky-frontend-lib@3.25.0: resolution: {integrity: sha512-2lBoHS2AIRmuluq/bGdHBz+uyToQE7k3K/vDq1MQbDZ4eC+8CGDuh2T8yZOj3D0yjGP2MdikNNAWPA9Z5l2qDg==} - dev: false - /deepmerge@4.2.2: - resolution: {integrity: sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg==} + deepmerge@4.3.1: + resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==} engines: {node: '>=0.10.0'} - dev: true - /electron-to-chromium@1.4.276: - resolution: {integrity: sha512-EpuHPqu8YhonqLBXHoU6hDJCD98FCe6KDoet3/gY1qsQ6usjJoHqBH2YIVs8FXaAtHwVL8Uqa/fsYao/vq9VWQ==} - dev: true + electron-to-chromium@1.5.245: + resolution: {integrity: sha512-rdmGfW47ZhL/oWEJAY4qxRtdly2B98ooTJ0pdEI4jhVLZ6tNf8fPtov2wS1IRKwFJT92le3x4Knxiwzl7cPPpQ==} - /enhanced-resolve@5.10.0: - resolution: {integrity: sha512-T0yTFjdpldGY8PmuXXR0PyQ1ufZpEGiHVrp7zHKB7jdR4qlmZHhONVM5AQOAWXuF/w3dnHbEQVrNptJgt7F+cQ==} + enhanced-resolve@5.18.3: + resolution: {integrity: sha512-d4lC8xfavMeBjzGr2vECC3fsGXziXZQyJxD868h2M/mBI3PwAuODxAkLkq5HYuvrPYcUtiLzsTo8U3PgX3Ocww==} engines: {node: '>=10.13.0'} - dependencies: - graceful-fs: 4.2.10 - tapable: 2.2.1 - dev: true - /es-module-lexer@0.9.3: - resolution: {integrity: sha512-1HQ2M2sPtxwnvOvT1ZClHyQDiggdNjURWpY2we6aMKCQiUVxTmVs2UYPLIrD84sS+kMdUwfBSylbJPwNnBrnHQ==} - dev: true + es-module-lexer@1.7.0: + resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} - /escalade@3.1.1: - resolution: {integrity: sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==} + escalade@3.2.0: + resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==} engines: {node: '>=6'} - dev: true - /eslint-scope@5.1.1: + 
eslint-scope@5.1.1: resolution: {integrity: sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==} engines: {node: '>=8.0.0'} - dependencies: - esrecurse: 4.3.0 - estraverse: 4.3.0 - dev: true - /esrecurse@4.3.0: + esrecurse@4.3.0: resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} engines: {node: '>=4.0'} - dependencies: - estraverse: 5.3.0 - dev: true - /estraverse@4.3.0: + estraverse@4.3.0: resolution: {integrity: sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==} engines: {node: '>=4.0'} - dev: true - /estraverse@5.3.0: + estraverse@5.3.0: resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} engines: {node: '>=4.0'} - dev: true - /estree-walker@0.6.1: + estree-walker@0.6.1: resolution: {integrity: sha512-SqmZANLWS0mnatqbSfRP5g8OXZC12Fgg1IwNtLsyHDzJizORW4khDfjPqJZsemPWBB2uqykUah5YpQ6epsqC/w==} - dev: true - /estree-walker@1.0.1: + estree-walker@1.0.1: resolution: {integrity: sha512-1fMXF3YP4pZZVozF8j/ZLfvnR8NSIljt56UhbZ5PeeDmmGHpgpdwQt7ITlGvYaQukCvuBRMLEiKiYC+oeIg4cg==} - dev: true - /estree-walker@2.0.2: + estree-walker@2.0.2: resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} - dev: true - /events@3.3.0: + events@3.3.0: resolution: {integrity: sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==} engines: {node: '>=0.8.x'} - dev: true - /fast-deep-equal@3.1.3: + fast-deep-equal@3.1.3: resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} - dev: true - /fast-json-stable-stringify@2.1.0: - resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} - dev: true + fast-uri@3.1.0: + resolution: {integrity: 
sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==} - /fs.realpath@1.0.0: + fs.realpath@1.0.0: resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} - dev: true - /fsevents@2.3.2: - resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==} + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} os: [darwin] - requiresBuild: true - dev: true - optional: true - /function-bind@1.1.1: - resolution: {integrity: sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==} - dev: true + function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} - /glob-to-regexp@0.4.1: + glob-to-regexp@0.4.1: resolution: {integrity: sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==} - dev: true - /glob@7.2.3: + glob@7.2.3: resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} - dependencies: - fs.realpath: 1.0.0 - inflight: 1.0.6 - inherits: 2.0.4 - minimatch: 3.1.2 - once: 1.4.0 - path-is-absolute: 1.0.1 - dev: true + deprecated: Glob versions prior to v9 are no longer supported - /graceful-fs@4.2.10: - resolution: {integrity: sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==} - dev: true + graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} - /has-flag@4.0.0: + has-flag@4.0.0: resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} engines: {node: '>=8'} - 
dev: true - /has@1.0.3: - resolution: {integrity: sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==} - engines: {node: '>= 0.4.0'} - dependencies: - function-bind: 1.1.1 - dev: true + hasown@2.0.2: + resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} + engines: {node: '>= 0.4'} - /inflight@1.0.6: + inflight@1.0.6: resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} - dependencies: - once: 1.4.0 - wrappy: 1.0.2 - dev: true + deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. - /inherits@2.0.4: + inherits@2.0.4: resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} - dev: true - /interpret@1.4.0: + interpret@1.4.0: resolution: {integrity: sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==} engines: {node: '>= 0.10'} - dev: true - /is-builtin-module@3.2.0: - resolution: {integrity: sha512-phDA4oSGt7vl1n5tJvTWooWWAsXLY+2xCnxNqvKhGEzujg+A43wPlPOyDg3C8XQHN+6k/JTQWJ/j0dQh/qr+Hw==} + is-builtin-module@3.2.1: + resolution: {integrity: sha512-BSLE3HnV2syZ0FK0iMA/yUGplUeMmNz4AW5fnTunbCIqZi4vG3WjJT9FHMy5D69xmAYBHXQhJdALdpwVxV501A==} engines: {node: '>=6'} - dependencies: - builtin-modules: 3.3.0 - dev: true - /is-core-module@2.10.0: - resolution: {integrity: sha512-Erxj2n/LDAZ7H8WNJXd9tw38GYM3dv8rk8Zcs+jJuxYTW7sozH+SS8NtrSjVL1/vpLvWi1hxy96IzjJ3EHTJJg==} - dependencies: - has: 1.0.3 - dev: true + is-core-module@2.16.1: + resolution: {integrity: sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==} + engines: {node: '>= 0.4'} - /is-module@1.0.0: + is-module@1.0.0: resolution: 
{integrity: sha512-51ypPSPCoTEIN9dy5Oy+h4pShgJmPCygKfyRCISBI+JoWT/2oJvK8QPxmwv7b/p239jXrm9M1mlQbyKJ5A152g==} - dev: true - /is-reference@1.2.1: + is-reference@1.2.1: resolution: {integrity: sha512-U82MsXXiFIrjCK4otLT+o2NA2Cd2g5MLoOVXUZjIOhLurrRxpEXzI8O0KZHr3IjLvlAH1kTPYSuqer5T9ZVBKQ==} - dependencies: - '@types/estree': 1.0.0 - dev: true - /jest-worker@27.5.1: + jest-worker@27.5.1: resolution: {integrity: sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==} engines: {node: '>= 10.13.0'} - dependencies: - '@types/node': 18.8.3 - merge-stream: 2.0.0 - supports-color: 8.1.1 - dev: true - /json-parse-even-better-errors@2.3.1: + json-parse-even-better-errors@2.3.1: resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} - dev: true - /json-schema-traverse@0.4.1: - resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} - dev: true + json-schema-traverse@1.0.0: + resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} - /loader-runner@4.3.0: - resolution: {integrity: sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==} + loader-runner@4.3.1: + resolution: {integrity: sha512-IWqP2SCPhyVFTBtRcgMHdzlf9ul25NwaFx4wCEH/KjAXuuHY4yNjvPXsBokp8jCB936PyWRaPKUNh8NvylLp2Q==} engines: {node: '>=6.11.5'} - dev: true - /magic-string@0.25.9: + magic-string@0.25.9: resolution: {integrity: sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==} - dependencies: - sourcemap-codec: 1.4.8 - dev: true - /merge-stream@2.0.0: + merge-stream@2.0.0: resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==} - dev: true - /mime-db@1.52.0: + mime-db@1.52.0: resolution: {integrity: 
sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} engines: {node: '>= 0.6'} - dev: true - /mime-types@2.1.35: + mime-types@2.1.35: resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} engines: {node: '>= 0.6'} - dependencies: - mime-db: 1.52.0 - dev: true - /minimatch@3.1.2: + minimatch@3.1.2: resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} - dependencies: - brace-expansion: 1.1.11 - dev: true - /minimist@1.2.6: - resolution: {integrity: sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==} - dev: true + minimist@1.2.8: + resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} - /moment@2.29.4: - resolution: {integrity: sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w==} - dev: false + moment@2.30.1: + resolution: {integrity: sha512-uEmtNhbDOrWPFS+hdjFCBfy9f2YoyzRpwcl+DqpC6taX21FzsTLQVbMV/W7PzNSX6x/bhC1zA3c2UQ5NzH6how==} - /neo-async@2.6.2: + neo-async@2.6.2: resolution: {integrity: sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==} - dev: true - /node-releases@2.0.6: - resolution: {integrity: sha512-PiVXnNuFm5+iYkLBNeq5211hvO38y63T0i2KKh2KnUs3RpzJ+JtODFjkD8yjLwnDkTYF1eKXheUwdssR+NRZdg==} - dev: true + node-releases@2.0.27: + resolution: {integrity: sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==} - /once@1.4.0: + once@1.4.0: resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} - dependencies: - wrappy: 1.0.2 - dev: true - /path-is-absolute@1.0.1: + path-is-absolute@1.0.1: resolution: {integrity: 
sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} engines: {node: '>=0.10.0'} - dev: true - /path-parse@1.0.7: + path-parse@1.0.7: resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} - dev: true - /picocolors@1.0.0: - resolution: {integrity: sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==} - dev: true + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} - /picomatch@2.3.1: + picomatch@2.3.1: resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} engines: {node: '>=8.6'} - dev: true - - /punycode@2.1.1: - resolution: {integrity: sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==} - engines: {node: '>=6'} - dev: true - /randombytes@2.1.0: + randombytes@2.1.0: resolution: {integrity: sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==} - dependencies: - safe-buffer: 5.2.1 - dev: true - /react-icons@4.4.0: - resolution: {integrity: sha512-fSbvHeVYo/B5/L4VhB7sBA1i2tS8MkT0Hb9t2H1AVPkwGfVHLJCqyr2Py9dKMxsyM63Eng1GkdZfbWj+Fmv8Rg==} + react-icons@4.12.0: + resolution: {integrity: sha512-IBaDuHiShdZqmfc/TwHu6+d6k2ltNCf3AszxNmjJc1KUfXdEeRJOKyNvLmAHaarhzGmTSVygNdyu8/opXv2gaw==} peerDependencies: react: '*' - peerDependenciesMeta: - react: - optional: true - dev: false - /rechoir@0.6.2: + react@19.2.0: + resolution: {integrity: sha512-tmbWg6W31tQLeB5cdIBOicJDJRR2KzXsV7uSK9iNfLWQ5bIZfxuPEHp7M8wiHyHnn0DD1i7w3Zmin0FtkrwoCQ==} + engines: {node: '>=0.10.0'} + + rechoir@0.6.2: resolution: {integrity: sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==} engines: {node: '>= 0.10'} - dependencies: - resolve: 1.22.1 - dev: true - 
/resolve@1.22.1: - resolution: {integrity: sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==} + require-from-string@2.0.2: + resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==} + engines: {node: '>=0.10.0'} + + resolve@1.22.11: + resolution: {integrity: sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==} + engines: {node: '>= 0.4'} hasBin: true - dependencies: - is-core-module: 2.10.0 - path-parse: 1.0.7 - supports-preserve-symlinks-flag: 1.0.0 - dev: true - /rollup-plugin-import-assets@1.1.1(rollup@2.79.1): + rollup-plugin-import-assets@1.1.1: resolution: {integrity: sha512-u5zJwOjguTf2N+wETq2weNKGvNkuVc1UX/fPgg215p5xPvGOaI6/BTc024E9brvFjSQTfIYqgvwogQdipknu1g==} peerDependencies: rollup: '>=1.9.0' - dependencies: - rollup: 2.79.1 - rollup-pluginutils: 2.8.2 - url-join: 4.0.1 - dev: true - /rollup-pluginutils@2.8.2: + rollup-pluginutils@2.8.2: resolution: {integrity: sha512-EEp9NhnUkwY8aif6bxgovPHMoMoNr2FulJziTndpt5H9RdwC47GSGuII9XxpSdzVGM0GWrNPHV6ie1LTNJPaLQ==} - dependencies: - estree-walker: 0.6.1 - dev: true - /rollup@2.79.1: - resolution: {integrity: sha512-uKxbd0IhMZOhjAiD5oAFp7BqvkA4Dv47qpOCtaNvng4HBwdbWtdOh8f5nZNuk2rp51PMGk3bzfWu5oayNEuYnw==} + rollup@2.79.2: + resolution: {integrity: sha512-fS6iqSPZDs3dr/y7Od6y5nha8dW1YnbgtsyotCVvoFGKbERG++CVRFv1meyGDE1SNItQA8BrnCw7ScdAhRJ3XQ==} engines: {node: '>=10.0.0'} hasBin: true - optionalDependencies: - fsevents: 2.3.2 - dev: true - /safe-buffer@5.2.1: + safe-buffer@5.2.1: resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} - dev: true - /schema-utils@3.1.1: - resolution: {integrity: sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw==} + schema-utils@4.3.3: + resolution: {integrity: 
sha512-eflK8wEtyOE6+hsaRVPxvUKYCpRgzLqDTb8krvAsRIwOGlHoSgYLgBXoubGgLd2fT41/OUYdb48v4k4WWHQurA==} engines: {node: '>= 10.13.0'} - dependencies: - '@types/json-schema': 7.0.11 - ajv: 6.12.6 - ajv-keywords: 3.5.2(ajv@6.12.6) - dev: true - /serialize-javascript@6.0.0: - resolution: {integrity: sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==} - dependencies: - randombytes: 2.1.0 - dev: true + serialize-javascript@6.0.2: + resolution: {integrity: sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==} - /shelljs@0.8.5: + shelljs@0.8.5: resolution: {integrity: sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==} engines: {node: '>=4'} hasBin: true - dependencies: - glob: 7.2.3 - interpret: 1.4.0 - rechoir: 0.6.2 - dev: true - /shx@0.3.4: + shx@0.3.4: resolution: {integrity: sha512-N6A9MLVqjxZYcVn8hLmtneQWIJtp8IKzMP4eMnx+nqkvXoqinUPCbUFLp2UcWTEIUONhlk0ewxr/jaVGlc+J+g==} engines: {node: '>=6'} hasBin: true - dependencies: - minimist: 1.2.6 - shelljs: 0.8.5 - dev: true - /source-map-support@0.5.21: + source-map-support@0.5.21: resolution: {integrity: sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==} - dependencies: - buffer-from: 1.1.2 - source-map: 0.6.1 - dev: true - /source-map@0.6.1: + source-map@0.6.1: resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} engines: {node: '>=0.10.0'} - dev: true - /sourcemap-codec@1.4.8: + sourcemap-codec@1.4.8: resolution: {integrity: sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==} - dev: true + deprecated: Please use @jridgewell/sourcemap-codec instead - /supports-color@8.1.1: + supports-color@8.1.1: resolution: {integrity: sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==} engines: {node: '>=10'} - 
dependencies: - has-flag: 4.0.0 - dev: true - /supports-preserve-symlinks-flag@1.0.0: + supports-preserve-symlinks-flag@1.0.0: resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: {node: '>= 0.4'} - dev: true - /tapable@2.2.1: - resolution: {integrity: sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==} + tapable@2.3.0: + resolution: {integrity: sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==} engines: {node: '>=6'} - dev: true - /terser-webpack-plugin@5.3.6(webpack@5.74.0): - resolution: {integrity: sha512-kfLFk+PoLUQIbLmB1+PZDMRSZS99Mp+/MHqDNmMA6tOItzRt+Npe3E+fsMs5mfcM0wCtrrdU387UnV+vnSffXQ==} + terser-webpack-plugin@5.3.14: + resolution: {integrity: sha512-vkZjpUjb6OMS7dhV+tILUW6BhpDR7P2L/aQSAv+Uwk+m8KATX9EccViHTJR2qDtACKPIYndLGCyl3FMo+r2LMw==} engines: {node: '>= 10.13.0'} peerDependencies: '@swc/core': '*' @@ -839,72 +527,42 @@ packages: optional: true uglify-js: optional: true - dependencies: - '@jridgewell/trace-mapping': 0.3.16 - jest-worker: 27.5.1 - schema-utils: 3.1.1 - serialize-javascript: 6.0.0 - terser: 5.15.1 - webpack: 5.74.0 - dev: true - - /terser@5.15.1: - resolution: {integrity: sha512-K1faMUvpm/FBxjBXud0LWVAGxmvoPbZbfTCYbSgaaYQaIXI3/TdI7a7ZGA73Zrou6Q8Zmz3oeUTsp/dj+ag2Xw==} + + terser@5.44.1: + resolution: {integrity: sha512-t/R3R/n0MSwnnazuPpPNVO60LX0SKL45pyl9YlvxIdkH0Of7D5qM2EVe+yASRIlY5pZ73nclYJfNANGWPwFDZw==} engines: {node: '>=10'} hasBin: true - dependencies: - '@jridgewell/source-map': 0.3.2 - acorn: 8.8.0 - commander: 2.20.3 - source-map-support: 0.5.21 - dev: true - /tslib@2.4.0: - resolution: {integrity: sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==} - dev: true + tslib@2.8.1: + resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} - /typescript@4.8.4: 
- resolution: {integrity: sha512-QCh+85mCy+h0IGff8r5XWzOVSbBO+KfeYrMQh7NJ58QujwcE22u+NUSmUxqF+un70P9GXKxa2HCNiTTMJknyjQ==} + typescript@4.9.5: + resolution: {integrity: sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==} engines: {node: '>=4.2.0'} hasBin: true - dev: true - /update-browserslist-db@1.0.10(browserslist@4.21.4): - resolution: {integrity: sha512-OztqDenkfFkbSG+tRxBeAnCVPckDBcvibKd35yDONx6OU8N7sqgwc7rCbkJ/WcYtVRZ4ba68d6byhC21GFh7sQ==} + undici-types@7.16.0: + resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==} + + update-browserslist-db@1.1.4: + resolution: {integrity: sha512-q0SPT4xyU84saUX+tomz1WLkxUbuaJnR1xWt17M7fJtEJigJeWUNGUqrauFXsHnqev9y9JTRGwk13tFBuKby4A==} hasBin: true peerDependencies: browserslist: '>= 4.21.0' - dependencies: - browserslist: 4.21.4 - escalade: 3.1.1 - picocolors: 1.0.0 - dev: true - /uri-js@4.4.1: - resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} - dependencies: - punycode: 2.1.1 - dev: true - - /url-join@4.0.1: + url-join@4.0.1: resolution: {integrity: sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==} - dev: true - /watchpack@2.4.0: - resolution: {integrity: sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==} + watchpack@2.4.4: + resolution: {integrity: sha512-c5EGNOiyxxV5qmTtAB7rbiXxi1ooX1pQKMLX/MIabJjRA0SJBQOjKF+KSVfHkr9U1cADPon0mRiVe/riyaiDUA==} engines: {node: '>=10.13.0'} - dependencies: - glob-to-regexp: 0.4.1 - graceful-fs: 4.2.10 - dev: true - /webpack-sources@3.2.3: - resolution: {integrity: sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==} + webpack-sources@3.3.3: + resolution: {integrity: sha512-yd1RBzSGanHkitROoPFd6qsrxt+oFhg/129YzheDGqeustzX0vTZJZsSsQjVQC4yzBQ56K55XU8gaNCtIzOnTg==} engines: 
{node: '>=10.13.0'} - dev: true - /webpack@5.74.0: - resolution: {integrity: sha512-A2InDwnhhGN4LYctJj6M1JEaGL7Luj6LOmyBHjcI8529cm5p6VXiTIW2sn6ffvEAKmveLzvu4jrihwXtPojlAA==} + webpack@5.102.1: + resolution: {integrity: sha512-7h/weGm9d/ywQ6qzJ+Xy+r9n/3qgp/thalBbpOi5i223dPXKi04IBtqPN9nTd+jBc7QKfvDbaBnFipYp4sJAUQ==} engines: {node: '>=10.13.0'} hasBin: true peerDependencies: @@ -912,37 +570,537 @@ packages: peerDependenciesMeta: webpack-cli: optional: true + + wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + +snapshots: + + '@jridgewell/gen-mapping@0.3.13': + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + '@jridgewell/trace-mapping': 0.3.31 + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/source-map@0.3.11': + dependencies: + '@jridgewell/gen-mapping': 0.3.13 + '@jridgewell/trace-mapping': 0.3.31 + + '@jridgewell/sourcemap-codec@1.5.5': {} + + '@jridgewell/trace-mapping@0.3.31': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.5 + + '@rollup/plugin-commonjs@21.1.0(rollup@2.79.2)': + dependencies: + '@rollup/pluginutils': 3.1.0(rollup@2.79.2) + commondir: 1.0.1 + estree-walker: 2.0.2 + glob: 7.2.3 + is-reference: 1.2.1 + magic-string: 0.25.9 + resolve: 1.22.11 + rollup: 2.79.2 + + '@rollup/plugin-json@4.1.0(rollup@2.79.2)': + dependencies: + '@rollup/pluginutils': 3.1.0(rollup@2.79.2) + rollup: 2.79.2 + + '@rollup/plugin-node-resolve@13.3.0(rollup@2.79.2)': + dependencies: + '@rollup/pluginutils': 3.1.0(rollup@2.79.2) + '@types/resolve': 1.17.1 + deepmerge: 4.3.1 + is-builtin-module: 3.2.1 + is-module: 1.0.0 + resolve: 1.22.11 + rollup: 2.79.2 + + '@rollup/plugin-replace@4.0.0(rollup@2.79.2)': + dependencies: + '@rollup/pluginutils': 3.1.0(rollup@2.79.2) + magic-string: 0.25.9 + rollup: 2.79.2 + + '@rollup/plugin-typescript@8.5.0(rollup@2.79.2)(tslib@2.8.1)(typescript@4.9.5)': + dependencies: + '@rollup/pluginutils': 
3.1.0(rollup@2.79.2) + resolve: 1.22.11 + rollup: 2.79.2 + typescript: 4.9.5 + optionalDependencies: + tslib: 2.8.1 + + '@rollup/pluginutils@3.1.0(rollup@2.79.2)': dependencies: - '@types/eslint-scope': 3.7.4 - '@types/estree': 0.0.51 - '@webassemblyjs/ast': 1.11.1 - '@webassemblyjs/wasm-edit': 1.11.1 - '@webassemblyjs/wasm-parser': 1.11.1 - acorn: 8.8.0 - acorn-import-assertions: 1.8.0(acorn@8.8.0) - browserslist: 4.21.4 - chrome-trace-event: 1.0.3 - enhanced-resolve: 5.10.0 - es-module-lexer: 0.9.3 + '@types/estree': 0.0.39 + estree-walker: 1.0.1 + picomatch: 2.3.1 + rollup: 2.79.2 + + '@types/eslint-scope@3.7.7': + dependencies: + '@types/eslint': 9.6.1 + '@types/estree': 1.0.8 + + '@types/eslint@9.6.1': + dependencies: + '@types/estree': 1.0.8 + '@types/json-schema': 7.0.15 + + '@types/estree@0.0.39': {} + + '@types/estree@1.0.8': {} + + '@types/json-schema@7.0.15': {} + + '@types/node@24.10.0': + dependencies: + undici-types: 7.16.0 + + '@types/prop-types@15.7.15': {} + + '@types/react@16.14.0': + dependencies: + '@types/prop-types': 15.7.15 + csstype: 3.1.3 + + '@types/resolve@1.17.1': + dependencies: + '@types/node': 24.10.0 + + '@types/webpack@5.28.5': + dependencies: + '@types/node': 24.10.0 + tapable: 2.3.0 + webpack: 5.102.1 + transitivePeerDependencies: + - '@swc/core' + - esbuild + - uglify-js + - webpack-cli + + '@webassemblyjs/ast@1.14.1': + dependencies: + '@webassemblyjs/helper-numbers': 1.13.2 + '@webassemblyjs/helper-wasm-bytecode': 1.13.2 + + '@webassemblyjs/floating-point-hex-parser@1.13.2': {} + + '@webassemblyjs/helper-api-error@1.13.2': {} + + '@webassemblyjs/helper-buffer@1.14.1': {} + + '@webassemblyjs/helper-numbers@1.13.2': + dependencies: + '@webassemblyjs/floating-point-hex-parser': 1.13.2 + '@webassemblyjs/helper-api-error': 1.13.2 + '@xtuc/long': 4.2.2 + + '@webassemblyjs/helper-wasm-bytecode@1.13.2': {} + + '@webassemblyjs/helper-wasm-section@1.14.1': + dependencies: + '@webassemblyjs/ast': 1.14.1 + '@webassemblyjs/helper-buffer': 
1.14.1 + '@webassemblyjs/helper-wasm-bytecode': 1.13.2 + '@webassemblyjs/wasm-gen': 1.14.1 + + '@webassemblyjs/ieee754@1.13.2': + dependencies: + '@xtuc/ieee754': 1.2.0 + + '@webassemblyjs/leb128@1.13.2': + dependencies: + '@xtuc/long': 4.2.2 + + '@webassemblyjs/utf8@1.13.2': {} + + '@webassemblyjs/wasm-edit@1.14.1': + dependencies: + '@webassemblyjs/ast': 1.14.1 + '@webassemblyjs/helper-buffer': 1.14.1 + '@webassemblyjs/helper-wasm-bytecode': 1.13.2 + '@webassemblyjs/helper-wasm-section': 1.14.1 + '@webassemblyjs/wasm-gen': 1.14.1 + '@webassemblyjs/wasm-opt': 1.14.1 + '@webassemblyjs/wasm-parser': 1.14.1 + '@webassemblyjs/wast-printer': 1.14.1 + + '@webassemblyjs/wasm-gen@1.14.1': + dependencies: + '@webassemblyjs/ast': 1.14.1 + '@webassemblyjs/helper-wasm-bytecode': 1.13.2 + '@webassemblyjs/ieee754': 1.13.2 + '@webassemblyjs/leb128': 1.13.2 + '@webassemblyjs/utf8': 1.13.2 + + '@webassemblyjs/wasm-opt@1.14.1': + dependencies: + '@webassemblyjs/ast': 1.14.1 + '@webassemblyjs/helper-buffer': 1.14.1 + '@webassemblyjs/wasm-gen': 1.14.1 + '@webassemblyjs/wasm-parser': 1.14.1 + + '@webassemblyjs/wasm-parser@1.14.1': + dependencies: + '@webassemblyjs/ast': 1.14.1 + '@webassemblyjs/helper-api-error': 1.13.2 + '@webassemblyjs/helper-wasm-bytecode': 1.13.2 + '@webassemblyjs/ieee754': 1.13.2 + '@webassemblyjs/leb128': 1.13.2 + '@webassemblyjs/utf8': 1.13.2 + + '@webassemblyjs/wast-printer@1.14.1': + dependencies: + '@webassemblyjs/ast': 1.14.1 + '@xtuc/long': 4.2.2 + + '@xtuc/ieee754@1.2.0': {} + + '@xtuc/long@4.2.2': {} + + acorn-import-phases@1.0.4(acorn@8.15.0): + dependencies: + acorn: 8.15.0 + + acorn@8.15.0: {} + + ajv-formats@2.1.1(ajv@8.17.1): + optionalDependencies: + ajv: 8.17.1 + + ajv-keywords@5.1.0(ajv@8.17.1): + dependencies: + ajv: 8.17.1 + fast-deep-equal: 3.1.3 + + ajv@8.17.1: + dependencies: + fast-deep-equal: 3.1.3 + fast-uri: 3.1.0 + json-schema-traverse: 1.0.0 + require-from-string: 2.0.2 + + balanced-match@1.0.2: {} + + baseline-browser-mapping@2.8.25: 
{} + + brace-expansion@1.1.12: + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + browserslist@4.27.0: + dependencies: + baseline-browser-mapping: 2.8.25 + caniuse-lite: 1.0.30001753 + electron-to-chromium: 1.5.245 + node-releases: 2.0.27 + update-browserslist-db: 1.1.4(browserslist@4.27.0) + + buffer-from@1.1.2: {} + + builtin-modules@3.3.0: {} + + caniuse-lite@1.0.30001753: {} + + chrome-trace-event@1.0.4: {} + + commander@2.20.3: {} + + commondir@1.0.1: {} + + concat-map@0.0.1: {} + + csstype@3.1.3: {} + + decky-frontend-lib@3.25.0: {} + + deepmerge@4.3.1: {} + + electron-to-chromium@1.5.245: {} + + enhanced-resolve@5.18.3: + dependencies: + graceful-fs: 4.2.11 + tapable: 2.3.0 + + es-module-lexer@1.7.0: {} + + escalade@3.2.0: {} + + eslint-scope@5.1.1: + dependencies: + esrecurse: 4.3.0 + estraverse: 4.3.0 + + esrecurse@4.3.0: + dependencies: + estraverse: 5.3.0 + + estraverse@4.3.0: {} + + estraverse@5.3.0: {} + + estree-walker@0.6.1: {} + + estree-walker@1.0.1: {} + + estree-walker@2.0.2: {} + + events@3.3.0: {} + + fast-deep-equal@3.1.3: {} + + fast-uri@3.1.0: {} + + fs.realpath@1.0.0: {} + + fsevents@2.3.3: + optional: true + + function-bind@1.1.2: {} + + glob-to-regexp@0.4.1: {} + + glob@7.2.3: + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + + graceful-fs@4.2.11: {} + + has-flag@4.0.0: {} + + hasown@2.0.2: + dependencies: + function-bind: 1.1.2 + + inflight@1.0.6: + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + + inherits@2.0.4: {} + + interpret@1.4.0: {} + + is-builtin-module@3.2.1: + dependencies: + builtin-modules: 3.3.0 + + is-core-module@2.16.1: + dependencies: + hasown: 2.0.2 + + is-module@1.0.0: {} + + is-reference@1.2.1: + dependencies: + '@types/estree': 1.0.8 + + jest-worker@27.5.1: + dependencies: + '@types/node': 24.10.0 + merge-stream: 2.0.0 + supports-color: 8.1.1 + + json-parse-even-better-errors@2.3.1: {} + + json-schema-traverse@1.0.0: {} + + 
loader-runner@4.3.1: {} + + magic-string@0.25.9: + dependencies: + sourcemap-codec: 1.4.8 + + merge-stream@2.0.0: {} + + mime-db@1.52.0: {} + + mime-types@2.1.35: + dependencies: + mime-db: 1.52.0 + + minimatch@3.1.2: + dependencies: + brace-expansion: 1.1.12 + + minimist@1.2.8: {} + + moment@2.30.1: {} + + neo-async@2.6.2: {} + + node-releases@2.0.27: {} + + once@1.4.0: + dependencies: + wrappy: 1.0.2 + + path-is-absolute@1.0.1: {} + + path-parse@1.0.7: {} + + picocolors@1.1.1: {} + + picomatch@2.3.1: {} + + randombytes@2.1.0: + dependencies: + safe-buffer: 5.2.1 + + react-icons@4.12.0(react@19.2.0): + dependencies: + react: 19.2.0 + + react@19.2.0: {} + + rechoir@0.6.2: + dependencies: + resolve: 1.22.11 + + require-from-string@2.0.2: {} + + resolve@1.22.11: + dependencies: + is-core-module: 2.16.1 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + + rollup-plugin-import-assets@1.1.1(rollup@2.79.2): + dependencies: + rollup: 2.79.2 + rollup-pluginutils: 2.8.2 + url-join: 4.0.1 + + rollup-pluginutils@2.8.2: + dependencies: + estree-walker: 0.6.1 + + rollup@2.79.2: + optionalDependencies: + fsevents: 2.3.3 + + safe-buffer@5.2.1: {} + + schema-utils@4.3.3: + dependencies: + '@types/json-schema': 7.0.15 + ajv: 8.17.1 + ajv-formats: 2.1.1(ajv@8.17.1) + ajv-keywords: 5.1.0(ajv@8.17.1) + + serialize-javascript@6.0.2: + dependencies: + randombytes: 2.1.0 + + shelljs@0.8.5: + dependencies: + glob: 7.2.3 + interpret: 1.4.0 + rechoir: 0.6.2 + + shx@0.3.4: + dependencies: + minimist: 1.2.8 + shelljs: 0.8.5 + + source-map-support@0.5.21: + dependencies: + buffer-from: 1.1.2 + source-map: 0.6.1 + + source-map@0.6.1: {} + + sourcemap-codec@1.4.8: {} + + supports-color@8.1.1: + dependencies: + has-flag: 4.0.0 + + supports-preserve-symlinks-flag@1.0.0: {} + + tapable@2.3.0: {} + + terser-webpack-plugin@5.3.14(webpack@5.102.1): + dependencies: + '@jridgewell/trace-mapping': 0.3.31 + jest-worker: 27.5.1 + schema-utils: 4.3.3 + serialize-javascript: 6.0.2 + terser: 
5.44.1 + webpack: 5.102.1 + + terser@5.44.1: + dependencies: + '@jridgewell/source-map': 0.3.11 + acorn: 8.15.0 + commander: 2.20.3 + source-map-support: 0.5.21 + + tslib@2.8.1: {} + + typescript@4.9.5: {} + + undici-types@7.16.0: {} + + update-browserslist-db@1.1.4(browserslist@4.27.0): + dependencies: + browserslist: 4.27.0 + escalade: 3.2.0 + picocolors: 1.1.1 + + url-join@4.0.1: {} + + watchpack@2.4.4: + dependencies: + glob-to-regexp: 0.4.1 + graceful-fs: 4.2.11 + + webpack-sources@3.3.3: {} + + webpack@5.102.1: + dependencies: + '@types/eslint-scope': 3.7.7 + '@types/estree': 1.0.8 + '@types/json-schema': 7.0.15 + '@webassemblyjs/ast': 1.14.1 + '@webassemblyjs/wasm-edit': 1.14.1 + '@webassemblyjs/wasm-parser': 1.14.1 + acorn: 8.15.0 + acorn-import-phases: 1.0.4(acorn@8.15.0) + browserslist: 4.27.0 + chrome-trace-event: 1.0.4 + enhanced-resolve: 5.18.3 + es-module-lexer: 1.7.0 eslint-scope: 5.1.1 events: 3.3.0 glob-to-regexp: 0.4.1 - graceful-fs: 4.2.10 + graceful-fs: 4.2.11 json-parse-even-better-errors: 2.3.1 - loader-runner: 4.3.0 + loader-runner: 4.3.1 mime-types: 2.1.35 neo-async: 2.6.2 - schema-utils: 3.1.1 - tapable: 2.2.1 - terser-webpack-plugin: 5.3.6(webpack@5.74.0) - watchpack: 2.4.0 - webpack-sources: 3.2.3 + schema-utils: 4.3.3 + tapable: 2.3.0 + terser-webpack-plugin: 5.3.14(webpack@5.102.1) + watchpack: 2.4.4 + webpack-sources: 3.3.3 transitivePeerDependencies: - '@swc/core' - esbuild - uglify-js - dev: true - /wrappy@1.0.2: - resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} - dev: true + wrappy@1.0.2: {} diff --git a/src/index.tsx b/src/index.tsx index 78a97de..13b532e 100755 --- a/src/index.tsx +++ b/src/index.tsx @@ -139,6 +139,14 @@ const Content: FC = () => { /> + + { saveSettings({ ...settings, randomize_sleep: (checked) ? 
'all' : '' }) }} + checked={settings.randomize_sleep == 'all'} + /> + + = ({ serverAPI, childr boot: '', suspend: '', throbber: '', - force_ipv4: false + force_ipv4: false, + randomize_sleep: '' }); // When the context is mounted we load the current config. diff --git a/src/types/animation.ts b/src/types/animation.ts index a85bce2..d7df912 100644 --- a/src/types/animation.ts +++ b/src/types/animation.ts @@ -24,6 +24,7 @@ export interface PluginSettings { suspend: String; throbber: String; force_ipv4: boolean; + randomize_sleep: String } export interface Animation {